grsecurity-2.2.2-2.6.32.45-201108162115.patch
1diff -urNp linux-2.6.32.45/arch/alpha/include/asm/elf.h linux-2.6.32.45/arch/alpha/include/asm/elf.h
2--- linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3+++ linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
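
The PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN values added above only declare how many low-order page bits PaX randomizes for the mmap and stack bases; the generic PaX code turns such a width into a byte offset roughly as sketched below (pax_rand_bits() is a stand-in for the real entropy helper, not a name from this patch).

/*
 * Sketch only: convert an N-bit randomization width into a page-aligned delta.
 */
static unsigned long pax_aslr_delta(unsigned int len_bits)
{
	return (pax_rand_bits() & ((1UL << len_bits) - 1)) << PAGE_SHIFT;
}

/* applied along the lines of:
 *   mm->delta_mmap  = pax_aslr_delta(PAX_DELTA_MMAP_LEN);
 *   mm->delta_stack = pax_aslr_delta(PAX_DELTA_STACK_LEN);
 */
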
18diff -urNp linux-2.6.32.45/arch/alpha/include/asm/pgtable.h linux-2.6.32.45/arch/alpha/include/asm/pgtable.h
19--- linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20+++ linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-2.6.32.45/arch/alpha/kernel/module.c linux-2.6.32.45/arch/alpha/kernel/module.c
40--- linux-2.6.32.45/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41+++ linux-2.6.32.45/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-2.6.32.45/arch/alpha/kernel/osf_sys.c linux-2.6.32.45/arch/alpha/kernel/osf_sys.c
52--- linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-08-09 18:35:28.000000000 -0400
53+++ linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
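
The hunks above replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap() and start the free-area search at mm->mmap_base (which PaX randomizes) rather than the fixed TASK_UNMAPPED_BASE. A minimal sketch of the kind of test such a helper performs follows; the helper name and the guard-gap size are assumptions, not the patch's exact policy.

/* sketch: the candidate range [addr, addr+len) must not run into the next
 * VMA, and must additionally keep a gap below a downward-growing stack VMA */
static int gap_check_sketch(const struct vm_area_struct *vma,
			    unsigned long addr, unsigned long len)
{
	const unsigned long guard = 64UL << PAGE_SHIFT;	/* assumed gap size */

	if (!vma)
		return 1;
	if (vma->vm_flags & VM_GROWSDOWN)
		return addr + len + guard <= vma->vm_start;
	return addr + len <= vma->vm_start;
}
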
85diff -urNp linux-2.6.32.45/arch/alpha/mm/fault.c linux-2.6.32.45/arch/alpha/mm/fault.c
86--- linux-2.6.32.45/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87+++ linux-2.6.32.45/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
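
The PLT-emulation code above repeatedly uses the pattern (x ^ 0x80000000UL) + 0x80000000UL after forcing the upper 32 bits of x to all ones; worked through with concrete displacements, this reproduces the sign-extended value that the hardware ldah/lda pair would compute.

/*
 * Worked example (illustrative).  After
 *     addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
 * the upper 32 bits of addrh are all ones and the low 32 bits hold disp << 16.
 *
 *   disp = 0x0010 (positive):
 *     addrh                = 0xFFFFFFFF00100000
 *     addrh ^ 0x80000000UL = 0xFFFFFFFF80100000
 *           + 0x80000000UL = 0x0000000000100000   (the carry clears the ones)
 *
 *   disp = 0xFFF0 (negative):
 *     addrh                = 0xFFFFFFFFFFF00000
 *     addrh ^ 0x80000000UL = 0xFFFFFFFF7FF00000
 *           + 0x80000000UL = 0xFFFFFFFFFFF00000   (no carry, the ones remain)
 */
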
244diff -urNp linux-2.6.32.45/arch/arm/include/asm/elf.h linux-2.6.32.45/arch/arm/include/asm/elf.h
245--- linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246+++ linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263diff -urNp linux-2.6.32.45/arch/arm/include/asm/kmap_types.h linux-2.6.32.45/arch/arm/include/asm/kmap_types.h
264--- linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265+++ linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266@@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270+ KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274diff -urNp linux-2.6.32.45/arch/arm/include/asm/uaccess.h linux-2.6.32.45/arch/arm/include/asm/uaccess.h
275--- linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276+++ linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
277@@ -22,6 +22,8 @@
278 #define VERIFY_READ 0
279 #define VERIFY_WRITE 1
280
281+extern void check_object_size(const void *ptr, unsigned long n, bool to);
282+
283 /*
284 * The exception table consists of pairs of addresses: the first is the
285 * address of an instruction that is allowed to fault, and the second is
286@@ -387,8 +389,23 @@ do { \
287
288
289 #ifdef CONFIG_MMU
290-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
291-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
292+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
293+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
294+
295+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
296+{
297+ if (!__builtin_constant_p(n))
298+ check_object_size(to, n, false);
299+ return ___copy_from_user(to, from, n);
300+}
301+
302+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
303+{
304+ if (!__builtin_constant_p(n))
305+ check_object_size(from, n, true);
306+ return ___copy_to_user(to, from, n);
307+}
308+
309 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
310 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
311 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
312@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
313
314 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
315 {
316+ if ((long)n < 0)
317+ return n;
318+
319 if (access_ok(VERIFY_READ, from, n))
320 n = __copy_from_user(to, from, n);
321 else /* security hole - plug it */
322@@ -412,6 +432,9 @@ static inline unsigned long __must_check
323
324 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
325 {
326+ if ((long)n < 0)
327+ return n;
328+
329 if (access_ok(VERIFY_WRITE, to, n))
330 n = __copy_to_user(to, from, n);
331 return n;
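
The wrappers above add two defenses to the ARM copy routines: check_object_size() validates the bounds of the kernel-side buffer when the length is not a compile-time constant, and the (long)n < 0 test rejects sizes that have gone "negative". The latter targets the classic signedness bug sketched below; the caller is illustrative and not taken from this patch.

/* Illustrative only: if a driver lets userspace supply len = -1, the implicit
 * conversion to unsigned long turns it into ~0UL.  Without the added
 * (long)n < 0 check, __copy_from_user() would be asked to copy that much;
 * with it, the call fails harmlessly and nothing is copied. */
static int example_ioctl_read(void *kbuf, const void __user *ubuf, int len)
{
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;
	return 0;
}
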
332diff -urNp linux-2.6.32.45/arch/arm/kernel/armksyms.c linux-2.6.32.45/arch/arm/kernel/armksyms.c
333--- linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
334+++ linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
335@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
336 #ifdef CONFIG_MMU
337 EXPORT_SYMBOL(copy_page);
338
339-EXPORT_SYMBOL(__copy_from_user);
340-EXPORT_SYMBOL(__copy_to_user);
341+EXPORT_SYMBOL(___copy_from_user);
342+EXPORT_SYMBOL(___copy_to_user);
343 EXPORT_SYMBOL(__clear_user);
344
345 EXPORT_SYMBOL(__get_user_1);
346diff -urNp linux-2.6.32.45/arch/arm/kernel/kgdb.c linux-2.6.32.45/arch/arm/kernel/kgdb.c
347--- linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
348+++ linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
349@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
350 * and we handle the normal undef case within the do_undefinstr
351 * handler.
352 */
353-struct kgdb_arch arch_kgdb_ops = {
354+const struct kgdb_arch arch_kgdb_ops = {
355 #ifndef __ARMEB__
356 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
357 #else /* ! __ARMEB__ */
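
This is the first of many "constification" hunks in the patch (arch_kgdb_ops here, and the platform_suspend_ops, dma_map_ops and sysfs_ops tables further down): a structure that holds only function pointers and is never legitimately modified at runtime is declared const so it ends up in read-only memory, removing an easy function-pointer-overwrite target. In isolation the pattern looks like the placeholder below (example_pm_enter is hypothetical).

static const struct platform_suspend_ops example_pm_ops = {
	/* with const, this table lives in .rodata; overwriting .enter at
	 * runtime faults instead of silently redirecting control flow */
	.valid = suspend_valid_only_mem,
	.enter = example_pm_enter,
};
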
358diff -urNp linux-2.6.32.45/arch/arm/kernel/traps.c linux-2.6.32.45/arch/arm/kernel/traps.c
359--- linux-2.6.32.45/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
360+++ linux-2.6.32.45/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
361@@ -247,6 +247,8 @@ static void __die(const char *str, int e
362
363 DEFINE_SPINLOCK(die_lock);
364
365+extern void gr_handle_kernel_exploit(void);
366+
367 /*
368 * This function is protected against re-entrancy.
369 */
370@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
371 if (panic_on_oops)
372 panic("Fatal exception");
373
374+ gr_handle_kernel_exploit();
375+
376 do_exit(SIGSEGV);
377 }
378
379diff -urNp linux-2.6.32.45/arch/arm/lib/copy_from_user.S linux-2.6.32.45/arch/arm/lib/copy_from_user.S
380--- linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
381+++ linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
382@@ -16,7 +16,7 @@
383 /*
384 * Prototype:
385 *
386- * size_t __copy_from_user(void *to, const void *from, size_t n)
387+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
388 *
389 * Purpose:
390 *
391@@ -84,11 +84,11 @@
392
393 .text
394
395-ENTRY(__copy_from_user)
396+ENTRY(___copy_from_user)
397
398 #include "copy_template.S"
399
400-ENDPROC(__copy_from_user)
401+ENDPROC(___copy_from_user)
402
403 .section .fixup,"ax"
404 .align 0
405diff -urNp linux-2.6.32.45/arch/arm/lib/copy_to_user.S linux-2.6.32.45/arch/arm/lib/copy_to_user.S
406--- linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
407+++ linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
408@@ -16,7 +16,7 @@
409 /*
410 * Prototype:
411 *
412- * size_t __copy_to_user(void *to, const void *from, size_t n)
413+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
414 *
415 * Purpose:
416 *
417@@ -88,11 +88,11 @@
418 .text
419
420 ENTRY(__copy_to_user_std)
421-WEAK(__copy_to_user)
422+WEAK(___copy_to_user)
423
424 #include "copy_template.S"
425
426-ENDPROC(__copy_to_user)
427+ENDPROC(___copy_to_user)
428
429 .section .fixup,"ax"
430 .align 0
431diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess.S linux-2.6.32.45/arch/arm/lib/uaccess.S
432--- linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
433+++ linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
434@@ -19,7 +19,7 @@
435
436 #define PAGE_SHIFT 12
437
438-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
439+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
440 * Purpose : copy a block to user memory from kernel memory
441 * Params : to - user memory
442 * : from - kernel memory
443@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
444 sub r2, r2, ip
445 b .Lc2u_dest_aligned
446
447-ENTRY(__copy_to_user)
448+ENTRY(___copy_to_user)
449 stmfd sp!, {r2, r4 - r7, lr}
450 cmp r2, #4
451 blt .Lc2u_not_enough
452@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
453 ldrgtb r3, [r1], #0
454 USER( strgtbt r3, [r0], #1) @ May fault
455 b .Lc2u_finished
456-ENDPROC(__copy_to_user)
457+ENDPROC(___copy_to_user)
458
459 .section .fixup,"ax"
460 .align 0
461 9001: ldmfd sp!, {r0, r4 - r7, pc}
462 .previous
463
464-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
465+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
466 * Purpose : copy a block from user memory to kernel memory
467 * Params : to - kernel memory
468 * : from - user memory
469@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
470 sub r2, r2, ip
471 b .Lcfu_dest_aligned
472
473-ENTRY(__copy_from_user)
474+ENTRY(___copy_from_user)
475 stmfd sp!, {r0, r2, r4 - r7, lr}
476 cmp r2, #4
477 blt .Lcfu_not_enough
478@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
479 USER( ldrgtbt r3, [r1], #1) @ May fault
480 strgtb r3, [r0], #1
481 b .Lcfu_finished
482-ENDPROC(__copy_from_user)
483+ENDPROC(___copy_from_user)
484
485 .section .fixup,"ax"
486 .align 0
487diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c
488--- linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
489+++ linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
490@@ -97,7 +97,7 @@ out:
491 }
492
493 unsigned long
494-__copy_to_user(void __user *to, const void *from, unsigned long n)
495+___copy_to_user(void __user *to, const void *from, unsigned long n)
496 {
497 /*
498 * This test is stubbed out of the main function above to keep
499diff -urNp linux-2.6.32.45/arch/arm/mach-at91/pm.c linux-2.6.32.45/arch/arm/mach-at91/pm.c
500--- linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
501+++ linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
502@@ -348,7 +348,7 @@ static void at91_pm_end(void)
503 }
504
505
506-static struct platform_suspend_ops at91_pm_ops ={
507+static const struct platform_suspend_ops at91_pm_ops ={
508 .valid = at91_pm_valid_state,
509 .begin = at91_pm_begin,
510 .enter = at91_pm_enter,
511diff -urNp linux-2.6.32.45/arch/arm/mach-omap1/pm.c linux-2.6.32.45/arch/arm/mach-omap1/pm.c
512--- linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
513+++ linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
514@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
515
516
517
518-static struct platform_suspend_ops omap_pm_ops ={
519+static const struct platform_suspend_ops omap_pm_ops ={
520 .prepare = omap_pm_prepare,
521 .enter = omap_pm_enter,
522 .finish = omap_pm_finish,
523diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c
524--- linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
525+++ linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
526@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
527 enable_hlt();
528 }
529
530-static struct platform_suspend_ops omap_pm_ops = {
531+static const struct platform_suspend_ops omap_pm_ops = {
532 .prepare = omap2_pm_prepare,
533 .enter = omap2_pm_enter,
534 .finish = omap2_pm_finish,
535diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c
536--- linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
537+++ linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
538@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
539 return;
540 }
541
542-static struct platform_suspend_ops omap_pm_ops = {
543+static const struct platform_suspend_ops omap_pm_ops = {
544 .begin = omap3_pm_begin,
545 .end = omap3_pm_end,
546 .prepare = omap3_pm_prepare,
547diff -urNp linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c
548--- linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
549+++ linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
550@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
551 (state == PM_SUSPEND_MEM);
552 }
553
554-static struct platform_suspend_ops pnx4008_pm_ops = {
555+static const struct platform_suspend_ops pnx4008_pm_ops = {
556 .enter = pnx4008_pm_enter,
557 .valid = pnx4008_pm_valid,
558 };
559diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/pm.c linux-2.6.32.45/arch/arm/mach-pxa/pm.c
560--- linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
561+++ linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
562@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
563 pxa_cpu_pm_fns->finish();
564 }
565
566-static struct platform_suspend_ops pxa_pm_ops = {
567+static const struct platform_suspend_ops pxa_pm_ops = {
568 .valid = pxa_pm_valid,
569 .enter = pxa_pm_enter,
570 .prepare = pxa_pm_prepare,
571diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c
572--- linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
573+++ linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
574@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
575 }
576
577 #ifdef CONFIG_PM
578-static struct platform_suspend_ops sharpsl_pm_ops = {
579+static const struct platform_suspend_ops sharpsl_pm_ops = {
580 .prepare = pxa_pm_prepare,
581 .finish = pxa_pm_finish,
582 .enter = corgi_pxa_pm_enter,
583diff -urNp linux-2.6.32.45/arch/arm/mach-sa1100/pm.c linux-2.6.32.45/arch/arm/mach-sa1100/pm.c
584--- linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
585+++ linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
586@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
587 return virt_to_phys(sp);
588 }
589
590-static struct platform_suspend_ops sa11x0_pm_ops = {
591+static const struct platform_suspend_ops sa11x0_pm_ops = {
592 .enter = sa11x0_pm_enter,
593 .valid = suspend_valid_only_mem,
594 };
595diff -urNp linux-2.6.32.45/arch/arm/mm/fault.c linux-2.6.32.45/arch/arm/mm/fault.c
596--- linux-2.6.32.45/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
597+++ linux-2.6.32.45/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
598@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
599 }
600 #endif
601
602+#ifdef CONFIG_PAX_PAGEEXEC
603+ if (fsr & FSR_LNX_PF) {
604+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
605+ do_group_exit(SIGKILL);
606+ }
607+#endif
608+
609 tsk->thread.address = addr;
610 tsk->thread.error_code = fsr;
611 tsk->thread.trap_no = 14;
612@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
613 }
614 #endif /* CONFIG_MMU */
615
616+#ifdef CONFIG_PAX_PAGEEXEC
617+void pax_report_insns(void *pc, void *sp)
618+{
619+ long i;
620+
621+ printk(KERN_ERR "PAX: bytes at PC: ");
622+ for (i = 0; i < 20; i++) {
623+ unsigned char c;
624+ if (get_user(c, (__force unsigned char __user *)pc+i))
625+ printk(KERN_CONT "?? ");
626+ else
627+ printk(KERN_CONT "%02x ", c);
628+ }
629+ printk("\n");
630+
631+ printk(KERN_ERR "PAX: bytes at SP-4: ");
632+ for (i = -1; i < 20; i++) {
633+ unsigned long c;
634+ if (get_user(c, (__force unsigned long __user *)sp+i))
635+ printk(KERN_CONT "???????? ");
636+ else
637+ printk(KERN_CONT "%08lx ", c);
638+ }
639+ printk("\n");
640+}
641+#endif
642+
643 /*
644 * First Level Translation Fault Handler
645 *
646diff -urNp linux-2.6.32.45/arch/arm/mm/mmap.c linux-2.6.32.45/arch/arm/mm/mmap.c
647--- linux-2.6.32.45/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
648+++ linux-2.6.32.45/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
649@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
650 if (len > TASK_SIZE)
651 return -ENOMEM;
652
653+#ifdef CONFIG_PAX_RANDMMAP
654+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
655+#endif
656+
657 if (addr) {
658 if (do_align)
659 addr = COLOUR_ALIGN(addr, pgoff);
660@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
661 addr = PAGE_ALIGN(addr);
662
663 vma = find_vma(mm, addr);
664- if (TASK_SIZE - len >= addr &&
665- (!vma || addr + len <= vma->vm_start))
666+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
667 return addr;
668 }
669 if (len > mm->cached_hole_size) {
670- start_addr = addr = mm->free_area_cache;
671+ start_addr = addr = mm->free_area_cache;
672 } else {
673- start_addr = addr = TASK_UNMAPPED_BASE;
674- mm->cached_hole_size = 0;
675+ start_addr = addr = mm->mmap_base;
676+ mm->cached_hole_size = 0;
677 }
678
679 full_search:
680@@ -94,14 +97,14 @@ full_search:
681 * Start a new search - just in case we missed
682 * some holes.
683 */
684- if (start_addr != TASK_UNMAPPED_BASE) {
685- start_addr = addr = TASK_UNMAPPED_BASE;
686+ if (start_addr != mm->mmap_base) {
687+ start_addr = addr = mm->mmap_base;
688 mm->cached_hole_size = 0;
689 goto full_search;
690 }
691 return -ENOMEM;
692 }
693- if (!vma || addr + len <= vma->vm_start) {
694+ if (check_heap_stack_gap(vma, addr, len)) {
695 /*
696 * Remember the place where we stopped the search:
697 */
698diff -urNp linux-2.6.32.45/arch/arm/plat-s3c/pm.c linux-2.6.32.45/arch/arm/plat-s3c/pm.c
699--- linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
700+++ linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
701@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
702 s3c_pm_check_cleanup();
703 }
704
705-static struct platform_suspend_ops s3c_pm_ops = {
706+static const struct platform_suspend_ops s3c_pm_ops = {
707 .enter = s3c_pm_enter,
708 .prepare = s3c_pm_prepare,
709 .finish = s3c_pm_finish,
710diff -urNp linux-2.6.32.45/arch/avr32/include/asm/elf.h linux-2.6.32.45/arch/avr32/include/asm/elf.h
711--- linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
712+++ linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
713@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719
720+#ifdef CONFIG_PAX_ASLR
721+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
722+
723+#define PAX_DELTA_MMAP_LEN 15
724+#define PAX_DELTA_STACK_LEN 15
725+#endif
726
727 /* This yields a mask that user programs can use to figure out what
728 instruction set this CPU supports. This could be done in user space,
729diff -urNp linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h
730--- linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
731+++ linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
732@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
733 D(11) KM_IRQ1,
734 D(12) KM_SOFTIRQ0,
735 D(13) KM_SOFTIRQ1,
736-D(14) KM_TYPE_NR
737+D(14) KM_CLEARPAGE,
738+D(15) KM_TYPE_NR
739 };
740
741 #undef D
742diff -urNp linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c
743--- linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
744+++ linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
745@@ -176,7 +176,7 @@ out:
746 return 0;
747 }
748
749-static struct platform_suspend_ops avr32_pm_ops = {
750+static const struct platform_suspend_ops avr32_pm_ops = {
751 .valid = avr32_pm_valid_state,
752 .enter = avr32_pm_enter,
753 };
754diff -urNp linux-2.6.32.45/arch/avr32/mm/fault.c linux-2.6.32.45/arch/avr32/mm/fault.c
755--- linux-2.6.32.45/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
756+++ linux-2.6.32.45/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
757@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
758
759 int exception_trace = 1;
760
761+#ifdef CONFIG_PAX_PAGEEXEC
762+void pax_report_insns(void *pc, void *sp)
763+{
764+ unsigned long i;
765+
766+ printk(KERN_ERR "PAX: bytes at PC: ");
767+ for (i = 0; i < 20; i++) {
768+ unsigned char c;
769+ if (get_user(c, (unsigned char *)pc+i))
770+ printk(KERN_CONT "???????? ");
771+ else
772+ printk(KERN_CONT "%02x ", c);
773+ }
774+ printk("\n");
775+}
776+#endif
777+
778 /*
779 * This routine handles page faults. It determines the address and the
780 * problem, and then passes it off to one of the appropriate routines.
781@@ -157,6 +174,16 @@ bad_area:
782 up_read(&mm->mmap_sem);
783
784 if (user_mode(regs)) {
785+
786+#ifdef CONFIG_PAX_PAGEEXEC
787+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
788+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
789+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
790+ do_group_exit(SIGKILL);
791+ }
792+ }
793+#endif
794+
795 if (exception_trace && printk_ratelimit())
796 printk("%s%s[%d]: segfault at %08lx pc %08lx "
797 "sp %08lx ecr %lu\n",
798diff -urNp linux-2.6.32.45/arch/blackfin/kernel/kgdb.c linux-2.6.32.45/arch/blackfin/kernel/kgdb.c
799--- linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
800+++ linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
801@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
802 return -1; /* this means that we do not want to exit from the handler */
803 }
804
805-struct kgdb_arch arch_kgdb_ops = {
806+const struct kgdb_arch arch_kgdb_ops = {
807 .gdb_bpt_instr = {0xa1},
808 #ifdef CONFIG_SMP
809 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
810diff -urNp linux-2.6.32.45/arch/blackfin/mach-common/pm.c linux-2.6.32.45/arch/blackfin/mach-common/pm.c
811--- linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
812+++ linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
813@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
814 return 0;
815 }
816
817-struct platform_suspend_ops bfin_pm_ops = {
818+const struct platform_suspend_ops bfin_pm_ops = {
819 .enter = bfin_pm_enter,
820 .valid = bfin_pm_valid,
821 };
822diff -urNp linux-2.6.32.45/arch/frv/include/asm/kmap_types.h linux-2.6.32.45/arch/frv/include/asm/kmap_types.h
823--- linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
824+++ linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
825@@ -23,6 +23,7 @@ enum km_type {
826 KM_IRQ1,
827 KM_SOFTIRQ0,
828 KM_SOFTIRQ1,
829+ KM_CLEARPAGE,
830 KM_TYPE_NR
831 };
832
833diff -urNp linux-2.6.32.45/arch/frv/mm/elf-fdpic.c linux-2.6.32.45/arch/frv/mm/elf-fdpic.c
834--- linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
835+++ linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
836@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
837 if (addr) {
838 addr = PAGE_ALIGN(addr);
839 vma = find_vma(current->mm, addr);
840- if (TASK_SIZE - len >= addr &&
841- (!vma || addr + len <= vma->vm_start))
842+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
843 goto success;
844 }
845
846@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
847 for (; vma; vma = vma->vm_next) {
848 if (addr > limit)
849 break;
850- if (addr + len <= vma->vm_start)
851+ if (check_heap_stack_gap(vma, addr, len))
852 goto success;
853 addr = vma->vm_end;
854 }
855@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
856 for (; vma; vma = vma->vm_next) {
857 if (addr > limit)
858 break;
859- if (addr + len <= vma->vm_start)
860+ if (check_heap_stack_gap(vma, addr, len))
861 goto success;
862 addr = vma->vm_end;
863 }
864diff -urNp linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c
865--- linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
866+++ linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
867@@ -17,7 +17,7 @@
868 #include <linux/swiotlb.h>
869 #include <asm/machvec.h>
870
871-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
872+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
873
874 /* swiotlb declarations & definitions: */
875 extern int swiotlb_late_init_with_default_size (size_t size);
876@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
877 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
878 }
879
880-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
881+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
882 {
883 if (use_swiotlb(dev))
884 return &swiotlb_dma_ops;
885diff -urNp linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c
886--- linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
887+++ linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
888@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
889 },
890 };
891
892-extern struct dma_map_ops swiotlb_dma_ops;
893+extern const struct dma_map_ops swiotlb_dma_ops;
894
895 static int __init
896 sba_init(void)
897@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
898
899 __setup("sbapagesize=",sba_page_override);
900
901-struct dma_map_ops sba_dma_ops = {
902+const struct dma_map_ops sba_dma_ops = {
903 .alloc_coherent = sba_alloc_coherent,
904 .free_coherent = sba_free_coherent,
905 .map_page = sba_map_page,
906diff -urNp linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c
907--- linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
908+++ linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
909@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
910
911 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
912
913+#ifdef CONFIG_PAX_ASLR
914+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
915+
916+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
917+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
918+#endif
919+
920 /* Ugly but avoids duplication */
921 #include "../../../fs/binfmt_elf.c"
922
923diff -urNp linux-2.6.32.45/arch/ia64/ia32/ia32priv.h linux-2.6.32.45/arch/ia64/ia32/ia32priv.h
924--- linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
925+++ linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
926@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
927 #define ELF_DATA ELFDATA2LSB
928 #define ELF_ARCH EM_386
929
930-#define IA32_STACK_TOP IA32_PAGE_OFFSET
931+#ifdef CONFIG_PAX_RANDUSTACK
932+#define __IA32_DELTA_STACK (current->mm->delta_stack)
933+#else
934+#define __IA32_DELTA_STACK 0UL
935+#endif
936+
937+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
938+
939 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
940 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
941
942diff -urNp linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h
943--- linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
944+++ linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
945@@ -12,7 +12,7 @@
946
947 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
948
949-extern struct dma_map_ops *dma_ops;
950+extern const struct dma_map_ops *dma_ops;
951 extern struct ia64_machine_vector ia64_mv;
952 extern void set_iommu_machvec(void);
953
954@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
955 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
956 dma_addr_t *daddr, gfp_t gfp)
957 {
958- struct dma_map_ops *ops = platform_dma_get_ops(dev);
959+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
960 void *caddr;
961
962 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
963@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
964 static inline void dma_free_coherent(struct device *dev, size_t size,
965 void *caddr, dma_addr_t daddr)
966 {
967- struct dma_map_ops *ops = platform_dma_get_ops(dev);
968+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
969 debug_dma_free_coherent(dev, size, caddr, daddr);
970 ops->free_coherent(dev, size, caddr, daddr);
971 }
972@@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
973
974 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
975 {
976- struct dma_map_ops *ops = platform_dma_get_ops(dev);
977+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
978 return ops->mapping_error(dev, daddr);
979 }
980
981 static inline int dma_supported(struct device *dev, u64 mask)
982 {
983- struct dma_map_ops *ops = platform_dma_get_ops(dev);
984+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
985 return ops->dma_supported(dev, mask);
986 }
987
988diff -urNp linux-2.6.32.45/arch/ia64/include/asm/elf.h linux-2.6.32.45/arch/ia64/include/asm/elf.h
989--- linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
990+++ linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
991@@ -43,6 +43,13 @@
992 */
993 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
994
995+#ifdef CONFIG_PAX_ASLR
996+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
997+
998+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
999+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1000+#endif
1001+
1002 #define PT_IA_64_UNWIND 0x70000001
1003
1004 /* IA-64 relocations: */
1005diff -urNp linux-2.6.32.45/arch/ia64/include/asm/machvec.h linux-2.6.32.45/arch/ia64/include/asm/machvec.h
1006--- linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1007+++ linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1008@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1009 /* DMA-mapping interface: */
1010 typedef void ia64_mv_dma_init (void);
1011 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1012-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1013+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1014
1015 /*
1016 * WARNING: The legacy I/O space is _architected_. Platforms are
1017@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1018 # endif /* CONFIG_IA64_GENERIC */
1019
1020 extern void swiotlb_dma_init(void);
1021-extern struct dma_map_ops *dma_get_ops(struct device *);
1022+extern const struct dma_map_ops *dma_get_ops(struct device *);
1023
1024 /*
1025 * Define default versions so we can extend machvec for new platforms without having
1026diff -urNp linux-2.6.32.45/arch/ia64/include/asm/pgtable.h linux-2.6.32.45/arch/ia64/include/asm/pgtable.h
1027--- linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1028+++ linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1029@@ -12,7 +12,7 @@
1030 * David Mosberger-Tang <davidm@hpl.hp.com>
1031 */
1032
1033-
1034+#include <linux/const.h>
1035 #include <asm/mman.h>
1036 #include <asm/page.h>
1037 #include <asm/processor.h>
1038@@ -143,6 +143,17 @@
1039 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1040 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1041 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1042+
1043+#ifdef CONFIG_PAX_PAGEEXEC
1044+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1045+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1046+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1047+#else
1048+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1049+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1050+# define PAGE_COPY_NOEXEC PAGE_COPY
1051+#endif
1052+
1053 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1054 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1055 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1056diff -urNp linux-2.6.32.45/arch/ia64/include/asm/spinlock.h linux-2.6.32.45/arch/ia64/include/asm/spinlock.h
1057--- linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1058+++ linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1059@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1060 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1061
1062 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1063- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1064+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1065 }
1066
1067 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1068diff -urNp linux-2.6.32.45/arch/ia64/include/asm/uaccess.h linux-2.6.32.45/arch/ia64/include/asm/uaccess.h
1069--- linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1070+++ linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1071@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1072 const void *__cu_from = (from); \
1073 long __cu_len = (n); \
1074 \
1075- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1076+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1077 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1078 __cu_len; \
1079 })
1080@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1081 long __cu_len = (n); \
1082 \
1083 __chk_user_ptr(__cu_from); \
1084- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1085+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1086 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1087 __cu_len; \
1088 })
1089diff -urNp linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c
1090--- linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1091+++ linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1092@@ -3,7 +3,7 @@
1093 /* Set this to 1 if there is a HW IOMMU in the system */
1094 int iommu_detected __read_mostly;
1095
1096-struct dma_map_ops *dma_ops;
1097+const struct dma_map_ops *dma_ops;
1098 EXPORT_SYMBOL(dma_ops);
1099
1100 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1101@@ -16,7 +16,7 @@ static int __init dma_init(void)
1102 }
1103 fs_initcall(dma_init);
1104
1105-struct dma_map_ops *dma_get_ops(struct device *dev)
1106+const struct dma_map_ops *dma_get_ops(struct device *dev)
1107 {
1108 return dma_ops;
1109 }
1110diff -urNp linux-2.6.32.45/arch/ia64/kernel/module.c linux-2.6.32.45/arch/ia64/kernel/module.c
1111--- linux-2.6.32.45/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1112+++ linux-2.6.32.45/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1113@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1114 void
1115 module_free (struct module *mod, void *module_region)
1116 {
1117- if (mod && mod->arch.init_unw_table &&
1118- module_region == mod->module_init) {
1119+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1120 unw_remove_unwind_table(mod->arch.init_unw_table);
1121 mod->arch.init_unw_table = NULL;
1122 }
1123@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1124 }
1125
1126 static inline int
1127+in_init_rx (const struct module *mod, uint64_t addr)
1128+{
1129+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1130+}
1131+
1132+static inline int
1133+in_init_rw (const struct module *mod, uint64_t addr)
1134+{
1135+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1136+}
1137+
1138+static inline int
1139 in_init (const struct module *mod, uint64_t addr)
1140 {
1141- return addr - (uint64_t) mod->module_init < mod->init_size;
1142+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1143+}
1144+
1145+static inline int
1146+in_core_rx (const struct module *mod, uint64_t addr)
1147+{
1148+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1149+}
1150+
1151+static inline int
1152+in_core_rw (const struct module *mod, uint64_t addr)
1153+{
1154+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1155 }
1156
1157 static inline int
1158 in_core (const struct module *mod, uint64_t addr)
1159 {
1160- return addr - (uint64_t) mod->module_core < mod->core_size;
1161+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1162 }
1163
1164 static inline int
1165@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1166 break;
1167
1168 case RV_BDREL:
1169- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1170+ if (in_init_rx(mod, val))
1171+ val -= (uint64_t) mod->module_init_rx;
1172+ else if (in_init_rw(mod, val))
1173+ val -= (uint64_t) mod->module_init_rw;
1174+ else if (in_core_rx(mod, val))
1175+ val -= (uint64_t) mod->module_core_rx;
1176+ else if (in_core_rw(mod, val))
1177+ val -= (uint64_t) mod->module_core_rw;
1178 break;
1179
1180 case RV_LTV:
1181@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1182 * addresses have been selected...
1183 */
1184 uint64_t gp;
1185- if (mod->core_size > MAX_LTOFF)
1186+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1187 /*
1188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1189 * at the end of the module.
1190 */
1191- gp = mod->core_size - MAX_LTOFF / 2;
1192+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1193 else
1194- gp = mod->core_size / 2;
1195- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1196+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1197+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1198 mod->arch.gp = gp;
1199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1200 }
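
Under PaX the module loader keeps separate executable (module_core_rx / core_size_rx) and writable (module_core_rw / core_size_rw) regions, so each "does this address belong to the module?" test above becomes a pair of range checks. Every individual check uses the usual unsigned-wraparound idiom, sketched here with placeholder names.

/* sketch: addr - base underflows to a huge value when addr < base, so one
 * unsigned compare covers both "below the base" and "past the end" */
static inline int in_range_sketch(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr - base < size;
}

/* in_core(mod, addr) then reads as
 *   in_range_sketch(addr, (uint64_t)mod->module_core_rx, mod->core_size_rx) ||
 *   in_range_sketch(addr, (uint64_t)mod->module_core_rw, mod->core_size_rw)
 */
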
1201diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-dma.c linux-2.6.32.45/arch/ia64/kernel/pci-dma.c
1202--- linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1203+++ linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1204@@ -43,7 +43,7 @@ struct device fallback_dev = {
1205 .dma_mask = &fallback_dev.coherent_dma_mask,
1206 };
1207
1208-extern struct dma_map_ops intel_dma_ops;
1209+extern const struct dma_map_ops intel_dma_ops;
1210
1211 static int __init pci_iommu_init(void)
1212 {
1213@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1214 }
1215 EXPORT_SYMBOL(iommu_dma_supported);
1216
1217+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1218+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1219+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1220+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1221+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1222+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1223+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1224+
1225+static const struct dma_map_ops intel_iommu_dma_ops = {
1226+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1227+ .alloc_coherent = intel_alloc_coherent,
1228+ .free_coherent = intel_free_coherent,
1229+ .map_sg = intel_map_sg,
1230+ .unmap_sg = intel_unmap_sg,
1231+ .map_page = intel_map_page,
1232+ .unmap_page = intel_unmap_page,
1233+ .mapping_error = intel_mapping_error,
1234+
1235+ .sync_single_for_cpu = machvec_dma_sync_single,
1236+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1237+ .sync_single_for_device = machvec_dma_sync_single,
1238+ .sync_sg_for_device = machvec_dma_sync_sg,
1239+ .dma_supported = iommu_dma_supported,
1240+};
1241+
1242 void __init pci_iommu_alloc(void)
1243 {
1244- dma_ops = &intel_dma_ops;
1245-
1246- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1247- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1248- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1249- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1250- dma_ops->dma_supported = iommu_dma_supported;
1251+ dma_ops = &intel_iommu_dma_ops;
1252
1253 /*
1254 * The order of these functions is important for
1255diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c
1256--- linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1257+++ linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1258@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1259 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1260 }
1261
1262-struct dma_map_ops swiotlb_dma_ops = {
1263+const struct dma_map_ops swiotlb_dma_ops = {
1264 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1265 .free_coherent = swiotlb_free_coherent,
1266 .map_page = swiotlb_map_page,
1267diff -urNp linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c
1268--- linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1269+++ linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1270@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1271 if (REGION_NUMBER(addr) == RGN_HPAGE)
1272 addr = 0;
1273 #endif
1274+
1275+#ifdef CONFIG_PAX_RANDMMAP
1276+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1277+ addr = mm->free_area_cache;
1278+ else
1279+#endif
1280+
1281 if (!addr)
1282 addr = mm->free_area_cache;
1283
1284@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1285 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1286 /* At this point: (!vma || addr < vma->vm_end). */
1287 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1288- if (start_addr != TASK_UNMAPPED_BASE) {
1289+ if (start_addr != mm->mmap_base) {
1290 /* Start a new search --- just in case we missed some holes. */
1291- addr = TASK_UNMAPPED_BASE;
1292+ addr = mm->mmap_base;
1293 goto full_search;
1294 }
1295 return -ENOMEM;
1296 }
1297- if (!vma || addr + len <= vma->vm_start) {
1298+ if (check_heap_stack_gap(vma, addr, len)) {
1299 /* Remember the address where we stopped this search: */
1300 mm->free_area_cache = addr + len;
1301 return addr;
1302diff -urNp linux-2.6.32.45/arch/ia64/kernel/topology.c linux-2.6.32.45/arch/ia64/kernel/topology.c
1303--- linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1304+++ linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1305@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1306 return ret;
1307 }
1308
1309-static struct sysfs_ops cache_sysfs_ops = {
1310+static const struct sysfs_ops cache_sysfs_ops = {
1311 .show = cache_show
1312 };
1313
1314diff -urNp linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S
1315--- linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1316+++ linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1317@@ -190,7 +190,7 @@ SECTIONS
1318 /* Per-cpu data: */
1319 . = ALIGN(PERCPU_PAGE_SIZE);
1320 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1321- __phys_per_cpu_start = __per_cpu_load;
1322+ __phys_per_cpu_start = per_cpu_load;
1323 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1324 * into percpu page size
1325 */
1326diff -urNp linux-2.6.32.45/arch/ia64/mm/fault.c linux-2.6.32.45/arch/ia64/mm/fault.c
1327--- linux-2.6.32.45/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1328+++ linux-2.6.32.45/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1329@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1330 return pte_present(pte);
1331 }
1332
1333+#ifdef CONFIG_PAX_PAGEEXEC
1334+void pax_report_insns(void *pc, void *sp)
1335+{
1336+ unsigned long i;
1337+
1338+ printk(KERN_ERR "PAX: bytes at PC: ");
1339+ for (i = 0; i < 8; i++) {
1340+ unsigned int c;
1341+ if (get_user(c, (unsigned int *)pc+i))
1342+ printk(KERN_CONT "???????? ");
1343+ else
1344+ printk(KERN_CONT "%08x ", c);
1345+ }
1346+ printk("\n");
1347+}
1348+#endif
1349+
1350 void __kprobes
1351 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1352 {
1353@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1354 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1355 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1356
1357- if ((vma->vm_flags & mask) != mask)
1358+ if ((vma->vm_flags & mask) != mask) {
1359+
1360+#ifdef CONFIG_PAX_PAGEEXEC
1361+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1362+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1363+ goto bad_area;
1364+
1365+ up_read(&mm->mmap_sem);
1366+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1367+ do_group_exit(SIGKILL);
1368+ }
1369+#endif
1370+
1371 goto bad_area;
1372
1373+ }
1374+
1375 survive:
1376 /*
1377 * If for any reason at all we couldn't handle the fault, make
1378diff -urNp linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c
1379--- linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1380+++ linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1381@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1382 /* At this point: (!vmm || addr < vmm->vm_end). */
1383 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1384 return -ENOMEM;
1385- if (!vmm || (addr + len) <= vmm->vm_start)
1386+ if (check_heap_stack_gap(vmm, addr, len))
1387 return addr;
1388 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1389 }
1390diff -urNp linux-2.6.32.45/arch/ia64/mm/init.c linux-2.6.32.45/arch/ia64/mm/init.c
1391--- linux-2.6.32.45/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1392+++ linux-2.6.32.45/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1393@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1394 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1395 vma->vm_end = vma->vm_start + PAGE_SIZE;
1396 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1397+
1398+#ifdef CONFIG_PAX_PAGEEXEC
1399+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1400+ vma->vm_flags &= ~VM_EXEC;
1401+
1402+#ifdef CONFIG_PAX_MPROTECT
1403+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1404+ vma->vm_flags &= ~VM_MAYEXEC;
1405+#endif
1406+
1407+ }
1408+#endif
1409+
1410 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1411 down_write(&current->mm->mmap_sem);
1412 if (insert_vm_struct(current->mm, vma)) {
1413diff -urNp linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c
1414--- linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1415+++ linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1416@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1417 return ret;
1418 }
1419
1420-static struct dma_map_ops sn_dma_ops = {
1421+static const struct dma_map_ops sn_dma_ops = {
1422 .alloc_coherent = sn_dma_alloc_coherent,
1423 .free_coherent = sn_dma_free_coherent,
1424 .map_page = sn_dma_map_page,
1425diff -urNp linux-2.6.32.45/arch/m32r/lib/usercopy.c linux-2.6.32.45/arch/m32r/lib/usercopy.c
1426--- linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1427+++ linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1428@@ -14,6 +14,9 @@
1429 unsigned long
1430 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1431 {
1432+ if ((long)n < 0)
1433+ return n;
1434+
1435 prefetch(from);
1436 if (access_ok(VERIFY_WRITE, to, n))
1437 __copy_user(to,from,n);
1438@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1439 unsigned long
1440 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1441 {
1442+ if ((long)n < 0)
1443+ return n;
1444+
1445 prefetchw(to);
1446 if (access_ok(VERIFY_READ, from, n))
1447 __copy_user_zeroing(to,from,n);
1448diff -urNp linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c
1449--- linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1450+++ linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1451@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1452
1453 }
1454
1455-static struct platform_suspend_ops db1x_pm_ops = {
1456+static const struct platform_suspend_ops db1x_pm_ops = {
1457 .valid = suspend_valid_only_mem,
1458 .begin = db1x_pm_begin,
1459 .enter = db1x_pm_enter,
1460diff -urNp linux-2.6.32.45/arch/mips/include/asm/elf.h linux-2.6.32.45/arch/mips/include/asm/elf.h
1461--- linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1462+++ linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1463@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1464 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1465 #endif
1466
1467+#ifdef CONFIG_PAX_ASLR
1468+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1469+
1470+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1471+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1472+#endif
1473+
1474 #endif /* _ASM_ELF_H */
1475diff -urNp linux-2.6.32.45/arch/mips/include/asm/page.h linux-2.6.32.45/arch/mips/include/asm/page.h
1476--- linux-2.6.32.45/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1477+++ linux-2.6.32.45/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1478@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1479 #ifdef CONFIG_CPU_MIPS32
1480 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1481 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1482- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1483+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1484 #else
1485 typedef struct { unsigned long long pte; } pte_t;
1486 #define pte_val(x) ((x).pte)
1487diff -urNp linux-2.6.32.45/arch/mips/include/asm/system.h linux-2.6.32.45/arch/mips/include/asm/system.h
1488--- linux-2.6.32.45/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1489+++ linux-2.6.32.45/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1490@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1491 */
1492 #define __ARCH_WANT_UNLOCKED_CTXSW
1493
1494-extern unsigned long arch_align_stack(unsigned long sp);
1495+#define arch_align_stack(x) ((x) & ~0xfUL)
1496
1497 #endif /* _ASM_SYSTEM_H */
1498diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c
1499--- linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1500+++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1501@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1502 #undef ELF_ET_DYN_BASE
1503 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1504
1505+#ifdef CONFIG_PAX_ASLR
1506+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1507+
1508+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1509+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1510+#endif
1511+
1512 #include <asm/processor.h>
1513 #include <linux/module.h>
1514 #include <linux/elfcore.h>
1515diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c
1516--- linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1517+++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1518@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1519 #undef ELF_ET_DYN_BASE
1520 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1521
1522+#ifdef CONFIG_PAX_ASLR
1523+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1524+
1525+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1526+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1527+#endif
1528+
1529 #include <asm/processor.h>
1530
1531 /*
1532diff -urNp linux-2.6.32.45/arch/mips/kernel/kgdb.c linux-2.6.32.45/arch/mips/kernel/kgdb.c
1533--- linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1534+++ linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1535@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1536 return -1;
1537 }
1538
1539+/* cannot be const */
1540 struct kgdb_arch arch_kgdb_ops;
1541
1542 /*
1543diff -urNp linux-2.6.32.45/arch/mips/kernel/process.c linux-2.6.32.45/arch/mips/kernel/process.c
1544--- linux-2.6.32.45/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1545+++ linux-2.6.32.45/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1546@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1547 out:
1548 return pc;
1549 }
1550-
1551-/*
1552- * Don't forget that the stack pointer must be aligned on a 8 bytes
1553- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1554- */
1555-unsigned long arch_align_stack(unsigned long sp)
1556-{
1557- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1558- sp -= get_random_int() & ~PAGE_MASK;
1559-
1560- return sp & ALMASK;
1561-}
1562diff -urNp linux-2.6.32.45/arch/mips/kernel/syscall.c linux-2.6.32.45/arch/mips/kernel/syscall.c
1563--- linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1564+++ linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1565@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1566 do_color_align = 0;
1567 if (filp || (flags & MAP_SHARED))
1568 do_color_align = 1;
1569+
1570+#ifdef CONFIG_PAX_RANDMMAP
1571+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1572+#endif
1573+
1574 if (addr) {
1575 if (do_color_align)
1576 addr = COLOUR_ALIGN(addr, pgoff);
1577 else
1578 addr = PAGE_ALIGN(addr);
1579 vmm = find_vma(current->mm, addr);
1580- if (task_size - len >= addr &&
1581- (!vmm || addr + len <= vmm->vm_start))
1582+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1583 return addr;
1584 }
1585- addr = TASK_UNMAPPED_BASE;
1586+ addr = current->mm->mmap_base;
1587 if (do_color_align)
1588 addr = COLOUR_ALIGN(addr, pgoff);
1589 else
1590@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1591 /* At this point: (!vmm || addr < vmm->vm_end). */
1592 if (task_size - len < addr)
1593 return -ENOMEM;
1594- if (!vmm || addr + len <= vmm->vm_start)
1595+ if (check_heap_stack_gap(vmm, addr, len))
1596 return addr;
1597 addr = vmm->vm_end;
1598 if (do_color_align)
1599diff -urNp linux-2.6.32.45/arch/mips/mm/fault.c linux-2.6.32.45/arch/mips/mm/fault.c
1600--- linux-2.6.32.45/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1601+++ linux-2.6.32.45/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1602@@ -26,6 +26,23 @@
1603 #include <asm/ptrace.h>
1604 #include <asm/highmem.h> /* For VMALLOC_END */
1605
1606+#ifdef CONFIG_PAX_PAGEEXEC
1607+void pax_report_insns(void *pc, void *sp)
1608+{
1609+ unsigned long i;
1610+
1611+ printk(KERN_ERR "PAX: bytes at PC: ");
1612+ for (i = 0; i < 5; i++) {
1613+ unsigned int c;
1614+ if (get_user(c, (unsigned int *)pc+i))
1615+ printk(KERN_CONT "???????? ");
1616+ else
1617+ printk(KERN_CONT "%08x ", c);
1618+ }
1619+ printk("\n");
1620+}
1621+#endif
1622+
1623 /*
1624 * This routine handles page faults. It determines the address,
1625 * and the problem, and then passes it off to one of the appropriate
1626diff -urNp linux-2.6.32.45/arch/parisc/include/asm/elf.h linux-2.6.32.45/arch/parisc/include/asm/elf.h
1627--- linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1628+++ linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1629@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1630
1631 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1632
1633+#ifdef CONFIG_PAX_ASLR
1634+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1635+
1636+#define PAX_DELTA_MMAP_LEN 16
1637+#define PAX_DELTA_STACK_LEN 16
1638+#endif
1639+
1640 /* This yields a mask that user programs can use to figure out what
1641 instruction set this CPU supports. This could be done in user space,
1642 but it's not easy, and we've already done it here. */
1643diff -urNp linux-2.6.32.45/arch/parisc/include/asm/pgtable.h linux-2.6.32.45/arch/parisc/include/asm/pgtable.h
1644--- linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1645+++ linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1646@@ -207,6 +207,17 @@
1647 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1648 #define PAGE_COPY PAGE_EXECREAD
1649 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1650+
1651+#ifdef CONFIG_PAX_PAGEEXEC
1652+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1653+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1654+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1655+#else
1656+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1657+# define PAGE_COPY_NOEXEC PAGE_COPY
1658+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1659+#endif
1660+
1661 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1662 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1663 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1664diff -urNp linux-2.6.32.45/arch/parisc/kernel/module.c linux-2.6.32.45/arch/parisc/kernel/module.c
1665--- linux-2.6.32.45/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1666+++ linux-2.6.32.45/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1667@@ -95,16 +95,38 @@
1668
1669 /* three functions to determine where in the module core
1670 * or init pieces the location is */
1671+static inline int in_init_rx(struct module *me, void *loc)
1672+{
1673+ return (loc >= me->module_init_rx &&
1674+ loc < (me->module_init_rx + me->init_size_rx));
1675+}
1676+
1677+static inline int in_init_rw(struct module *me, void *loc)
1678+{
1679+ return (loc >= me->module_init_rw &&
1680+ loc < (me->module_init_rw + me->init_size_rw));
1681+}
1682+
1683 static inline int in_init(struct module *me, void *loc)
1684 {
1685- return (loc >= me->module_init &&
1686- loc <= (me->module_init + me->init_size));
1687+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1688+}
1689+
1690+static inline int in_core_rx(struct module *me, void *loc)
1691+{
1692+ return (loc >= me->module_core_rx &&
1693+ loc < (me->module_core_rx + me->core_size_rx));
1694+}
1695+
1696+static inline int in_core_rw(struct module *me, void *loc)
1697+{
1698+ return (loc >= me->module_core_rw &&
1699+ loc < (me->module_core_rw + me->core_size_rw));
1700 }
1701
1702 static inline int in_core(struct module *me, void *loc)
1703 {
1704- return (loc >= me->module_core &&
1705- loc <= (me->module_core + me->core_size));
1706+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1707 }
1708
1709 static inline int in_local(struct module *me, void *loc)
1710@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1711 }
1712
1713 /* align things a bit */
1714- me->core_size = ALIGN(me->core_size, 16);
1715- me->arch.got_offset = me->core_size;
1716- me->core_size += gots * sizeof(struct got_entry);
1717-
1718- me->core_size = ALIGN(me->core_size, 16);
1719- me->arch.fdesc_offset = me->core_size;
1720- me->core_size += fdescs * sizeof(Elf_Fdesc);
1721+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1722+ me->arch.got_offset = me->core_size_rw;
1723+ me->core_size_rw += gots * sizeof(struct got_entry);
1724+
1725+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1726+ me->arch.fdesc_offset = me->core_size_rw;
1727+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1728
1729 me->arch.got_max = gots;
1730 me->arch.fdesc_max = fdescs;
1731@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1732
1733 BUG_ON(value == 0);
1734
1735- got = me->module_core + me->arch.got_offset;
1736+ got = me->module_core_rw + me->arch.got_offset;
1737 for (i = 0; got[i].addr; i++)
1738 if (got[i].addr == value)
1739 goto out;
1740@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1741 #ifdef CONFIG_64BIT
1742 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1743 {
1744- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1745+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1746
1747 if (!value) {
1748 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1749@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1750
1751 /* Create new one */
1752 fdesc->addr = value;
1753- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1754+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1755 return (Elf_Addr)fdesc;
1756 }
1757 #endif /* CONFIG_64BIT */
1758@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1759
1760 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1761 end = table + sechdrs[me->arch.unwind_section].sh_size;
1762- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1763+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1764
1765 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1766 me->arch.unwind_section, table, end, gp);
1767diff -urNp linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c
1768--- linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1769+++ linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1770@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1771 /* At this point: (!vma || addr < vma->vm_end). */
1772 if (TASK_SIZE - len < addr)
1773 return -ENOMEM;
1774- if (!vma || addr + len <= vma->vm_start)
1775+ if (check_heap_stack_gap(vma, addr, len))
1776 return addr;
1777 addr = vma->vm_end;
1778 }
1779@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1780 /* At this point: (!vma || addr < vma->vm_end). */
1781 if (TASK_SIZE - len < addr)
1782 return -ENOMEM;
1783- if (!vma || addr + len <= vma->vm_start)
1784+ if (check_heap_stack_gap(vma, addr, len))
1785 return addr;
1786 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1787 if (addr < vma->vm_end) /* handle wraparound */
1788@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1789 if (flags & MAP_FIXED)
1790 return addr;
1791 if (!addr)
1792- addr = TASK_UNMAPPED_BASE;
1793+ addr = current->mm->mmap_base;
1794
1795 if (filp) {
1796 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1797diff -urNp linux-2.6.32.45/arch/parisc/kernel/traps.c linux-2.6.32.45/arch/parisc/kernel/traps.c
1798--- linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1799+++ linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1800@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1801
1802 down_read(&current->mm->mmap_sem);
1803 vma = find_vma(current->mm,regs->iaoq[0]);
1804- if (vma && (regs->iaoq[0] >= vma->vm_start)
1805- && (vma->vm_flags & VM_EXEC)) {
1806-
1807+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1808 fault_address = regs->iaoq[0];
1809 fault_space = regs->iasq[0];
1810
1811diff -urNp linux-2.6.32.45/arch/parisc/mm/fault.c linux-2.6.32.45/arch/parisc/mm/fault.c
1812--- linux-2.6.32.45/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1813+++ linux-2.6.32.45/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1814@@ -15,6 +15,7 @@
1815 #include <linux/sched.h>
1816 #include <linux/interrupt.h>
1817 #include <linux/module.h>
1818+#include <linux/unistd.h>
1819
1820 #include <asm/uaccess.h>
1821 #include <asm/traps.h>
1822@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1823 static unsigned long
1824 parisc_acctyp(unsigned long code, unsigned int inst)
1825 {
1826- if (code == 6 || code == 16)
1827+ if (code == 6 || code == 7 || code == 16)
1828 return VM_EXEC;
1829
1830 switch (inst & 0xf0000000) {
1831@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1832 }
1833 #endif
1834
1835+#ifdef CONFIG_PAX_PAGEEXEC
1836+/*
1837+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1838+ *
1839+ * returns 1 when task should be killed
1840+ * 2 when rt_sigreturn trampoline was detected
1841+ * 3 when unpatched PLT trampoline was detected
1842+ */
1843+static int pax_handle_fetch_fault(struct pt_regs *regs)
1844+{
1845+
1846+#ifdef CONFIG_PAX_EMUPLT
1847+ int err;
1848+
1849+ do { /* PaX: unpatched PLT emulation */
1850+ unsigned int bl, depwi;
1851+
1852+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1853+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1854+
1855+ if (err)
1856+ break;
1857+
1858+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1859+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1860+
1861+ err = get_user(ldw, (unsigned int *)addr);
1862+ err |= get_user(bv, (unsigned int *)(addr+4));
1863+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1864+
1865+ if (err)
1866+ break;
1867+
1868+ if (ldw == 0x0E801096U &&
1869+ bv == 0xEAC0C000U &&
1870+ ldw2 == 0x0E881095U)
1871+ {
1872+ unsigned int resolver, map;
1873+
1874+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1875+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1876+ if (err)
1877+ break;
1878+
1879+ regs->gr[20] = instruction_pointer(regs)+8;
1880+ regs->gr[21] = map;
1881+ regs->gr[22] = resolver;
1882+ regs->iaoq[0] = resolver | 3UL;
1883+ regs->iaoq[1] = regs->iaoq[0] + 4;
1884+ return 3;
1885+ }
1886+ }
1887+ } while (0);
1888+#endif
1889+
1890+#ifdef CONFIG_PAX_EMUTRAMP
1891+
1892+#ifndef CONFIG_PAX_EMUSIGRT
1893+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1894+ return 1;
1895+#endif
1896+
1897+ do { /* PaX: rt_sigreturn emulation */
1898+ unsigned int ldi1, ldi2, bel, nop;
1899+
1900+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1901+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1902+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1903+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1904+
1905+ if (err)
1906+ break;
1907+
1908+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1909+ ldi2 == 0x3414015AU &&
1910+ bel == 0xE4008200U &&
1911+ nop == 0x08000240U)
1912+ {
1913+ regs->gr[25] = (ldi1 & 2) >> 1;
1914+ regs->gr[20] = __NR_rt_sigreturn;
1915+ regs->gr[31] = regs->iaoq[1] + 16;
1916+ regs->sr[0] = regs->iasq[1];
1917+ regs->iaoq[0] = 0x100UL;
1918+ regs->iaoq[1] = regs->iaoq[0] + 4;
1919+ regs->iasq[0] = regs->sr[2];
1920+ regs->iasq[1] = regs->sr[2];
1921+ return 2;
1922+ }
1923+ } while (0);
1924+#endif
1925+
1926+ return 1;
1927+}
1928+
1929+void pax_report_insns(void *pc, void *sp)
1930+{
1931+ unsigned long i;
1932+
1933+ printk(KERN_ERR "PAX: bytes at PC: ");
1934+ for (i = 0; i < 5; i++) {
1935+ unsigned int c;
1936+ if (get_user(c, (unsigned int *)pc+i))
1937+ printk(KERN_CONT "???????? ");
1938+ else
1939+ printk(KERN_CONT "%08x ", c);
1940+ }
1941+ printk("\n");
1942+}
1943+#endif
1944+
1945 int fixup_exception(struct pt_regs *regs)
1946 {
1947 const struct exception_table_entry *fix;
1948@@ -192,8 +303,33 @@ good_area:
1949
1950 acc_type = parisc_acctyp(code,regs->iir);
1951
1952- if ((vma->vm_flags & acc_type) != acc_type)
1953+ if ((vma->vm_flags & acc_type) != acc_type) {
1954+
1955+#ifdef CONFIG_PAX_PAGEEXEC
1956+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1957+ (address & ~3UL) == instruction_pointer(regs))
1958+ {
1959+ up_read(&mm->mmap_sem);
1960+ switch (pax_handle_fetch_fault(regs)) {
1961+
1962+#ifdef CONFIG_PAX_EMUPLT
1963+ case 3:
1964+ return;
1965+#endif
1966+
1967+#ifdef CONFIG_PAX_EMUTRAMP
1968+ case 2:
1969+ return;
1970+#endif
1971+
1972+ }
1973+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1974+ do_group_exit(SIGKILL);
1975+ }
1976+#endif
1977+
1978 goto bad_area;
1979+ }
1980
1981 /*
1982 * If for any reason at all we couldn't handle the fault, make
1983diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/device.h linux-2.6.32.45/arch/powerpc/include/asm/device.h
1984--- linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
1985+++ linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
1986@@ -14,7 +14,7 @@ struct dev_archdata {
1987 struct device_node *of_node;
1988
1989 /* DMA operations on that device */
1990- struct dma_map_ops *dma_ops;
1991+ const struct dma_map_ops *dma_ops;
1992
1993 /*
1994 * When an iommu is in use, dma_data is used as a ptr to the base of the
1995diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h
1996--- linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
1997+++ linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
1998@@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
1999 #ifdef CONFIG_PPC64
2000 extern struct dma_map_ops dma_iommu_ops;
2001 #endif
2002-extern struct dma_map_ops dma_direct_ops;
2003+extern const struct dma_map_ops dma_direct_ops;
2004
2005-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2006+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2007 {
2008 /* We don't handle the NULL dev case for ISA for now. We could
2009 * do it via an out of line call but it is not needed for now. The
2010@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2011 return dev->archdata.dma_ops;
2012 }
2013
2014-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2015+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2016 {
2017 dev->archdata.dma_ops = ops;
2018 }
2019@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2020
2021 static inline int dma_supported(struct device *dev, u64 mask)
2022 {
2023- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2024+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2025
2026 if (unlikely(dma_ops == NULL))
2027 return 0;
2028@@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2029
2030 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2031 {
2032- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2033+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2034
2035 if (unlikely(dma_ops == NULL))
2036 return -EIO;
2037@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2038 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2039 dma_addr_t *dma_handle, gfp_t flag)
2040 {
2041- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2042+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2043 void *cpu_addr;
2044
2045 BUG_ON(!dma_ops);
2046@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2047 static inline void dma_free_coherent(struct device *dev, size_t size,
2048 void *cpu_addr, dma_addr_t dma_handle)
2049 {
2050- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2051+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2052
2053 BUG_ON(!dma_ops);
2054
2055@@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2056
2057 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2058 {
2059- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2060+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2061
2062 if (dma_ops->mapping_error)
2063 return dma_ops->mapping_error(dev, dma_addr);
2064diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/elf.h linux-2.6.32.45/arch/powerpc/include/asm/elf.h
2065--- linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2066+++ linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2067@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2068 the loader. We need to make sure that it is out of the way of the program
2069 that it will "exec", and that there is sufficient room for the brk. */
2070
2071-extern unsigned long randomize_et_dyn(unsigned long base);
2072-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2073+#define ELF_ET_DYN_BASE (0x20000000)
2074+
2075+#ifdef CONFIG_PAX_ASLR
2076+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2077+
2078+#ifdef __powerpc64__
2079+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2080+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2081+#else
2082+#define PAX_DELTA_MMAP_LEN 15
2083+#define PAX_DELTA_STACK_LEN 15
2084+#endif
2085+#endif
2086
2087 /*
2088 * Our registers are always unsigned longs, whether we're a 32 bit
2089@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2090 (0x7ff >> (PAGE_SHIFT - 12)) : \
2091 (0x3ffff >> (PAGE_SHIFT - 12)))
2092
2093-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2094-#define arch_randomize_brk arch_randomize_brk
2095-
2096 #endif /* __KERNEL__ */
2097
2098 /*
2099diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/iommu.h linux-2.6.32.45/arch/powerpc/include/asm/iommu.h
2100--- linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2101+++ linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2102@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2103 extern void iommu_init_early_dart(void);
2104 extern void iommu_init_early_pasemi(void);
2105
2106+/* dma-iommu.c */
2107+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2108+
2109 #ifdef CONFIG_PCI
2110 extern void pci_iommu_init(void);
2111 extern void pci_direct_iommu_init(void);
2112diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h
2113--- linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2114+++ linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2115@@ -26,6 +26,7 @@ enum km_type {
2116 KM_SOFTIRQ1,
2117 KM_PPC_SYNC_PAGE,
2118 KM_PPC_SYNC_ICACHE,
2119+ KM_CLEARPAGE,
2120 KM_TYPE_NR
2121 };
2122
2123diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page_64.h linux-2.6.32.45/arch/powerpc/include/asm/page_64.h
2124--- linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2125+++ linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2126@@ -180,15 +180,18 @@ do { \
2127 * stack by default, so in the absense of a PT_GNU_STACK program header
2128 * we turn execute permission off.
2129 */
2130-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2131- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2132+#define VM_STACK_DEFAULT_FLAGS32 \
2133+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2134+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2135
2136 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2137 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2138
2139+#ifndef CONFIG_PAX_PAGEEXEC
2140 #define VM_STACK_DEFAULT_FLAGS \
2141 (test_thread_flag(TIF_32BIT) ? \
2142 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2143+#endif
2144
2145 #include <asm-generic/getorder.h>
2146
2147diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page.h linux-2.6.32.45/arch/powerpc/include/asm/page.h
2148--- linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2149+++ linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2150@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2151 * and needs to be executable. This means the whole heap ends
2152 * up being executable.
2153 */
2154-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2155- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156+#define VM_DATA_DEFAULT_FLAGS32 \
2157+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2158+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159
2160 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2161 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2163 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2164 #endif
2165
2166+#define ktla_ktva(addr) (addr)
2167+#define ktva_ktla(addr) (addr)
2168+
2169 #ifndef __ASSEMBLY__
2170
2171 #undef STRICT_MM_TYPECHECKS
2172diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pci.h linux-2.6.32.45/arch/powerpc/include/asm/pci.h
2173--- linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2174+++ linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2175@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2176 }
2177
2178 #ifdef CONFIG_PCI
2179-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2180-extern struct dma_map_ops *get_pci_dma_ops(void);
2181+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2182+extern const struct dma_map_ops *get_pci_dma_ops(void);
2183 #else /* CONFIG_PCI */
2184 #define set_pci_dma_ops(d)
2185 #define get_pci_dma_ops() NULL
2186diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h
2187--- linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2188+++ linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2189@@ -2,6 +2,7 @@
2190 #define _ASM_POWERPC_PGTABLE_H
2191 #ifdef __KERNEL__
2192
2193+#include <linux/const.h>
2194 #ifndef __ASSEMBLY__
2195 #include <asm/processor.h> /* For TASK_SIZE */
2196 #include <asm/mmu.h>
2197diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h
2198--- linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2199+++ linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2200@@ -21,6 +21,7 @@
2201 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2202 #define _PAGE_USER 0x004 /* usermode access allowed */
2203 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2204+#define _PAGE_EXEC _PAGE_GUARDED
2205 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2206 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2207 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2208diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/reg.h linux-2.6.32.45/arch/powerpc/include/asm/reg.h
2209--- linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2210+++ linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2211@@ -191,6 +191,7 @@
2212 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2213 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2214 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2215+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2216 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2217 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2218 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2219diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h
2220--- linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2221+++ linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2222@@ -13,7 +13,7 @@
2223
2224 #include <linux/swiotlb.h>
2225
2226-extern struct dma_map_ops swiotlb_dma_ops;
2227+extern const struct dma_map_ops swiotlb_dma_ops;
2228
2229 static inline void dma_mark_clean(void *addr, size_t size) {}
2230
2231diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/system.h linux-2.6.32.45/arch/powerpc/include/asm/system.h
2232--- linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2233+++ linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2234@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2235 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2236 #endif
2237
2238-extern unsigned long arch_align_stack(unsigned long sp);
2239+#define arch_align_stack(x) ((x) & ~0xfUL)
2240
2241 /* Used in very early kernel initialization. */
2242 extern unsigned long reloc_offset(void);
2243diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h
2244--- linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2245+++ linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2246@@ -13,6 +13,8 @@
2247 #define VERIFY_READ 0
2248 #define VERIFY_WRITE 1
2249
2250+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2251+
2252 /*
2253 * The fs value determines whether argument validity checking should be
2254 * performed or not. If get_fs() == USER_DS, checking is performed, with
2255@@ -327,52 +329,6 @@ do { \
2256 extern unsigned long __copy_tofrom_user(void __user *to,
2257 const void __user *from, unsigned long size);
2258
2259-#ifndef __powerpc64__
2260-
2261-static inline unsigned long copy_from_user(void *to,
2262- const void __user *from, unsigned long n)
2263-{
2264- unsigned long over;
2265-
2266- if (access_ok(VERIFY_READ, from, n))
2267- return __copy_tofrom_user((__force void __user *)to, from, n);
2268- if ((unsigned long)from < TASK_SIZE) {
2269- over = (unsigned long)from + n - TASK_SIZE;
2270- return __copy_tofrom_user((__force void __user *)to, from,
2271- n - over) + over;
2272- }
2273- return n;
2274-}
2275-
2276-static inline unsigned long copy_to_user(void __user *to,
2277- const void *from, unsigned long n)
2278-{
2279- unsigned long over;
2280-
2281- if (access_ok(VERIFY_WRITE, to, n))
2282- return __copy_tofrom_user(to, (__force void __user *)from, n);
2283- if ((unsigned long)to < TASK_SIZE) {
2284- over = (unsigned long)to + n - TASK_SIZE;
2285- return __copy_tofrom_user(to, (__force void __user *)from,
2286- n - over) + over;
2287- }
2288- return n;
2289-}
2290-
2291-#else /* __powerpc64__ */
2292-
2293-#define __copy_in_user(to, from, size) \
2294- __copy_tofrom_user((to), (from), (size))
2295-
2296-extern unsigned long copy_from_user(void *to, const void __user *from,
2297- unsigned long n);
2298-extern unsigned long copy_to_user(void __user *to, const void *from,
2299- unsigned long n);
2300-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2301- unsigned long n);
2302-
2303-#endif /* __powerpc64__ */
2304-
2305 static inline unsigned long __copy_from_user_inatomic(void *to,
2306 const void __user *from, unsigned long n)
2307 {
2308@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2309 if (ret == 0)
2310 return 0;
2311 }
2312+
2313+ if (!__builtin_constant_p(n))
2314+ check_object_size(to, n, false);
2315+
2316 return __copy_tofrom_user((__force void __user *)to, from, n);
2317 }
2318
2319@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2320 if (ret == 0)
2321 return 0;
2322 }
2323+
2324+ if (!__builtin_constant_p(n))
2325+ check_object_size(from, n, true);
2326+
2327 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2328 }
2329
2330@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2331 return __copy_to_user_inatomic(to, from, size);
2332 }
2333
2334+#ifndef __powerpc64__
2335+
2336+static inline unsigned long __must_check copy_from_user(void *to,
2337+ const void __user *from, unsigned long n)
2338+{
2339+ unsigned long over;
2340+
2341+ if ((long)n < 0)
2342+ return n;
2343+
2344+ if (access_ok(VERIFY_READ, from, n)) {
2345+ if (!__builtin_constant_p(n))
2346+ check_object_size(to, n, false);
2347+ return __copy_tofrom_user((__force void __user *)to, from, n);
2348+ }
2349+ if ((unsigned long)from < TASK_SIZE) {
2350+ over = (unsigned long)from + n - TASK_SIZE;
2351+ if (!__builtin_constant_p(n - over))
2352+ check_object_size(to, n - over, false);
2353+ return __copy_tofrom_user((__force void __user *)to, from,
2354+ n - over) + over;
2355+ }
2356+ return n;
2357+}
2358+
2359+static inline unsigned long __must_check copy_to_user(void __user *to,
2360+ const void *from, unsigned long n)
2361+{
2362+ unsigned long over;
2363+
2364+ if ((long)n < 0)
2365+ return n;
2366+
2367+ if (access_ok(VERIFY_WRITE, to, n)) {
2368+ if (!__builtin_constant_p(n))
2369+ check_object_size(from, n, true);
2370+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2371+ }
2372+ if ((unsigned long)to < TASK_SIZE) {
2373+ over = (unsigned long)to + n - TASK_SIZE;
2374+ if (!__builtin_constant_p(n))
2375+ check_object_size(from, n - over, true);
2376+ return __copy_tofrom_user(to, (__force void __user *)from,
2377+ n - over) + over;
2378+ }
2379+ return n;
2380+}
2381+
2382+#else /* __powerpc64__ */
2383+
2384+#define __copy_in_user(to, from, size) \
2385+ __copy_tofrom_user((to), (from), (size))
2386+
2387+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2388+{
2389+ if ((long)n < 0 || n > INT_MAX)
2390+ return n;
2391+
2392+ if (!__builtin_constant_p(n))
2393+ check_object_size(to, n, false);
2394+
2395+ if (likely(access_ok(VERIFY_READ, from, n)))
2396+ n = __copy_from_user(to, from, n);
2397+ else
2398+ memset(to, 0, n);
2399+ return n;
2400+}
2401+
2402+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2403+{
2404+ if ((long)n < 0 || n > INT_MAX)
2405+ return n;
2406+
2407+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2408+ if (!__builtin_constant_p(n))
2409+ check_object_size(from, n, true);
2410+ n = __copy_to_user(to, from, n);
2411+ }
2412+ return n;
2413+}
2414+
2415+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2416+ unsigned long n);
2417+
2418+#endif /* __powerpc64__ */
2419+
2420 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2421
2422 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2423diff -urNp linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c
2424--- linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2425+++ linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2426@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2427 &cache_assoc_attr,
2428 };
2429
2430-static struct sysfs_ops cache_index_ops = {
2431+static const struct sysfs_ops cache_index_ops = {
2432 .show = cache_index_show,
2433 };
2434
2435diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma.c linux-2.6.32.45/arch/powerpc/kernel/dma.c
2436--- linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2437+++ linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2438@@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2439 }
2440 #endif
2441
2442-struct dma_map_ops dma_direct_ops = {
2443+const struct dma_map_ops dma_direct_ops = {
2444 .alloc_coherent = dma_direct_alloc_coherent,
2445 .free_coherent = dma_direct_free_coherent,
2446 .map_sg = dma_direct_map_sg,
2447diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c
2448--- linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2449+++ linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2450@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2451 }
2452
2453 /* We support DMA to/from any memory page via the iommu */
2454-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2455+int dma_iommu_dma_supported(struct device *dev, u64 mask)
2456 {
2457 struct iommu_table *tbl = get_iommu_table_base(dev);
2458
2459diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c
2460--- linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2461+++ linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2462@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2463 * map_page, and unmap_page on highmem, use normal dma_ops
2464 * for everything else.
2465 */
2466-struct dma_map_ops swiotlb_dma_ops = {
2467+const struct dma_map_ops swiotlb_dma_ops = {
2468 .alloc_coherent = dma_direct_alloc_coherent,
2469 .free_coherent = dma_direct_free_coherent,
2470 .map_sg = swiotlb_map_sg_attrs,
2471diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S
2472--- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2473+++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2474@@ -455,6 +455,7 @@ storage_fault_common:
2475 std r14,_DAR(r1)
2476 std r15,_DSISR(r1)
2477 addi r3,r1,STACK_FRAME_OVERHEAD
2478+ bl .save_nvgprs
2479 mr r4,r14
2480 mr r5,r15
2481 ld r14,PACA_EXGEN+EX_R14(r13)
2482@@ -464,8 +465,7 @@ storage_fault_common:
2483 cmpdi r3,0
2484 bne- 1f
2485 b .ret_from_except_lite
2486-1: bl .save_nvgprs
2487- mr r5,r3
2488+1: mr r5,r3
2489 addi r3,r1,STACK_FRAME_OVERHEAD
2490 ld r4,_DAR(r1)
2491 bl .bad_page_fault
2492diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S
2493--- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2494+++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2495@@ -818,10 +818,10 @@ handle_page_fault:
2496 11: ld r4,_DAR(r1)
2497 ld r5,_DSISR(r1)
2498 addi r3,r1,STACK_FRAME_OVERHEAD
2499+ bl .save_nvgprs
2500 bl .do_page_fault
2501 cmpdi r3,0
2502 beq+ 13f
2503- bl .save_nvgprs
2504 mr r5,r3
2505 addi r3,r1,STACK_FRAME_OVERHEAD
2506 lwz r4,_DAR(r1)
2507diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c
2508--- linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2509+++ linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2510@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2511 return 1;
2512 }
2513
2514-static struct dma_map_ops ibmebus_dma_ops = {
2515+static const struct dma_map_ops ibmebus_dma_ops = {
2516 .alloc_coherent = ibmebus_alloc_coherent,
2517 .free_coherent = ibmebus_free_coherent,
2518 .map_sg = ibmebus_map_sg,
2519diff -urNp linux-2.6.32.45/arch/powerpc/kernel/kgdb.c linux-2.6.32.45/arch/powerpc/kernel/kgdb.c
2520--- linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2521+++ linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2522@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2523 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2524 return 0;
2525
2526- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2527+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2528 regs->nip += 4;
2529
2530 return 1;
2531@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2532 /*
2533 * Global data
2534 */
2535-struct kgdb_arch arch_kgdb_ops = {
2536+const struct kgdb_arch arch_kgdb_ops = {
2537 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2538 };
2539
2540diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module_32.c linux-2.6.32.45/arch/powerpc/kernel/module_32.c
2541--- linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2542+++ linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2543@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2544 me->arch.core_plt_section = i;
2545 }
2546 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2547- printk("Module doesn't contain .plt or .init.plt sections.\n");
2548+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2549 return -ENOEXEC;
2550 }
2551
2552@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2553
2554 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2555 /* Init, or core PLT? */
2556- if (location >= mod->module_core
2557- && location < mod->module_core + mod->core_size)
2558+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2559+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2560 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2561- else
2562+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2563+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2564 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2565+ else {
2566+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2567+ return ~0UL;
2568+ }
2569
2570 /* Find this entry, or if that fails, the next avail. entry */
2571 while (entry->jump[0]) {
2572diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module.c linux-2.6.32.45/arch/powerpc/kernel/module.c
2573--- linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2574+++ linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2575@@ -31,11 +31,24 @@
2576
2577 LIST_HEAD(module_bug_list);
2578
2579+#ifdef CONFIG_PAX_KERNEXEC
2580 void *module_alloc(unsigned long size)
2581 {
2582 if (size == 0)
2583 return NULL;
2584
2585+ return vmalloc(size);
2586+}
2587+
2588+void *module_alloc_exec(unsigned long size)
2589+#else
2590+void *module_alloc(unsigned long size)
2591+#endif
2592+
2593+{
2594+ if (size == 0)
2595+ return NULL;
2596+
2597 return vmalloc_exec(size);
2598 }
2599
2600@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2601 vfree(module_region);
2602 }
2603
2604+#ifdef CONFIG_PAX_KERNEXEC
2605+void module_free_exec(struct module *mod, void *module_region)
2606+{
2607+ module_free(mod, module_region);
2608+}
2609+#endif
2610+
2611 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2612 const Elf_Shdr *sechdrs,
2613 const char *name)
2614diff -urNp linux-2.6.32.45/arch/powerpc/kernel/pci-common.c linux-2.6.32.45/arch/powerpc/kernel/pci-common.c
2615--- linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2616+++ linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2617@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2618 unsigned int ppc_pci_flags = 0;
2619
2620
2621-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2622+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2623
2624-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2625+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2626 {
2627 pci_dma_ops = dma_ops;
2628 }
2629
2630-struct dma_map_ops *get_pci_dma_ops(void)
2631+const struct dma_map_ops *get_pci_dma_ops(void)
2632 {
2633 return pci_dma_ops;
2634 }
2635diff -urNp linux-2.6.32.45/arch/powerpc/kernel/process.c linux-2.6.32.45/arch/powerpc/kernel/process.c
2636--- linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2637+++ linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2638@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2639 * Lookup NIP late so we have the best change of getting the
2640 * above info out without failing
2641 */
2642- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2643- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2644+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2645+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2646 #endif
2647 show_stack(current, (unsigned long *) regs->gpr[1]);
2648 if (!user_mode(regs))
2649@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2650 newsp = stack[0];
2651 ip = stack[STACK_FRAME_LR_SAVE];
2652 if (!firstframe || ip != lr) {
2653- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2654+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2655 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2656 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2657- printk(" (%pS)",
2658+ printk(" (%pA)",
2659 (void *)current->ret_stack[curr_frame].ret);
2660 curr_frame--;
2661 }
2662@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2663 struct pt_regs *regs = (struct pt_regs *)
2664 (sp + STACK_FRAME_OVERHEAD);
2665 lr = regs->link;
2666- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2667+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2668 regs->trap, (void *)regs->nip, (void *)lr);
2669 firstframe = 1;
2670 }
2671@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2672 }
2673
2674 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2675-
2676-unsigned long arch_align_stack(unsigned long sp)
2677-{
2678- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2679- sp -= get_random_int() & ~PAGE_MASK;
2680- return sp & ~0xf;
2681-}
2682-
2683-static inline unsigned long brk_rnd(void)
2684-{
2685- unsigned long rnd = 0;
2686-
2687- /* 8MB for 32bit, 1GB for 64bit */
2688- if (is_32bit_task())
2689- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2690- else
2691- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2692-
2693- return rnd << PAGE_SHIFT;
2694-}
2695-
2696-unsigned long arch_randomize_brk(struct mm_struct *mm)
2697-{
2698- unsigned long base = mm->brk;
2699- unsigned long ret;
2700-
2701-#ifdef CONFIG_PPC_STD_MMU_64
2702- /*
2703- * If we are using 1TB segments and we are allowed to randomise
2704- * the heap, we can put it above 1TB so it is backed by a 1TB
2705- * segment. Otherwise the heap will be in the bottom 1TB
2706- * which always uses 256MB segments and this may result in a
2707- * performance penalty.
2708- */
2709- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2710- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2711-#endif
2712-
2713- ret = PAGE_ALIGN(base + brk_rnd());
2714-
2715- if (ret < mm->brk)
2716- return mm->brk;
2717-
2718- return ret;
2719-}
2720-
2721-unsigned long randomize_et_dyn(unsigned long base)
2722-{
2723- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2724-
2725- if (ret < base)
2726- return base;
2727-
2728- return ret;
2729-}
2730diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_32.c linux-2.6.32.45/arch/powerpc/kernel/signal_32.c
2731--- linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2732+++ linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2733@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2734 /* Save user registers on the stack */
2735 frame = &rt_sf->uc.uc_mcontext;
2736 addr = frame;
2737- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2738+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2739 if (save_user_regs(regs, frame, 0, 1))
2740 goto badframe;
2741 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2742diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_64.c linux-2.6.32.45/arch/powerpc/kernel/signal_64.c
2743--- linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2744+++ linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2745@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2746 current->thread.fpscr.val = 0;
2747
2748 /* Set up to return from userspace. */
2749- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2750+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2751 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2752 } else {
2753 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2754diff -urNp linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c
2755--- linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2756+++ linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2757@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2758 if (oldlenp) {
2759 if (!error) {
2760 if (get_user(oldlen, oldlenp) ||
2761- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2762+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2763+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2764 error = -EFAULT;
2765 }
2766- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2767 }
2768 return error;
2769 }
2770diff -urNp linux-2.6.32.45/arch/powerpc/kernel/traps.c linux-2.6.32.45/arch/powerpc/kernel/traps.c
2771--- linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2772+++ linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2773@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2774 static inline void pmac_backlight_unblank(void) { }
2775 #endif
2776
2777+extern void gr_handle_kernel_exploit(void);
2778+
2779 int die(const char *str, struct pt_regs *regs, long err)
2780 {
2781 static struct {
2782@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2783 if (panic_on_oops)
2784 panic("Fatal exception");
2785
2786+ gr_handle_kernel_exploit();
2787+
2788 oops_exit();
2789 do_exit(err);
2790
2791diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vdso.c linux-2.6.32.45/arch/powerpc/kernel/vdso.c
2792--- linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2793+++ linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2794@@ -36,6 +36,7 @@
2795 #include <asm/firmware.h>
2796 #include <asm/vdso.h>
2797 #include <asm/vdso_datapage.h>
2798+#include <asm/mman.h>
2799
2800 #include "setup.h"
2801
2802@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2803 vdso_base = VDSO32_MBASE;
2804 #endif
2805
2806- current->mm->context.vdso_base = 0;
2807+ current->mm->context.vdso_base = ~0UL;
2808
2809 /* vDSO has a problem and was disabled, just don't "enable" it for the
2810 * process
2811@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2812 vdso_base = get_unmapped_area(NULL, vdso_base,
2813 (vdso_pages << PAGE_SHIFT) +
2814 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2815- 0, 0);
2816+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2817 if (IS_ERR_VALUE(vdso_base)) {
2818 rc = vdso_base;
2819 goto fail_mmapsem;
2820diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vio.c linux-2.6.32.45/arch/powerpc/kernel/vio.c
2821--- linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2822+++ linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2823@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2824 vio_cmo_dealloc(viodev, alloc_size);
2825 }
2826
2827-struct dma_map_ops vio_dma_mapping_ops = {
2828+static const struct dma_map_ops vio_dma_mapping_ops = {
2829 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2830 .free_coherent = vio_dma_iommu_free_coherent,
2831 .map_sg = vio_dma_iommu_map_sg,
2832 .unmap_sg = vio_dma_iommu_unmap_sg,
2833+ .dma_supported = dma_iommu_dma_supported,
2834 .map_page = vio_dma_iommu_map_page,
2835 .unmap_page = vio_dma_iommu_unmap_page,
2836
2837@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2838
2839 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2840 {
2841- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2842 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2843 }
2844
2845diff -urNp linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c
2846--- linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2847+++ linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2848@@ -9,22 +9,6 @@
2849 #include <linux/module.h>
2850 #include <asm/uaccess.h>
2851
2852-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2853-{
2854- if (likely(access_ok(VERIFY_READ, from, n)))
2855- n = __copy_from_user(to, from, n);
2856- else
2857- memset(to, 0, n);
2858- return n;
2859-}
2860-
2861-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2862-{
2863- if (likely(access_ok(VERIFY_WRITE, to, n)))
2864- n = __copy_to_user(to, from, n);
2865- return n;
2866-}
2867-
2868 unsigned long copy_in_user(void __user *to, const void __user *from,
2869 unsigned long n)
2870 {
2871@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2872 return n;
2873 }
2874
2875-EXPORT_SYMBOL(copy_from_user);
2876-EXPORT_SYMBOL(copy_to_user);
2877 EXPORT_SYMBOL(copy_in_user);
2878
2879diff -urNp linux-2.6.32.45/arch/powerpc/mm/fault.c linux-2.6.32.45/arch/powerpc/mm/fault.c
2880--- linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2881+++ linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2882@@ -30,6 +30,10 @@
2883 #include <linux/kprobes.h>
2884 #include <linux/kdebug.h>
2885 #include <linux/perf_event.h>
2886+#include <linux/slab.h>
2887+#include <linux/pagemap.h>
2888+#include <linux/compiler.h>
2889+#include <linux/unistd.h>
2890
2891 #include <asm/firmware.h>
2892 #include <asm/page.h>
2893@@ -40,6 +44,7 @@
2894 #include <asm/uaccess.h>
2895 #include <asm/tlbflush.h>
2896 #include <asm/siginfo.h>
2897+#include <asm/ptrace.h>
2898
2899
2900 #ifdef CONFIG_KPROBES
2901@@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2902 }
2903 #endif
2904
2905+#ifdef CONFIG_PAX_PAGEEXEC
2906+/*
2907+ * PaX: decide what to do with offenders (regs->nip = fault address)
2908+ *
2909+ * returns 1 when task should be killed
2910+ */
2911+static int pax_handle_fetch_fault(struct pt_regs *regs)
2912+{
2913+ return 1;
2914+}
2915+
2916+void pax_report_insns(void *pc, void *sp)
2917+{
2918+ unsigned long i;
2919+
2920+ printk(KERN_ERR "PAX: bytes at PC: ");
2921+ for (i = 0; i < 5; i++) {
2922+ unsigned int c;
2923+ if (get_user(c, (unsigned int __user *)pc+i))
2924+ printk(KERN_CONT "???????? ");
2925+ else
2926+ printk(KERN_CONT "%08x ", c);
2927+ }
2928+ printk("\n");
2929+}
2930+#endif
2931+
2932 /*
2933 * Check whether the instruction at regs->nip is a store using
2934 * an update addressing form which will update r1.
2935@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2936 * indicate errors in DSISR but can validly be set in SRR1.
2937 */
2938 if (trap == 0x400)
2939- error_code &= 0x48200000;
2940+ error_code &= 0x58200000;
2941 else
2942 is_write = error_code & DSISR_ISSTORE;
2943 #else
2944@@ -250,7 +282,7 @@ good_area:
2945 * "undefined". Of those that can be set, this is the only
2946 * one which seems bad.
2947 */
2948- if (error_code & 0x10000000)
2949+ if (error_code & DSISR_GUARDED)
2950 /* Guarded storage error. */
2951 goto bad_area;
2952 #endif /* CONFIG_8xx */
2953@@ -265,7 +297,7 @@ good_area:
2954 * processors use the same I/D cache coherency mechanism
2955 * as embedded.
2956 */
2957- if (error_code & DSISR_PROTFAULT)
2958+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2959 goto bad_area;
2960 #endif /* CONFIG_PPC_STD_MMU */
2961
2962@@ -335,6 +367,23 @@ bad_area:
2963 bad_area_nosemaphore:
2964 /* User mode accesses cause a SIGSEGV */
2965 if (user_mode(regs)) {
2966+
2967+#ifdef CONFIG_PAX_PAGEEXEC
2968+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2969+#ifdef CONFIG_PPC_STD_MMU
2970+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2971+#else
2972+ if (is_exec && regs->nip == address) {
2973+#endif
2974+ switch (pax_handle_fetch_fault(regs)) {
2975+ }
2976+
2977+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2978+ do_group_exit(SIGKILL);
2979+ }
2980+ }
2981+#endif
2982+
2983 _exception(SIGSEGV, regs, code, address);
2984 return 0;
2985 }
2986diff -urNp linux-2.6.32.45/arch/powerpc/mm/mmap_64.c linux-2.6.32.45/arch/powerpc/mm/mmap_64.c
2987--- linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
2988+++ linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
2989@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2990 */
2991 if (mmap_is_legacy()) {
2992 mm->mmap_base = TASK_UNMAPPED_BASE;
2993+
2994+#ifdef CONFIG_PAX_RANDMMAP
2995+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2996+ mm->mmap_base += mm->delta_mmap;
2997+#endif
2998+
2999 mm->get_unmapped_area = arch_get_unmapped_area;
3000 mm->unmap_area = arch_unmap_area;
3001 } else {
3002 mm->mmap_base = mmap_base();
3003+
3004+#ifdef CONFIG_PAX_RANDMMAP
3005+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3006+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3007+#endif
3008+
3009 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3010 mm->unmap_area = arch_unmap_area_topdown;
3011 }
3012diff -urNp linux-2.6.32.45/arch/powerpc/mm/slice.c linux-2.6.32.45/arch/powerpc/mm/slice.c
3013--- linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3014+++ linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3015@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3016 if ((mm->task_size - len) < addr)
3017 return 0;
3018 vma = find_vma(mm, addr);
3019- return (!vma || (addr + len) <= vma->vm_start);
3020+ return check_heap_stack_gap(vma, addr, len);
3021 }
3022
3023 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3024@@ -256,7 +256,7 @@ full_search:
3025 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3026 continue;
3027 }
3028- if (!vma || addr + len <= vma->vm_start) {
3029+ if (check_heap_stack_gap(vma, addr, len)) {
3030 /*
3031 * Remember the place where we stopped the search:
3032 */
3033@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3034 }
3035 }
3036
3037- addr = mm->mmap_base;
3038- while (addr > len) {
3039+ if (mm->mmap_base < len)
3040+ addr = -ENOMEM;
3041+ else
3042+ addr = mm->mmap_base - len;
3043+
3044+ while (!IS_ERR_VALUE(addr)) {
3045 /* Go down by chunk size */
3046- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3047+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3048
3049 /* Check for hit with different page size */
3050 mask = slice_range_to_mask(addr, len);
3051@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3052 * return with success:
3053 */
3054 vma = find_vma(mm, addr);
3055- if (!vma || (addr + len) <= vma->vm_start) {
3056+ if (check_heap_stack_gap(vma, addr, len)) {
3057 /* remember the address as a hint for next time */
3058 if (use_cache)
3059 mm->free_area_cache = addr;
3060@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3061 mm->cached_hole_size = vma->vm_start - addr;
3062
3063 /* try just below the current vma->vm_start */
3064- addr = vma->vm_start;
3065+ addr = skip_heap_stack_gap(vma, len);
3066 }
3067
3068 /*
3069@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3070 if (fixed && addr > (mm->task_size - len))
3071 return -EINVAL;
3072
3073+#ifdef CONFIG_PAX_RANDMMAP
3074+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3075+ addr = 0;
3076+#endif
3077+
3078 /* If hint, make sure it matches our alignment restrictions */
3079 if (!fixed && addr) {
3080 addr = _ALIGN_UP(addr, 1ul << pshift);
3081diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c
3082--- linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3083+++ linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3084@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3085 lite5200_pm_target_state = PM_SUSPEND_ON;
3086 }
3087
3088-static struct platform_suspend_ops lite5200_pm_ops = {
3089+static const struct platform_suspend_ops lite5200_pm_ops = {
3090 .valid = lite5200_pm_valid,
3091 .begin = lite5200_pm_begin,
3092 .prepare = lite5200_pm_prepare,
3093diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3094--- linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3095+++ linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3096@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3097 iounmap(mbar);
3098 }
3099
3100-static struct platform_suspend_ops mpc52xx_pm_ops = {
3101+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3102 .valid = mpc52xx_pm_valid,
3103 .prepare = mpc52xx_pm_prepare,
3104 .enter = mpc52xx_pm_enter,
3105diff -urNp linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c
3106--- linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3107+++ linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3108@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3109 return ret;
3110 }
3111
3112-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3113+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3114 .valid = mpc83xx_suspend_valid,
3115 .begin = mpc83xx_suspend_begin,
3116 .enter = mpc83xx_suspend_enter,
3117diff -urNp linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c
3118--- linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3119+++ linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3120@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3121
3122 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3123
3124-struct dma_map_ops dma_iommu_fixed_ops = {
3125+const struct dma_map_ops dma_iommu_fixed_ops = {
3126 .alloc_coherent = dma_fixed_alloc_coherent,
3127 .free_coherent = dma_fixed_free_coherent,
3128 .map_sg = dma_fixed_map_sg,
3129diff -urNp linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c
3130--- linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3131+++ linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3132@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3133 return mask >= DMA_BIT_MASK(32);
3134 }
3135
3136-static struct dma_map_ops ps3_sb_dma_ops = {
3137+static const struct dma_map_ops ps3_sb_dma_ops = {
3138 .alloc_coherent = ps3_alloc_coherent,
3139 .free_coherent = ps3_free_coherent,
3140 .map_sg = ps3_sb_map_sg,
3141@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3142 .unmap_page = ps3_unmap_page,
3143 };
3144
3145-static struct dma_map_ops ps3_ioc0_dma_ops = {
3146+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3147 .alloc_coherent = ps3_alloc_coherent,
3148 .free_coherent = ps3_free_coherent,
3149 .map_sg = ps3_ioc0_map_sg,
3150diff -urNp linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig
3151--- linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3152+++ linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3153@@ -2,6 +2,8 @@ config PPC_PSERIES
3154 depends on PPC64 && PPC_BOOK3S
3155 bool "IBM pSeries & new (POWER5-based) iSeries"
3156 select MPIC
3157+ select PCI_MSI
3158+ select XICS
3159 select PPC_I8259
3160 select PPC_RTAS
3161 select RTAS_ERROR_LOGGING
3162diff -urNp linux-2.6.32.45/arch/s390/include/asm/elf.h linux-2.6.32.45/arch/s390/include/asm/elf.h
3163--- linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3164+++ linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3165@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3166 that it will "exec", and that there is sufficient room for the brk. */
3167 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3168
3169+#ifdef CONFIG_PAX_ASLR
3170+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3171+
3172+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3173+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3174+#endif
3175+
3176 /* This yields a mask that user programs can use to figure out what
3177 instruction set this CPU supports. */
3178
3179diff -urNp linux-2.6.32.45/arch/s390/include/asm/setup.h linux-2.6.32.45/arch/s390/include/asm/setup.h
3180--- linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3181+++ linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3182@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3183 void detect_memory_layout(struct mem_chunk chunk[]);
3184
3185 #ifdef CONFIG_S390_SWITCH_AMODE
3186-extern unsigned int switch_amode;
3187+#define switch_amode (1)
3188 #else
3189 #define switch_amode (0)
3190 #endif
3191
3192 #ifdef CONFIG_S390_EXEC_PROTECT
3193-extern unsigned int s390_noexec;
3194+#define s390_noexec (1)
3195 #else
3196 #define s390_noexec (0)
3197 #endif
3198diff -urNp linux-2.6.32.45/arch/s390/include/asm/uaccess.h linux-2.6.32.45/arch/s390/include/asm/uaccess.h
3199--- linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3200+++ linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3201@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3202 copy_to_user(void __user *to, const void *from, unsigned long n)
3203 {
3204 might_fault();
3205+
3206+ if ((long)n < 0)
3207+ return n;
3208+
3209 if (access_ok(VERIFY_WRITE, to, n))
3210 n = __copy_to_user(to, from, n);
3211 return n;
3212@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3213 static inline unsigned long __must_check
3214 __copy_from_user(void *to, const void __user *from, unsigned long n)
3215 {
3216+ if ((long)n < 0)
3217+ return n;
3218+
3219 if (__builtin_constant_p(n) && (n <= 256))
3220 return uaccess.copy_from_user_small(n, from, to);
3221 else
3222@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3223 copy_from_user(void *to, const void __user *from, unsigned long n)
3224 {
3225 might_fault();
3226+
3227+ if ((long)n < 0)
3228+ return n;
3229+
3230 if (access_ok(VERIFY_READ, from, n))
3231 n = __copy_from_user(to, from, n);
3232 else
3233diff -urNp linux-2.6.32.45/arch/s390/Kconfig linux-2.6.32.45/arch/s390/Kconfig
3234--- linux-2.6.32.45/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3235+++ linux-2.6.32.45/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3236@@ -194,28 +194,26 @@ config AUDIT_ARCH
3237
3238 config S390_SWITCH_AMODE
3239 bool "Switch kernel/user addressing modes"
3240+ default y
3241 help
3242 This option allows to switch the addressing modes of kernel and user
3243- space. The kernel parameter switch_amode=on will enable this feature,
3244- default is disabled. Enabling this (via kernel parameter) on machines
3245- earlier than IBM System z9-109 EC/BC will reduce system performance.
3246+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3247+ will reduce system performance.
3248
3249 Note that this option will also be selected by selecting the execute
3250- protection option below. Enabling the execute protection via the
3251- noexec kernel parameter will also switch the addressing modes,
3252- independent of the switch_amode kernel parameter.
3253+ protection option below. Enabling the execute protection will also
3254+ switch the addressing modes, independent of this option.
3255
3256
3257 config S390_EXEC_PROTECT
3258 bool "Data execute protection"
3259+ default y
3260 select S390_SWITCH_AMODE
3261 help
3262 This option allows to enable a buffer overflow protection for user
3263 space programs and it also selects the addressing mode option above.
3264- The kernel parameter noexec=on will enable this feature and also
3265- switch the addressing modes, default is disabled. Enabling this (via
3266- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3267- will reduce system performance.
3268+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3269+ reduce system performance.
3270
3271 comment "Code generation options"
3272
3273diff -urNp linux-2.6.32.45/arch/s390/kernel/module.c linux-2.6.32.45/arch/s390/kernel/module.c
3274--- linux-2.6.32.45/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3275+++ linux-2.6.32.45/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3276@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3277
3278 /* Increase core size by size of got & plt and set start
3279 offsets for got and plt. */
3280- me->core_size = ALIGN(me->core_size, 4);
3281- me->arch.got_offset = me->core_size;
3282- me->core_size += me->arch.got_size;
3283- me->arch.plt_offset = me->core_size;
3284- me->core_size += me->arch.plt_size;
3285+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3286+ me->arch.got_offset = me->core_size_rw;
3287+ me->core_size_rw += me->arch.got_size;
3288+ me->arch.plt_offset = me->core_size_rx;
3289+ me->core_size_rx += me->arch.plt_size;
3290 return 0;
3291 }
3292
3293@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3294 if (info->got_initialized == 0) {
3295 Elf_Addr *gotent;
3296
3297- gotent = me->module_core + me->arch.got_offset +
3298+ gotent = me->module_core_rw + me->arch.got_offset +
3299 info->got_offset;
3300 *gotent = val;
3301 info->got_initialized = 1;
3302@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3303 else if (r_type == R_390_GOTENT ||
3304 r_type == R_390_GOTPLTENT)
3305 *(unsigned int *) loc =
3306- (val + (Elf_Addr) me->module_core - loc) >> 1;
3307+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3308 else if (r_type == R_390_GOT64 ||
3309 r_type == R_390_GOTPLT64)
3310 *(unsigned long *) loc = val;
3311@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3312 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3313 if (info->plt_initialized == 0) {
3314 unsigned int *ip;
3315- ip = me->module_core + me->arch.plt_offset +
3316+ ip = me->module_core_rx + me->arch.plt_offset +
3317 info->plt_offset;
3318 #ifndef CONFIG_64BIT
3319 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3320@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3321 val - loc + 0xffffUL < 0x1ffffeUL) ||
3322 (r_type == R_390_PLT32DBL &&
3323 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3324- val = (Elf_Addr) me->module_core +
3325+ val = (Elf_Addr) me->module_core_rx +
3326 me->arch.plt_offset +
3327 info->plt_offset;
3328 val += rela->r_addend - loc;
3329@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3330 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3331 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3332 val = val + rela->r_addend -
3333- ((Elf_Addr) me->module_core + me->arch.got_offset);
3334+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3335 if (r_type == R_390_GOTOFF16)
3336 *(unsigned short *) loc = val;
3337 else if (r_type == R_390_GOTOFF32)
3338@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3339 break;
3340 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3341 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3342- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3343+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3344 rela->r_addend - loc;
3345 if (r_type == R_390_GOTPC)
3346 *(unsigned int *) loc = val;
3347diff -urNp linux-2.6.32.45/arch/s390/kernel/setup.c linux-2.6.32.45/arch/s390/kernel/setup.c
3348--- linux-2.6.32.45/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3349+++ linux-2.6.32.45/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3350@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3351 early_param("mem", early_parse_mem);
3352
3353 #ifdef CONFIG_S390_SWITCH_AMODE
3354-unsigned int switch_amode = 0;
3355-EXPORT_SYMBOL_GPL(switch_amode);
3356-
3357 static int set_amode_and_uaccess(unsigned long user_amode,
3358 unsigned long user32_amode)
3359 {
3360@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3361 return 0;
3362 }
3363 }
3364-
3365-/*
3366- * Switch kernel/user addressing modes?
3367- */
3368-static int __init early_parse_switch_amode(char *p)
3369-{
3370- switch_amode = 1;
3371- return 0;
3372-}
3373-early_param("switch_amode", early_parse_switch_amode);
3374-
3375 #else /* CONFIG_S390_SWITCH_AMODE */
3376 static inline int set_amode_and_uaccess(unsigned long user_amode,
3377 unsigned long user32_amode)
3378@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3379 }
3380 #endif /* CONFIG_S390_SWITCH_AMODE */
3381
3382-#ifdef CONFIG_S390_EXEC_PROTECT
3383-unsigned int s390_noexec = 0;
3384-EXPORT_SYMBOL_GPL(s390_noexec);
3385-
3386-/*
3387- * Enable execute protection?
3388- */
3389-static int __init early_parse_noexec(char *p)
3390-{
3391- if (!strncmp(p, "off", 3))
3392- return 0;
3393- switch_amode = 1;
3394- s390_noexec = 1;
3395- return 0;
3396-}
3397-early_param("noexec", early_parse_noexec);
3398-#endif /* CONFIG_S390_EXEC_PROTECT */
3399-
3400 static void setup_addressing_mode(void)
3401 {
3402 if (s390_noexec) {
3403diff -urNp linux-2.6.32.45/arch/s390/mm/mmap.c linux-2.6.32.45/arch/s390/mm/mmap.c
3404--- linux-2.6.32.45/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3405+++ linux-2.6.32.45/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3406@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3407 */
3408 if (mmap_is_legacy()) {
3409 mm->mmap_base = TASK_UNMAPPED_BASE;
3410+
3411+#ifdef CONFIG_PAX_RANDMMAP
3412+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3413+ mm->mmap_base += mm->delta_mmap;
3414+#endif
3415+
3416 mm->get_unmapped_area = arch_get_unmapped_area;
3417 mm->unmap_area = arch_unmap_area;
3418 } else {
3419 mm->mmap_base = mmap_base();
3420+
3421+#ifdef CONFIG_PAX_RANDMMAP
3422+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3423+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3424+#endif
3425+
3426 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3427 mm->unmap_area = arch_unmap_area_topdown;
3428 }
3429@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3430 */
3431 if (mmap_is_legacy()) {
3432 mm->mmap_base = TASK_UNMAPPED_BASE;
3433+
3434+#ifdef CONFIG_PAX_RANDMMAP
3435+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3436+ mm->mmap_base += mm->delta_mmap;
3437+#endif
3438+
3439 mm->get_unmapped_area = s390_get_unmapped_area;
3440 mm->unmap_area = arch_unmap_area;
3441 } else {
3442 mm->mmap_base = mmap_base();
3443+
3444+#ifdef CONFIG_PAX_RANDMMAP
3445+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3446+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3447+#endif
3448+
3449 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3450 mm->unmap_area = arch_unmap_area_topdown;
3451 }
3452diff -urNp linux-2.6.32.45/arch/score/include/asm/system.h linux-2.6.32.45/arch/score/include/asm/system.h
3453--- linux-2.6.32.45/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3454+++ linux-2.6.32.45/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3455@@ -17,7 +17,7 @@ do { \
3456 #define finish_arch_switch(prev) do {} while (0)
3457
3458 typedef void (*vi_handler_t)(void);
3459-extern unsigned long arch_align_stack(unsigned long sp);
3460+#define arch_align_stack(x) (x)
3461
3462 #define mb() barrier()
3463 #define rmb() barrier()
3464diff -urNp linux-2.6.32.45/arch/score/kernel/process.c linux-2.6.32.45/arch/score/kernel/process.c
3465--- linux-2.6.32.45/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3466+++ linux-2.6.32.45/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3467@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3468
3469 return task_pt_regs(task)->cp0_epc;
3470 }
3471-
3472-unsigned long arch_align_stack(unsigned long sp)
3473-{
3474- return sp;
3475-}
3476diff -urNp linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c
3477--- linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3478+++ linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3479@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3480 return 0;
3481 }
3482
3483-static struct platform_suspend_ops hp6x0_pm_ops = {
3484+static const struct platform_suspend_ops hp6x0_pm_ops = {
3485 .enter = hp6x0_pm_enter,
3486 .valid = suspend_valid_only_mem,
3487 };
3488diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c
3489--- linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3490+++ linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3491@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3492 NULL,
3493 };
3494
3495-static struct sysfs_ops sq_sysfs_ops = {
3496+static const struct sysfs_ops sq_sysfs_ops = {
3497 .show = sq_sysfs_show,
3498 .store = sq_sysfs_store,
3499 };
3500diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c
3501--- linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3502+++ linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3503@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3504 return 0;
3505 }
3506
3507-static struct platform_suspend_ops sh_pm_ops = {
3508+static const struct platform_suspend_ops sh_pm_ops = {
3509 .enter = sh_pm_enter,
3510 .valid = suspend_valid_only_mem,
3511 };
3512diff -urNp linux-2.6.32.45/arch/sh/kernel/kgdb.c linux-2.6.32.45/arch/sh/kernel/kgdb.c
3513--- linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3514+++ linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3515@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3516 {
3517 }
3518
3519-struct kgdb_arch arch_kgdb_ops = {
3520+const struct kgdb_arch arch_kgdb_ops = {
3521 /* Breakpoint instruction: trapa #0x3c */
3522 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3523 .gdb_bpt_instr = { 0x3c, 0xc3 },
3524diff -urNp linux-2.6.32.45/arch/sh/mm/mmap.c linux-2.6.32.45/arch/sh/mm/mmap.c
3525--- linux-2.6.32.45/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3526+++ linux-2.6.32.45/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3527@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3528 addr = PAGE_ALIGN(addr);
3529
3530 vma = find_vma(mm, addr);
3531- if (TASK_SIZE - len >= addr &&
3532- (!vma || addr + len <= vma->vm_start))
3533+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3534 return addr;
3535 }
3536
3537@@ -106,7 +105,7 @@ full_search:
3538 }
3539 return -ENOMEM;
3540 }
3541- if (likely(!vma || addr + len <= vma->vm_start)) {
3542+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3543 /*
3544 * Remember the place where we stopped the search:
3545 */
3546@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3547 addr = PAGE_ALIGN(addr);
3548
3549 vma = find_vma(mm, addr);
3550- if (TASK_SIZE - len >= addr &&
3551- (!vma || addr + len <= vma->vm_start))
3552+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3553 return addr;
3554 }
3555
3556@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3557 /* make sure it can fit in the remaining address space */
3558 if (likely(addr > len)) {
3559 vma = find_vma(mm, addr-len);
3560- if (!vma || addr <= vma->vm_start) {
3561+ if (check_heap_stack_gap(vma, addr - len, len)) {
3562 /* remember the address as a hint for next time */
3563 return (mm->free_area_cache = addr-len);
3564 }
3565@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3566 if (unlikely(mm->mmap_base < len))
3567 goto bottomup;
3568
3569- addr = mm->mmap_base-len;
3570- if (do_colour_align)
3571- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3572+ addr = mm->mmap_base - len;
3573
3574 do {
3575+ if (do_colour_align)
3576+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3577 /*
3578 * Lookup failure means no vma is above this address,
3579 * else if new region fits below vma->vm_start,
3580 * return with success:
3581 */
3582 vma = find_vma(mm, addr);
3583- if (likely(!vma || addr+len <= vma->vm_start)) {
3584+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3585 /* remember the address as a hint for next time */
3586 return (mm->free_area_cache = addr);
3587 }
3588@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3589 mm->cached_hole_size = vma->vm_start - addr;
3590
3591 /* try just below the current vma->vm_start */
3592- addr = vma->vm_start-len;
3593- if (do_colour_align)
3594- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3595- } while (likely(len < vma->vm_start));
3596+ addr = skip_heap_stack_gap(vma, len);
3597+ } while (!IS_ERR_VALUE(addr));
3598
3599 bottomup:
3600 /*
3601diff -urNp linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h
3602--- linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3603+++ linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-07-13 22:22:56.000000000 -0400
3604@@ -14,18 +14,40 @@
3605 #define ATOMIC64_INIT(i) { (i) }
3606
3607 #define atomic_read(v) ((v)->counter)
3608+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3609+{
3610+ return v->counter;
3611+}
3612 #define atomic64_read(v) ((v)->counter)
3613+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3614+{
3615+ return v->counter;
3616+}
3617
3618 #define atomic_set(v, i) (((v)->counter) = i)
3619+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3620+{
3621+ v->counter = i;
3622+}
3623 #define atomic64_set(v, i) (((v)->counter) = i)
3624+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3625+{
3626+ v->counter = i;
3627+}
3628
3629 extern void atomic_add(int, atomic_t *);
3630+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3631 extern void atomic64_add(long, atomic64_t *);
3632+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3633 extern void atomic_sub(int, atomic_t *);
3634+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3635 extern void atomic64_sub(long, atomic64_t *);
3636+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3637
3638 extern int atomic_add_ret(int, atomic_t *);
3639+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3640 extern long atomic64_add_ret(long, atomic64_t *);
3641+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3642 extern int atomic_sub_ret(int, atomic_t *);
3643 extern long atomic64_sub_ret(long, atomic64_t *);
3644
3645@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3646 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3647
3648 #define atomic_inc_return(v) atomic_add_ret(1, v)
3649+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3650+{
3651+ return atomic_add_ret_unchecked(1, v);
3652+}
3653 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3654+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3655+{
3656+ return atomic64_add_ret_unchecked(1, v);
3657+}
3658
3659 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3660 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3661
3662 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3663+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3664+{
3665+ return atomic_add_ret_unchecked(i, v);
3666+}
3667 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3668+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3669+{
3670+ return atomic64_add_ret_unchecked(i, v);
3671+}
3672
3673 /*
3674 * atomic_inc_and_test - increment and test
3675@@ -50,6 +88,7 @@ extern long atomic64_sub_ret(long, atomi
3676 * other cases.
3677 */
3678 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3679+#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3680 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3681
3682 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3683@@ -59,30 +98,59 @@ extern long atomic64_sub_ret(long, atomi
3684 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3685
3686 #define atomic_inc(v) atomic_add(1, v)
3687+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3688+{
3689+ atomic_add_unchecked(1, v);
3690+}
3691 #define atomic64_inc(v) atomic64_add(1, v)
3692+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3693+{
3694+ atomic64_add_unchecked(1, v);
3695+}
3696
3697 #define atomic_dec(v) atomic_sub(1, v)
3698+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3699+{
3700+ atomic_sub_unchecked(1, v);
3701+}
3702 #define atomic64_dec(v) atomic64_sub(1, v)
3703+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3704+{
3705+ atomic64_sub_unchecked(1, v);
3706+}
3707
3708 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3709 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3710
3711 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3712+#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3713 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3714+#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3715
3716 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3717 {
3718- int c, old;
3719+ int c, old, new;
3720 c = atomic_read(v);
3721 for (;;) {
3722- if (unlikely(c == (u)))
3723+ if (unlikely(c == u))
3724 break;
3725- old = atomic_cmpxchg((v), c, c + (a));
3726+
3727+ asm volatile("addcc %2, %0, %0\n"
3728+
3729+#ifdef CONFIG_PAX_REFCOUNT
3730+ "tvs %%icc, 6\n"
3731+#endif
3732+
3733+ : "=r" (new)
3734+ : "0" (c), "ir" (a)
3735+ : "cc");
3736+
3737+ old = atomic_cmpxchg(v, c, new);
3738 if (likely(old == c))
3739 break;
3740 c = old;
3741 }
3742- return c != (u);
3743+ return c != u;
3744 }
3745
3746 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3747@@ -93,17 +161,28 @@ static inline int atomic_add_unless(atom
3748
3749 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3750 {
3751- long c, old;
3752+ long c, old, new;
3753 c = atomic64_read(v);
3754 for (;;) {
3755- if (unlikely(c == (u)))
3756+ if (unlikely(c == u))
3757 break;
3758- old = atomic64_cmpxchg((v), c, c + (a));
3759+
3760+ asm volatile("addcc %2, %0, %0\n"
3761+
3762+#ifdef CONFIG_PAX_REFCOUNT
3763+ "tvs %%xcc, 6\n"
3764+#endif
3765+
3766+ : "=r" (new)
3767+ : "0" (c), "ir" (a)
3768+ : "cc");
3769+
3770+ old = atomic64_cmpxchg(v, c, new);
3771 if (likely(old == c))
3772 break;
3773 c = old;
3774 }
3775- return c != (u);
3776+ return c != u;
3777 }
3778
3779 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3780diff -urNp linux-2.6.32.45/arch/sparc/include/asm/cache.h linux-2.6.32.45/arch/sparc/include/asm/cache.h
3781--- linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3782+++ linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3783@@ -8,7 +8,7 @@
3784 #define _SPARC_CACHE_H
3785
3786 #define L1_CACHE_SHIFT 5
3787-#define L1_CACHE_BYTES 32
3788+#define L1_CACHE_BYTES 32UL
3789 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3790
3791 #ifdef CONFIG_SPARC32
3792diff -urNp linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h
3793--- linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3794+++ linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3795@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3796 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3797 #define dma_is_consistent(d, h) (1)
3798
3799-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3800+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3801 extern struct bus_type pci_bus_type;
3802
3803-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3804+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3805 {
3806 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3807 if (dev->bus == &pci_bus_type)
3808@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3809 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3810 dma_addr_t *dma_handle, gfp_t flag)
3811 {
3812- struct dma_map_ops *ops = get_dma_ops(dev);
3813+ const struct dma_map_ops *ops = get_dma_ops(dev);
3814 void *cpu_addr;
3815
3816 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3817@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3818 static inline void dma_free_coherent(struct device *dev, size_t size,
3819 void *cpu_addr, dma_addr_t dma_handle)
3820 {
3821- struct dma_map_ops *ops = get_dma_ops(dev);
3822+ const struct dma_map_ops *ops = get_dma_ops(dev);
3823
3824 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3825 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3826diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_32.h linux-2.6.32.45/arch/sparc/include/asm/elf_32.h
3827--- linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3828+++ linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3829@@ -116,6 +116,13 @@ typedef struct {
3830
3831 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3832
3833+#ifdef CONFIG_PAX_ASLR
3834+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3835+
3836+#define PAX_DELTA_MMAP_LEN 16
3837+#define PAX_DELTA_STACK_LEN 16
3838+#endif
3839+
3840 /* This yields a mask that user programs can use to figure out what
3841 instruction set this cpu supports. This can NOT be done in userspace
3842 on Sparc. */
3843diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_64.h linux-2.6.32.45/arch/sparc/include/asm/elf_64.h
3844--- linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3845+++ linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3846@@ -163,6 +163,12 @@ typedef struct {
3847 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3848 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3849
3850+#ifdef CONFIG_PAX_ASLR
3851+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3852+
3853+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3854+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3855+#endif
3856
3857 /* This yields a mask that user programs can use to figure out what
3858 instruction set this cpu supports. */
3859diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h
3860--- linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3861+++ linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3862@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3863 BTFIXUPDEF_INT(page_none)
3864 BTFIXUPDEF_INT(page_copy)
3865 BTFIXUPDEF_INT(page_readonly)
3866+
3867+#ifdef CONFIG_PAX_PAGEEXEC
3868+BTFIXUPDEF_INT(page_shared_noexec)
3869+BTFIXUPDEF_INT(page_copy_noexec)
3870+BTFIXUPDEF_INT(page_readonly_noexec)
3871+#endif
3872+
3873 BTFIXUPDEF_INT(page_kernel)
3874
3875 #define PMD_SHIFT SUN4C_PMD_SHIFT
3876@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3877 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3878 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3879
3880+#ifdef CONFIG_PAX_PAGEEXEC
3881+extern pgprot_t PAGE_SHARED_NOEXEC;
3882+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3883+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3884+#else
3885+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3886+# define PAGE_COPY_NOEXEC PAGE_COPY
3887+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3888+#endif
3889+
3890 extern unsigned long page_kernel;
3891
3892 #ifdef MODULE
3893diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h
3894--- linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3895+++ linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3896@@ -115,6 +115,13 @@
3897 SRMMU_EXEC | SRMMU_REF)
3898 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3899 SRMMU_EXEC | SRMMU_REF)
3900+
3901+#ifdef CONFIG_PAX_PAGEEXEC
3902+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3903+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3904+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3905+#endif
3906+
3907 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3908 SRMMU_DIRTY | SRMMU_REF)
3909
3910diff -urNp linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h
3911--- linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3912+++ linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3913@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3914
3915 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3916
3917-static void inline arch_read_lock(raw_rwlock_t *lock)
3918+static inline void arch_read_lock(raw_rwlock_t *lock)
3919 {
3920 unsigned long tmp1, tmp2;
3921
3922 __asm__ __volatile__ (
3923 "1: ldsw [%2], %0\n"
3924 " brlz,pn %0, 2f\n"
3925-"4: add %0, 1, %1\n"
3926+"4: addcc %0, 1, %1\n"
3927+
3928+#ifdef CONFIG_PAX_REFCOUNT
3929+" tvs %%icc, 6\n"
3930+#endif
3931+
3932 " cas [%2], %0, %1\n"
3933 " cmp %0, %1\n"
3934 " bne,pn %%icc, 1b\n"
3935@@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3936 " .previous"
3937 : "=&r" (tmp1), "=&r" (tmp2)
3938 : "r" (lock)
3939- : "memory");
3940+ : "memory", "cc");
3941 }
3942
3943 static int inline arch_read_trylock(raw_rwlock_t *lock)
3944@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3945 "1: ldsw [%2], %0\n"
3946 " brlz,a,pn %0, 2f\n"
3947 " mov 0, %0\n"
3948-" add %0, 1, %1\n"
3949+" addcc %0, 1, %1\n"
3950+
3951+#ifdef CONFIG_PAX_REFCOUNT
3952+" tvs %%icc, 6\n"
3953+#endif
3954+
3955 " cas [%2], %0, %1\n"
3956 " cmp %0, %1\n"
3957 " bne,pn %%icc, 1b\n"
3958@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3959 return tmp1;
3960 }
3961
3962-static void inline arch_read_unlock(raw_rwlock_t *lock)
3963+static inline void arch_read_unlock(raw_rwlock_t *lock)
3964 {
3965 unsigned long tmp1, tmp2;
3966
3967 __asm__ __volatile__(
3968 "1: lduw [%2], %0\n"
3969-" sub %0, 1, %1\n"
3970+" subcc %0, 1, %1\n"
3971+
3972+#ifdef CONFIG_PAX_REFCOUNT
3973+" tvs %%icc, 6\n"
3974+#endif
3975+
3976 " cas [%2], %0, %1\n"
3977 " cmp %0, %1\n"
3978 " bne,pn %%xcc, 1b\n"
3979@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
3980 : "memory");
3981 }
3982
3983-static void inline arch_write_lock(raw_rwlock_t *lock)
3984+static inline void arch_write_lock(raw_rwlock_t *lock)
3985 {
3986 unsigned long mask, tmp1, tmp2;
3987
3988@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
3989 : "memory");
3990 }
3991
3992-static void inline arch_write_unlock(raw_rwlock_t *lock)
3993+static inline void arch_write_unlock(raw_rwlock_t *lock)
3994 {
3995 __asm__ __volatile__(
3996 " stw %%g0, [%0]"
3997diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h
3998--- linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
3999+++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4000@@ -50,6 +50,8 @@ struct thread_info {
4001 unsigned long w_saved;
4002
4003 struct restart_block restart_block;
4004+
4005+ unsigned long lowest_stack;
4006 };
4007
4008 /*
4009diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h
4010--- linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4011+++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4012@@ -68,6 +68,8 @@ struct thread_info {
4013 struct pt_regs *kern_una_regs;
4014 unsigned int kern_una_insn;
4015
4016+ unsigned long lowest_stack;
4017+
4018 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4019 };
4020
4021diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h
4022--- linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4023+++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4024@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4025
4026 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4027 {
4028- if (n && __access_ok((unsigned long) to, n))
4029+ if ((long)n < 0)
4030+ return n;
4031+
4032+ if (n && __access_ok((unsigned long) to, n)) {
4033+ if (!__builtin_constant_p(n))
4034+ check_object_size(from, n, true);
4035 return __copy_user(to, (__force void __user *) from, n);
4036- else
4037+ } else
4038 return n;
4039 }
4040
4041 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4042 {
4043+ if ((long)n < 0)
4044+ return n;
4045+
4046+ if (!__builtin_constant_p(n))
4047+ check_object_size(from, n, true);
4048+
4049 return __copy_user(to, (__force void __user *) from, n);
4050 }
4051
4052 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4053 {
4054- if (n && __access_ok((unsigned long) from, n))
4055+ if ((long)n < 0)
4056+ return n;
4057+
4058+ if (n && __access_ok((unsigned long) from, n)) {
4059+ if (!__builtin_constant_p(n))
4060+ check_object_size(to, n, false);
4061 return __copy_user((__force void __user *) to, from, n);
4062- else
4063+ } else
4064 return n;
4065 }
4066
4067 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4068 {
4069+ if ((long)n < 0)
4070+ return n;
4071+
4072 return __copy_user((__force void __user *) to, from, n);
4073 }
4074
4075diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h
4076--- linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4077+++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4078@@ -9,6 +9,7 @@
4079 #include <linux/compiler.h>
4080 #include <linux/string.h>
4081 #include <linux/thread_info.h>
4082+#include <linux/kernel.h>
4083 #include <asm/asi.h>
4084 #include <asm/system.h>
4085 #include <asm/spitfire.h>
4086@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4087 static inline unsigned long __must_check
4088 copy_from_user(void *to, const void __user *from, unsigned long size)
4089 {
4090- unsigned long ret = ___copy_from_user(to, from, size);
4091+ unsigned long ret;
4092
4093+ if ((long)size < 0 || size > INT_MAX)
4094+ return size;
4095+
4096+ if (!__builtin_constant_p(size))
4097+ check_object_size(to, size, false);
4098+
4099+ ret = ___copy_from_user(to, from, size);
4100 if (unlikely(ret))
4101 ret = copy_from_user_fixup(to, from, size);
4102 return ret;
4103@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4104 static inline unsigned long __must_check
4105 copy_to_user(void __user *to, const void *from, unsigned long size)
4106 {
4107- unsigned long ret = ___copy_to_user(to, from, size);
4108+ unsigned long ret;
4109+
4110+ if ((long)size < 0 || size > INT_MAX)
4111+ return size;
4112+
4113+ if (!__builtin_constant_p(size))
4114+ check_object_size(from, size, true);
4115
4116+ ret = ___copy_to_user(to, from, size);
4117 if (unlikely(ret))
4118 ret = copy_to_user_fixup(to, from, size);
4119 return ret;
4120diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess.h linux-2.6.32.45/arch/sparc/include/asm/uaccess.h
4121--- linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4122+++ linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4123@@ -1,5 +1,13 @@
4124 #ifndef ___ASM_SPARC_UACCESS_H
4125 #define ___ASM_SPARC_UACCESS_H
4126+
4127+#ifdef __KERNEL__
4128+#ifndef __ASSEMBLY__
4129+#include <linux/types.h>
4130+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4131+#endif
4132+#endif
4133+
4134 #if defined(__sparc__) && defined(__arch64__)
4135 #include <asm/uaccess_64.h>
4136 #else
4137diff -urNp linux-2.6.32.45/arch/sparc/kernel/iommu.c linux-2.6.32.45/arch/sparc/kernel/iommu.c
4138--- linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4139+++ linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4140@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4141 spin_unlock_irqrestore(&iommu->lock, flags);
4142 }
4143
4144-static struct dma_map_ops sun4u_dma_ops = {
4145+static const struct dma_map_ops sun4u_dma_ops = {
4146 .alloc_coherent = dma_4u_alloc_coherent,
4147 .free_coherent = dma_4u_free_coherent,
4148 .map_page = dma_4u_map_page,
4149@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4150 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4151 };
4152
4153-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4154+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4155 EXPORT_SYMBOL(dma_ops);
4156
4157 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4158diff -urNp linux-2.6.32.45/arch/sparc/kernel/ioport.c linux-2.6.32.45/arch/sparc/kernel/ioport.c
4159--- linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4160+++ linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4161@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4162 BUG();
4163 }
4164
4165-struct dma_map_ops sbus_dma_ops = {
4166+const struct dma_map_ops sbus_dma_ops = {
4167 .alloc_coherent = sbus_alloc_coherent,
4168 .free_coherent = sbus_free_coherent,
4169 .map_page = sbus_map_page,
4170@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4171 .sync_sg_for_device = sbus_sync_sg_for_device,
4172 };
4173
4174-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4175+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4176 EXPORT_SYMBOL(dma_ops);
4177
4178 static int __init sparc_register_ioport(void)
4179@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4180 }
4181 }
4182
4183-struct dma_map_ops pci32_dma_ops = {
4184+const struct dma_map_ops pci32_dma_ops = {
4185 .alloc_coherent = pci32_alloc_coherent,
4186 .free_coherent = pci32_free_coherent,
4187 .map_page = pci32_map_page,
4188diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c
4189--- linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4190+++ linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4191@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4192 {
4193 }
4194
4195-struct kgdb_arch arch_kgdb_ops = {
4196+const struct kgdb_arch arch_kgdb_ops = {
4197 /* Breakpoint instruction: ta 0x7d */
4198 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4199 };
4200diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c
4201--- linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4202+++ linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4203@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4204 {
4205 }
4206
4207-struct kgdb_arch arch_kgdb_ops = {
4208+const struct kgdb_arch arch_kgdb_ops = {
4209 /* Breakpoint instruction: ta 0x72 */
4210 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4211 };
4212diff -urNp linux-2.6.32.45/arch/sparc/kernel/Makefile linux-2.6.32.45/arch/sparc/kernel/Makefile
4213--- linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4214+++ linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4215@@ -3,7 +3,7 @@
4216 #
4217
4218 asflags-y := -ansi
4219-ccflags-y := -Werror
4220+#ccflags-y := -Werror
4221
4222 extra-y := head_$(BITS).o
4223 extra-y += init_task.o
4224diff -urNp linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c
4225--- linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4226+++ linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4227@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4228 spin_unlock_irqrestore(&iommu->lock, flags);
4229 }
4230
4231-static struct dma_map_ops sun4v_dma_ops = {
4232+static const struct dma_map_ops sun4v_dma_ops = {
4233 .alloc_coherent = dma_4v_alloc_coherent,
4234 .free_coherent = dma_4v_free_coherent,
4235 .map_page = dma_4v_map_page,
4236diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_32.c linux-2.6.32.45/arch/sparc/kernel/process_32.c
4237--- linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4238+++ linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4239@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4240 rw->ins[4], rw->ins[5],
4241 rw->ins[6],
4242 rw->ins[7]);
4243- printk("%pS\n", (void *) rw->ins[7]);
4244+ printk("%pA\n", (void *) rw->ins[7]);
4245 rw = (struct reg_window32 *) rw->ins[6];
4246 }
4247 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4248@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4249
4250 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4251 r->psr, r->pc, r->npc, r->y, print_tainted());
4252- printk("PC: <%pS>\n", (void *) r->pc);
4253+ printk("PC: <%pA>\n", (void *) r->pc);
4254 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4255 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4256 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4257 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4258 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4259 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4260- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4261+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4262
4263 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4264 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4265@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4266 rw = (struct reg_window32 *) fp;
4267 pc = rw->ins[7];
4268 printk("[%08lx : ", pc);
4269- printk("%pS ] ", (void *) pc);
4270+ printk("%pA ] ", (void *) pc);
4271 fp = rw->ins[6];
4272 } while (++count < 16);
4273 printk("\n");
4274diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_64.c linux-2.6.32.45/arch/sparc/kernel/process_64.c
4275--- linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4276+++ linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4277@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4278 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4279 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4280 if (regs->tstate & TSTATE_PRIV)
4281- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4282+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4283 }
4284
4285 void show_regs(struct pt_regs *regs)
4286 {
4287 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4288 regs->tpc, regs->tnpc, regs->y, print_tainted());
4289- printk("TPC: <%pS>\n", (void *) regs->tpc);
4290+ printk("TPC: <%pA>\n", (void *) regs->tpc);
4291 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4292 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4293 regs->u_regs[3]);
4294@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4295 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4296 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4297 regs->u_regs[15]);
4298- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4299+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4300 show_regwindow(regs);
4301 }
4302
4303@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4304 ((tp && tp->task) ? tp->task->pid : -1));
4305
4306 if (gp->tstate & TSTATE_PRIV) {
4307- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4308+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4309 (void *) gp->tpc,
4310 (void *) gp->o7,
4311 (void *) gp->i7,
4312diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c
4313--- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4314+++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4315@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4316 if (ARCH_SUN4C && len > 0x20000000)
4317 return -ENOMEM;
4318 if (!addr)
4319- addr = TASK_UNMAPPED_BASE;
4320+ addr = current->mm->mmap_base;
4321
4322 if (flags & MAP_SHARED)
4323 addr = COLOUR_ALIGN(addr);
4324@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4325 }
4326 if (TASK_SIZE - PAGE_SIZE - len < addr)
4327 return -ENOMEM;
4328- if (!vmm || addr + len <= vmm->vm_start)
4329+ if (check_heap_stack_gap(vmm, addr, len))
4330 return addr;
4331 addr = vmm->vm_end;
4332 if (flags & MAP_SHARED)
4333diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c
4334--- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4335+++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4336@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4337 /* We do not accept a shared mapping if it would violate
4338 * cache aliasing constraints.
4339 */
4340- if ((flags & MAP_SHARED) &&
4341+ if ((filp || (flags & MAP_SHARED)) &&
4342 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4343 return -EINVAL;
4344 return addr;
4345@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4346 if (filp || (flags & MAP_SHARED))
4347 do_color_align = 1;
4348
4349+#ifdef CONFIG_PAX_RANDMMAP
4350+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4351+#endif
4352+
4353 if (addr) {
4354 if (do_color_align)
4355 addr = COLOUR_ALIGN(addr, pgoff);
4356@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4357 addr = PAGE_ALIGN(addr);
4358
4359 vma = find_vma(mm, addr);
4360- if (task_size - len >= addr &&
4361- (!vma || addr + len <= vma->vm_start))
4362+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4363 return addr;
4364 }
4365
4366 if (len > mm->cached_hole_size) {
4367- start_addr = addr = mm->free_area_cache;
4368+ start_addr = addr = mm->free_area_cache;
4369 } else {
4370- start_addr = addr = TASK_UNMAPPED_BASE;
4371+ start_addr = addr = mm->mmap_base;
4372 mm->cached_hole_size = 0;
4373 }
4374
4375@@ -175,14 +178,14 @@ full_search:
4376 vma = find_vma(mm, VA_EXCLUDE_END);
4377 }
4378 if (unlikely(task_size < addr)) {
4379- if (start_addr != TASK_UNMAPPED_BASE) {
4380- start_addr = addr = TASK_UNMAPPED_BASE;
4381+ if (start_addr != mm->mmap_base) {
4382+ start_addr = addr = mm->mmap_base;
4383 mm->cached_hole_size = 0;
4384 goto full_search;
4385 }
4386 return -ENOMEM;
4387 }
4388- if (likely(!vma || addr + len <= vma->vm_start)) {
4389+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4390 /*
4391 * Remember the place where we stopped the search:
4392 */
4393@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4394 /* We do not accept a shared mapping if it would violate
4395 * cache aliasing constraints.
4396 */
4397- if ((flags & MAP_SHARED) &&
4398+ if ((filp || (flags & MAP_SHARED)) &&
4399 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4400 return -EINVAL;
4401 return addr;
4402@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4403 addr = PAGE_ALIGN(addr);
4404
4405 vma = find_vma(mm, addr);
4406- if (task_size - len >= addr &&
4407- (!vma || addr + len <= vma->vm_start))
4408+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4409 return addr;
4410 }
4411
4412@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4413 /* make sure it can fit in the remaining address space */
4414 if (likely(addr > len)) {
4415 vma = find_vma(mm, addr-len);
4416- if (!vma || addr <= vma->vm_start) {
4417+ if (check_heap_stack_gap(vma, addr - len, len)) {
4418 /* remember the address as a hint for next time */
4419 return (mm->free_area_cache = addr-len);
4420 }
4421@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4422 if (unlikely(mm->mmap_base < len))
4423 goto bottomup;
4424
4425- addr = mm->mmap_base-len;
4426- if (do_color_align)
4427- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4428+ addr = mm->mmap_base - len;
4429
4430 do {
4431+ if (do_color_align)
4432+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4433 /*
4434 * Lookup failure means no vma is above this address,
4435 * else if new region fits below vma->vm_start,
4436 * return with success:
4437 */
4438 vma = find_vma(mm, addr);
4439- if (likely(!vma || addr+len <= vma->vm_start)) {
4440+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4441 /* remember the address as a hint for next time */
4442 return (mm->free_area_cache = addr);
4443 }
4444@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4445 mm->cached_hole_size = vma->vm_start - addr;
4446
4447 /* try just below the current vma->vm_start */
4448- addr = vma->vm_start-len;
4449- if (do_color_align)
4450- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4451- } while (likely(len < vma->vm_start));
4452+ addr = skip_heap_stack_gap(vma, len);
4453+ } while (!IS_ERR_VALUE(addr));
4454
4455 bottomup:
4456 /*
4457@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4458 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4459 sysctl_legacy_va_layout) {
4460 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4461+
4462+#ifdef CONFIG_PAX_RANDMMAP
4463+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4464+ mm->mmap_base += mm->delta_mmap;
4465+#endif
4466+
4467 mm->get_unmapped_area = arch_get_unmapped_area;
4468 mm->unmap_area = arch_unmap_area;
4469 } else {
4470@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4471 gap = (task_size / 6 * 5);
4472
4473 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4474+
4475+#ifdef CONFIG_PAX_RANDMMAP
4476+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4477+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4478+#endif
4479+
4480 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4481 mm->unmap_area = arch_unmap_area_topdown;
4482 }
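
The sys_sparc_32.c and sys_sparc_64.c hunks above replace every open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), and the top-down walk now steps with skip_heap_stack_gap(), which is expected to return the next candidate address below the vma (or an error value once the search would underflow). Both helpers are defined elsewhere in this patch; the sketch below only illustrates the intended check, and the sysctl_heap_stack_gap name is assumed here rather than taken from these hunks:

	static inline int check_heap_stack_gap(const struct vm_area_struct *vma,
					       unsigned long addr, unsigned long len)
	{
		if (!vma)				/* nothing above the hole */
			return 1;
		if (addr + len > vma->vm_start)		/* overlaps the next vma */
			return 0;
		if (vma->vm_flags & VM_GROWSDOWN)	/* next vma is a stack: demand extra slack */
			return addr + len + sysctl_heap_stack_gap <= vma->vm_start;
		return 1;
	}

The practical effect is that arch_get_unmapped_area() and its top-down variant refuse to place a new mapping flush against the bottom of a growing stack, instead of only rejecting direct overlap.
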
4483diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_32.c linux-2.6.32.45/arch/sparc/kernel/traps_32.c
4484--- linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4485+++ linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4486@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4487 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4488 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4489
4490+extern void gr_handle_kernel_exploit(void);
4491+
4492 void die_if_kernel(char *str, struct pt_regs *regs)
4493 {
4494 static int die_counter;
4495@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4496 count++ < 30 &&
4497 (((unsigned long) rw) >= PAGE_OFFSET) &&
4498 !(((unsigned long) rw) & 0x7)) {
4499- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4500+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4501 (void *) rw->ins[7]);
4502 rw = (struct reg_window32 *)rw->ins[6];
4503 }
4504 }
4505 printk("Instruction DUMP:");
4506 instruction_dump ((unsigned long *) regs->pc);
4507- if(regs->psr & PSR_PS)
4508+ if(regs->psr & PSR_PS) {
4509+ gr_handle_kernel_exploit();
4510 do_exit(SIGKILL);
4511+ }
4512 do_exit(SIGSEGV);
4513 }
4514
4515diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_64.c linux-2.6.32.45/arch/sparc/kernel/traps_64.c
4516--- linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4517+++ linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4518@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4519 i + 1,
4520 p->trapstack[i].tstate, p->trapstack[i].tpc,
4521 p->trapstack[i].tnpc, p->trapstack[i].tt);
4522- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4523+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4524 }
4525 }
4526
4527@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4528
4529 lvl -= 0x100;
4530 if (regs->tstate & TSTATE_PRIV) {
4531+
4532+#ifdef CONFIG_PAX_REFCOUNT
4533+ if (lvl == 6)
4534+ pax_report_refcount_overflow(regs);
4535+#endif
4536+
4537 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4538 die_if_kernel(buffer, regs);
4539 }
4540@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4541 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4542 {
4543 char buffer[32];
4544-
4545+
4546 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4547 0, lvl, SIGTRAP) == NOTIFY_STOP)
4548 return;
4549
4550+#ifdef CONFIG_PAX_REFCOUNT
4551+ if (lvl == 6)
4552+ pax_report_refcount_overflow(regs);
4553+#endif
4554+
4555 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4556
4557 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4558@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4559 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4560 printk("%s" "ERROR(%d): ",
4561 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4562- printk("TPC<%pS>\n", (void *) regs->tpc);
4563+ printk("TPC<%pA>\n", (void *) regs->tpc);
4564 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4565 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4566 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4567@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4568 smp_processor_id(),
4569 (type & 0x1) ? 'I' : 'D',
4570 regs->tpc);
4571- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4572+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4573 panic("Irrecoverable Cheetah+ parity error.");
4574 }
4575
4576@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4577 smp_processor_id(),
4578 (type & 0x1) ? 'I' : 'D',
4579 regs->tpc);
4580- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4581+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4582 }
4583
4584 struct sun4v_error_entry {
4585@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4586
4587 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4588 regs->tpc, tl);
4589- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4590+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4591 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4592- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4593+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4594 (void *) regs->u_regs[UREG_I7]);
4595 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4596 "pte[%lx] error[%lx]\n",
4597@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4598
4599 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4600 regs->tpc, tl);
4601- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4602+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4603 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4604- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4605+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4606 (void *) regs->u_regs[UREG_I7]);
4607 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4608 "pte[%lx] error[%lx]\n",
4609@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4610 fp = (unsigned long)sf->fp + STACK_BIAS;
4611 }
4612
4613- printk(" [%016lx] %pS\n", pc, (void *) pc);
4614+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4615 } while (++count < 16);
4616 }
4617
4618@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4619 return (struct reg_window *) (fp + STACK_BIAS);
4620 }
4621
4622+extern void gr_handle_kernel_exploit(void);
4623+
4624 void die_if_kernel(char *str, struct pt_regs *regs)
4625 {
4626 static int die_counter;
4627@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4628 while (rw &&
4629 count++ < 30&&
4630 is_kernel_stack(current, rw)) {
4631- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4632+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4633 (void *) rw->ins[7]);
4634
4635 rw = kernel_stack_up(rw);
4636@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4637 }
4638 user_instruction_dump ((unsigned int __user *) regs->tpc);
4639 }
4640- if (regs->tstate & TSTATE_PRIV)
4641+ if (regs->tstate & TSTATE_PRIV) {
4642+ gr_handle_kernel_exploit();
4643 do_exit(SIGKILL);
4644+ }
4645+
4646 do_exit(SIGSEGV);
4647 }
4648 EXPORT_SYMBOL(die_if_kernel);
4649diff -urNp linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S
4650--- linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4651+++ linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4652@@ -127,7 +127,7 @@ do_int_load:
4653 wr %o5, 0x0, %asi
4654 retl
4655 mov 0, %o0
4656- .size __do_int_load, .-__do_int_load
4657+ .size do_int_load, .-do_int_load
4658
4659 .section __ex_table,"a"
4660 .word 4b, __retl_efault
4661diff -urNp linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c
4662--- linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4663+++ linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4664@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4665 if (count < 5) {
4666 last_time = jiffies;
4667 count++;
4668- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4669+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4670 regs->tpc, (void *) regs->tpc);
4671 }
4672 }
4673diff -urNp linux-2.6.32.45/arch/sparc/lib/atomic_64.S linux-2.6.32.45/arch/sparc/lib/atomic_64.S
4674--- linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4675+++ linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4676@@ -18,7 +18,12 @@
4677 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4678 BACKOFF_SETUP(%o2)
4679 1: lduw [%o1], %g1
4680- add %g1, %o0, %g7
4681+ addcc %g1, %o0, %g7
4682+
4683+#ifdef CONFIG_PAX_REFCOUNT
4684+ tvs %icc, 6
4685+#endif
4686+
4687 cas [%o1], %g1, %g7
4688 cmp %g1, %g7
4689 bne,pn %icc, 2f
4690@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4691 2: BACKOFF_SPIN(%o2, %o3, 1b)
4692 .size atomic_add, .-atomic_add
4693
4694+ .globl atomic_add_unchecked
4695+ .type atomic_add_unchecked,#function
4696+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4697+ BACKOFF_SETUP(%o2)
4698+1: lduw [%o1], %g1
4699+ add %g1, %o0, %g7
4700+ cas [%o1], %g1, %g7
4701+ cmp %g1, %g7
4702+ bne,pn %icc, 2f
4703+ nop
4704+ retl
4705+ nop
4706+2: BACKOFF_SPIN(%o2, %o3, 1b)
4707+ .size atomic_add_unchecked, .-atomic_add_unchecked
4708+
4709 .globl atomic_sub
4710 .type atomic_sub,#function
4711 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4712 BACKOFF_SETUP(%o2)
4713 1: lduw [%o1], %g1
4714- sub %g1, %o0, %g7
4715+ subcc %g1, %o0, %g7
4716+
4717+#ifdef CONFIG_PAX_REFCOUNT
4718+ tvs %icc, 6
4719+#endif
4720+
4721 cas [%o1], %g1, %g7
4722 cmp %g1, %g7
4723 bne,pn %icc, 2f
4724@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4725 2: BACKOFF_SPIN(%o2, %o3, 1b)
4726 .size atomic_sub, .-atomic_sub
4727
4728+ .globl atomic_sub_unchecked
4729+ .type atomic_sub_unchecked,#function
4730+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4731+ BACKOFF_SETUP(%o2)
4732+1: lduw [%o1], %g1
4733+ sub %g1, %o0, %g7
4734+ cas [%o1], %g1, %g7
4735+ cmp %g1, %g7
4736+ bne,pn %icc, 2f
4737+ nop
4738+ retl
4739+ nop
4740+2: BACKOFF_SPIN(%o2, %o3, 1b)
4741+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4742+
4743 .globl atomic_add_ret
4744 .type atomic_add_ret,#function
4745 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4746 BACKOFF_SETUP(%o2)
4747 1: lduw [%o1], %g1
4748- add %g1, %o0, %g7
4749+ addcc %g1, %o0, %g7
4750+
4751+#ifdef CONFIG_PAX_REFCOUNT
4752+ tvs %icc, 6
4753+#endif
4754+
4755 cas [%o1], %g1, %g7
4756 cmp %g1, %g7
4757 bne,pn %icc, 2f
4758@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4759 2: BACKOFF_SPIN(%o2, %o3, 1b)
4760 .size atomic_add_ret, .-atomic_add_ret
4761
4762+ .globl atomic_add_ret_unchecked
4763+ .type atomic_add_ret_unchecked,#function
4764+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4765+ BACKOFF_SETUP(%o2)
4766+1: lduw [%o1], %g1
4767+ addcc %g1, %o0, %g7
4768+ cas [%o1], %g1, %g7
4769+ cmp %g1, %g7
4770+ bne,pn %icc, 2f
4771+ add %g7, %o0, %g7
4772+ sra %g7, 0, %o0
4773+ retl
4774+ nop
4775+2: BACKOFF_SPIN(%o2, %o3, 1b)
4776+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4777+
4778 .globl atomic_sub_ret
4779 .type atomic_sub_ret,#function
4780 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4781 BACKOFF_SETUP(%o2)
4782 1: lduw [%o1], %g1
4783- sub %g1, %o0, %g7
4784+ subcc %g1, %o0, %g7
4785+
4786+#ifdef CONFIG_PAX_REFCOUNT
4787+ tvs %icc, 6
4788+#endif
4789+
4790 cas [%o1], %g1, %g7
4791 cmp %g1, %g7
4792 bne,pn %icc, 2f
4793@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4794 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4795 BACKOFF_SETUP(%o2)
4796 1: ldx [%o1], %g1
4797- add %g1, %o0, %g7
4798+ addcc %g1, %o0, %g7
4799+
4800+#ifdef CONFIG_PAX_REFCOUNT
4801+ tvs %xcc, 6
4802+#endif
4803+
4804 casx [%o1], %g1, %g7
4805 cmp %g1, %g7
4806 bne,pn %xcc, 2f
4807@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4808 2: BACKOFF_SPIN(%o2, %o3, 1b)
4809 .size atomic64_add, .-atomic64_add
4810
4811+ .globl atomic64_add_unchecked
4812+ .type atomic64_add_unchecked,#function
4813+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4814+ BACKOFF_SETUP(%o2)
4815+1: ldx [%o1], %g1
4816+ addcc %g1, %o0, %g7
4817+ casx [%o1], %g1, %g7
4818+ cmp %g1, %g7
4819+ bne,pn %xcc, 2f
4820+ nop
4821+ retl
4822+ nop
4823+2: BACKOFF_SPIN(%o2, %o3, 1b)
4824+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4825+
4826 .globl atomic64_sub
4827 .type atomic64_sub,#function
4828 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4829 BACKOFF_SETUP(%o2)
4830 1: ldx [%o1], %g1
4831- sub %g1, %o0, %g7
4832+ subcc %g1, %o0, %g7
4833+
4834+#ifdef CONFIG_PAX_REFCOUNT
4835+ tvs %xcc, 6
4836+#endif
4837+
4838 casx [%o1], %g1, %g7
4839 cmp %g1, %g7
4840 bne,pn %xcc, 2f
4841@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4842 2: BACKOFF_SPIN(%o2, %o3, 1b)
4843 .size atomic64_sub, .-atomic64_sub
4844
4845+ .globl atomic64_sub_unchecked
4846+ .type atomic64_sub_unchecked,#function
4847+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4848+ BACKOFF_SETUP(%o2)
4849+1: ldx [%o1], %g1
4850+ subcc %g1, %o0, %g7
4851+ casx [%o1], %g1, %g7
4852+ cmp %g1, %g7
4853+ bne,pn %xcc, 2f
4854+ nop
4855+ retl
4856+ nop
4857+2: BACKOFF_SPIN(%o2, %o3, 1b)
4858+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4859+
4860 .globl atomic64_add_ret
4861 .type atomic64_add_ret,#function
4862 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4863 BACKOFF_SETUP(%o2)
4864 1: ldx [%o1], %g1
4865- add %g1, %o0, %g7
4866+ addcc %g1, %o0, %g7
4867+
4868+#ifdef CONFIG_PAX_REFCOUNT
4869+ tvs %xcc, 6
4870+#endif
4871+
4872 casx [%o1], %g1, %g7
4873 cmp %g1, %g7
4874 bne,pn %xcc, 2f
4875@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4876 2: BACKOFF_SPIN(%o2, %o3, 1b)
4877 .size atomic64_add_ret, .-atomic64_add_ret
4878
4879+ .globl atomic64_add_ret_unchecked
4880+ .type atomic64_add_ret_unchecked,#function
4881+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4882+ BACKOFF_SETUP(%o2)
4883+1: ldx [%o1], %g1
4884+ addcc %g1, %o0, %g7
4885+ casx [%o1], %g1, %g7
4886+ cmp %g1, %g7
4887+ bne,pn %xcc, 2f
4888+ add %g7, %o0, %g7
4889+ mov %g7, %o0
4890+ retl
4891+ nop
4892+2: BACKOFF_SPIN(%o2, %o3, 1b)
4893+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4894+
4895 .globl atomic64_sub_ret
4896 .type atomic64_sub_ret,#function
4897 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4898 BACKOFF_SETUP(%o2)
4899 1: ldx [%o1], %g1
4900- sub %g1, %o0, %g7
4901+ subcc %g1, %o0, %g7
4902+
4903+#ifdef CONFIG_PAX_REFCOUNT
4904+ tvs %xcc, 6
4905+#endif
4906+
4907 casx [%o1], %g1, %g7
4908 cmp %g1, %g7
4909 bne,pn %xcc, 2f
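
Every checked atomic above follows the same CONFIG_PAX_REFCOUNT pattern: the plain add/sub becomes addcc/subcc so the condition codes are set, and tvs %icc, 6 (tvs %xcc, 6 for the 64-bit ops) raises software trap 6 when the signed result overflows; the bad_trap()/bad_trap_tl1() hunks in traps_64.c above route lvl == 6 to pax_report_refcount_overflow(). The new *_unchecked variants keep the old wrapping behaviour for counters that may legitimately wrap. A hypothetical userspace model of the split, with abort() standing in for the trap:

	#include <stdatomic.h>
	#include <stdlib.h>

	static void atomic_add_checked(int inc, _Atomic int *v)
	{
		int old = atomic_load(v), new;

		do {
			if (__builtin_add_overflow(old, inc, &new))
				abort();	/* kernel: tvs -> trap 6 -> pax_report_refcount_overflow() */
		} while (!atomic_compare_exchange_weak(v, &old, new));
	}

	static void atomic_add_unchecked(int inc, _Atomic int *v)
	{
		atomic_fetch_add(v, inc);	/* wrapping add, like the _unchecked stubs above */
	}
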
4910diff -urNp linux-2.6.32.45/arch/sparc/lib/ksyms.c linux-2.6.32.45/arch/sparc/lib/ksyms.c
4911--- linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4912+++ linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4913@@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4914
4915 /* Atomic counter implementation. */
4916 EXPORT_SYMBOL(atomic_add);
4917+EXPORT_SYMBOL(atomic_add_unchecked);
4918 EXPORT_SYMBOL(atomic_add_ret);
4919 EXPORT_SYMBOL(atomic_sub);
4920+EXPORT_SYMBOL(atomic_sub_unchecked);
4921 EXPORT_SYMBOL(atomic_sub_ret);
4922 EXPORT_SYMBOL(atomic64_add);
4923+EXPORT_SYMBOL(atomic64_add_unchecked);
4924 EXPORT_SYMBOL(atomic64_add_ret);
4925+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4926 EXPORT_SYMBOL(atomic64_sub);
4927+EXPORT_SYMBOL(atomic64_sub_unchecked);
4928 EXPORT_SYMBOL(atomic64_sub_ret);
4929
4930 /* Atomic bit operations. */
4931diff -urNp linux-2.6.32.45/arch/sparc/lib/Makefile linux-2.6.32.45/arch/sparc/lib/Makefile
4932--- linux-2.6.32.45/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4933+++ linux-2.6.32.45/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4934@@ -2,7 +2,7 @@
4935 #
4936
4937 asflags-y := -ansi -DST_DIV0=0x02
4938-ccflags-y := -Werror
4939+#ccflags-y := -Werror
4940
4941 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4942 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4943diff -urNp linux-2.6.32.45/arch/sparc/lib/rwsem_64.S linux-2.6.32.45/arch/sparc/lib/rwsem_64.S
4944--- linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4945+++ linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4946@@ -11,7 +11,12 @@
4947 .globl __down_read
4948 __down_read:
4949 1: lduw [%o0], %g1
4950- add %g1, 1, %g7
4951+ addcc %g1, 1, %g7
4952+
4953+#ifdef CONFIG_PAX_REFCOUNT
4954+ tvs %icc, 6
4955+#endif
4956+
4957 cas [%o0], %g1, %g7
4958 cmp %g1, %g7
4959 bne,pn %icc, 1b
4960@@ -33,7 +38,12 @@ __down_read:
4961 .globl __down_read_trylock
4962 __down_read_trylock:
4963 1: lduw [%o0], %g1
4964- add %g1, 1, %g7
4965+ addcc %g1, 1, %g7
4966+
4967+#ifdef CONFIG_PAX_REFCOUNT
4968+ tvs %icc, 6
4969+#endif
4970+
4971 cmp %g7, 0
4972 bl,pn %icc, 2f
4973 mov 0, %o1
4974@@ -51,7 +61,12 @@ __down_write:
4975 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4976 1:
4977 lduw [%o0], %g3
4978- add %g3, %g1, %g7
4979+ addcc %g3, %g1, %g7
4980+
4981+#ifdef CONFIG_PAX_REFCOUNT
4982+ tvs %icc, 6
4983+#endif
4984+
4985 cas [%o0], %g3, %g7
4986 cmp %g3, %g7
4987 bne,pn %icc, 1b
4988@@ -77,7 +92,12 @@ __down_write_trylock:
4989 cmp %g3, 0
4990 bne,pn %icc, 2f
4991 mov 0, %o1
4992- add %g3, %g1, %g7
4993+ addcc %g3, %g1, %g7
4994+
4995+#ifdef CONFIG_PAX_REFCOUNT
4996+ tvs %icc, 6
4997+#endif
4998+
4999 cas [%o0], %g3, %g7
5000 cmp %g3, %g7
5001 bne,pn %icc, 1b
5002@@ -90,7 +110,12 @@ __down_write_trylock:
5003 __up_read:
5004 1:
5005 lduw [%o0], %g1
5006- sub %g1, 1, %g7
5007+ subcc %g1, 1, %g7
5008+
5009+#ifdef CONFIG_PAX_REFCOUNT
5010+ tvs %icc, 6
5011+#endif
5012+
5013 cas [%o0], %g1, %g7
5014 cmp %g1, %g7
5015 bne,pn %icc, 1b
5016@@ -118,7 +143,12 @@ __up_write:
5017 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5018 1:
5019 lduw [%o0], %g3
5020- sub %g3, %g1, %g7
5021+ subcc %g3, %g1, %g7
5022+
5023+#ifdef CONFIG_PAX_REFCOUNT
5024+ tvs %icc, 6
5025+#endif
5026+
5027 cas [%o0], %g3, %g7
5028 cmp %g3, %g7
5029 bne,pn %icc, 1b
5030@@ -143,7 +173,12 @@ __downgrade_write:
5031 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5032 1:
5033 lduw [%o0], %g3
5034- sub %g3, %g1, %g7
5035+ subcc %g3, %g1, %g7
5036+
5037+#ifdef CONFIG_PAX_REFCOUNT
5038+ tvs %icc, 6
5039+#endif
5040+
5041 cas [%o0], %g3, %g7
5042 cmp %g3, %g7
5043 bne,pn %icc, 1b
5044diff -urNp linux-2.6.32.45/arch/sparc/Makefile linux-2.6.32.45/arch/sparc/Makefile
5045--- linux-2.6.32.45/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5046+++ linux-2.6.32.45/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5047@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5048 # Export what is needed by arch/sparc/boot/Makefile
5049 export VMLINUX_INIT VMLINUX_MAIN
5050 VMLINUX_INIT := $(head-y) $(init-y)
5051-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5052+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5053 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5054 VMLINUX_MAIN += $(drivers-y) $(net-y)
5055
5056diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_32.c linux-2.6.32.45/arch/sparc/mm/fault_32.c
5057--- linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5058+++ linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5059@@ -21,6 +21,9 @@
5060 #include <linux/interrupt.h>
5061 #include <linux/module.h>
5062 #include <linux/kdebug.h>
5063+#include <linux/slab.h>
5064+#include <linux/pagemap.h>
5065+#include <linux/compiler.h>
5066
5067 #include <asm/system.h>
5068 #include <asm/page.h>
5069@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5070 return safe_compute_effective_address(regs, insn);
5071 }
5072
5073+#ifdef CONFIG_PAX_PAGEEXEC
5074+#ifdef CONFIG_PAX_DLRESOLVE
5075+static void pax_emuplt_close(struct vm_area_struct *vma)
5076+{
5077+ vma->vm_mm->call_dl_resolve = 0UL;
5078+}
5079+
5080+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5081+{
5082+ unsigned int *kaddr;
5083+
5084+ vmf->page = alloc_page(GFP_HIGHUSER);
5085+ if (!vmf->page)
5086+ return VM_FAULT_OOM;
5087+
5088+ kaddr = kmap(vmf->page);
5089+ memset(kaddr, 0, PAGE_SIZE);
5090+ kaddr[0] = 0x9DE3BFA8U; /* save */
5091+ flush_dcache_page(vmf->page);
5092+ kunmap(vmf->page);
5093+ return VM_FAULT_MAJOR;
5094+}
5095+
5096+static const struct vm_operations_struct pax_vm_ops = {
5097+ .close = pax_emuplt_close,
5098+ .fault = pax_emuplt_fault
5099+};
5100+
5101+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5102+{
5103+ int ret;
5104+
5105+ vma->vm_mm = current->mm;
5106+ vma->vm_start = addr;
5107+ vma->vm_end = addr + PAGE_SIZE;
5108+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5109+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5110+ vma->vm_ops = &pax_vm_ops;
5111+
5112+ ret = insert_vm_struct(current->mm, vma);
5113+ if (ret)
5114+ return ret;
5115+
5116+ ++current->mm->total_vm;
5117+ return 0;
5118+}
5119+#endif
5120+
5121+/*
5122+ * PaX: decide what to do with offenders (regs->pc = fault address)
5123+ *
5124+ * returns 1 when task should be killed
5125+ * 2 when patched PLT trampoline was detected
5126+ * 3 when unpatched PLT trampoline was detected
5127+ */
5128+static int pax_handle_fetch_fault(struct pt_regs *regs)
5129+{
5130+
5131+#ifdef CONFIG_PAX_EMUPLT
5132+ int err;
5133+
5134+ do { /* PaX: patched PLT emulation #1 */
5135+ unsigned int sethi1, sethi2, jmpl;
5136+
5137+ err = get_user(sethi1, (unsigned int *)regs->pc);
5138+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5139+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5140+
5141+ if (err)
5142+ break;
5143+
5144+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5145+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5146+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5147+ {
5148+ unsigned int addr;
5149+
5150+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5151+ addr = regs->u_regs[UREG_G1];
5152+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5153+ regs->pc = addr;
5154+ regs->npc = addr+4;
5155+ return 2;
5156+ }
5157+ } while (0);
5158+
5159+ { /* PaX: patched PLT emulation #2 */
5160+ unsigned int ba;
5161+
5162+ err = get_user(ba, (unsigned int *)regs->pc);
5163+
5164+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5165+ unsigned int addr;
5166+
5167+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5168+ regs->pc = addr;
5169+ regs->npc = addr+4;
5170+ return 2;
5171+ }
5172+ }
5173+
5174+ do { /* PaX: patched PLT emulation #3 */
5175+ unsigned int sethi, jmpl, nop;
5176+
5177+ err = get_user(sethi, (unsigned int *)regs->pc);
5178+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5179+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5180+
5181+ if (err)
5182+ break;
5183+
5184+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5185+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5186+ nop == 0x01000000U)
5187+ {
5188+ unsigned int addr;
5189+
5190+ addr = (sethi & 0x003FFFFFU) << 10;
5191+ regs->u_regs[UREG_G1] = addr;
5192+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5193+ regs->pc = addr;
5194+ regs->npc = addr+4;
5195+ return 2;
5196+ }
5197+ } while (0);
5198+
5199+ do { /* PaX: unpatched PLT emulation step 1 */
5200+ unsigned int sethi, ba, nop;
5201+
5202+ err = get_user(sethi, (unsigned int *)regs->pc);
5203+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5204+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5205+
5206+ if (err)
5207+ break;
5208+
5209+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5210+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5211+ nop == 0x01000000U)
5212+ {
5213+ unsigned int addr, save, call;
5214+
5215+ if ((ba & 0xFFC00000U) == 0x30800000U)
5216+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5217+ else
5218+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5219+
5220+ err = get_user(save, (unsigned int *)addr);
5221+ err |= get_user(call, (unsigned int *)(addr+4));
5222+ err |= get_user(nop, (unsigned int *)(addr+8));
5223+ if (err)
5224+ break;
5225+
5226+#ifdef CONFIG_PAX_DLRESOLVE
5227+ if (save == 0x9DE3BFA8U &&
5228+ (call & 0xC0000000U) == 0x40000000U &&
5229+ nop == 0x01000000U)
5230+ {
5231+ struct vm_area_struct *vma;
5232+ unsigned long call_dl_resolve;
5233+
5234+ down_read(&current->mm->mmap_sem);
5235+ call_dl_resolve = current->mm->call_dl_resolve;
5236+ up_read(&current->mm->mmap_sem);
5237+ if (likely(call_dl_resolve))
5238+ goto emulate;
5239+
5240+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5241+
5242+ down_write(&current->mm->mmap_sem);
5243+ if (current->mm->call_dl_resolve) {
5244+ call_dl_resolve = current->mm->call_dl_resolve;
5245+ up_write(&current->mm->mmap_sem);
5246+ if (vma)
5247+ kmem_cache_free(vm_area_cachep, vma);
5248+ goto emulate;
5249+ }
5250+
5251+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5252+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5253+ up_write(&current->mm->mmap_sem);
5254+ if (vma)
5255+ kmem_cache_free(vm_area_cachep, vma);
5256+ return 1;
5257+ }
5258+
5259+ if (pax_insert_vma(vma, call_dl_resolve)) {
5260+ up_write(&current->mm->mmap_sem);
5261+ kmem_cache_free(vm_area_cachep, vma);
5262+ return 1;
5263+ }
5264+
5265+ current->mm->call_dl_resolve = call_dl_resolve;
5266+ up_write(&current->mm->mmap_sem);
5267+
5268+emulate:
5269+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5270+ regs->pc = call_dl_resolve;
5271+ regs->npc = addr+4;
5272+ return 3;
5273+ }
5274+#endif
5275+
5276+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5277+ if ((save & 0xFFC00000U) == 0x05000000U &&
5278+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5279+ nop == 0x01000000U)
5280+ {
5281+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5282+ regs->u_regs[UREG_G2] = addr + 4;
5283+ addr = (save & 0x003FFFFFU) << 10;
5284+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5285+ regs->pc = addr;
5286+ regs->npc = addr+4;
5287+ return 3;
5288+ }
5289+ }
5290+ } while (0);
5291+
5292+ do { /* PaX: unpatched PLT emulation step 2 */
5293+ unsigned int save, call, nop;
5294+
5295+ err = get_user(save, (unsigned int *)(regs->pc-4));
5296+ err |= get_user(call, (unsigned int *)regs->pc);
5297+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
5298+ if (err)
5299+ break;
5300+
5301+ if (save == 0x9DE3BFA8U &&
5302+ (call & 0xC0000000U) == 0x40000000U &&
5303+ nop == 0x01000000U)
5304+ {
5305+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5306+
5307+ regs->u_regs[UREG_RETPC] = regs->pc;
5308+ regs->pc = dl_resolve;
5309+ regs->npc = dl_resolve+4;
5310+ return 3;
5311+ }
5312+ } while (0);
5313+#endif
5314+
5315+ return 1;
5316+}
5317+
5318+void pax_report_insns(void *pc, void *sp)
5319+{
5320+ unsigned long i;
5321+
5322+ printk(KERN_ERR "PAX: bytes at PC: ");
5323+ for (i = 0; i < 8; i++) {
5324+ unsigned int c;
5325+ if (get_user(c, (unsigned int *)pc+i))
5326+ printk(KERN_CONT "???????? ");
5327+ else
5328+ printk(KERN_CONT "%08x ", c);
5329+ }
5330+ printk("\n");
5331+}
5332+#endif
5333+
5334 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5335 unsigned long address)
5336 {
5337@@ -231,6 +495,24 @@ good_area:
5338 if(!(vma->vm_flags & VM_WRITE))
5339 goto bad_area;
5340 } else {
5341+
5342+#ifdef CONFIG_PAX_PAGEEXEC
5343+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5344+ up_read(&mm->mmap_sem);
5345+ switch (pax_handle_fetch_fault(regs)) {
5346+
5347+#ifdef CONFIG_PAX_EMUPLT
5348+ case 2:
5349+ case 3:
5350+ return;
5351+#endif
5352+
5353+ }
5354+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5355+ do_group_exit(SIGKILL);
5356+ }
5357+#endif
5358+
5359 /* Allow reads even for write-only mappings */
5360 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5361 goto bad_area;
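
pax_handle_fetch_fault() runs when PAGEEXEC turns an instruction fetch from a non-executable user page into a fault: the matched sequences are the well-known SPARC PLT stubs (patched by the dynamic linker, or still unpatched under lazy binding), and instead of letting the fetch proceed the handler emulates the control transfer they encode, returning 2 or 3 so the switch in do_sparc_fault() above simply resumes the task. The bit tests all reduce to two decodes; the helpers below are illustrative only and their names are not the patch's:

	static inline unsigned int sethi_imm(unsigned int insn)
	{
		return (insn & 0x003fffffU) << 10;	/* sethi %hi(X), rd  sets rd = X & ~0x3ff */
	}

	static inline int simm13(unsigned int insn)
	{
		int imm = insn & 0x1fff;
		return (imm ^ 0x1000) - 0x1000;		/* sign-extend the 13-bit immediate */
	}

	/* e.g. patched PLT pattern #3 above: target = sethi_imm(sethi) + simm13(jmpl),
	 * i.e. sethi %hi(T), %g1; jmpl %g1 + %lo(T), %g0 transfers control to T */

The fault_64.c version below repeats the same decoding with 64-bit sign extension and masks the computed address to 32 bits for TIF_32BIT tasks.
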
5362diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_64.c linux-2.6.32.45/arch/sparc/mm/fault_64.c
5363--- linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5364+++ linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5365@@ -20,6 +20,9 @@
5366 #include <linux/kprobes.h>
5367 #include <linux/kdebug.h>
5368 #include <linux/percpu.h>
5369+#include <linux/slab.h>
5370+#include <linux/pagemap.h>
5371+#include <linux/compiler.h>
5372
5373 #include <asm/page.h>
5374 #include <asm/pgtable.h>
5375@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5376 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5377 regs->tpc);
5378 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5379- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5380+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5381 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5382 dump_stack();
5383 unhandled_fault(regs->tpc, current, regs);
5384@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5385 show_regs(regs);
5386 }
5387
5388+#ifdef CONFIG_PAX_PAGEEXEC
5389+#ifdef CONFIG_PAX_DLRESOLVE
5390+static void pax_emuplt_close(struct vm_area_struct *vma)
5391+{
5392+ vma->vm_mm->call_dl_resolve = 0UL;
5393+}
5394+
5395+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5396+{
5397+ unsigned int *kaddr;
5398+
5399+ vmf->page = alloc_page(GFP_HIGHUSER);
5400+ if (!vmf->page)
5401+ return VM_FAULT_OOM;
5402+
5403+ kaddr = kmap(vmf->page);
5404+ memset(kaddr, 0, PAGE_SIZE);
5405+ kaddr[0] = 0x9DE3BFA8U; /* save */
5406+ flush_dcache_page(vmf->page);
5407+ kunmap(vmf->page);
5408+ return VM_FAULT_MAJOR;
5409+}
5410+
5411+static const struct vm_operations_struct pax_vm_ops = {
5412+ .close = pax_emuplt_close,
5413+ .fault = pax_emuplt_fault
5414+};
5415+
5416+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5417+{
5418+ int ret;
5419+
5420+ vma->vm_mm = current->mm;
5421+ vma->vm_start = addr;
5422+ vma->vm_end = addr + PAGE_SIZE;
5423+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5424+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5425+ vma->vm_ops = &pax_vm_ops;
5426+
5427+ ret = insert_vm_struct(current->mm, vma);
5428+ if (ret)
5429+ return ret;
5430+
5431+ ++current->mm->total_vm;
5432+ return 0;
5433+}
5434+#endif
5435+
5436+/*
5437+ * PaX: decide what to do with offenders (regs->tpc = fault address)
5438+ *
5439+ * returns 1 when task should be killed
5440+ * 2 when patched PLT trampoline was detected
5441+ * 3 when unpatched PLT trampoline was detected
5442+ */
5443+static int pax_handle_fetch_fault(struct pt_regs *regs)
5444+{
5445+
5446+#ifdef CONFIG_PAX_EMUPLT
5447+ int err;
5448+
5449+ do { /* PaX: patched PLT emulation #1 */
5450+ unsigned int sethi1, sethi2, jmpl;
5451+
5452+ err = get_user(sethi1, (unsigned int *)regs->tpc);
5453+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5454+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5455+
5456+ if (err)
5457+ break;
5458+
5459+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5460+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5461+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5462+ {
5463+ unsigned long addr;
5464+
5465+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5466+ addr = regs->u_regs[UREG_G1];
5467+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5468+
5469+ if (test_thread_flag(TIF_32BIT))
5470+ addr &= 0xFFFFFFFFUL;
5471+
5472+ regs->tpc = addr;
5473+ regs->tnpc = addr+4;
5474+ return 2;
5475+ }
5476+ } while (0);
5477+
5478+ { /* PaX: patched PLT emulation #2 */
5479+ unsigned int ba;
5480+
5481+ err = get_user(ba, (unsigned int *)regs->tpc);
5482+
5483+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5484+ unsigned long addr;
5485+
5486+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5487+
5488+ if (test_thread_flag(TIF_32BIT))
5489+ addr &= 0xFFFFFFFFUL;
5490+
5491+ regs->tpc = addr;
5492+ regs->tnpc = addr+4;
5493+ return 2;
5494+ }
5495+ }
5496+
5497+ do { /* PaX: patched PLT emulation #3 */
5498+ unsigned int sethi, jmpl, nop;
5499+
5500+ err = get_user(sethi, (unsigned int *)regs->tpc);
5501+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5502+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5503+
5504+ if (err)
5505+ break;
5506+
5507+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5508+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5509+ nop == 0x01000000U)
5510+ {
5511+ unsigned long addr;
5512+
5513+ addr = (sethi & 0x003FFFFFU) << 10;
5514+ regs->u_regs[UREG_G1] = addr;
5515+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5516+
5517+ if (test_thread_flag(TIF_32BIT))
5518+ addr &= 0xFFFFFFFFUL;
5519+
5520+ regs->tpc = addr;
5521+ regs->tnpc = addr+4;
5522+ return 2;
5523+ }
5524+ } while (0);
5525+
5526+ do { /* PaX: patched PLT emulation #4 */
5527+ unsigned int sethi, mov1, call, mov2;
5528+
5529+ err = get_user(sethi, (unsigned int *)regs->tpc);
5530+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5531+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5532+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5533+
5534+ if (err)
5535+ break;
5536+
5537+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5538+ mov1 == 0x8210000FU &&
5539+ (call & 0xC0000000U) == 0x40000000U &&
5540+ mov2 == 0x9E100001U)
5541+ {
5542+ unsigned long addr;
5543+
5544+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5545+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5546+
5547+ if (test_thread_flag(TIF_32BIT))
5548+ addr &= 0xFFFFFFFFUL;
5549+
5550+ regs->tpc = addr;
5551+ regs->tnpc = addr+4;
5552+ return 2;
5553+ }
5554+ } while (0);
5555+
5556+ do { /* PaX: patched PLT emulation #5 */
5557+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5558+
5559+ err = get_user(sethi, (unsigned int *)regs->tpc);
5560+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5561+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5562+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5563+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5564+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5565+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5566+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5567+
5568+ if (err)
5569+ break;
5570+
5571+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5572+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5573+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5574+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5575+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5576+ sllx == 0x83287020U &&
5577+ jmpl == 0x81C04005U &&
5578+ nop == 0x01000000U)
5579+ {
5580+ unsigned long addr;
5581+
5582+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5583+ regs->u_regs[UREG_G1] <<= 32;
5584+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5585+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5586+ regs->tpc = addr;
5587+ regs->tnpc = addr+4;
5588+ return 2;
5589+ }
5590+ } while (0);
5591+
5592+ do { /* PaX: patched PLT emulation #6 */
5593+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5594+
5595+ err = get_user(sethi, (unsigned int *)regs->tpc);
5596+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5597+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5598+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5599+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5600+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5601+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5602+
5603+ if (err)
5604+ break;
5605+
5606+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5607+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5608+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5609+ sllx == 0x83287020U &&
5610+ (or & 0xFFFFE000U) == 0x8A116000U &&
5611+ jmpl == 0x81C04005U &&
5612+ nop == 0x01000000U)
5613+ {
5614+ unsigned long addr;
5615+
5616+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5617+ regs->u_regs[UREG_G1] <<= 32;
5618+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5619+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5620+ regs->tpc = addr;
5621+ regs->tnpc = addr+4;
5622+ return 2;
5623+ }
5624+ } while (0);
5625+
5626+ do { /* PaX: unpatched PLT emulation step 1 */
5627+ unsigned int sethi, ba, nop;
5628+
5629+ err = get_user(sethi, (unsigned int *)regs->tpc);
5630+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5631+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5632+
5633+ if (err)
5634+ break;
5635+
5636+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5637+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5638+ nop == 0x01000000U)
5639+ {
5640+ unsigned long addr;
5641+ unsigned int save, call;
5642+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5643+
5644+ if ((ba & 0xFFC00000U) == 0x30800000U)
5645+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5646+ else
5647+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5648+
5649+ if (test_thread_flag(TIF_32BIT))
5650+ addr &= 0xFFFFFFFFUL;
5651+
5652+ err = get_user(save, (unsigned int *)addr);
5653+ err |= get_user(call, (unsigned int *)(addr+4));
5654+ err |= get_user(nop, (unsigned int *)(addr+8));
5655+ if (err)
5656+ break;
5657+
5658+#ifdef CONFIG_PAX_DLRESOLVE
5659+ if (save == 0x9DE3BFA8U &&
5660+ (call & 0xC0000000U) == 0x40000000U &&
5661+ nop == 0x01000000U)
5662+ {
5663+ struct vm_area_struct *vma;
5664+ unsigned long call_dl_resolve;
5665+
5666+ down_read(&current->mm->mmap_sem);
5667+ call_dl_resolve = current->mm->call_dl_resolve;
5668+ up_read(&current->mm->mmap_sem);
5669+ if (likely(call_dl_resolve))
5670+ goto emulate;
5671+
5672+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5673+
5674+ down_write(&current->mm->mmap_sem);
5675+ if (current->mm->call_dl_resolve) {
5676+ call_dl_resolve = current->mm->call_dl_resolve;
5677+ up_write(&current->mm->mmap_sem);
5678+ if (vma)
5679+ kmem_cache_free(vm_area_cachep, vma);
5680+ goto emulate;
5681+ }
5682+
5683+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5684+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5685+ up_write(&current->mm->mmap_sem);
5686+ if (vma)
5687+ kmem_cache_free(vm_area_cachep, vma);
5688+ return 1;
5689+ }
5690+
5691+ if (pax_insert_vma(vma, call_dl_resolve)) {
5692+ up_write(&current->mm->mmap_sem);
5693+ kmem_cache_free(vm_area_cachep, vma);
5694+ return 1;
5695+ }
5696+
5697+ current->mm->call_dl_resolve = call_dl_resolve;
5698+ up_write(&current->mm->mmap_sem);
5699+
5700+emulate:
5701+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5702+ regs->tpc = call_dl_resolve;
5703+ regs->tnpc = addr+4;
5704+ return 3;
5705+ }
5706+#endif
5707+
5708+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5709+ if ((save & 0xFFC00000U) == 0x05000000U &&
5710+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5711+ nop == 0x01000000U)
5712+ {
5713+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5714+ regs->u_regs[UREG_G2] = addr + 4;
5715+ addr = (save & 0x003FFFFFU) << 10;
5716+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5717+
5718+ if (test_thread_flag(TIF_32BIT))
5719+ addr &= 0xFFFFFFFFUL;
5720+
5721+ regs->tpc = addr;
5722+ regs->tnpc = addr+4;
5723+ return 3;
5724+ }
5725+
5726+ /* PaX: 64-bit PLT stub */
5727+ err = get_user(sethi1, (unsigned int *)addr);
5728+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5729+ err |= get_user(or1, (unsigned int *)(addr+8));
5730+ err |= get_user(or2, (unsigned int *)(addr+12));
5731+ err |= get_user(sllx, (unsigned int *)(addr+16));
5732+ err |= get_user(add, (unsigned int *)(addr+20));
5733+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5734+ err |= get_user(nop, (unsigned int *)(addr+28));
5735+ if (err)
5736+ break;
5737+
5738+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5739+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5740+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5741+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5742+ sllx == 0x89293020U &&
5743+ add == 0x8A010005U &&
5744+ jmpl == 0x89C14000U &&
5745+ nop == 0x01000000U)
5746+ {
5747+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5748+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5749+ regs->u_regs[UREG_G4] <<= 32;
5750+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5751+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5752+ regs->u_regs[UREG_G4] = addr + 24;
5753+ addr = regs->u_regs[UREG_G5];
5754+ regs->tpc = addr;
5755+ regs->tnpc = addr+4;
5756+ return 3;
5757+ }
5758+ }
5759+ } while (0);
5760+
5761+#ifdef CONFIG_PAX_DLRESOLVE
5762+ do { /* PaX: unpatched PLT emulation step 2 */
5763+ unsigned int save, call, nop;
5764+
5765+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5766+ err |= get_user(call, (unsigned int *)regs->tpc);
5767+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5768+ if (err)
5769+ break;
5770+
5771+ if (save == 0x9DE3BFA8U &&
5772+ (call & 0xC0000000U) == 0x40000000U &&
5773+ nop == 0x01000000U)
5774+ {
5775+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5776+
5777+ if (test_thread_flag(TIF_32BIT))
5778+ dl_resolve &= 0xFFFFFFFFUL;
5779+
5780+ regs->u_regs[UREG_RETPC] = regs->tpc;
5781+ regs->tpc = dl_resolve;
5782+ regs->tnpc = dl_resolve+4;
5783+ return 3;
5784+ }
5785+ } while (0);
5786+#endif
5787+
5788+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5789+ unsigned int sethi, ba, nop;
5790+
5791+ err = get_user(sethi, (unsigned int *)regs->tpc);
5792+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5793+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5794+
5795+ if (err)
5796+ break;
5797+
5798+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5799+ (ba & 0xFFF00000U) == 0x30600000U &&
5800+ nop == 0x01000000U)
5801+ {
5802+ unsigned long addr;
5803+
5804+ addr = (sethi & 0x003FFFFFU) << 10;
5805+ regs->u_regs[UREG_G1] = addr;
5806+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5807+
5808+ if (test_thread_flag(TIF_32BIT))
5809+ addr &= 0xFFFFFFFFUL;
5810+
5811+ regs->tpc = addr;
5812+ regs->tnpc = addr+4;
5813+ return 2;
5814+ }
5815+ } while (0);
5816+
5817+#endif
5818+
5819+ return 1;
5820+}
5821+
5822+void pax_report_insns(void *pc, void *sp)
5823+{
5824+ unsigned long i;
5825+
5826+ printk(KERN_ERR "PAX: bytes at PC: ");
5827+ for (i = 0; i < 8; i++) {
5828+ unsigned int c;
5829+ if (get_user(c, (unsigned int *)pc+i))
5830+ printk(KERN_CONT "???????? ");
5831+ else
5832+ printk(KERN_CONT "%08x ", c);
5833+ }
5834+ printk("\n");
5835+}
5836+#endif
5837+
5838 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5839 {
5840 struct mm_struct *mm = current->mm;
5841@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5842 if (!vma)
5843 goto bad_area;
5844
5845+#ifdef CONFIG_PAX_PAGEEXEC
5846+ /* PaX: detect ITLB misses on non-exec pages */
5847+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5848+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5849+ {
5850+ if (address != regs->tpc)
5851+ goto good_area;
5852+
5853+ up_read(&mm->mmap_sem);
5854+ switch (pax_handle_fetch_fault(regs)) {
5855+
5856+#ifdef CONFIG_PAX_EMUPLT
5857+ case 2:
5858+ case 3:
5859+ return;
5860+#endif
5861+
5862+ }
5863+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5864+ do_group_exit(SIGKILL);
5865+ }
5866+#endif
5867+
5868 /* Pure DTLB misses do not tell us whether the fault causing
5869 * load/store/atomic was a write or not, it only says that there
5870 * was no match. So in such a case we (carefully) read the
5871diff -urNp linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c
5872--- linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5873+++ linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5874@@ -69,7 +69,7 @@ full_search:
5875 }
5876 return -ENOMEM;
5877 }
5878- if (likely(!vma || addr + len <= vma->vm_start)) {
5879+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5880 /*
5881 * Remember the place where we stopped the search:
5882 */
5883@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5884 /* make sure it can fit in the remaining address space */
5885 if (likely(addr > len)) {
5886 vma = find_vma(mm, addr-len);
5887- if (!vma || addr <= vma->vm_start) {
5888+ if (check_heap_stack_gap(vma, addr - len, len)) {
5889 /* remember the address as a hint for next time */
5890 return (mm->free_area_cache = addr-len);
5891 }
5892@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5893 if (unlikely(mm->mmap_base < len))
5894 goto bottomup;
5895
5896- addr = (mm->mmap_base-len) & HPAGE_MASK;
5897+ addr = mm->mmap_base - len;
5898
5899 do {
5900+ addr &= HPAGE_MASK;
5901 /*
5902 * Lookup failure means no vma is above this address,
5903 * else if new region fits below vma->vm_start,
5904 * return with success:
5905 */
5906 vma = find_vma(mm, addr);
5907- if (likely(!vma || addr+len <= vma->vm_start)) {
5908+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5909 /* remember the address as a hint for next time */
5910 return (mm->free_area_cache = addr);
5911 }
5912@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5913 mm->cached_hole_size = vma->vm_start - addr;
5914
5915 /* try just below the current vma->vm_start */
5916- addr = (vma->vm_start-len) & HPAGE_MASK;
5917- } while (likely(len < vma->vm_start));
5918+ addr = skip_heap_stack_gap(vma, len);
5919+ } while (!IS_ERR_VALUE(addr));
5920
5921 bottomup:
5922 /*
5923@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5924 if (addr) {
5925 addr = ALIGN(addr, HPAGE_SIZE);
5926 vma = find_vma(mm, addr);
5927- if (task_size - len >= addr &&
5928- (!vma || addr + len <= vma->vm_start))
5929+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5930 return addr;
5931 }
5932 if (mm->get_unmapped_area == arch_get_unmapped_area)
5933diff -urNp linux-2.6.32.45/arch/sparc/mm/init_32.c linux-2.6.32.45/arch/sparc/mm/init_32.c
5934--- linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5935+++ linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5936@@ -317,6 +317,9 @@ extern void device_scan(void);
5937 pgprot_t PAGE_SHARED __read_mostly;
5938 EXPORT_SYMBOL(PAGE_SHARED);
5939
5940+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5941+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5942+
5943 void __init paging_init(void)
5944 {
5945 switch(sparc_cpu_model) {
5946@@ -345,17 +348,17 @@ void __init paging_init(void)
5947
5948 /* Initialize the protection map with non-constant, MMU dependent values. */
5949 protection_map[0] = PAGE_NONE;
5950- protection_map[1] = PAGE_READONLY;
5951- protection_map[2] = PAGE_COPY;
5952- protection_map[3] = PAGE_COPY;
5953+ protection_map[1] = PAGE_READONLY_NOEXEC;
5954+ protection_map[2] = PAGE_COPY_NOEXEC;
5955+ protection_map[3] = PAGE_COPY_NOEXEC;
5956 protection_map[4] = PAGE_READONLY;
5957 protection_map[5] = PAGE_READONLY;
5958 protection_map[6] = PAGE_COPY;
5959 protection_map[7] = PAGE_COPY;
5960 protection_map[8] = PAGE_NONE;
5961- protection_map[9] = PAGE_READONLY;
5962- protection_map[10] = PAGE_SHARED;
5963- protection_map[11] = PAGE_SHARED;
5964+ protection_map[9] = PAGE_READONLY_NOEXEC;
5965+ protection_map[10] = PAGE_SHARED_NOEXEC;
5966+ protection_map[11] = PAGE_SHARED_NOEXEC;
5967 protection_map[12] = PAGE_READONLY;
5968 protection_map[13] = PAGE_READONLY;
5969 protection_map[14] = PAGE_SHARED;
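
protection_map[] is indexed by the low four permission bits of vm_flags (bit 0 VM_READ, bit 1 VM_WRITE, bit 2 VM_EXEC, bit 3 VM_SHARED), roughly as in this sketch of vm_get_page_prot():

	static pgprot_t prot_for(unsigned long vm_flags)
	{
		return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}

So the entries switched to the *_NOEXEC variants (1-3 and 9-11) are exactly the private and shared combinations requested without PROT_EXEC; under PAGEEXEC they now resolve to protections with execute permission withheld, including the SRMMU_*_NOEXEC values wired up in the srmmu.c hunk below.
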
5970diff -urNp linux-2.6.32.45/arch/sparc/mm/Makefile linux-2.6.32.45/arch/sparc/mm/Makefile
5971--- linux-2.6.32.45/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
5972+++ linux-2.6.32.45/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
5973@@ -2,7 +2,7 @@
5974 #
5975
5976 asflags-y := -ansi
5977-ccflags-y := -Werror
5978+#ccflags-y := -Werror
5979
5980 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5981 obj-y += fault_$(BITS).o
5982diff -urNp linux-2.6.32.45/arch/sparc/mm/srmmu.c linux-2.6.32.45/arch/sparc/mm/srmmu.c
5983--- linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
5984+++ linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
5985@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5986 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5987 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5988 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5989+
5990+#ifdef CONFIG_PAX_PAGEEXEC
5991+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5992+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5993+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5994+#endif
5995+
5996 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5997 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5998
5999diff -urNp linux-2.6.32.45/arch/um/include/asm/kmap_types.h linux-2.6.32.45/arch/um/include/asm/kmap_types.h
6000--- linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6001+++ linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6002@@ -23,6 +23,7 @@ enum km_type {
6003 KM_IRQ1,
6004 KM_SOFTIRQ0,
6005 KM_SOFTIRQ1,
6006+ KM_CLEARPAGE,
6007 KM_TYPE_NR
6008 };
6009
6010diff -urNp linux-2.6.32.45/arch/um/include/asm/page.h linux-2.6.32.45/arch/um/include/asm/page.h
6011--- linux-2.6.32.45/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6012+++ linux-2.6.32.45/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6013@@ -14,6 +14,9 @@
6014 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6015 #define PAGE_MASK (~(PAGE_SIZE-1))
6016
6017+#define ktla_ktva(addr) (addr)
6018+#define ktva_ktla(addr) (addr)
6019+
6020 #ifndef __ASSEMBLY__
6021
6022 struct page;
6023diff -urNp linux-2.6.32.45/arch/um/kernel/process.c linux-2.6.32.45/arch/um/kernel/process.c
6024--- linux-2.6.32.45/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6025+++ linux-2.6.32.45/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6026@@ -393,22 +393,6 @@ int singlestepping(void * t)
6027 return 2;
6028 }
6029
6030-/*
6031- * Only x86 and x86_64 have an arch_align_stack().
6032- * All other arches have "#define arch_align_stack(x) (x)"
6033- * in their asm/system.h
6034- * As this is included in UML from asm-um/system-generic.h,
6035- * we can use it to behave as the subarch does.
6036- */
6037-#ifndef arch_align_stack
6038-unsigned long arch_align_stack(unsigned long sp)
6039-{
6040- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6041- sp -= get_random_int() % 8192;
6042- return sp & ~0xf;
6043-}
6044-#endif
6045-
6046 unsigned long get_wchan(struct task_struct *p)
6047 {
6048 unsigned long stack_page, sp, ip;
6049diff -urNp linux-2.6.32.45/arch/um/sys-i386/syscalls.c linux-2.6.32.45/arch/um/sys-i386/syscalls.c
6050--- linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6051+++ linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6052@@ -11,6 +11,21 @@
6053 #include "asm/uaccess.h"
6054 #include "asm/unistd.h"
6055
6056+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6057+{
6058+ unsigned long pax_task_size = TASK_SIZE;
6059+
6060+#ifdef CONFIG_PAX_SEGMEXEC
6061+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6062+ pax_task_size = SEGMEXEC_TASK_SIZE;
6063+#endif
6064+
6065+ if (len > pax_task_size || addr > pax_task_size - len)
6066+ return -EINVAL;
6067+
6068+ return 0;
6069+}
6070+
6071 /*
6072 * Perform the select(nd, in, out, ex, tv) and mmap() system
6073 * calls. Linux/i386 didn't use to be able to handle more than
6074diff -urNp linux-2.6.32.45/arch/x86/boot/bitops.h linux-2.6.32.45/arch/x86/boot/bitops.h
6075--- linux-2.6.32.45/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6076+++ linux-2.6.32.45/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6077@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6078 u8 v;
6079 const u32 *p = (const u32 *)addr;
6080
6081- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6082+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6083 return v;
6084 }
6085
6086@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6087
6088 static inline void set_bit(int nr, void *addr)
6089 {
6090- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6091+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6092 }
6093
6094 #endif /* BOOT_BITOPS_H */
6095diff -urNp linux-2.6.32.45/arch/x86/boot/boot.h linux-2.6.32.45/arch/x86/boot/boot.h
6096--- linux-2.6.32.45/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6097+++ linux-2.6.32.45/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6098@@ -82,7 +82,7 @@ static inline void io_delay(void)
6099 static inline u16 ds(void)
6100 {
6101 u16 seg;
6102- asm("movw %%ds,%0" : "=rm" (seg));
6103+ asm volatile("movw %%ds,%0" : "=rm" (seg));
6104 return seg;
6105 }
6106
6107@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6108 static inline int memcmp(const void *s1, const void *s2, size_t len)
6109 {
6110 u8 diff;
6111- asm("repe; cmpsb; setnz %0"
6112+ asm volatile("repe; cmpsb; setnz %0"
6113 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6114 return diff;
6115 }
6116diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_32.S linux-2.6.32.45/arch/x86/boot/compressed/head_32.S
6117--- linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6118+++ linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6119@@ -76,7 +76,7 @@ ENTRY(startup_32)
6120 notl %eax
6121 andl %eax, %ebx
6122 #else
6123- movl $LOAD_PHYSICAL_ADDR, %ebx
6124+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6125 #endif
6126
6127 /* Target address to relocate to for decompression */
6128@@ -149,7 +149,7 @@ relocated:
6129 * and where it was actually loaded.
6130 */
6131 movl %ebp, %ebx
6132- subl $LOAD_PHYSICAL_ADDR, %ebx
6133+ subl $____LOAD_PHYSICAL_ADDR, %ebx
6134 jz 2f /* Nothing to be done if loaded at compiled addr. */
6135 /*
6136 * Process relocations.
6137@@ -157,8 +157,7 @@ relocated:
6138
6139 1: subl $4, %edi
6140 movl (%edi), %ecx
6141- testl %ecx, %ecx
6142- jz 2f
6143+ jecxz 2f
6144 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6145 jmp 1b
6146 2:
6147diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_64.S linux-2.6.32.45/arch/x86/boot/compressed/head_64.S
6148--- linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6149+++ linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6150@@ -91,7 +91,7 @@ ENTRY(startup_32)
6151 notl %eax
6152 andl %eax, %ebx
6153 #else
6154- movl $LOAD_PHYSICAL_ADDR, %ebx
6155+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6156 #endif
6157
6158 /* Target address to relocate to for decompression */
6159@@ -183,7 +183,7 @@ no_longmode:
6160 hlt
6161 jmp 1b
6162
6163-#include "../../kernel/verify_cpu_64.S"
6164+#include "../../kernel/verify_cpu.S"
6165
6166 /*
6167 * Be careful here startup_64 needs to be at a predictable
6168@@ -234,7 +234,7 @@ ENTRY(startup_64)
6169 notq %rax
6170 andq %rax, %rbp
6171 #else
6172- movq $LOAD_PHYSICAL_ADDR, %rbp
6173+ movq $____LOAD_PHYSICAL_ADDR, %rbp
6174 #endif
6175
6176 /* Target address to relocate to for decompression */
6177diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/Makefile linux-2.6.32.45/arch/x86/boot/compressed/Makefile
6178--- linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-03-27 14:31:47.000000000 -0400
6179+++ linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-08-07 14:38:34.000000000 -0400
6180@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
6181 KBUILD_CFLAGS += $(cflags-y)
6182 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6183 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6184+ifdef CONSTIFY_PLUGIN
6185+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6186+endif
6187
6188 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6189 GCOV_PROFILE := n
6190diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/misc.c linux-2.6.32.45/arch/x86/boot/compressed/misc.c
6191--- linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6192+++ linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6193@@ -288,7 +288,7 @@ static void parse_elf(void *output)
6194 case PT_LOAD:
6195 #ifdef CONFIG_RELOCATABLE
6196 dest = output;
6197- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6198+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6199 #else
6200 dest = (void *)(phdr->p_paddr);
6201 #endif
6202@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6203 error("Destination address too large");
6204 #endif
6205 #ifndef CONFIG_RELOCATABLE
6206- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6207+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6208 error("Wrong destination address");
6209 #endif
6210
6211diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c
6212--- linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6213+++ linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6214@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6215
6216 offs = (olen > ilen) ? olen - ilen : 0;
6217 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6218- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6219+ offs += 64*1024; /* Add 64K bytes slack */
6220 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6221
6222 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6223diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/relocs.c linux-2.6.32.45/arch/x86/boot/compressed/relocs.c
6224--- linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6225+++ linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6226@@ -10,8 +10,11 @@
6227 #define USE_BSD
6228 #include <endian.h>
6229
6230+#include "../../../../include/linux/autoconf.h"
6231+
6232 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6233 static Elf32_Ehdr ehdr;
6234+static Elf32_Phdr *phdr;
6235 static unsigned long reloc_count, reloc_idx;
6236 static unsigned long *relocs;
6237
6238@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6239
6240 static int is_safe_abs_reloc(const char* sym_name)
6241 {
6242- int i;
6243+ unsigned int i;
6244
6245 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6246 if (!strcmp(sym_name, safe_abs_relocs[i]))
6247@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6248 }
6249 }
6250
6251+static void read_phdrs(FILE *fp)
6252+{
6253+ unsigned int i;
6254+
6255+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6256+ if (!phdr) {
6257+ die("Unable to allocate %d program headers\n",
6258+ ehdr.e_phnum);
6259+ }
6260+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6261+ die("Seek to %d failed: %s\n",
6262+ ehdr.e_phoff, strerror(errno));
6263+ }
6264+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6265+ die("Cannot read ELF program headers: %s\n",
6266+ strerror(errno));
6267+ }
6268+ for(i = 0; i < ehdr.e_phnum; i++) {
6269+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6270+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6271+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6272+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6273+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6274+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6275+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6276+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6277+ }
6278+
6279+}
6280+
6281 static void read_shdrs(FILE *fp)
6282 {
6283- int i;
6284+ unsigned int i;
6285 Elf32_Shdr shdr;
6286
6287 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6288@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6289
6290 static void read_strtabs(FILE *fp)
6291 {
6292- int i;
6293+ unsigned int i;
6294 for (i = 0; i < ehdr.e_shnum; i++) {
6295 struct section *sec = &secs[i];
6296 if (sec->shdr.sh_type != SHT_STRTAB) {
6297@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6298
6299 static void read_symtabs(FILE *fp)
6300 {
6301- int i,j;
6302+ unsigned int i,j;
6303 for (i = 0; i < ehdr.e_shnum; i++) {
6304 struct section *sec = &secs[i];
6305 if (sec->shdr.sh_type != SHT_SYMTAB) {
6306@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6307
6308 static void read_relocs(FILE *fp)
6309 {
6310- int i,j;
6311+ unsigned int i,j;
6312+ uint32_t base;
6313+
6314 for (i = 0; i < ehdr.e_shnum; i++) {
6315 struct section *sec = &secs[i];
6316 if (sec->shdr.sh_type != SHT_REL) {
6317@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6318 die("Cannot read symbol table: %s\n",
6319 strerror(errno));
6320 }
6321+ base = 0;
6322+ for (j = 0; j < ehdr.e_phnum; j++) {
6323+ if (phdr[j].p_type != PT_LOAD )
6324+ continue;
6325+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6326+ continue;
6327+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6328+ break;
6329+ }
6330 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6331 Elf32_Rel *rel = &sec->reltab[j];
6332- rel->r_offset = elf32_to_cpu(rel->r_offset);
6333+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6334 rel->r_info = elf32_to_cpu(rel->r_info);
6335 }
6336 }
6337@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6338
6339 static void print_absolute_symbols(void)
6340 {
6341- int i;
6342+ unsigned int i;
6343 printf("Absolute symbols\n");
6344 printf(" Num: Value Size Type Bind Visibility Name\n");
6345 for (i = 0; i < ehdr.e_shnum; i++) {
6346 struct section *sec = &secs[i];
6347 char *sym_strtab;
6348 Elf32_Sym *sh_symtab;
6349- int j;
6350+ unsigned int j;
6351
6352 if (sec->shdr.sh_type != SHT_SYMTAB) {
6353 continue;
6354@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6355
6356 static void print_absolute_relocs(void)
6357 {
6358- int i, printed = 0;
6359+ unsigned int i, printed = 0;
6360
6361 for (i = 0; i < ehdr.e_shnum; i++) {
6362 struct section *sec = &secs[i];
6363 struct section *sec_applies, *sec_symtab;
6364 char *sym_strtab;
6365 Elf32_Sym *sh_symtab;
6366- int j;
6367+ unsigned int j;
6368 if (sec->shdr.sh_type != SHT_REL) {
6369 continue;
6370 }
6371@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6372
6373 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6374 {
6375- int i;
6376+ unsigned int i;
6377 /* Walk through the relocations */
6378 for (i = 0; i < ehdr.e_shnum; i++) {
6379 char *sym_strtab;
6380 Elf32_Sym *sh_symtab;
6381 struct section *sec_applies, *sec_symtab;
6382- int j;
6383+ unsigned int j;
6384 struct section *sec = &secs[i];
6385
6386 if (sec->shdr.sh_type != SHT_REL) {
6387@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6388 if (sym->st_shndx == SHN_ABS) {
6389 continue;
6390 }
6391+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6392+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6393+ continue;
6394+
6395+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6396+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6397+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6398+ continue;
6399+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6400+ continue;
6401+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6402+ continue;
6403+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6404+ continue;
6405+#endif
6406 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6407 /*
6408 * NONE can be ignored and and PC relative
6409@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6410
6411 static void emit_relocs(int as_text)
6412 {
6413- int i;
6414+ unsigned int i;
6415 /* Count how many relocations I have and allocate space for them. */
6416 reloc_count = 0;
6417 walk_relocs(count_reloc);
6418@@ -634,6 +693,7 @@ int main(int argc, char **argv)
6419 fname, strerror(errno));
6420 }
6421 read_ehdr(fp);
6422+ read_phdrs(fp);
6423 read_shdrs(fp);
6424 read_strtabs(fp);
6425 read_symtabs(fp);
6426diff -urNp linux-2.6.32.45/arch/x86/boot/cpucheck.c linux-2.6.32.45/arch/x86/boot/cpucheck.c
6427--- linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6428+++ linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6429@@ -74,7 +74,7 @@ static int has_fpu(void)
6430 u16 fcw = -1, fsw = -1;
6431 u32 cr0;
6432
6433- asm("movl %%cr0,%0" : "=r" (cr0));
6434+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6435 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6436 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6437 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6438@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6439 {
6440 u32 f0, f1;
6441
6442- asm("pushfl ; "
6443+ asm volatile("pushfl ; "
6444 "pushfl ; "
6445 "popl %0 ; "
6446 "movl %0,%1 ; "
6447@@ -115,7 +115,7 @@ static void get_flags(void)
6448 set_bit(X86_FEATURE_FPU, cpu.flags);
6449
6450 if (has_eflag(X86_EFLAGS_ID)) {
6451- asm("cpuid"
6452+ asm volatile("cpuid"
6453 : "=a" (max_intel_level),
6454 "=b" (cpu_vendor[0]),
6455 "=d" (cpu_vendor[1]),
6456@@ -124,7 +124,7 @@ static void get_flags(void)
6457
6458 if (max_intel_level >= 0x00000001 &&
6459 max_intel_level <= 0x0000ffff) {
6460- asm("cpuid"
6461+ asm volatile("cpuid"
6462 : "=a" (tfms),
6463 "=c" (cpu.flags[4]),
6464 "=d" (cpu.flags[0])
6465@@ -136,7 +136,7 @@ static void get_flags(void)
6466 cpu.model += ((tfms >> 16) & 0xf) << 4;
6467 }
6468
6469- asm("cpuid"
6470+ asm volatile("cpuid"
6471 : "=a" (max_amd_level)
6472 : "a" (0x80000000)
6473 : "ebx", "ecx", "edx");
6474@@ -144,7 +144,7 @@ static void get_flags(void)
6475 if (max_amd_level >= 0x80000001 &&
6476 max_amd_level <= 0x8000ffff) {
6477 u32 eax = 0x80000001;
6478- asm("cpuid"
6479+ asm volatile("cpuid"
6480 : "+a" (eax),
6481 "=c" (cpu.flags[6]),
6482 "=d" (cpu.flags[1])
6483@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6484 u32 ecx = MSR_K7_HWCR;
6485 u32 eax, edx;
6486
6487- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6488+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6489 eax &= ~(1 << 15);
6490- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6491+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6492
6493 get_flags(); /* Make sure it really did something */
6494 err = check_flags();
6495@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6496 u32 ecx = MSR_VIA_FCR;
6497 u32 eax, edx;
6498
6499- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6500+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6501 eax |= (1<<1)|(1<<7);
6502- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6503+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6504
6505 set_bit(X86_FEATURE_CX8, cpu.flags);
6506 err = check_flags();
6507@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6508 u32 eax, edx;
6509 u32 level = 1;
6510
6511- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6512- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6513- asm("cpuid"
6514+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6515+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6516+ asm volatile("cpuid"
6517 : "+a" (level), "=d" (cpu.flags[0])
6518 : : "ecx", "ebx");
6519- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6520+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6521
6522 err = check_flags();
6523 }
6524diff -urNp linux-2.6.32.45/arch/x86/boot/header.S linux-2.6.32.45/arch/x86/boot/header.S
6525--- linux-2.6.32.45/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6526+++ linux-2.6.32.45/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6527@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6528 # single linked list of
6529 # struct setup_data
6530
6531-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6532+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6533
6534 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6535 #define VO_INIT_SIZE (VO__end - VO__text)
6536diff -urNp linux-2.6.32.45/arch/x86/boot/Makefile linux-2.6.32.45/arch/x86/boot/Makefile
6537--- linux-2.6.32.45/arch/x86/boot/Makefile 2011-03-27 14:31:47.000000000 -0400
6538+++ linux-2.6.32.45/arch/x86/boot/Makefile 2011-08-07 14:38:13.000000000 -0400
6539@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
6540 $(call cc-option, -fno-stack-protector) \
6541 $(call cc-option, -mpreferred-stack-boundary=2)
6542 KBUILD_CFLAGS += $(call cc-option, -m32)
6543+ifdef CONSTIFY_PLUGIN
6544+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6545+endif
6546 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6547 GCOV_PROFILE := n
6548
6549diff -urNp linux-2.6.32.45/arch/x86/boot/memory.c linux-2.6.32.45/arch/x86/boot/memory.c
6550--- linux-2.6.32.45/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6551+++ linux-2.6.32.45/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6552@@ -19,7 +19,7 @@
6553
6554 static int detect_memory_e820(void)
6555 {
6556- int count = 0;
6557+ unsigned int count = 0;
6558 struct biosregs ireg, oreg;
6559 struct e820entry *desc = boot_params.e820_map;
6560 static struct e820entry buf; /* static so it is zeroed */
6561diff -urNp linux-2.6.32.45/arch/x86/boot/video.c linux-2.6.32.45/arch/x86/boot/video.c
6562--- linux-2.6.32.45/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6563+++ linux-2.6.32.45/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6564@@ -90,7 +90,7 @@ static void store_mode_params(void)
6565 static unsigned int get_entry(void)
6566 {
6567 char entry_buf[4];
6568- int i, len = 0;
6569+ unsigned int i, len = 0;
6570 int key;
6571 unsigned int v;
6572
6573diff -urNp linux-2.6.32.45/arch/x86/boot/video-vesa.c linux-2.6.32.45/arch/x86/boot/video-vesa.c
6574--- linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6575+++ linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6576@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6577
6578 boot_params.screen_info.vesapm_seg = oreg.es;
6579 boot_params.screen_info.vesapm_off = oreg.di;
6580+ boot_params.screen_info.vesapm_size = oreg.cx;
6581 }
6582
6583 /*
6584diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_aout.c linux-2.6.32.45/arch/x86/ia32/ia32_aout.c
6585--- linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6586+++ linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6587@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6588 unsigned long dump_start, dump_size;
6589 struct user32 dump;
6590
6591+ memset(&dump, 0, sizeof(dump));
6592+
6593 fs = get_fs();
6594 set_fs(KERNEL_DS);
6595 has_dumped = 1;
6596@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6597 dump_size = dump.u_ssize << PAGE_SHIFT;
6598 DUMP_WRITE(dump_start, dump_size);
6599 }
6600- /*
6601- * Finally dump the task struct. Not be used by gdb, but
6602- * could be useful
6603- */
6604- set_fs(KERNEL_DS);
6605- DUMP_WRITE(current, sizeof(*current));
6606 end_coredump:
6607 set_fs(fs);
6608 return has_dumped;
6609diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32entry.S linux-2.6.32.45/arch/x86/ia32/ia32entry.S
6610--- linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6611+++ linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6612@@ -13,6 +13,7 @@
6613 #include <asm/thread_info.h>
6614 #include <asm/segment.h>
6615 #include <asm/irqflags.h>
6616+#include <asm/pgtable.h>
6617 #include <linux/linkage.h>
6618
6619 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6620@@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6621 ENDPROC(native_irq_enable_sysexit)
6622 #endif
6623
6624+ .macro pax_enter_kernel_user
6625+#ifdef CONFIG_PAX_MEMORY_UDEREF
6626+ call pax_enter_kernel_user
6627+#endif
6628+ .endm
6629+
6630+ .macro pax_exit_kernel_user
6631+#ifdef CONFIG_PAX_MEMORY_UDEREF
6632+ call pax_exit_kernel_user
6633+#endif
6634+#ifdef CONFIG_PAX_RANDKSTACK
6635+ pushq %rax
6636+ call pax_randomize_kstack
6637+ popq %rax
6638+#endif
6639+ pax_erase_kstack
6640+ .endm
6641+
6642+.macro pax_erase_kstack
6643+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6644+ call pax_erase_kstack
6645+#endif
6646+.endm
6647+
6648 /*
6649 * 32bit SYSENTER instruction entry.
6650 *
6651@@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6652 CFI_REGISTER rsp,rbp
6653 SWAPGS_UNSAFE_STACK
6654 movq PER_CPU_VAR(kernel_stack), %rsp
6655- addq $(KERNEL_STACK_OFFSET),%rsp
6656+ pax_enter_kernel_user
6657 /*
6658 * No need to follow this irqs on/off section: the syscall
6659 * disabled irqs, here we enable it straight after entry:
6660@@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6661 pushfq
6662 CFI_ADJUST_CFA_OFFSET 8
6663 /*CFI_REL_OFFSET rflags,0*/
6664- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6665+ GET_THREAD_INFO(%r10)
6666+ movl TI_sysenter_return(%r10), %r10d
6667 CFI_REGISTER rip,r10
6668 pushq $__USER32_CS
6669 CFI_ADJUST_CFA_OFFSET 8
6670@@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6671 SAVE_ARGS 0,0,1
6672 /* no need to do an access_ok check here because rbp has been
6673 32bit zero extended */
6674+
6675+#ifdef CONFIG_PAX_MEMORY_UDEREF
6676+ mov $PAX_USER_SHADOW_BASE,%r10
6677+ add %r10,%rbp
6678+#endif
6679+
6680 1: movl (%rbp),%ebp
6681 .section __ex_table,"a"
6682 .quad 1b,ia32_badarg
6683@@ -172,6 +204,7 @@ sysenter_dispatch:
6684 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6685 jnz sysexit_audit
6686 sysexit_from_sys_call:
6687+ pax_exit_kernel_user
6688 andl $~TS_COMPAT,TI_status(%r10)
6689 /* clear IF, that popfq doesn't enable interrupts early */
6690 andl $~0x200,EFLAGS-R11(%rsp)
6691@@ -200,6 +233,9 @@ sysexit_from_sys_call:
6692 movl %eax,%esi /* 2nd arg: syscall number */
6693 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6694 call audit_syscall_entry
6695+
6696+ pax_erase_kstack
6697+
6698 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6699 cmpq $(IA32_NR_syscalls-1),%rax
6700 ja ia32_badsys
6701@@ -252,6 +288,9 @@ sysenter_tracesys:
6702 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6703 movq %rsp,%rdi /* &pt_regs -> arg1 */
6704 call syscall_trace_enter
6705+
6706+ pax_erase_kstack
6707+
6708 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6709 RESTORE_REST
6710 cmpq $(IA32_NR_syscalls-1),%rax
6711@@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6712 ENTRY(ia32_cstar_target)
6713 CFI_STARTPROC32 simple
6714 CFI_SIGNAL_FRAME
6715- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6716+ CFI_DEF_CFA rsp,0
6717 CFI_REGISTER rip,rcx
6718 /*CFI_REGISTER rflags,r11*/
6719 SWAPGS_UNSAFE_STACK
6720 movl %esp,%r8d
6721 CFI_REGISTER rsp,r8
6722 movq PER_CPU_VAR(kernel_stack),%rsp
6723+
6724+#ifdef CONFIG_PAX_MEMORY_UDEREF
6725+ pax_enter_kernel_user
6726+#endif
6727+
6728 /*
6729 * No need to follow this irqs on/off section: the syscall
6730 * disabled irqs and here we enable it straight after entry:
6731 */
6732 ENABLE_INTERRUPTS(CLBR_NONE)
6733- SAVE_ARGS 8,1,1
6734+ SAVE_ARGS 8*6,1,1
6735 movl %eax,%eax /* zero extension */
6736 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6737 movq %rcx,RIP-ARGOFFSET(%rsp)
6738@@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6739 /* no need to do an access_ok check here because r8 has been
6740 32bit zero extended */
6741 /* hardware stack frame is complete now */
6742+
6743+#ifdef CONFIG_PAX_MEMORY_UDEREF
6744+ mov $PAX_USER_SHADOW_BASE,%r10
6745+ add %r10,%r8
6746+#endif
6747+
6748 1: movl (%r8),%r9d
6749 .section __ex_table,"a"
6750 .quad 1b,ia32_badarg
6751@@ -333,6 +383,7 @@ cstar_dispatch:
6752 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6753 jnz sysretl_audit
6754 sysretl_from_sys_call:
6755+ pax_exit_kernel_user
6756 andl $~TS_COMPAT,TI_status(%r10)
6757 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6758 movl RIP-ARGOFFSET(%rsp),%ecx
6759@@ -370,6 +421,9 @@ cstar_tracesys:
6760 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6761 movq %rsp,%rdi /* &pt_regs -> arg1 */
6762 call syscall_trace_enter
6763+
6764+ pax_erase_kstack
6765+
6766 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6767 RESTORE_REST
6768 xchgl %ebp,%r9d
6769@@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6770 CFI_REL_OFFSET rip,RIP-RIP
6771 PARAVIRT_ADJUST_EXCEPTION_FRAME
6772 SWAPGS
6773+ pax_enter_kernel_user
6774 /*
6775 * No need to follow this irqs on/off section: the syscall
6776 * disabled irqs and here we enable it straight after entry:
6777@@ -448,6 +503,9 @@ ia32_tracesys:
6778 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6779 movq %rsp,%rdi /* &pt_regs -> arg1 */
6780 call syscall_trace_enter
6781+
6782+ pax_erase_kstack
6783+
6784 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6785 RESTORE_REST
6786 cmpq $(IA32_NR_syscalls-1),%rax
6787diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_signal.c linux-2.6.32.45/arch/x86/ia32/ia32_signal.c
6788--- linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6789+++ linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6790@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6791 sp -= frame_size;
6792 /* Align the stack pointer according to the i386 ABI,
6793 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6794- sp = ((sp + 4) & -16ul) - 4;
6795+ sp = ((sp - 12) & -16ul) - 4;
6796 return (void __user *) sp;
6797 }
6798
6799@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6800 * These are actually not used anymore, but left because some
6801 * gdb versions depend on them as a marker.
6802 */
6803- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6804+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6805 } put_user_catch(err);
6806
6807 if (err)
6808@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6809 0xb8,
6810 __NR_ia32_rt_sigreturn,
6811 0x80cd,
6812- 0,
6813+ 0
6814 };
6815
6816 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6817@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6818
6819 if (ka->sa.sa_flags & SA_RESTORER)
6820 restorer = ka->sa.sa_restorer;
6821+ else if (current->mm->context.vdso)
6822+ /* Return stub is in 32bit vsyscall page */
6823+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6824 else
6825- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6826- rt_sigreturn);
6827+ restorer = &frame->retcode;
6828 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6829
6830 /*
6831 * Not actually used anymore, but left because some gdb
6832 * versions need it.
6833 */
6834- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6835+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6836 } put_user_catch(err);
6837
6838 if (err)
6839diff -urNp linux-2.6.32.45/arch/x86/include/asm/alternative.h linux-2.6.32.45/arch/x86/include/asm/alternative.h
6840--- linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6841+++ linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6842@@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6843 " .byte 662b-661b\n" /* sourcelen */ \
6844 " .byte 664f-663f\n" /* replacementlen */ \
6845 ".previous\n" \
6846- ".section .altinstr_replacement, \"ax\"\n" \
6847+ ".section .altinstr_replacement, \"a\"\n" \
6848 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6849 ".previous"
6850
6851diff -urNp linux-2.6.32.45/arch/x86/include/asm/apm.h linux-2.6.32.45/arch/x86/include/asm/apm.h
6852--- linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6853+++ linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6854@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6855 __asm__ __volatile__(APM_DO_ZERO_SEGS
6856 "pushl %%edi\n\t"
6857 "pushl %%ebp\n\t"
6858- "lcall *%%cs:apm_bios_entry\n\t"
6859+ "lcall *%%ss:apm_bios_entry\n\t"
6860 "setc %%al\n\t"
6861 "popl %%ebp\n\t"
6862 "popl %%edi\n\t"
6863@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6864 __asm__ __volatile__(APM_DO_ZERO_SEGS
6865 "pushl %%edi\n\t"
6866 "pushl %%ebp\n\t"
6867- "lcall *%%cs:apm_bios_entry\n\t"
6868+ "lcall *%%ss:apm_bios_entry\n\t"
6869 "setc %%bl\n\t"
6870 "popl %%ebp\n\t"
6871 "popl %%edi\n\t"
6872diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_32.h linux-2.6.32.45/arch/x86/include/asm/atomic_32.h
6873--- linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6874+++ linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6875@@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6876 }
6877
6878 /**
6879+ * atomic_read_unchecked - read atomic variable
6880+ * @v: pointer of type atomic_unchecked_t
6881+ *
6882+ * Atomically reads the value of @v.
6883+ */
6884+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6885+{
6886+ return v->counter;
6887+}
6888+
6889+/**
6890 * atomic_set - set atomic variable
6891 * @v: pointer of type atomic_t
6892 * @i: required value
6893@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6894 }
6895
6896 /**
6897+ * atomic_set_unchecked - set atomic variable
6898+ * @v: pointer of type atomic_unchecked_t
6899+ * @i: required value
6900+ *
6901+ * Atomically sets the value of @v to @i.
6902+ */
6903+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6904+{
6905+ v->counter = i;
6906+}
6907+
6908+/**
6909 * atomic_add - add integer to atomic variable
6910 * @i: integer value to add
6911 * @v: pointer of type atomic_t
6912@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6913 */
6914 static inline void atomic_add(int i, atomic_t *v)
6915 {
6916- asm volatile(LOCK_PREFIX "addl %1,%0"
6917+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6918+
6919+#ifdef CONFIG_PAX_REFCOUNT
6920+ "jno 0f\n"
6921+ LOCK_PREFIX "subl %1,%0\n"
6922+ "int $4\n0:\n"
6923+ _ASM_EXTABLE(0b, 0b)
6924+#endif
6925+
6926+ : "+m" (v->counter)
6927+ : "ir" (i));
6928+}
6929+
6930+/**
6931+ * atomic_add_unchecked - add integer to atomic variable
6932+ * @i: integer value to add
6933+ * @v: pointer of type atomic_unchecked_t
6934+ *
6935+ * Atomically adds @i to @v.
6936+ */
6937+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6938+{
6939+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6940 : "+m" (v->counter)
6941 : "ir" (i));
6942 }
6943@@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6944 */
6945 static inline void atomic_sub(int i, atomic_t *v)
6946 {
6947- asm volatile(LOCK_PREFIX "subl %1,%0"
6948+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6949+
6950+#ifdef CONFIG_PAX_REFCOUNT
6951+ "jno 0f\n"
6952+ LOCK_PREFIX "addl %1,%0\n"
6953+ "int $4\n0:\n"
6954+ _ASM_EXTABLE(0b, 0b)
6955+#endif
6956+
6957+ : "+m" (v->counter)
6958+ : "ir" (i));
6959+}
6960+
6961+/**
6962+ * atomic_sub_unchecked - subtract integer from atomic variable
6963+ * @i: integer value to subtract
6964+ * @v: pointer of type atomic_unchecked_t
6965+ *
6966+ * Atomically subtracts @i from @v.
6967+ */
6968+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6969+{
6970+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6971 : "+m" (v->counter)
6972 : "ir" (i));
6973 }
6974@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
6975 {
6976 unsigned char c;
6977
6978- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6979+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6980+
6981+#ifdef CONFIG_PAX_REFCOUNT
6982+ "jno 0f\n"
6983+ LOCK_PREFIX "addl %2,%0\n"
6984+ "int $4\n0:\n"
6985+ _ASM_EXTABLE(0b, 0b)
6986+#endif
6987+
6988+ "sete %1\n"
6989 : "+m" (v->counter), "=qm" (c)
6990 : "ir" (i) : "memory");
6991 return c;
6992@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
6993 */
6994 static inline void atomic_inc(atomic_t *v)
6995 {
6996- asm volatile(LOCK_PREFIX "incl %0"
6997+ asm volatile(LOCK_PREFIX "incl %0\n"
6998+
6999+#ifdef CONFIG_PAX_REFCOUNT
7000+ "jno 0f\n"
7001+ LOCK_PREFIX "decl %0\n"
7002+ "int $4\n0:\n"
7003+ _ASM_EXTABLE(0b, 0b)
7004+#endif
7005+
7006+ : "+m" (v->counter));
7007+}
7008+
7009+/**
7010+ * atomic_inc_unchecked - increment atomic variable
7011+ * @v: pointer of type atomic_unchecked_t
7012+ *
7013+ * Atomically increments @v by 1.
7014+ */
7015+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7016+{
7017+ asm volatile(LOCK_PREFIX "incl %0\n"
7018 : "+m" (v->counter));
7019 }
7020
7021@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7022 */
7023 static inline void atomic_dec(atomic_t *v)
7024 {
7025- asm volatile(LOCK_PREFIX "decl %0"
7026+ asm volatile(LOCK_PREFIX "decl %0\n"
7027+
7028+#ifdef CONFIG_PAX_REFCOUNT
7029+ "jno 0f\n"
7030+ LOCK_PREFIX "incl %0\n"
7031+ "int $4\n0:\n"
7032+ _ASM_EXTABLE(0b, 0b)
7033+#endif
7034+
7035+ : "+m" (v->counter));
7036+}
7037+
7038+/**
7039+ * atomic_dec_unchecked - decrement atomic variable
7040+ * @v: pointer of type atomic_unchecked_t
7041+ *
7042+ * Atomically decrements @v by 1.
7043+ */
7044+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7045+{
7046+ asm volatile(LOCK_PREFIX "decl %0\n"
7047 : "+m" (v->counter));
7048 }
7049
7050@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7051 {
7052 unsigned char c;
7053
7054- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7055+ asm volatile(LOCK_PREFIX "decl %0\n"
7056+
7057+#ifdef CONFIG_PAX_REFCOUNT
7058+ "jno 0f\n"
7059+ LOCK_PREFIX "incl %0\n"
7060+ "int $4\n0:\n"
7061+ _ASM_EXTABLE(0b, 0b)
7062+#endif
7063+
7064+ "sete %1\n"
7065 : "+m" (v->counter), "=qm" (c)
7066 : : "memory");
7067 return c != 0;
7068@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7069 {
7070 unsigned char c;
7071
7072- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7073+ asm volatile(LOCK_PREFIX "incl %0\n"
7074+
7075+#ifdef CONFIG_PAX_REFCOUNT
7076+ "jno 0f\n"
7077+ LOCK_PREFIX "decl %0\n"
7078+ "into\n0:\n"
7079+ _ASM_EXTABLE(0b, 0b)
7080+#endif
7081+
7082+ "sete %1\n"
7083+ : "+m" (v->counter), "=qm" (c)
7084+ : : "memory");
7085+ return c != 0;
7086+}
7087+
7088+/**
7089+ * atomic_inc_and_test_unchecked - increment and test
7090+ * @v: pointer of type atomic_unchecked_t
7091+ *
7092+ * Atomically increments @v by 1
7093+ * and returns true if the result is zero, or false for all
7094+ * other cases.
7095+ */
7096+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7097+{
7098+ unsigned char c;
7099+
7100+ asm volatile(LOCK_PREFIX "incl %0\n"
7101+ "sete %1\n"
7102 : "+m" (v->counter), "=qm" (c)
7103 : : "memory");
7104 return c != 0;
7105@@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7106 {
7107 unsigned char c;
7108
7109- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7110+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7111+
7112+#ifdef CONFIG_PAX_REFCOUNT
7113+ "jno 0f\n"
7114+ LOCK_PREFIX "subl %2,%0\n"
7115+ "int $4\n0:\n"
7116+ _ASM_EXTABLE(0b, 0b)
7117+#endif
7118+
7119+ "sets %1\n"
7120 : "+m" (v->counter), "=qm" (c)
7121 : "ir" (i) : "memory");
7122 return c;
7123@@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7124 #endif
7125 /* Modern 486+ processor */
7126 __i = i;
7127+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7128+
7129+#ifdef CONFIG_PAX_REFCOUNT
7130+ "jno 0f\n"
7131+ "movl %0, %1\n"
7132+ "int $4\n0:\n"
7133+ _ASM_EXTABLE(0b, 0b)
7134+#endif
7135+
7136+ : "+r" (i), "+m" (v->counter)
7137+ : : "memory");
7138+ return i + __i;
7139+
7140+#ifdef CONFIG_M386
7141+no_xadd: /* Legacy 386 processor */
7142+ local_irq_save(flags);
7143+ __i = atomic_read(v);
7144+ atomic_set(v, i + __i);
7145+ local_irq_restore(flags);
7146+ return i + __i;
7147+#endif
7148+}
7149+
7150+/**
7151+ * atomic_add_return_unchecked - add integer and return
7152+ * @v: pointer of type atomic_unchecked_t
7153+ * @i: integer value to add
7154+ *
7155+ * Atomically adds @i to @v and returns @i + @v
7156+ */
7157+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7158+{
7159+ int __i;
7160+#ifdef CONFIG_M386
7161+ unsigned long flags;
7162+ if (unlikely(boot_cpu_data.x86 <= 3))
7163+ goto no_xadd;
7164+#endif
7165+ /* Modern 486+ processor */
7166+ __i = i;
7167 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7168 : "+r" (i), "+m" (v->counter)
7169 : : "memory");
7170@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7171 return cmpxchg(&v->counter, old, new);
7172 }
7173
7174+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7175+{
7176+ return cmpxchg(&v->counter, old, new);
7177+}
7178+
7179 static inline int atomic_xchg(atomic_t *v, int new)
7180 {
7181 return xchg(&v->counter, new);
7182 }
7183
7184+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7185+{
7186+ return xchg(&v->counter, new);
7187+}
7188+
7189 /**
7190 * atomic_add_unless - add unless the number is already a given value
7191 * @v: pointer of type atomic_t
7192@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7193 */
7194 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7195 {
7196- int c, old;
7197+ int c, old, new;
7198 c = atomic_read(v);
7199 for (;;) {
7200- if (unlikely(c == (u)))
7201+ if (unlikely(c == u))
7202 break;
7203- old = atomic_cmpxchg((v), c, c + (a));
7204+
7205+ asm volatile("addl %2,%0\n"
7206+
7207+#ifdef CONFIG_PAX_REFCOUNT
7208+ "jno 0f\n"
7209+ "subl %2,%0\n"
7210+ "int $4\n0:\n"
7211+ _ASM_EXTABLE(0b, 0b)
7212+#endif
7213+
7214+ : "=r" (new)
7215+ : "0" (c), "ir" (a));
7216+
7217+ old = atomic_cmpxchg(v, c, new);
7218 if (likely(old == c))
7219 break;
7220 c = old;
7221 }
7222- return c != (u);
7223+ return c != u;
7224 }
7225
7226 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7227
7228 #define atomic_inc_return(v) (atomic_add_return(1, v))
7229+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7230+{
7231+ return atomic_add_return_unchecked(1, v);
7232+}
7233 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7234
7235 /* These are x86-specific, used by some header files */
7236@@ -266,9 +495,18 @@ typedef struct {
7237 u64 __aligned(8) counter;
7238 } atomic64_t;
7239
7240+#ifdef CONFIG_PAX_REFCOUNT
7241+typedef struct {
7242+ u64 __aligned(8) counter;
7243+} atomic64_unchecked_t;
7244+#else
7245+typedef atomic64_t atomic64_unchecked_t;
7246+#endif
7247+
7248 #define ATOMIC64_INIT(val) { (val) }
7249
7250 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7251+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7252
7253 /**
7254 * atomic64_xchg - xchg atomic64 variable
7255@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7256 * the old value.
7257 */
7258 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7259+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7260
7261 /**
7262 * atomic64_set - set atomic64 variable
7263@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7264 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7265
7266 /**
7267+ * atomic64_unchecked_set - set atomic64 variable
7268+ * @ptr: pointer to type atomic64_unchecked_t
7269+ * @new_val: value to assign
7270+ *
7271+ * Atomically sets the value of @ptr to @new_val.
7272+ */
7273+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7274+
7275+/**
7276 * atomic64_read - read atomic64 variable
7277 * @ptr: pointer to type atomic64_t
7278 *
7279@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7280 return res;
7281 }
7282
7283-extern u64 atomic64_read(atomic64_t *ptr);
7284+/**
7285+ * atomic64_read_unchecked - read atomic64 variable
7286+ * @ptr: pointer to type atomic64_unchecked_t
7287+ *
7288+ * Atomically reads the value of @ptr and returns it.
7289+ */
7290+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7291+{
7292+ u64 res;
7293+
7294+ /*
7295+ * Note, we inline this atomic64_unchecked_t primitive because
7296+ * it only clobbers EAX/EDX and leaves the others
7297+ * untouched. We also (somewhat subtly) rely on the
7298+ * fact that cmpxchg8b returns the current 64-bit value
7299+ * of the memory location we are touching:
7300+ */
7301+ asm volatile(
7302+ "mov %%ebx, %%eax\n\t"
7303+ "mov %%ecx, %%edx\n\t"
7304+ LOCK_PREFIX "cmpxchg8b %1\n"
7305+ : "=&A" (res)
7306+ : "m" (*ptr)
7307+ );
7308+
7309+ return res;
7310+}
7311
7312 /**
7313 * atomic64_add_return - add and return
7314@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7315 * Other variants with different arithmetic operators:
7316 */
7317 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7318+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7319 extern u64 atomic64_inc_return(atomic64_t *ptr);
7320+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7321 extern u64 atomic64_dec_return(atomic64_t *ptr);
7322+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7323
7324 /**
7325 * atomic64_add - add integer to atomic64 variable
7326@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7327 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7328
7329 /**
7330+ * atomic64_add_unchecked - add integer to atomic64 variable
7331+ * @delta: integer value to add
7332+ * @ptr: pointer to type atomic64_unchecked_t
7333+ *
7334+ * Atomically adds @delta to @ptr.
7335+ */
7336+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7337+
7338+/**
7339 * atomic64_sub - subtract the atomic64 variable
7340 * @delta: integer value to subtract
7341 * @ptr: pointer to type atomic64_t
7342@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7343 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7344
7345 /**
7346+ * atomic64_sub_unchecked - subtract the atomic64 variable
7347+ * @delta: integer value to subtract
7348+ * @ptr: pointer to type atomic64_unchecked_t
7349+ *
7350+ * Atomically subtracts @delta from @ptr.
7351+ */
7352+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7353+
7354+/**
7355 * atomic64_sub_and_test - subtract value from variable and test result
7356 * @delta: integer value to subtract
7357 * @ptr: pointer to type atomic64_t
7358@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7359 extern void atomic64_inc(atomic64_t *ptr);
7360
7361 /**
7362+ * atomic64_inc_unchecked - increment atomic64 variable
7363+ * @ptr: pointer to type atomic64_unchecked_t
7364+ *
7365+ * Atomically increments @ptr by 1.
7366+ */
7367+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7368+
7369+/**
7370 * atomic64_dec - decrement atomic64 variable
7371 * @ptr: pointer to type atomic64_t
7372 *
7373@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7374 extern void atomic64_dec(atomic64_t *ptr);
7375
7376 /**
7377+ * atomic64_dec_unchecked - decrement atomic64 variable
7378+ * @ptr: pointer to type atomic64_unchecked_t
7379+ *
7380+ * Atomically decrements @ptr by 1.
7381+ */
7382+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7383+
7384+/**
7385 * atomic64_dec_and_test - decrement and test
7386 * @ptr: pointer to type atomic64_t
7387 *
7388diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_64.h linux-2.6.32.45/arch/x86/include/asm/atomic_64.h
7389--- linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7390+++ linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7391@@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7392 }
7393
7394 /**
7395+ * atomic_read_unchecked - read atomic variable
7396+ * @v: pointer of type atomic_unchecked_t
7397+ *
7398+ * Atomically reads the value of @v.
7399+ */
7400+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7401+{
7402+ return v->counter;
7403+}
7404+
7405+/**
7406 * atomic_set - set atomic variable
7407 * @v: pointer of type atomic_t
7408 * @i: required value
7409@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7410 }
7411
7412 /**
7413+ * atomic_set_unchecked - set atomic variable
7414+ * @v: pointer of type atomic_unchecked_t
7415+ * @i: required value
7416+ *
7417+ * Atomically sets the value of @v to @i.
7418+ */
7419+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7420+{
7421+ v->counter = i;
7422+}
7423+
7424+/**
7425 * atomic_add - add integer to atomic variable
7426 * @i: integer value to add
7427 * @v: pointer of type atomic_t
7428@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7429 */
7430 static inline void atomic_add(int i, atomic_t *v)
7431 {
7432- asm volatile(LOCK_PREFIX "addl %1,%0"
7433+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7434+
7435+#ifdef CONFIG_PAX_REFCOUNT
7436+ "jno 0f\n"
7437+ LOCK_PREFIX "subl %1,%0\n"
7438+ "int $4\n0:\n"
7439+ _ASM_EXTABLE(0b, 0b)
7440+#endif
7441+
7442+ : "=m" (v->counter)
7443+ : "ir" (i), "m" (v->counter));
7444+}
7445+
7446+/**
7447+ * atomic_add_unchecked - add integer to atomic variable
7448+ * @i: integer value to add
7449+ * @v: pointer of type atomic_unchecked_t
7450+ *
7451+ * Atomically adds @i to @v.
7452+ */
7453+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7454+{
7455+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7456 : "=m" (v->counter)
7457 : "ir" (i), "m" (v->counter));
7458 }
7459@@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7460 */
7461 static inline void atomic_sub(int i, atomic_t *v)
7462 {
7463- asm volatile(LOCK_PREFIX "subl %1,%0"
7464+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7465+
7466+#ifdef CONFIG_PAX_REFCOUNT
7467+ "jno 0f\n"
7468+ LOCK_PREFIX "addl %1,%0\n"
7469+ "int $4\n0:\n"
7470+ _ASM_EXTABLE(0b, 0b)
7471+#endif
7472+
7473+ : "=m" (v->counter)
7474+ : "ir" (i), "m" (v->counter));
7475+}
7476+
7477+/**
7478+ * atomic_sub_unchecked - subtract the atomic variable
7479+ * @i: integer value to subtract
7480+ * @v: pointer of type atomic_unchecked_t
7481+ *
7482+ * Atomically subtracts @i from @v.
7483+ */
7484+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7485+{
7486+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7487 : "=m" (v->counter)
7488 : "ir" (i), "m" (v->counter));
7489 }
7490@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7491 {
7492 unsigned char c;
7493
7494- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7495+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7496+
7497+#ifdef CONFIG_PAX_REFCOUNT
7498+ "jno 0f\n"
7499+ LOCK_PREFIX "addl %2,%0\n"
7500+ "int $4\n0:\n"
7501+ _ASM_EXTABLE(0b, 0b)
7502+#endif
7503+
7504+ "sete %1\n"
7505 : "=m" (v->counter), "=qm" (c)
7506 : "ir" (i), "m" (v->counter) : "memory");
7507 return c;
7508@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7509 */
7510 static inline void atomic_inc(atomic_t *v)
7511 {
7512- asm volatile(LOCK_PREFIX "incl %0"
7513+ asm volatile(LOCK_PREFIX "incl %0\n"
7514+
7515+#ifdef CONFIG_PAX_REFCOUNT
7516+ "jno 0f\n"
7517+ LOCK_PREFIX "decl %0\n"
7518+ "int $4\n0:\n"
7519+ _ASM_EXTABLE(0b, 0b)
7520+#endif
7521+
7522+ : "=m" (v->counter)
7523+ : "m" (v->counter));
7524+}
7525+
7526+/**
7527+ * atomic_inc_unchecked - increment atomic variable
7528+ * @v: pointer of type atomic_unchecked_t
7529+ *
7530+ * Atomically increments @v by 1.
7531+ */
7532+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7533+{
7534+ asm volatile(LOCK_PREFIX "incl %0\n"
7535 : "=m" (v->counter)
7536 : "m" (v->counter));
7537 }
7538@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7539 */
7540 static inline void atomic_dec(atomic_t *v)
7541 {
7542- asm volatile(LOCK_PREFIX "decl %0"
7543+ asm volatile(LOCK_PREFIX "decl %0\n"
7544+
7545+#ifdef CONFIG_PAX_REFCOUNT
7546+ "jno 0f\n"
7547+ LOCK_PREFIX "incl %0\n"
7548+ "int $4\n0:\n"
7549+ _ASM_EXTABLE(0b, 0b)
7550+#endif
7551+
7552+ : "=m" (v->counter)
7553+ : "m" (v->counter));
7554+}
7555+
7556+/**
7557+ * atomic_dec_unchecked - decrement atomic variable
7558+ * @v: pointer of type atomic_unchecked_t
7559+ *
7560+ * Atomically decrements @v by 1.
7561+ */
7562+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7563+{
7564+ asm volatile(LOCK_PREFIX "decl %0\n"
7565 : "=m" (v->counter)
7566 : "m" (v->counter));
7567 }
7568@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7569 {
7570 unsigned char c;
7571
7572- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7573+ asm volatile(LOCK_PREFIX "decl %0\n"
7574+
7575+#ifdef CONFIG_PAX_REFCOUNT
7576+ "jno 0f\n"
7577+ LOCK_PREFIX "incl %0\n"
7578+ "int $4\n0:\n"
7579+ _ASM_EXTABLE(0b, 0b)
7580+#endif
7581+
7582+ "sete %1\n"
7583 : "=m" (v->counter), "=qm" (c)
7584 : "m" (v->counter) : "memory");
7585 return c != 0;
7586@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7587 {
7588 unsigned char c;
7589
7590- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7591+ asm volatile(LOCK_PREFIX "incl %0\n"
7592+
7593+#ifdef CONFIG_PAX_REFCOUNT
7594+ "jno 0f\n"
7595+ LOCK_PREFIX "decl %0\n"
7596+ "int $4\n0:\n"
7597+ _ASM_EXTABLE(0b, 0b)
7598+#endif
7599+
7600+ "sete %1\n"
7601+ : "=m" (v->counter), "=qm" (c)
7602+ : "m" (v->counter) : "memory");
7603+ return c != 0;
7604+}
7605+
7606+/**
7607+ * atomic_inc_and_test_unchecked - increment and test
7608+ * @v: pointer of type atomic_unchecked_t
7609+ *
7610+ * Atomically increments @v by 1
7611+ * and returns true if the result is zero, or false for all
7612+ * other cases.
7613+ */
7614+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7615+{
7616+ unsigned char c;
7617+
7618+ asm volatile(LOCK_PREFIX "incl %0\n"
7619+ "sete %1\n"
7620 : "=m" (v->counter), "=qm" (c)
7621 : "m" (v->counter) : "memory");
7622 return c != 0;
7623@@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7624 {
7625 unsigned char c;
7626
7627- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7628+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7629+
7630+#ifdef CONFIG_PAX_REFCOUNT
7631+ "jno 0f\n"
7632+ LOCK_PREFIX "subl %2,%0\n"
7633+ "int $4\n0:\n"
7634+ _ASM_EXTABLE(0b, 0b)
7635+#endif
7636+
7637+ "sets %1\n"
7638 : "=m" (v->counter), "=qm" (c)
7639 : "ir" (i), "m" (v->counter) : "memory");
7640 return c;
7641@@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7642 static inline int atomic_add_return(int i, atomic_t *v)
7643 {
7644 int __i = i;
7645- asm volatile(LOCK_PREFIX "xaddl %0, %1"
7646+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7647+
7648+#ifdef CONFIG_PAX_REFCOUNT
7649+ "jno 0f\n"
7650+ "movl %0, %1\n"
7651+ "int $4\n0:\n"
7652+ _ASM_EXTABLE(0b, 0b)
7653+#endif
7654+
7655+ : "+r" (i), "+m" (v->counter)
7656+ : : "memory");
7657+ return i + __i;
7658+}
7659+
7660+/**
7661+ * atomic_add_return_unchecked - add and return
7662+ * @i: integer value to add
7663+ * @v: pointer of type atomic_unchecked_t
7664+ *
7665+ * Atomically adds @i to @v and returns @i + @v
7666+ */
7667+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7668+{
7669+ int __i = i;
7670+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7671 : "+r" (i), "+m" (v->counter)
7672 : : "memory");
7673 return i + __i;
7674@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7675 }
7676
7677 #define atomic_inc_return(v) (atomic_add_return(1, v))
7678+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7679+{
7680+ return atomic_add_return_unchecked(1, v);
7681+}
7682 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7683
7684 /* The 64-bit atomic type */
7685@@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7686 }
7687
7688 /**
7689+ * atomic64_read_unchecked - read atomic64 variable
7690+ * @v: pointer of type atomic64_unchecked_t
7691+ *
7692+ * Atomically reads the value of @v.
7693+ * Doesn't imply a read memory barrier.
7694+ */
7695+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7696+{
7697+ return v->counter;
7698+}
7699+
7700+/**
7701 * atomic64_set - set atomic64 variable
7702 * @v: pointer to type atomic64_t
7703 * @i: required value
7704@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7705 }
7706
7707 /**
7708+ * atomic64_set_unchecked - set atomic64 variable
7709+ * @v: pointer to type atomic64_unchecked_t
7710+ * @i: required value
7711+ *
7712+ * Atomically sets the value of @v to @i.
7713+ */
7714+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7715+{
7716+ v->counter = i;
7717+}
7718+
7719+/**
7720 * atomic64_add - add integer to atomic64 variable
7721 * @i: integer value to add
7722 * @v: pointer to type atomic64_t
7723@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7724 */
7725 static inline void atomic64_add(long i, atomic64_t *v)
7726 {
7727+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7728+
7729+#ifdef CONFIG_PAX_REFCOUNT
7730+ "jno 0f\n"
7731+ LOCK_PREFIX "subq %1,%0\n"
7732+ "int $4\n0:\n"
7733+ _ASM_EXTABLE(0b, 0b)
7734+#endif
7735+
7736+ : "=m" (v->counter)
7737+ : "er" (i), "m" (v->counter));
7738+}
7739+
7740+/**
7741+ * atomic64_add_unchecked - add integer to atomic64 variable
7742+ * @i: integer value to add
7743+ * @v: pointer to type atomic64_unchecked_t
7744+ *
7745+ * Atomically adds @i to @v.
7746+ */
7747+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7748+{
7749 asm volatile(LOCK_PREFIX "addq %1,%0"
7750 : "=m" (v->counter)
7751 : "er" (i), "m" (v->counter));
7752@@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7753 */
7754 static inline void atomic64_sub(long i, atomic64_t *v)
7755 {
7756- asm volatile(LOCK_PREFIX "subq %1,%0"
7757+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7758+
7759+#ifdef CONFIG_PAX_REFCOUNT
7760+ "jno 0f\n"
7761+ LOCK_PREFIX "addq %1,%0\n"
7762+ "int $4\n0:\n"
7763+ _ASM_EXTABLE(0b, 0b)
7764+#endif
7765+
7766 : "=m" (v->counter)
7767 : "er" (i), "m" (v->counter));
7768 }
7769@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7770 {
7771 unsigned char c;
7772
7773- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7774+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7775+
7776+#ifdef CONFIG_PAX_REFCOUNT
7777+ "jno 0f\n"
7778+ LOCK_PREFIX "addq %2,%0\n"
7779+ "int $4\n0:\n"
7780+ _ASM_EXTABLE(0b, 0b)
7781+#endif
7782+
7783+ "sete %1\n"
7784 : "=m" (v->counter), "=qm" (c)
7785 : "er" (i), "m" (v->counter) : "memory");
7786 return c;
7787@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7788 */
7789 static inline void atomic64_inc(atomic64_t *v)
7790 {
7791+ asm volatile(LOCK_PREFIX "incq %0\n"
7792+
7793+#ifdef CONFIG_PAX_REFCOUNT
7794+ "jno 0f\n"
7795+ LOCK_PREFIX "decq %0\n"
7796+ "int $4\n0:\n"
7797+ _ASM_EXTABLE(0b, 0b)
7798+#endif
7799+
7800+ : "=m" (v->counter)
7801+ : "m" (v->counter));
7802+}
7803+
7804+/**
7805+ * atomic64_inc_unchecked - increment atomic64 variable
7806+ * @v: pointer to type atomic64_unchecked_t
7807+ *
7808+ * Atomically increments @v by 1.
7809+ */
7810+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7811+{
7812 asm volatile(LOCK_PREFIX "incq %0"
7813 : "=m" (v->counter)
7814 : "m" (v->counter));
7815@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7816 */
7817 static inline void atomic64_dec(atomic64_t *v)
7818 {
7819- asm volatile(LOCK_PREFIX "decq %0"
7820+ asm volatile(LOCK_PREFIX "decq %0\n"
7821+
7822+#ifdef CONFIG_PAX_REFCOUNT
7823+ "jno 0f\n"
7824+ LOCK_PREFIX "incq %0\n"
7825+ "int $4\n0:\n"
7826+ _ASM_EXTABLE(0b, 0b)
7827+#endif
7828+
7829+ : "=m" (v->counter)
7830+ : "m" (v->counter));
7831+}
7832+
7833+/**
7834+ * atomic64_dec_unchecked - decrement atomic64 variable
7835+ * @v: pointer to type atomic64_unchecked_t
7836+ *
7837+ * Atomically decrements @v by 1.
7838+ */
7839+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7840+{
7841+ asm volatile(LOCK_PREFIX "decq %0"
7842 : "=m" (v->counter)
7843 : "m" (v->counter));
7844 }
7845@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7846 {
7847 unsigned char c;
7848
7849- asm volatile(LOCK_PREFIX "decq %0; sete %1"
7850+ asm volatile(LOCK_PREFIX "decq %0\n"
7851+
7852+#ifdef CONFIG_PAX_REFCOUNT
7853+ "jno 0f\n"
7854+ LOCK_PREFIX "incq %0\n"
7855+ "int $4\n0:\n"
7856+ _ASM_EXTABLE(0b, 0b)
7857+#endif
7858+
7859+ "sete %1\n"
7860 : "=m" (v->counter), "=qm" (c)
7861 : "m" (v->counter) : "memory");
7862 return c != 0;
7863@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7864 {
7865 unsigned char c;
7866
7867- asm volatile(LOCK_PREFIX "incq %0; sete %1"
7868+ asm volatile(LOCK_PREFIX "incq %0\n"
7869+
7870+#ifdef CONFIG_PAX_REFCOUNT
7871+ "jno 0f\n"
7872+ LOCK_PREFIX "decq %0\n"
7873+ "int $4\n0:\n"
7874+ _ASM_EXTABLE(0b, 0b)
7875+#endif
7876+
7877+ "sete %1\n"
7878 : "=m" (v->counter), "=qm" (c)
7879 : "m" (v->counter) : "memory");
7880 return c != 0;
7881@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7882 {
7883 unsigned char c;
7884
7885- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7886+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
7887+
7888+#ifdef CONFIG_PAX_REFCOUNT
7889+ "jno 0f\n"
7890+ LOCK_PREFIX "subq %2,%0\n"
7891+ "int $4\n0:\n"
7892+ _ASM_EXTABLE(0b, 0b)
7893+#endif
7894+
7895+ "sets %1\n"
7896 : "=m" (v->counter), "=qm" (c)
7897 : "er" (i), "m" (v->counter) : "memory");
7898 return c;
7899@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7900 static inline long atomic64_add_return(long i, atomic64_t *v)
7901 {
7902 long __i = i;
7903- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7904+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7905+
7906+#ifdef CONFIG_PAX_REFCOUNT
7907+ "jno 0f\n"
7908+ "movq %0, %1\n"
7909+ "int $4\n0:\n"
7910+ _ASM_EXTABLE(0b, 0b)
7911+#endif
7912+
7913+ : "+r" (i), "+m" (v->counter)
7914+ : : "memory");
7915+ return i + __i;
7916+}
7917+
7918+/**
7919+ * atomic64_add_return_unchecked - add and return
7920+ * @i: integer value to add
7921+ * @v: pointer to type atomic64_unchecked_t
7922+ *
7923+ * Atomically adds @i to @v and returns @i + @v
7924+ */
7925+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7926+{
7927+ long __i = i;
7928+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
7929 : "+r" (i), "+m" (v->counter)
7930 : : "memory");
7931 return i + __i;
7932@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7933 }
7934
7935 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7936+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7937+{
7938+ return atomic64_add_return_unchecked(1, v);
7939+}
7940 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7941
7942 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7943@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7944 return cmpxchg(&v->counter, old, new);
7945 }
7946
7947+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7948+{
7949+ return cmpxchg(&v->counter, old, new);
7950+}
7951+
7952 static inline long atomic64_xchg(atomic64_t *v, long new)
7953 {
7954 return xchg(&v->counter, new);
7955 }
7956
7957+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7958+{
7959+ return xchg(&v->counter, new);
7960+}
7961+
7962 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7963 {
7964 return cmpxchg(&v->counter, old, new);
7965 }
7966
7967+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7968+{
7969+ return cmpxchg(&v->counter, old, new);
7970+}
7971+
7972 static inline long atomic_xchg(atomic_t *v, int new)
7973 {
7974 return xchg(&v->counter, new);
7975 }
7976
7977+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7978+{
7979+ return xchg(&v->counter, new);
7980+}
7981+
7982 /**
7983 * atomic_add_unless - add unless the number is a given value
7984 * @v: pointer of type atomic_t
7985@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
7986 */
7987 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7988 {
7989- int c, old;
7990+ int c, old, new;
7991 c = atomic_read(v);
7992 for (;;) {
7993- if (unlikely(c == (u)))
7994+ if (unlikely(c == u))
7995 break;
7996- old = atomic_cmpxchg((v), c, c + (a));
7997+
7998+ asm volatile("addl %2,%0\n"
7999+
8000+#ifdef CONFIG_PAX_REFCOUNT
8001+ "jno 0f\n"
8002+ "subl %2,%0\n"
8003+ "int $4\n0:\n"
8004+ _ASM_EXTABLE(0b, 0b)
8005+#endif
8006+
8007+ : "=r" (new)
8008+ : "0" (c), "ir" (a));
8009+
8010+ old = atomic_cmpxchg(v, c, new);
8011 if (likely(old == c))
8012 break;
8013 c = old;
8014 }
8015- return c != (u);
8016+ return c != u;
8017 }
8018
8019 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8020@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8021 */
8022 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8023 {
8024- long c, old;
8025+ long c, old, new;
8026 c = atomic64_read(v);
8027 for (;;) {
8028- if (unlikely(c == (u)))
8029+ if (unlikely(c == u))
8030 break;
8031- old = atomic64_cmpxchg((v), c, c + (a));
8032+
8033+ asm volatile("addq %2,%0\n"
8034+
8035+#ifdef CONFIG_PAX_REFCOUNT
8036+ "jno 0f\n"
8037+ "subq %2,%0\n"
8038+ "int $4\n0:\n"
8039+ _ASM_EXTABLE(0b, 0b)
8040+#endif
8041+
8042+ : "=r" (new)
8043+ : "0" (c), "er" (a));
8044+
8045+ old = atomic64_cmpxchg(v, c, new);
8046 if (likely(old == c))
8047 break;
8048 c = old;
8049 }
8050- return c != (u);
8051+ return c != u;
8052 }
8053
8054 /**
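The pattern repeated throughout the atomic hunks above is the PAX_REFCOUNT overflow trap: perform the locked operation, branch on the overflow flag ("jno 0f"), undo the operation, and execute "int $4" so the #OF handler can react, with _ASM_EXTABLE resuming execution right after the trap. The *_unchecked variants keep plain wrapping semantics for counters (statistics and the like) that are allowed to overflow. Below is a minimal user-space C sketch of that checked/unchecked split; the type and function names are invented stand-ins, abort() stands in for the int $4 trap, atomicity is left out, and __builtin_add_overflow assumes GCC or Clang.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for atomic64_t / atomic64_unchecked_t. */
typedef struct { long counter; } atomic64_sketch_t;
typedef struct { long counter; } atomic64_unchecked_sketch_t;

/* Checked add: refuse to wrap.  The real patch undoes the addq with a
 * locked subq and raises int $4 instead of calling abort(). */
static void atomic64_add_checked(long i, atomic64_sketch_t *v)
{
        long sum;

        if (__builtin_add_overflow(v->counter, i, &sum)) {
                fprintf(stderr, "refcount overflow caught\n");
                abort();        /* stand-in for the int $4 trap */
        }
        v->counter = sum;
}

/* Unchecked add: plain wrapping arithmetic, like atomic64_add_unchecked().
 * The unsigned detour keeps the wrap well-defined in C. */
static void atomic64_add_unchecked_sketch(long i, atomic64_unchecked_sketch_t *v)
{
        v->counter = (long)((unsigned long)v->counter + (unsigned long)i);
}

int main(void)
{
        atomic64_unchecked_sketch_t stat = { .counter = LONG_MAX };
        atomic64_sketch_t ref = { .counter = LONG_MAX };

        atomic64_add_unchecked_sketch(1, &stat);        /* wraps silently */
        printf("statistics counter wrapped to %ld\n", stat.counter);

        atomic64_add_checked(1, &ref);                  /* aborts here */
        return 0;
}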
8055diff -urNp linux-2.6.32.45/arch/x86/include/asm/bitops.h linux-2.6.32.45/arch/x86/include/asm/bitops.h
8056--- linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8057+++ linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8058@@ -38,7 +38,7 @@
8059 * a mask operation on a byte.
8060 */
8061 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8062-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8063+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8064 #define CONST_MASK(nr) (1 << ((nr) & 7))
8065
8066 /**
8067diff -urNp linux-2.6.32.45/arch/x86/include/asm/boot.h linux-2.6.32.45/arch/x86/include/asm/boot.h
8068--- linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8069+++ linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8070@@ -11,10 +11,15 @@
8071 #include <asm/pgtable_types.h>
8072
8073 /* Physical address where kernel should be loaded. */
8074-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8075+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8076 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8077 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8078
8079+#ifndef __ASSEMBLY__
8080+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8081+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8082+#endif
8083+
8084 /* Minimum kernel alignment, as a power of two */
8085 #ifdef CONFIG_X86_64
8086 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8087diff -urNp linux-2.6.32.45/arch/x86/include/asm/cacheflush.h linux-2.6.32.45/arch/x86/include/asm/cacheflush.h
8088--- linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8089+++ linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8090@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8091 static inline unsigned long get_page_memtype(struct page *pg)
8092 {
8093 if (!PageUncached(pg) && !PageWC(pg))
8094- return -1;
8095+ return ~0UL;
8096 else if (!PageUncached(pg) && PageWC(pg))
8097 return _PAGE_CACHE_WC;
8098 else if (PageUncached(pg) && !PageWC(pg))
8099@@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8100 SetPageWC(pg);
8101 break;
8102 default:
8103- case -1:
8104+ case ~0UL:
8105 ClearPageUncached(pg);
8106 ClearPageWC(pg);
8107 break;
8108diff -urNp linux-2.6.32.45/arch/x86/include/asm/cache.h linux-2.6.32.45/arch/x86/include/asm/cache.h
8109--- linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8110+++ linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8111@@ -5,9 +5,10 @@
8112
8113 /* L1 cache line size */
8114 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8115-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8116+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8117
8118 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8119+#define __read_only __attribute__((__section__(".data.read_only")))
8120
8121 #ifdef CONFIG_X86_VSMP
8122 /* vSMP Internode cacheline shift */
8123diff -urNp linux-2.6.32.45/arch/x86/include/asm/checksum_32.h linux-2.6.32.45/arch/x86/include/asm/checksum_32.h
8124--- linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8125+++ linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8126@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8127 int len, __wsum sum,
8128 int *src_err_ptr, int *dst_err_ptr);
8129
8130+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8131+ int len, __wsum sum,
8132+ int *src_err_ptr, int *dst_err_ptr);
8133+
8134+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8135+ int len, __wsum sum,
8136+ int *src_err_ptr, int *dst_err_ptr);
8137+
8138 /*
8139 * Note: when you get a NULL pointer exception here this means someone
8140 * passed in an incorrect kernel address to one of these functions.
8141@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8142 int *err_ptr)
8143 {
8144 might_sleep();
8145- return csum_partial_copy_generic((__force void *)src, dst,
8146+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8147 len, sum, err_ptr, NULL);
8148 }
8149
8150@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8151 {
8152 might_sleep();
8153 if (access_ok(VERIFY_WRITE, dst, len))
8154- return csum_partial_copy_generic(src, (__force void *)dst,
8155+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8156 len, sum, NULL, err_ptr);
8157
8158 if (len)
8159diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc_defs.h linux-2.6.32.45/arch/x86/include/asm/desc_defs.h
8160--- linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8161+++ linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8162@@ -31,6 +31,12 @@ struct desc_struct {
8163 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8164 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8165 };
8166+ struct {
8167+ u16 offset_low;
8168+ u16 seg;
8169+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8170+ unsigned offset_high: 16;
8171+ } gate;
8172 };
8173 } __attribute__((packed));
8174
8175diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc.h linux-2.6.32.45/arch/x86/include/asm/desc.h
8176--- linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8177+++ linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8178@@ -4,6 +4,7 @@
8179 #include <asm/desc_defs.h>
8180 #include <asm/ldt.h>
8181 #include <asm/mmu.h>
8182+#include <asm/pgtable.h>
8183 #include <linux/smp.h>
8184
8185 static inline void fill_ldt(struct desc_struct *desc,
8186@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8187 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8188 desc->type = (info->read_exec_only ^ 1) << 1;
8189 desc->type |= info->contents << 2;
8190+ desc->type |= info->seg_not_present ^ 1;
8191 desc->s = 1;
8192 desc->dpl = 0x3;
8193 desc->p = info->seg_not_present ^ 1;
8194@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8195 }
8196
8197 extern struct desc_ptr idt_descr;
8198-extern gate_desc idt_table[];
8199-
8200-struct gdt_page {
8201- struct desc_struct gdt[GDT_ENTRIES];
8202-} __attribute__((aligned(PAGE_SIZE)));
8203-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8204+extern gate_desc idt_table[256];
8205
8206+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8207 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8208 {
8209- return per_cpu(gdt_page, cpu).gdt;
8210+ return cpu_gdt_table[cpu];
8211 }
8212
8213 #ifdef CONFIG_X86_64
8214@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8215 unsigned long base, unsigned dpl, unsigned flags,
8216 unsigned short seg)
8217 {
8218- gate->a = (seg << 16) | (base & 0xffff);
8219- gate->b = (base & 0xffff0000) |
8220- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8221+ gate->gate.offset_low = base;
8222+ gate->gate.seg = seg;
8223+ gate->gate.reserved = 0;
8224+ gate->gate.type = type;
8225+ gate->gate.s = 0;
8226+ gate->gate.dpl = dpl;
8227+ gate->gate.p = 1;
8228+ gate->gate.offset_high = base >> 16;
8229 }
8230
8231 #endif
8232@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8233 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8234 const gate_desc *gate)
8235 {
8236+ pax_open_kernel();
8237 memcpy(&idt[entry], gate, sizeof(*gate));
8238+ pax_close_kernel();
8239 }
8240
8241 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8242 const void *desc)
8243 {
8244+ pax_open_kernel();
8245 memcpy(&ldt[entry], desc, 8);
8246+ pax_close_kernel();
8247 }
8248
8249 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8250@@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8251 size = sizeof(struct desc_struct);
8252 break;
8253 }
8254+
8255+ pax_open_kernel();
8256 memcpy(&gdt[entry], desc, size);
8257+ pax_close_kernel();
8258 }
8259
8260 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8261@@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8262
8263 static inline void native_load_tr_desc(void)
8264 {
8265+ pax_open_kernel();
8266 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8267+ pax_close_kernel();
8268 }
8269
8270 static inline void native_load_gdt(const struct desc_ptr *dtr)
8271@@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8272 unsigned int i;
8273 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8274
8275+ pax_open_kernel();
8276 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8277 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8278+ pax_close_kernel();
8279 }
8280
8281 #define _LDT_empty(info) \
8282@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8283 desc->limit = (limit >> 16) & 0xf;
8284 }
8285
8286-static inline void _set_gate(int gate, unsigned type, void *addr,
8287+static inline void _set_gate(int gate, unsigned type, const void *addr,
8288 unsigned dpl, unsigned ist, unsigned seg)
8289 {
8290 gate_desc s;
8291@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8292 * Pentium F0 0F bugfix can have resulted in the mapped
8293 * IDT being write-protected.
8294 */
8295-static inline void set_intr_gate(unsigned int n, void *addr)
8296+static inline void set_intr_gate(unsigned int n, const void *addr)
8297 {
8298 BUG_ON((unsigned)n > 0xFF);
8299 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8300@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8301 /*
8302 * This routine sets up an interrupt gate at directory privilege level 3.
8303 */
8304-static inline void set_system_intr_gate(unsigned int n, void *addr)
8305+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8306 {
8307 BUG_ON((unsigned)n > 0xFF);
8308 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8309 }
8310
8311-static inline void set_system_trap_gate(unsigned int n, void *addr)
8312+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8313 {
8314 BUG_ON((unsigned)n > 0xFF);
8315 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8316 }
8317
8318-static inline void set_trap_gate(unsigned int n, void *addr)
8319+static inline void set_trap_gate(unsigned int n, const void *addr)
8320 {
8321 BUG_ON((unsigned)n > 0xFF);
8322 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8323@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8324 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8325 {
8326 BUG_ON((unsigned)n > 0xFF);
8327- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8328+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8329 }
8330
8331-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8332+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8333 {
8334 BUG_ON((unsigned)n > 0xFF);
8335 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8336 }
8337
8338-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8339+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8340 {
8341 BUG_ON((unsigned)n > 0xFF);
8342 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8343 }
8344
8345+#ifdef CONFIG_X86_32
8346+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8347+{
8348+ struct desc_struct d;
8349+
8350+ if (likely(limit))
8351+ limit = (limit - 1UL) >> PAGE_SHIFT;
8352+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8353+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8354+}
8355+#endif
8356+
8357 #endif /* _ASM_X86_DESC_H */
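The pack_gate() rewrite above swaps the hand-assembled a/b word pair for the named "gate" bitfield view added to desc_defs.h; the two encodings are supposed to yield identical descriptor words. A small host-side equivalence check is sketched below. It assumes GCC/Clang bitfield allocation on little-endian x86, uses 0xE (interrupt gate) as the type and an arbitrary selector, and the gate_desc_sketch / pack_gate_* names are invented for the sketch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Legacy two-word view and the new named-bitfield view of a 32-bit gate. */
typedef union {
        struct { uint32_t a, b; } words;
        struct {
                uint16_t offset_low;
                uint16_t seg;
                unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
                unsigned offset_high: 16;
        } gate;
} gate_desc_sketch;

/* Encoding from the removed lines of pack_gate(). */
static void pack_gate_legacy(gate_desc_sketch *g, unsigned type, uint32_t base,
                             unsigned dpl, uint16_t seg)
{
        g->words.a = ((uint32_t)seg << 16) | (base & 0xffff);
        g->words.b = (base & 0xffff0000) |
                     (((0x80 | type | (dpl << 5)) & 0xff) << 8);
}

/* Encoding from the added lines of pack_gate(). */
static void pack_gate_fields(gate_desc_sketch *g, unsigned type, uint32_t base,
                             unsigned dpl, uint16_t seg)
{
        g->gate.offset_low  = (uint16_t)base;
        g->gate.seg         = seg;
        g->gate.reserved    = 0;
        g->gate.type        = type;
        g->gate.s           = 0;
        g->gate.dpl         = dpl;
        g->gate.p           = 1;
        g->gate.offset_high = base >> 16;
}

int main(void)
{
        gate_desc_sketch x = {0}, y = {0};

        pack_gate_legacy(&x, 0xE, 0xc1000000u, 0, 0x10);
        pack_gate_fields(&y, 0xE, 0xc1000000u, 0, 0x10);

        assert(x.words.a == y.words.a && x.words.b == y.words.b);
        printf("a=%08x b=%08x (both encodings agree)\n", x.words.a, x.words.b);
        return 0;
}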
8358diff -urNp linux-2.6.32.45/arch/x86/include/asm/device.h linux-2.6.32.45/arch/x86/include/asm/device.h
8359--- linux-2.6.32.45/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8360+++ linux-2.6.32.45/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8361@@ -6,7 +6,7 @@ struct dev_archdata {
8362 void *acpi_handle;
8363 #endif
8364 #ifdef CONFIG_X86_64
8365-struct dma_map_ops *dma_ops;
8366+ const struct dma_map_ops *dma_ops;
8367 #endif
8368 #ifdef CONFIG_DMAR
8369 void *iommu; /* hook for IOMMU specific extension */
8370diff -urNp linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h
8371--- linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8372+++ linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8373@@ -25,9 +25,9 @@ extern int iommu_merge;
8374 extern struct device x86_dma_fallback_dev;
8375 extern int panic_on_overflow;
8376
8377-extern struct dma_map_ops *dma_ops;
8378+extern const struct dma_map_ops *dma_ops;
8379
8380-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8381+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8382 {
8383 #ifdef CONFIG_X86_32
8384 return dma_ops;
8385@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8386 /* Make sure we keep the same behaviour */
8387 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8388 {
8389- struct dma_map_ops *ops = get_dma_ops(dev);
8390+ const struct dma_map_ops *ops = get_dma_ops(dev);
8391 if (ops->mapping_error)
8392 return ops->mapping_error(dev, dma_addr);
8393
8394@@ -122,7 +122,7 @@ static inline void *
8395 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8396 gfp_t gfp)
8397 {
8398- struct dma_map_ops *ops = get_dma_ops(dev);
8399+ const struct dma_map_ops *ops = get_dma_ops(dev);
8400 void *memory;
8401
8402 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8403@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8404 static inline void dma_free_coherent(struct device *dev, size_t size,
8405 void *vaddr, dma_addr_t bus)
8406 {
8407- struct dma_map_ops *ops = get_dma_ops(dev);
8408+ const struct dma_map_ops *ops = get_dma_ops(dev);
8409
8410 WARN_ON(irqs_disabled()); /* for portability */
8411
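dev_archdata and get_dma_ops() are switched to const struct dma_map_ops * here; the same constification is applied below to microcode_ops, pci_raw_ops and kvm_x86_ops. The point is that a const-qualified table of function pointers can be placed in read-only memory and its hooks cannot be retargeted through the pointer. A minimal sketch with an invented dummy_ops type (not a kernel API):

#include <stdio.h>

/* An invented ops table standing in for dma_map_ops and friends. */
struct dummy_ops {
        int (*mapping_error)(int dma_addr);
};

static int never_fails(int dma_addr)
{
        (void)dma_addr;
        return 0;
}

/* const: the table can live in .rodata and cannot be retargeted later. */
static const struct dummy_ops nommu_ops_sketch = {
        .mapping_error = never_fails,
};

static int check_mapping(const struct dummy_ops *ops, int dma_addr)
{
        /* ops->mapping_error = NULL;   <- rejected at compile time */
        return ops->mapping_error ? ops->mapping_error(dma_addr) : 0;
}

int main(void)
{
        printf("mapping_error -> %d\n", check_mapping(&nommu_ops_sketch, 42));
        return 0;
}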
8412diff -urNp linux-2.6.32.45/arch/x86/include/asm/e820.h linux-2.6.32.45/arch/x86/include/asm/e820.h
8413--- linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8414+++ linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8415@@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8416 #define ISA_END_ADDRESS 0x100000
8417 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8418
8419-#define BIOS_BEGIN 0x000a0000
8420+#define BIOS_BEGIN 0x000c0000
8421 #define BIOS_END 0x00100000
8422
8423 #ifdef __KERNEL__
8424diff -urNp linux-2.6.32.45/arch/x86/include/asm/elf.h linux-2.6.32.45/arch/x86/include/asm/elf.h
8425--- linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8426+++ linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8427@@ -257,7 +257,25 @@ extern int force_personality32;
8428 the loader. We need to make sure that it is out of the way of the program
8429 that it will "exec", and that there is sufficient room for the brk. */
8430
8431+#ifdef CONFIG_PAX_SEGMEXEC
8432+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8433+#else
8434 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8435+#endif
8436+
8437+#ifdef CONFIG_PAX_ASLR
8438+#ifdef CONFIG_X86_32
8439+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8440+
8441+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8442+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8443+#else
8444+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8445+
8446+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8447+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8448+#endif
8449+#endif
8450
8451 /* This yields a mask that user programs can use to figure out what
8452 instruction set this CPU supports. This could be done in user space,
8453@@ -311,8 +329,7 @@ do { \
8454 #define ARCH_DLINFO \
8455 do { \
8456 if (vdso_enabled) \
8457- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8458- (unsigned long)current->mm->context.vdso); \
8459+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8460 } while (0)
8461
8462 #define AT_SYSINFO 32
8463@@ -323,7 +340,7 @@ do { \
8464
8465 #endif /* !CONFIG_X86_32 */
8466
8467-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8468+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8469
8470 #define VDSO_ENTRY \
8471 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8472@@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8473 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8474 #define compat_arch_setup_additional_pages syscall32_setup_pages
8475
8476-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8477-#define arch_randomize_brk arch_randomize_brk
8478-
8479 #endif /* _ASM_X86_ELF_H */
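The PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN values above count random bits applied at page granularity, so a value of N means the corresponding base can land anywhere in a 2^(N + PAGE_SHIFT)-byte window. The quick arithmetic below assumes 4 KiB pages and TASK_SIZE_MAX_SHIFT == 47 (the usual value for 64-bit x86 in this kernel series); it is only a back-of-the-envelope check, not kernel code.

#include <stdio.h>

int main(void)
{
        const unsigned page_shift = 12;         /* 4 KiB pages */
        const unsigned deltas[] = {
                15,                             /* i386 + SEGMEXEC */
                16,                             /* i386, or ia32 compat on x86_64 */
                47 - page_shift - 3,            /* x86_64 native: TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 */
        };
        const char *desc[] = { "i386 + SEGMEXEC", "i386 / ia32 compat", "x86_64 native" };

        for (unsigned i = 0; i < 3; i++)
                printf("%-20s %2u bits -> %llu MiB of mmap base randomization\n",
                       desc[i], deltas[i],
                       (1ULL << (deltas[i] + page_shift)) / (1024 * 1024));
        return 0;
}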
8480diff -urNp linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h
8481--- linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8482+++ linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8483@@ -15,6 +15,6 @@ enum reboot_type {
8484
8485 extern enum reboot_type reboot_type;
8486
8487-extern void machine_emergency_restart(void);
8488+extern void machine_emergency_restart(void) __noreturn;
8489
8490 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8491diff -urNp linux-2.6.32.45/arch/x86/include/asm/futex.h linux-2.6.32.45/arch/x86/include/asm/futex.h
8492--- linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8493+++ linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8494@@ -12,16 +12,18 @@
8495 #include <asm/system.h>
8496
8497 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8498+ typecheck(u32 *, uaddr); \
8499 asm volatile("1:\t" insn "\n" \
8500 "2:\t.section .fixup,\"ax\"\n" \
8501 "3:\tmov\t%3, %1\n" \
8502 "\tjmp\t2b\n" \
8503 "\t.previous\n" \
8504 _ASM_EXTABLE(1b, 3b) \
8505- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8506+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8507 : "i" (-EFAULT), "0" (oparg), "1" (0))
8508
8509 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8510+ typecheck(u32 *, uaddr); \
8511 asm volatile("1:\tmovl %2, %0\n" \
8512 "\tmovl\t%0, %3\n" \
8513 "\t" insn "\n" \
8514@@ -34,10 +36,10 @@
8515 _ASM_EXTABLE(1b, 4b) \
8516 _ASM_EXTABLE(2b, 4b) \
8517 : "=&a" (oldval), "=&r" (ret), \
8518- "+m" (*uaddr), "=&r" (tem) \
8519+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8520 : "r" (oparg), "i" (-EFAULT), "1" (0))
8521
8522-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8523+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8524 {
8525 int op = (encoded_op >> 28) & 7;
8526 int cmp = (encoded_op >> 24) & 15;
8527@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8528
8529 switch (op) {
8530 case FUTEX_OP_SET:
8531- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8532+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8533 break;
8534 case FUTEX_OP_ADD:
8535- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8536+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8537 uaddr, oparg);
8538 break;
8539 case FUTEX_OP_OR:
8540@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8541 return ret;
8542 }
8543
8544-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8545+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8546 int newval)
8547 {
8548
8549@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8550 return -ENOSYS;
8551 #endif
8552
8553- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8554+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8555 return -EFAULT;
8556
8557- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8558+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8559 "2:\t.section .fixup, \"ax\"\n"
8560 "3:\tmov %2, %0\n"
8561 "\tjmp 2b\n"
8562 "\t.previous\n"
8563 _ASM_EXTABLE(1b, 3b)
8564- : "=a" (oldval), "+m" (*uaddr)
8565+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8566 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8567 : "memory"
8568 );
8569diff -urNp linux-2.6.32.45/arch/x86/include/asm/hw_irq.h linux-2.6.32.45/arch/x86/include/asm/hw_irq.h
8570--- linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8571+++ linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8572@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8573 extern void enable_IO_APIC(void);
8574
8575 /* Statistics */
8576-extern atomic_t irq_err_count;
8577-extern atomic_t irq_mis_count;
8578+extern atomic_unchecked_t irq_err_count;
8579+extern atomic_unchecked_t irq_mis_count;
8580
8581 /* EISA */
8582 extern void eisa_set_level_irq(unsigned int irq);
8583diff -urNp linux-2.6.32.45/arch/x86/include/asm/i387.h linux-2.6.32.45/arch/x86/include/asm/i387.h
8584--- linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8585+++ linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8586@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8587 {
8588 int err;
8589
8590+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8591+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8592+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8593+#endif
8594+
8595 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8596 "2:\n"
8597 ".section .fixup,\"ax\"\n"
8598@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8599 {
8600 int err;
8601
8602+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8603+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8604+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8605+#endif
8606+
8607 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8608 "2:\n"
8609 ".section .fixup,\"ax\"\n"
8610@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8611 }
8612
8613 /* We need a safe address that is cheap to find and that is already
8614- in L1 during context switch. The best choices are unfortunately
8615- different for UP and SMP */
8616-#ifdef CONFIG_SMP
8617-#define safe_address (__per_cpu_offset[0])
8618-#else
8619-#define safe_address (kstat_cpu(0).cpustat.user)
8620-#endif
8621+ in L1 during context switch. */
8622+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8623
8624 /*
8625 * These must be called with preempt disabled
8626@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8627 struct thread_info *me = current_thread_info();
8628 preempt_disable();
8629 if (me->status & TS_USEDFPU)
8630- __save_init_fpu(me->task);
8631+ __save_init_fpu(current);
8632 else
8633 clts();
8634 }
8635diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_32.h linux-2.6.32.45/arch/x86/include/asm/io_32.h
8636--- linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8637+++ linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8638@@ -3,6 +3,7 @@
8639
8640 #include <linux/string.h>
8641 #include <linux/compiler.h>
8642+#include <asm/processor.h>
8643
8644 /*
8645 * This file contains the definitions for the x86 IO instructions
8646@@ -42,6 +43,17 @@
8647
8648 #ifdef __KERNEL__
8649
8650+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8651+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8652+{
8653+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8654+}
8655+
8656+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8657+{
8658+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8659+}
8660+
8661 #include <asm-generic/iomap.h>
8662
8663 #include <linux/vmalloc.h>
8664diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_64.h linux-2.6.32.45/arch/x86/include/asm/io_64.h
8665--- linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8666+++ linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8667@@ -140,6 +140,17 @@ __OUTS(l)
8668
8669 #include <linux/vmalloc.h>
8670
8671+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8672+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8673+{
8674+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8675+}
8676+
8677+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8678+{
8679+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8680+}
8681+
8682 #include <asm-generic/iomap.h>
8683
8684 void __memcpy_fromio(void *, unsigned long, unsigned);
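io_32.h and io_64.h both gain the same ARCH_HAS_VALID_PHYS_ADDR_RANGE helpers above: the end of the requested range is rounded up to the next page and the resulting page frame number is compared against the number of physical pages the CPU can address (1 << (x86_phys_bits - PAGE_SHIFT)). The sketch below reruns that arithmetic on the host with a hard-coded 36-bit physical address width; the kernel versions read the width from boot_cpu_data, and the function name here is invented.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Same arithmetic as the added valid_phys_addr_range(). */
static int valid_phys_addr_range_sketch(unsigned long long addr, size_t count,
                                        unsigned int phys_bits)
{
        return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
               (1ULL << (phys_bits - PAGE_SHIFT)) ? 1 : 0;
}

int main(void)
{
        /* With 36 physical address bits the limit is 2^24 pages, i.e. 64 GiB. */
        printf("%d\n", valid_phys_addr_range_sketch(0xFFFFF000ULL, 4096, 36));  /* 1: ends well below 64 GiB */
        printf("%d\n", valid_phys_addr_range_sketch(0xFFFFFFFFFULL, 4096, 36)); /* 0: range crosses the 64 GiB limit */
        return 0;
}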
8685diff -urNp linux-2.6.32.45/arch/x86/include/asm/iommu.h linux-2.6.32.45/arch/x86/include/asm/iommu.h
8686--- linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8687+++ linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8688@@ -3,7 +3,7 @@
8689
8690 extern void pci_iommu_shutdown(void);
8691 extern void no_iommu_init(void);
8692-extern struct dma_map_ops nommu_dma_ops;
8693+extern const struct dma_map_ops nommu_dma_ops;
8694 extern int force_iommu, no_iommu;
8695 extern int iommu_detected;
8696 extern int iommu_pass_through;
8697diff -urNp linux-2.6.32.45/arch/x86/include/asm/irqflags.h linux-2.6.32.45/arch/x86/include/asm/irqflags.h
8698--- linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8699+++ linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8700@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8701 sti; \
8702 sysexit
8703
8704+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8705+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8706+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8707+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8708+
8709 #else
8710 #define INTERRUPT_RETURN iret
8711 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8712diff -urNp linux-2.6.32.45/arch/x86/include/asm/kprobes.h linux-2.6.32.45/arch/x86/include/asm/kprobes.h
8713--- linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8714+++ linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8715@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8716 #define BREAKPOINT_INSTRUCTION 0xcc
8717 #define RELATIVEJUMP_INSTRUCTION 0xe9
8718 #define MAX_INSN_SIZE 16
8719-#define MAX_STACK_SIZE 64
8720-#define MIN_STACK_SIZE(ADDR) \
8721- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8722- THREAD_SIZE - (unsigned long)(ADDR))) \
8723- ? (MAX_STACK_SIZE) \
8724- : (((unsigned long)current_thread_info()) + \
8725- THREAD_SIZE - (unsigned long)(ADDR)))
8726+#define MAX_STACK_SIZE 64UL
8727+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8728
8729 #define flush_insn_slot(p) do { } while (0)
8730
8731diff -urNp linux-2.6.32.45/arch/x86/include/asm/kvm_host.h linux-2.6.32.45/arch/x86/include/asm/kvm_host.h
8732--- linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8733+++ linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8734@@ -536,7 +536,7 @@ struct kvm_x86_ops {
8735 const struct trace_print_flags *exit_reasons_str;
8736 };
8737
8738-extern struct kvm_x86_ops *kvm_x86_ops;
8739+extern const struct kvm_x86_ops *kvm_x86_ops;
8740
8741 int kvm_mmu_module_init(void);
8742 void kvm_mmu_module_exit(void);
8743diff -urNp linux-2.6.32.45/arch/x86/include/asm/local.h linux-2.6.32.45/arch/x86/include/asm/local.h
8744--- linux-2.6.32.45/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8745+++ linux-2.6.32.45/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8746@@ -18,26 +18,58 @@ typedef struct {
8747
8748 static inline void local_inc(local_t *l)
8749 {
8750- asm volatile(_ASM_INC "%0"
8751+ asm volatile(_ASM_INC "%0\n"
8752+
8753+#ifdef CONFIG_PAX_REFCOUNT
8754+ "jno 0f\n"
8755+ _ASM_DEC "%0\n"
8756+ "int $4\n0:\n"
8757+ _ASM_EXTABLE(0b, 0b)
8758+#endif
8759+
8760 : "+m" (l->a.counter));
8761 }
8762
8763 static inline void local_dec(local_t *l)
8764 {
8765- asm volatile(_ASM_DEC "%0"
8766+ asm volatile(_ASM_DEC "%0\n"
8767+
8768+#ifdef CONFIG_PAX_REFCOUNT
8769+ "jno 0f\n"
8770+ _ASM_INC "%0\n"
8771+ "int $4\n0:\n"
8772+ _ASM_EXTABLE(0b, 0b)
8773+#endif
8774+
8775 : "+m" (l->a.counter));
8776 }
8777
8778 static inline void local_add(long i, local_t *l)
8779 {
8780- asm volatile(_ASM_ADD "%1,%0"
8781+ asm volatile(_ASM_ADD "%1,%0\n"
8782+
8783+#ifdef CONFIG_PAX_REFCOUNT
8784+ "jno 0f\n"
8785+ _ASM_SUB "%1,%0\n"
8786+ "int $4\n0:\n"
8787+ _ASM_EXTABLE(0b, 0b)
8788+#endif
8789+
8790 : "+m" (l->a.counter)
8791 : "ir" (i));
8792 }
8793
8794 static inline void local_sub(long i, local_t *l)
8795 {
8796- asm volatile(_ASM_SUB "%1,%0"
8797+ asm volatile(_ASM_SUB "%1,%0\n"
8798+
8799+#ifdef CONFIG_PAX_REFCOUNT
8800+ "jno 0f\n"
8801+ _ASM_ADD "%1,%0\n"
8802+ "int $4\n0:\n"
8803+ _ASM_EXTABLE(0b, 0b)
8804+#endif
8805+
8806 : "+m" (l->a.counter)
8807 : "ir" (i));
8808 }
8809@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8810 {
8811 unsigned char c;
8812
8813- asm volatile(_ASM_SUB "%2,%0; sete %1"
8814+ asm volatile(_ASM_SUB "%2,%0\n"
8815+
8816+#ifdef CONFIG_PAX_REFCOUNT
8817+ "jno 0f\n"
8818+ _ASM_ADD "%2,%0\n"
8819+ "int $4\n0:\n"
8820+ _ASM_EXTABLE(0b, 0b)
8821+#endif
8822+
8823+ "sete %1\n"
8824 : "+m" (l->a.counter), "=qm" (c)
8825 : "ir" (i) : "memory");
8826 return c;
8827@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8828 {
8829 unsigned char c;
8830
8831- asm volatile(_ASM_DEC "%0; sete %1"
8832+ asm volatile(_ASM_DEC "%0\n"
8833+
8834+#ifdef CONFIG_PAX_REFCOUNT
8835+ "jno 0f\n"
8836+ _ASM_INC "%0\n"
8837+ "int $4\n0:\n"
8838+ _ASM_EXTABLE(0b, 0b)
8839+#endif
8840+
8841+ "sete %1\n"
8842 : "+m" (l->a.counter), "=qm" (c)
8843 : : "memory");
8844 return c != 0;
8845@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8846 {
8847 unsigned char c;
8848
8849- asm volatile(_ASM_INC "%0; sete %1"
8850+ asm volatile(_ASM_INC "%0\n"
8851+
8852+#ifdef CONFIG_PAX_REFCOUNT
8853+ "jno 0f\n"
8854+ _ASM_DEC "%0\n"
8855+ "int $4\n0:\n"
8856+ _ASM_EXTABLE(0b, 0b)
8857+#endif
8858+
8859+ "sete %1\n"
8860 : "+m" (l->a.counter), "=qm" (c)
8861 : : "memory");
8862 return c != 0;
8863@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8864 {
8865 unsigned char c;
8866
8867- asm volatile(_ASM_ADD "%2,%0; sets %1"
8868+ asm volatile(_ASM_ADD "%2,%0\n"
8869+
8870+#ifdef CONFIG_PAX_REFCOUNT
8871+ "jno 0f\n"
8872+ _ASM_SUB "%2,%0\n"
8873+ "int $4\n0:\n"
8874+ _ASM_EXTABLE(0b, 0b)
8875+#endif
8876+
8877+ "sets %1\n"
8878 : "+m" (l->a.counter), "=qm" (c)
8879 : "ir" (i) : "memory");
8880 return c;
8881@@ -133,7 +201,15 @@ static inline long local_add_return(long
8882 #endif
8883 /* Modern 486+ processor */
8884 __i = i;
8885- asm volatile(_ASM_XADD "%0, %1;"
8886+ asm volatile(_ASM_XADD "%0, %1\n"
8887+
8888+#ifdef CONFIG_PAX_REFCOUNT
8889+ "jno 0f\n"
8890+ _ASM_MOV "%0,%1\n"
8891+ "int $4\n0:\n"
8892+ _ASM_EXTABLE(0b, 0b)
8893+#endif
8894+
8895 : "+r" (i), "+m" (l->a.counter)
8896 : : "memory");
8897 return i + __i;
8898diff -urNp linux-2.6.32.45/arch/x86/include/asm/microcode.h linux-2.6.32.45/arch/x86/include/asm/microcode.h
8899--- linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8900+++ linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8901@@ -12,13 +12,13 @@ struct device;
8902 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8903
8904 struct microcode_ops {
8905- enum ucode_state (*request_microcode_user) (int cpu,
8906+ enum ucode_state (* const request_microcode_user) (int cpu,
8907 const void __user *buf, size_t size);
8908
8909- enum ucode_state (*request_microcode_fw) (int cpu,
8910+ enum ucode_state (* const request_microcode_fw) (int cpu,
8911 struct device *device);
8912
8913- void (*microcode_fini_cpu) (int cpu);
8914+ void (* const microcode_fini_cpu) (int cpu);
8915
8916 /*
8917 * The generic 'microcode_core' part guarantees that
8918@@ -38,18 +38,18 @@ struct ucode_cpu_info {
8919 extern struct ucode_cpu_info ucode_cpu_info[];
8920
8921 #ifdef CONFIG_MICROCODE_INTEL
8922-extern struct microcode_ops * __init init_intel_microcode(void);
8923+extern const struct microcode_ops * __init init_intel_microcode(void);
8924 #else
8925-static inline struct microcode_ops * __init init_intel_microcode(void)
8926+static inline const struct microcode_ops * __init init_intel_microcode(void)
8927 {
8928 return NULL;
8929 }
8930 #endif /* CONFIG_MICROCODE_INTEL */
8931
8932 #ifdef CONFIG_MICROCODE_AMD
8933-extern struct microcode_ops * __init init_amd_microcode(void);
8934+extern const struct microcode_ops * __init init_amd_microcode(void);
8935 #else
8936-static inline struct microcode_ops * __init init_amd_microcode(void)
8937+static inline const struct microcode_ops * __init init_amd_microcode(void)
8938 {
8939 return NULL;
8940 }
8941diff -urNp linux-2.6.32.45/arch/x86/include/asm/mman.h linux-2.6.32.45/arch/x86/include/asm/mman.h
8942--- linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8943+++ linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8944@@ -5,4 +5,14 @@
8945
8946 #include <asm-generic/mman.h>
8947
8948+#ifdef __KERNEL__
8949+#ifndef __ASSEMBLY__
8950+#ifdef CONFIG_X86_32
8951+#define arch_mmap_check i386_mmap_check
8952+int i386_mmap_check(unsigned long addr, unsigned long len,
8953+ unsigned long flags);
8954+#endif
8955+#endif
8956+#endif
8957+
8958 #endif /* _ASM_X86_MMAN_H */
8959diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu_context.h linux-2.6.32.45/arch/x86/include/asm/mmu_context.h
8960--- linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8961+++ linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
8962@@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8963
8964 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8965 {
8966+
8967+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8968+ unsigned int i;
8969+ pgd_t *pgd;
8970+
8971+ pax_open_kernel();
8972+ pgd = get_cpu_pgd(smp_processor_id());
8973+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8974+ if (paravirt_enabled())
8975+ set_pgd(pgd+i, native_make_pgd(0));
8976+ else
8977+ pgd[i] = native_make_pgd(0);
8978+ pax_close_kernel();
8979+#endif
8980+
8981 #ifdef CONFIG_SMP
8982 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8983 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8984@@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
8985 struct task_struct *tsk)
8986 {
8987 unsigned cpu = smp_processor_id();
8988+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
8989+ int tlbstate = TLBSTATE_OK;
8990+#endif
8991
8992 if (likely(prev != next)) {
8993 #ifdef CONFIG_SMP
8994+#ifdef CONFIG_X86_32
8995+ tlbstate = percpu_read(cpu_tlbstate.state);
8996+#endif
8997 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8998 percpu_write(cpu_tlbstate.active_mm, next);
8999 #endif
9000 cpumask_set_cpu(cpu, mm_cpumask(next));
9001
9002 /* Re-load page tables */
9003+#ifdef CONFIG_PAX_PER_CPU_PGD
9004+ pax_open_kernel();
9005+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9006+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9007+ pax_close_kernel();
9008+ load_cr3(get_cpu_pgd(cpu));
9009+#else
9010 load_cr3(next->pgd);
9011+#endif
9012
9013 /* stop flush ipis for the previous mm */
9014 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9015@@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
9016 */
9017 if (unlikely(prev->context.ldt != next->context.ldt))
9018 load_LDT_nolock(&next->context);
9019- }
9020+
9021+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9022+ if (!nx_enabled) {
9023+ smp_mb__before_clear_bit();
9024+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9025+ smp_mb__after_clear_bit();
9026+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9027+ }
9028+#endif
9029+
9030+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9031+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9032+ prev->context.user_cs_limit != next->context.user_cs_limit))
9033+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9034 #ifdef CONFIG_SMP
9035+ else if (unlikely(tlbstate != TLBSTATE_OK))
9036+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9037+#endif
9038+#endif
9039+
9040+ }
9041 else {
9042+
9043+#ifdef CONFIG_PAX_PER_CPU_PGD
9044+ pax_open_kernel();
9045+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9046+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9047+ pax_close_kernel();
9048+ load_cr3(get_cpu_pgd(cpu));
9049+#endif
9050+
9051+#ifdef CONFIG_SMP
9052 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9053 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9054
9055@@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
9056 * tlb flush IPI delivery. We must reload CR3
9057 * to make sure to use no freed page tables.
9058 */
9059+
9060+#ifndef CONFIG_PAX_PER_CPU_PGD
9061 load_cr3(next->pgd);
9062+#endif
9063+
9064 load_LDT_nolock(&next->context);
9065+
9066+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9067+ if (!nx_enabled)
9068+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9069+#endif
9070+
9071+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9072+#ifdef CONFIG_PAX_PAGEEXEC
9073+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9074+#endif
9075+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9076+#endif
9077+
9078 }
9079- }
9080 #endif
9081+ }
9082 }
9083
9084 #define activate_mm(prev, next) \
9085diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu.h linux-2.6.32.45/arch/x86/include/asm/mmu.h
9086--- linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9087+++ linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9088@@ -9,10 +9,23 @@
9089 * we put the segment information here.
9090 */
9091 typedef struct {
9092- void *ldt;
9093+ struct desc_struct *ldt;
9094 int size;
9095 struct mutex lock;
9096- void *vdso;
9097+ unsigned long vdso;
9098+
9099+#ifdef CONFIG_X86_32
9100+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9101+ unsigned long user_cs_base;
9102+ unsigned long user_cs_limit;
9103+
9104+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9105+ cpumask_t cpu_user_cs_mask;
9106+#endif
9107+
9108+#endif
9109+#endif
9110+
9111 } mm_context_t;
9112
9113 #ifdef CONFIG_SMP
9114diff -urNp linux-2.6.32.45/arch/x86/include/asm/module.h linux-2.6.32.45/arch/x86/include/asm/module.h
9115--- linux-2.6.32.45/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9116+++ linux-2.6.32.45/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9117@@ -5,6 +5,7 @@
9118
9119 #ifdef CONFIG_X86_64
9120 /* X86_64 does not define MODULE_PROC_FAMILY */
9121+#define MODULE_PROC_FAMILY ""
9122 #elif defined CONFIG_M386
9123 #define MODULE_PROC_FAMILY "386 "
9124 #elif defined CONFIG_M486
9125@@ -59,13 +60,36 @@
9126 #error unknown processor family
9127 #endif
9128
9129-#ifdef CONFIG_X86_32
9130-# ifdef CONFIG_4KSTACKS
9131-# define MODULE_STACKSIZE "4KSTACKS "
9132-# else
9133-# define MODULE_STACKSIZE ""
9134-# endif
9135-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9136+#ifdef CONFIG_PAX_MEMORY_UDEREF
9137+#define MODULE_PAX_UDEREF "UDEREF "
9138+#else
9139+#define MODULE_PAX_UDEREF ""
9140+#endif
9141+
9142+#ifdef CONFIG_PAX_KERNEXEC
9143+#define MODULE_PAX_KERNEXEC "KERNEXEC "
9144+#else
9145+#define MODULE_PAX_KERNEXEC ""
9146+#endif
9147+
9148+#ifdef CONFIG_PAX_REFCOUNT
9149+#define MODULE_PAX_REFCOUNT "REFCOUNT "
9150+#else
9151+#define MODULE_PAX_REFCOUNT ""
9152 #endif
9153
9154+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9155+#define MODULE_STACKSIZE "4KSTACKS "
9156+#else
9157+#define MODULE_STACKSIZE ""
9158+#endif
9159+
9160+#ifdef CONFIG_GRKERNSEC
9161+#define MODULE_GRSEC "GRSECURITY "
9162+#else
9163+#define MODULE_GRSEC ""
9164+#endif
9165+
9166+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9167+
9168 #endif /* _ASM_X86_MODULE_H */
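The MODULE_ARCH_VERMAGIC definition above folds the enabled grsecurity/PaX features into the module vermagic string, so a module built with a different feature set fails the vermagic check at load time. The snippet below just replays the string concatenation for one assumed configuration (x86_64 with GRKERNSEC, KERNEXEC, UDEREF and REFCOUNT enabled); the macro values come from the hunk, the chosen configuration is only an example.

#include <stdio.h>

/* Values the hunk above would select for the assumed configuration. */
#define MODULE_PROC_FAMILY  ""              /* X86_64 defines no family */
#define MODULE_STACKSIZE    ""              /* 4KSTACKS is 32-bit only */
#define MODULE_GRSEC        "GRSECURITY "
#define MODULE_PAX_KERNEXEC "KERNEXEC "
#define MODULE_PAX_UDEREF   "UDEREF "
#define MODULE_PAX_REFCOUNT "REFCOUNT "

#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC \
        MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
        /* Prints: "GRSECURITY KERNEXEC UDEREF REFCOUNT " */
        printf("\"%s\"\n", MODULE_ARCH_VERMAGIC);
        return 0;
}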
9169diff -urNp linux-2.6.32.45/arch/x86/include/asm/page_64_types.h linux-2.6.32.45/arch/x86/include/asm/page_64_types.h
9170--- linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9171+++ linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9172@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9173
9174 /* duplicated to the one in bootmem.h */
9175 extern unsigned long max_pfn;
9176-extern unsigned long phys_base;
9177+extern const unsigned long phys_base;
9178
9179 extern unsigned long __phys_addr(unsigned long);
9180 #define __phys_reloc_hide(x) (x)
9181diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt.h linux-2.6.32.45/arch/x86/include/asm/paravirt.h
9182--- linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9183+++ linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9184@@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9185 pv_mmu_ops.set_fixmap(idx, phys, flags);
9186 }
9187
9188+#ifdef CONFIG_PAX_KERNEXEC
9189+static inline unsigned long pax_open_kernel(void)
9190+{
9191+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9192+}
9193+
9194+static inline unsigned long pax_close_kernel(void)
9195+{
9196+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9197+}
9198+#else
9199+static inline unsigned long pax_open_kernel(void) { return 0; }
9200+static inline unsigned long pax_close_kernel(void) { return 0; }
9201+#endif
9202+
9203 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9204
9205 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9206@@ -945,7 +960,7 @@ extern void default_banner(void);
9207
9208 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9209 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9210-#define PARA_INDIRECT(addr) *%cs:addr
9211+#define PARA_INDIRECT(addr) *%ss:addr
9212 #endif
9213
9214 #define INTERRUPT_RETURN \
9215@@ -1022,6 +1037,21 @@ extern void default_banner(void);
9216 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9217 CLBR_NONE, \
9218 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9219+
9220+#define GET_CR0_INTO_RDI \
9221+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9222+ mov %rax,%rdi
9223+
9224+#define SET_RDI_INTO_CR0 \
9225+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9226+
9227+#define GET_CR3_INTO_RDI \
9228+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9229+ mov %rax,%rdi
9230+
9231+#define SET_RDI_INTO_CR3 \
9232+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9233+
9234 #endif /* CONFIG_X86_32 */
9235
9236 #endif /* __ASSEMBLY__ */
9237diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h
9238--- linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9239+++ linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:33:55.000000000 -0400
9240@@ -78,19 +78,19 @@ struct pv_init_ops {
9241 */
9242 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9243 unsigned long addr, unsigned len);
9244-};
9245+} __no_const;
9246
9247
9248 struct pv_lazy_ops {
9249 /* Set deferred update mode, used for batching operations. */
9250 void (*enter)(void);
9251 void (*leave)(void);
9252-};
9253+} __no_const;
9254
9255 struct pv_time_ops {
9256 unsigned long long (*sched_clock)(void);
9257 unsigned long (*get_tsc_khz)(void);
9258-};
9259+} __no_const;
9260
9261 struct pv_cpu_ops {
9262 /* hooks for various privileged instructions */
9263@@ -186,7 +186,7 @@ struct pv_cpu_ops {
9264
9265 void (*start_context_switch)(struct task_struct *prev);
9266 void (*end_context_switch)(struct task_struct *next);
9267-};
9268+} __no_const;
9269
9270 struct pv_irq_ops {
9271 /*
9272@@ -217,7 +217,7 @@ struct pv_apic_ops {
9273 unsigned long start_eip,
9274 unsigned long start_esp);
9275 #endif
9276-};
9277+} __no_const;
9278
9279 struct pv_mmu_ops {
9280 unsigned long (*read_cr2)(void);
9281@@ -316,6 +316,12 @@ struct pv_mmu_ops {
9282 an mfn. We can tell which is which from the index. */
9283 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9284 phys_addr_t phys, pgprot_t flags);
9285+
9286+#ifdef CONFIG_PAX_KERNEXEC
9287+ unsigned long (*pax_open_kernel)(void);
9288+ unsigned long (*pax_close_kernel)(void);
9289+#endif
9290+
9291 };
9292
9293 struct raw_spinlock;
9294@@ -326,7 +332,7 @@ struct pv_lock_ops {
9295 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
9296 int (*spin_trylock)(struct raw_spinlock *lock);
9297 void (*spin_unlock)(struct raw_spinlock *lock);
9298-};
9299+} __no_const;
9300
9301 /* This contains all the paravirt structures: we get a convenient
9302 * number for each function using the offset which we use to indicate
9303diff -urNp linux-2.6.32.45/arch/x86/include/asm/pci_x86.h linux-2.6.32.45/arch/x86/include/asm/pci_x86.h
9304--- linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9305+++ linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9306@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9307 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9308
9309 struct pci_raw_ops {
9310- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9311+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9312 int reg, int len, u32 *val);
9313- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9314+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9315 int reg, int len, u32 val);
9316 };
9317
9318-extern struct pci_raw_ops *raw_pci_ops;
9319-extern struct pci_raw_ops *raw_pci_ext_ops;
9320+extern const struct pci_raw_ops *raw_pci_ops;
9321+extern const struct pci_raw_ops *raw_pci_ext_ops;
9322
9323-extern struct pci_raw_ops pci_direct_conf1;
9324+extern const struct pci_raw_ops pci_direct_conf1;
9325 extern bool port_cf9_safe;
9326
9327 /* arch_initcall level */
9328diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgalloc.h linux-2.6.32.45/arch/x86/include/asm/pgalloc.h
9329--- linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9330+++ linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9331@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9332 pmd_t *pmd, pte_t *pte)
9333 {
9334 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9335+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9336+}
9337+
9338+static inline void pmd_populate_user(struct mm_struct *mm,
9339+ pmd_t *pmd, pte_t *pte)
9340+{
9341+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9342 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9343 }
9344
9345diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h
9346--- linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9347+++ linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9348@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9349
9350 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9351 {
9352+ pax_open_kernel();
9353 *pmdp = pmd;
9354+ pax_close_kernel();
9355 }
9356
9357 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9358diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h
9359--- linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9360+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9361@@ -26,9 +26,6 @@
9362 struct mm_struct;
9363 struct vm_area_struct;
9364
9365-extern pgd_t swapper_pg_dir[1024];
9366-extern pgd_t trampoline_pg_dir[1024];
9367-
9368 static inline void pgtable_cache_init(void) { }
9369 static inline void check_pgt_cache(void) { }
9370 void paging_init(void);
9371@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9372 # include <asm/pgtable-2level.h>
9373 #endif
9374
9375+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9376+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9377+#ifdef CONFIG_X86_PAE
9378+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9379+#endif
9380+
9381 #if defined(CONFIG_HIGHPTE)
9382 #define __KM_PTE \
9383 (in_nmi() ? KM_NMI_PTE : \
9384@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9385 /* Clear a kernel PTE and flush it from the TLB */
9386 #define kpte_clear_flush(ptep, vaddr) \
9387 do { \
9388+ pax_open_kernel(); \
9389 pte_clear(&init_mm, (vaddr), (ptep)); \
9390+ pax_close_kernel(); \
9391 __flush_tlb_one((vaddr)); \
9392 } while (0)
9393
9394@@ -85,6 +90,9 @@ do { \
9395
9396 #endif /* !__ASSEMBLY__ */
9397
9398+#define HAVE_ARCH_UNMAPPED_AREA
9399+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9400+
9401 /*
9402 * kern_addr_valid() is (1) for FLATMEM and (0) for
9403 * SPARSEMEM and DISCONTIGMEM
9404diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h
9405--- linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9406+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9407@@ -8,7 +8,7 @@
9408 */
9409 #ifdef CONFIG_X86_PAE
9410 # include <asm/pgtable-3level_types.h>
9411-# define PMD_SIZE (1UL << PMD_SHIFT)
9412+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9413 # define PMD_MASK (~(PMD_SIZE - 1))
9414 #else
9415 # include <asm/pgtable-2level_types.h>
9416@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9417 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9418 #endif
9419
9420+#ifdef CONFIG_PAX_KERNEXEC
9421+#ifndef __ASSEMBLY__
9422+extern unsigned char MODULES_EXEC_VADDR[];
9423+extern unsigned char MODULES_EXEC_END[];
9424+#endif
9425+#include <asm/boot.h>
9426+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9427+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9428+#else
9429+#define ktla_ktva(addr) (addr)
9430+#define ktva_ktla(addr) (addr)
9431+#endif
9432+
9433 #define MODULES_VADDR VMALLOC_START
9434 #define MODULES_END VMALLOC_END
9435 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
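
Under CONFIG_PAX_KERNEXEC the new ktla_ktva()/ktva_ktla() macros above translate an address between the kernel text's linear mapping and its executable alias by adding or subtracting LOAD_PHYSICAL_ADDR + PAGE_OFFSET. A minimal user-space sketch of that arithmetic, assuming the common 32-bit defaults LOAD_PHYSICAL_ADDR = 0x1000000 and PAGE_OFFSET = 0xC0000000 (both are really Kconfig-dependent):

#include <stdio.h>

/* Assumed, Kconfig-dependent values -- not taken from this patch. */
#define LOAD_PHYSICAL_ADDR 0x1000000UL
#define PAGE_OFFSET        0xC0000000UL

/* Same shape as the KERNEXEC macros: shift an address into/out of the
 * kernel text alias by a constant offset. */
#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
        unsigned long ktla = 0xC1000000UL;      /* hypothetical kernel text address */
        unsigned long ktva = ktla_ktva(ktla);

        printf("ktla=%#lx -> ktva=%#lx -> back=%#lx\n",
               ktla, ktva, ktva_ktla(ktva));    /* round-trips to the original */
        return 0;
}

Whatever the actual constants, the two macros are exact inverses, which is all their callers rely on.
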
9436diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h
9437--- linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9438+++ linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9439@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9440
9441 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9442 {
9443+ pax_open_kernel();
9444 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9445+ pax_close_kernel();
9446 }
9447
9448 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9449 {
9450+ pax_open_kernel();
9451 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9452+ pax_close_kernel();
9453 }
9454
9455 /*
9456diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h
9457--- linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9458+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9459@@ -16,10 +16,13 @@
9460
9461 extern pud_t level3_kernel_pgt[512];
9462 extern pud_t level3_ident_pgt[512];
9463+extern pud_t level3_vmalloc_pgt[512];
9464+extern pud_t level3_vmemmap_pgt[512];
9465+extern pud_t level2_vmemmap_pgt[512];
9466 extern pmd_t level2_kernel_pgt[512];
9467 extern pmd_t level2_fixmap_pgt[512];
9468-extern pmd_t level2_ident_pgt[512];
9469-extern pgd_t init_level4_pgt[];
9470+extern pmd_t level2_ident_pgt[512*2];
9471+extern pgd_t init_level4_pgt[512];
9472
9473 #define swapper_pg_dir init_level4_pgt
9474
9475@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9476
9477 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9478 {
9479+ pax_open_kernel();
9480 *pmdp = pmd;
9481+ pax_close_kernel();
9482 }
9483
9484 static inline void native_pmd_clear(pmd_t *pmd)
9485@@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9486
9487 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9488 {
9489+ pax_open_kernel();
9490 *pgdp = pgd;
9491+ pax_close_kernel();
9492 }
9493
9494 static inline void native_pgd_clear(pgd_t *pgd)
9495diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h
9496--- linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9497+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9498@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9499 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9500 #define MODULES_END _AC(0xffffffffff000000, UL)
9501 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9502+#define MODULES_EXEC_VADDR MODULES_VADDR
9503+#define MODULES_EXEC_END MODULES_END
9504+
9505+#define ktla_ktva(addr) (addr)
9506+#define ktva_ktla(addr) (addr)
9507
9508 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9509diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable.h linux-2.6.32.45/arch/x86/include/asm/pgtable.h
9510--- linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9511+++ linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9512@@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9513
9514 #define arch_end_context_switch(prev) do {} while(0)
9515
9516+#define pax_open_kernel() native_pax_open_kernel()
9517+#define pax_close_kernel() native_pax_close_kernel()
9518 #endif /* CONFIG_PARAVIRT */
9519
9520+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9521+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9522+
9523+#ifdef CONFIG_PAX_KERNEXEC
9524+static inline unsigned long native_pax_open_kernel(void)
9525+{
9526+ unsigned long cr0;
9527+
9528+ preempt_disable();
9529+ barrier();
9530+ cr0 = read_cr0() ^ X86_CR0_WP;
9531+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9532+ write_cr0(cr0);
9533+ return cr0 ^ X86_CR0_WP;
9534+}
9535+
9536+static inline unsigned long native_pax_close_kernel(void)
9537+{
9538+ unsigned long cr0;
9539+
9540+ cr0 = read_cr0() ^ X86_CR0_WP;
9541+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9542+ write_cr0(cr0);
9543+ barrier();
9544+ preempt_enable_no_resched();
9545+ return cr0 ^ X86_CR0_WP;
9546+}
9547+#else
9548+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9549+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9550+#endif
9551+
9552 /*
9553 * The following only work if pte_present() is true.
9554 * Undefined behaviour if not..
9555 */
9556+static inline int pte_user(pte_t pte)
9557+{
9558+ return pte_val(pte) & _PAGE_USER;
9559+}
9560+
9561 static inline int pte_dirty(pte_t pte)
9562 {
9563 return pte_flags(pte) & _PAGE_DIRTY;
9564@@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9565 return pte_clear_flags(pte, _PAGE_RW);
9566 }
9567
9568+static inline pte_t pte_mkread(pte_t pte)
9569+{
9570+ return __pte(pte_val(pte) | _PAGE_USER);
9571+}
9572+
9573 static inline pte_t pte_mkexec(pte_t pte)
9574 {
9575- return pte_clear_flags(pte, _PAGE_NX);
9576+#ifdef CONFIG_X86_PAE
9577+ if (__supported_pte_mask & _PAGE_NX)
9578+ return pte_clear_flags(pte, _PAGE_NX);
9579+ else
9580+#endif
9581+ return pte_set_flags(pte, _PAGE_USER);
9582+}
9583+
9584+static inline pte_t pte_exprotect(pte_t pte)
9585+{
9586+#ifdef CONFIG_X86_PAE
9587+ if (__supported_pte_mask & _PAGE_NX)
9588+ return pte_set_flags(pte, _PAGE_NX);
9589+ else
9590+#endif
9591+ return pte_clear_flags(pte, _PAGE_USER);
9592 }
9593
9594 static inline pte_t pte_mkdirty(pte_t pte)
9595@@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9596 #endif
9597
9598 #ifndef __ASSEMBLY__
9599+
9600+#ifdef CONFIG_PAX_PER_CPU_PGD
9601+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9602+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9603+{
9604+ return cpu_pgd[cpu];
9605+}
9606+#endif
9607+
9608 #include <linux/mm_types.h>
9609
9610 static inline int pte_none(pte_t pte)
9611@@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9612
9613 static inline int pgd_bad(pgd_t pgd)
9614 {
9615- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9616+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9617 }
9618
9619 static inline int pgd_none(pgd_t pgd)
9620@@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9621 * pgd_offset() returns a (pgd_t *)
9622 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9623 */
9624-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9625+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9626+
9627+#ifdef CONFIG_PAX_PER_CPU_PGD
9628+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9629+#endif
9630+
9631 /*
9632 * a shortcut which implies the use of the kernel's pgd, instead
9633 * of a process's
9634@@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9635 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9636 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9637
9638+#ifdef CONFIG_X86_32
9639+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9640+#else
9641+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9642+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9643+
9644+#ifdef CONFIG_PAX_MEMORY_UDEREF
9645+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9646+#else
9647+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9648+#endif
9649+
9650+#endif
9651+
9652 #ifndef __ASSEMBLY__
9653
9654 extern int direct_gbpages;
9655@@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9656 * dst and src can be on the same page, but the range must not overlap,
9657 * and must not cross a page boundary.
9658 */
9659-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9660+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9661 {
9662- memcpy(dst, src, count * sizeof(pgd_t));
9663+ pax_open_kernel();
9664+ while (count--)
9665+ *dst++ = *src++;
9666+ pax_close_kernel();
9667 }
9668
9669+#ifdef CONFIG_PAX_PER_CPU_PGD
9670+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9671+#endif
9672+
9673+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9674+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9675+#else
9676+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9677+#endif
9678
9679 #include <asm-generic/pgtable.h>
9680 #endif /* __ASSEMBLY__ */
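
The native_pax_open_kernel()/native_pax_close_kernel() pair added in this header toggles CR0.WP so that KERNEXEC code can briefly write to otherwise read-only kernel data; the real functions also disable preemption and insert compiler barriers around the window. A user-space model of just the XOR toggle and its pairing checks, with a plain variable standing in for read_cr0()/write_cr0():

#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)  /* CR0 write-protect bit */

/* Stand-in for the control register; the kernel uses read_cr0()/write_cr0(). */
static unsigned long fake_cr0 = (1UL << 31) | X86_CR0_WP | 0x33UL;

static unsigned long open_kernel(void)
{
        unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;      /* WP should now be clear */
        assert(!(cr0 & X86_CR0_WP));                    /* i.e. it was set before */
        fake_cr0 = cr0;
        return cr0 ^ X86_CR0_WP;
}

static unsigned long close_kernel(void)
{
        unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;      /* WP should now be set */
        assert(cr0 & X86_CR0_WP);                       /* i.e. it was clear before */
        fake_cr0 = cr0;
        return cr0 ^ X86_CR0_WP;
}

int main(void)
{
        open_kernel();          /* writes to read-only kernel data would go here */
        close_kernel();
        printf("WP restored: %s\n", (fake_cr0 & X86_CR0_WP) ? "yes" : "no");
        return 0;
}

The BUG_ON()s in the kernel version play the role of the asserts here: opening an already-open window, or closing a closed one, signals unbalanced calls.
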
9681diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h
9682--- linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9683+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9684@@ -16,12 +16,11 @@
9685 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9686 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9687 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9688-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9689+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9690 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9691 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9692 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9693-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9694-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9695+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9696 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9697
9698 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9699@@ -39,7 +38,6 @@
9700 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9701 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9702 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9703-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9704 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9705 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9706 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9707@@ -55,8 +53,10 @@
9708
9709 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9710 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9711-#else
9712+#elif defined(CONFIG_KMEMCHECK)
9713 #define _PAGE_NX (_AT(pteval_t, 0))
9714+#else
9715+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9716 #endif
9717
9718 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9719@@ -93,6 +93,9 @@
9720 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9721 _PAGE_ACCESSED)
9722
9723+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9724+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9725+
9726 #define __PAGE_KERNEL_EXEC \
9727 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9728 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9729@@ -103,8 +106,8 @@
9730 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9731 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9732 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9733-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9734-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9735+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9736+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9737 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9738 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9739 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9740@@ -163,8 +166,8 @@
9741 * bits are combined, this will allow user to access the high address mapped
9742 * VDSO in the presence of CONFIG_COMPAT_VDSO
9743 */
9744-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9745-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9746+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9747+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9748 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9749 #endif
9750
9751@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9752 {
9753 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9754 }
9755+#endif
9756
9757+#if PAGETABLE_LEVELS == 3
9758+#include <asm-generic/pgtable-nopud.h>
9759+#endif
9760+
9761+#if PAGETABLE_LEVELS == 2
9762+#include <asm-generic/pgtable-nopmd.h>
9763+#endif
9764+
9765+#ifndef __ASSEMBLY__
9766 #if PAGETABLE_LEVELS > 3
9767 typedef struct { pudval_t pud; } pud_t;
9768
9769@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9770 return pud.pud;
9771 }
9772 #else
9773-#include <asm-generic/pgtable-nopud.h>
9774-
9775 static inline pudval_t native_pud_val(pud_t pud)
9776 {
9777 return native_pgd_val(pud.pgd);
9778@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9779 return pmd.pmd;
9780 }
9781 #else
9782-#include <asm-generic/pgtable-nopmd.h>
9783-
9784 static inline pmdval_t native_pmd_val(pmd_t pmd)
9785 {
9786 return native_pgd_val(pmd.pud.pgd);
9787@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9788
9789 extern pteval_t __supported_pte_mask;
9790 extern void set_nx(void);
9791+
9792+#ifdef CONFIG_X86_32
9793+#ifdef CONFIG_X86_PAE
9794 extern int nx_enabled;
9795+#else
9796+#define nx_enabled (0)
9797+#endif
9798+#else
9799+#define nx_enabled (1)
9800+#endif
9801
9802 #define pgprot_writecombine pgprot_writecombine
9803 extern pgprot_t pgprot_writecombine(pgprot_t prot);
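
The PTE_IDENT_ATTR/PDE_IDENT_ATTR change near the end of this hunk (0x003/0x067 becoming 0x063) is easier to read as decoded flags: the early identity mappings lose the USER bit and gain pre-set accessed/dirty bits. A small decoder using the architectural x86 low page-table bits (present = bit 0, rw = 1, user = 2, accessed = 5, dirty = 6):

#include <stdio.h>

/* Architectural x86 page-table low-bit layout. */
#define F_PRESENT  (1u << 0)
#define F_RW       (1u << 1)
#define F_USER     (1u << 2)
#define F_ACCESSED (1u << 5)
#define F_DIRTY    (1u << 6)

static void decode(const char *name, unsigned attr)
{
        printf("%-22s %#05x:%s%s%s%s%s\n", name, attr,
               attr & F_PRESENT  ? " PRESENT"  : "",
               attr & F_RW       ? " RW"       : "",
               attr & F_USER     ? " USER"     : "",
               attr & F_ACCESSED ? " ACCESSED" : "",
               attr & F_DIRTY    ? " DIRTY"    : "");
}

int main(void)
{
        decode("old PTE_IDENT_ATTR", 0x003);            /* PRESENT+RW */
        decode("old PDE_IDENT_ATTR", 0x067);            /* PRESENT+RW+USER+A+D */
        decode("new PTE/PDE_IDENT_ATTR", 0x063);        /* USER dropped, A/D pre-set */
        return 0;
}
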
9804diff -urNp linux-2.6.32.45/arch/x86/include/asm/processor.h linux-2.6.32.45/arch/x86/include/asm/processor.h
9805--- linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9806+++ linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9807@@ -272,7 +272,7 @@ struct tss_struct {
9808
9809 } ____cacheline_aligned;
9810
9811-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9812+extern struct tss_struct init_tss[NR_CPUS];
9813
9814 /*
9815 * Save the original ist values for checking stack pointers during debugging
9816@@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9817 */
9818 #define TASK_SIZE PAGE_OFFSET
9819 #define TASK_SIZE_MAX TASK_SIZE
9820+
9821+#ifdef CONFIG_PAX_SEGMEXEC
9822+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9823+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9824+#else
9825 #define STACK_TOP TASK_SIZE
9826-#define STACK_TOP_MAX STACK_TOP
9827+#endif
9828+
9829+#define STACK_TOP_MAX TASK_SIZE
9830
9831 #define INIT_THREAD { \
9832- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9833+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9834 .vm86_info = NULL, \
9835 .sysenter_cs = __KERNEL_CS, \
9836 .io_bitmap_ptr = NULL, \
9837@@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9838 */
9839 #define INIT_TSS { \
9840 .x86_tss = { \
9841- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9842+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9843 .ss0 = __KERNEL_DS, \
9844 .ss1 = __KERNEL_CS, \
9845 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9846@@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9847 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9848
9849 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9850-#define KSTK_TOP(info) \
9851-({ \
9852- unsigned long *__ptr = (unsigned long *)(info); \
9853- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9854-})
9855+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9856
9857 /*
9858 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9859@@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9860 #define task_pt_regs(task) \
9861 ({ \
9862 struct pt_regs *__regs__; \
9863- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9864+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9865 __regs__ - 1; \
9866 })
9867
9868@@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9869 /*
9870 * User space process size. 47bits minus one guard page.
9871 */
9872-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9873+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9874
9875 /* This decides where the kernel will search for a free chunk of vm
9876 * space during mmap's.
9877 */
9878 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9879- 0xc0000000 : 0xFFFFe000)
9880+ 0xc0000000 : 0xFFFFf000)
9881
9882 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9883 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9884@@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9885 #define STACK_TOP_MAX TASK_SIZE_MAX
9886
9887 #define INIT_THREAD { \
9888- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9889+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9890 }
9891
9892 #define INIT_TSS { \
9893- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9894+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9895 }
9896
9897 /*
9898@@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9899 */
9900 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9901
9902+#ifdef CONFIG_PAX_SEGMEXEC
9903+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9904+#endif
9905+
9906 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9907
9908 /* Get/set a process' ability to use the timestamp counter instruction */
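
Some of the numbers in this hunk are easier to check than to eyeball. Assuming the stock 47-bit user address space that the original comment mentions, a 4 KiB page, and a 3 GiB 32-bit user split (all assumptions here, since TASK_SIZE_MAX_SHIFT becomes a Kconfig value in this patch):

#include <stdio.h>

int main(void)
{
        unsigned long task_size_max_shift = 47;         /* assumed */
        unsigned long page_size = 4096;                 /* assumed */

        unsigned long task_size_max = (1UL << task_size_max_shift) - page_size;
        printf("TASK_SIZE_MAX      = %#lx\n", task_size_max);   /* 0x7ffffffff000 */

        /* 32-bit SEGMEXEC halves the user space (PAGE_OFFSET assumed 0xC0000000). */
        unsigned long task_size_32 = 0xC0000000UL;
        printf("SEGMEXEC_TASK_SIZE = %#lx\n", task_size_32 / 2); /* 0x60000000 */

        /* The IA32 compat limit moves from 0xFFFFe000 to 0xFFFFf000: one more page. */
        printf("extra compat pages = %lu\n",
               (0xFFFFf000UL - 0xFFFFe000UL) / page_size);       /* 1 */
        return 0;
}
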
9909diff -urNp linux-2.6.32.45/arch/x86/include/asm/ptrace.h linux-2.6.32.45/arch/x86/include/asm/ptrace.h
9910--- linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9911+++ linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9912@@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9913 }
9914
9915 /*
9916- * user_mode_vm(regs) determines whether a register set came from user mode.
9917+ * user_mode(regs) determines whether a register set came from user mode.
9918 * This is true if V8086 mode was enabled OR if the register set was from
9919 * protected mode with RPL-3 CS value. This tricky test checks that with
9920 * one comparison. Many places in the kernel can bypass this full check
9921- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9922+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9923+ * be used.
9924 */
9925-static inline int user_mode(struct pt_regs *regs)
9926+static inline int user_mode_novm(struct pt_regs *regs)
9927 {
9928 #ifdef CONFIG_X86_32
9929 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9930 #else
9931- return !!(regs->cs & 3);
9932+ return !!(regs->cs & SEGMENT_RPL_MASK);
9933 #endif
9934 }
9935
9936-static inline int user_mode_vm(struct pt_regs *regs)
9937+static inline int user_mode(struct pt_regs *regs)
9938 {
9939 #ifdef CONFIG_X86_32
9940 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9941 USER_RPL;
9942 #else
9943- return user_mode(regs);
9944+ return user_mode_novm(regs);
9945 #endif
9946 }
9947
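
This hunk swaps the names: the old user_mode_vm() test (RPL-3 or vm86) becomes user_mode(), and the cheaper RPL-only test becomes user_mode_novm(). The combined test works because ORing the selector's RPL bits with the EFLAGS.VM bit can only reach USER_RPL (3) if one of them is set. A quick check using the architectural values (RPL lives in the low two selector bits, EFLAGS.VM is bit 17) and the kernel's USER_RPL of 3:

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3
#define X86_VM_MASK      0x00020000     /* EFLAGS.VM */

/* Same one-comparison test as the (renamed) 32-bit user_mode(): true for
 * RPL-3 code segments and for vm86 mode. */
static int came_from_user(unsigned long cs, unsigned long flags)
{
        return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
        printf("kernel cs=0x60:       %d\n", came_from_user(0x60, 0x202));                 /* 0 */
        printf("user   cs=0x73:       %d\n", came_from_user(0x73, 0x202));                 /* 1 */
        printf("vm86   cs=0x00, VM=1: %d\n", came_from_user(0x00, 0x202 | X86_VM_MASK));   /* 1 */
        return 0;
}
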
9948diff -urNp linux-2.6.32.45/arch/x86/include/asm/reboot.h linux-2.6.32.45/arch/x86/include/asm/reboot.h
9949--- linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9950+++ linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-08-05 20:33:55.000000000 -0400
9951@@ -6,19 +6,19 @@
9952 struct pt_regs;
9953
9954 struct machine_ops {
9955- void (*restart)(char *cmd);
9956- void (*halt)(void);
9957- void (*power_off)(void);
9958+ void (* __noreturn restart)(char *cmd);
9959+ void (* __noreturn halt)(void);
9960+ void (* __noreturn power_off)(void);
9961 void (*shutdown)(void);
9962 void (*crash_shutdown)(struct pt_regs *);
9963- void (*emergency_restart)(void);
9964-};
9965+ void (* __noreturn emergency_restart)(void);
9966+} __no_const;
9967
9968 extern struct machine_ops machine_ops;
9969
9970 void native_machine_crash_shutdown(struct pt_regs *regs);
9971 void native_machine_shutdown(void);
9972-void machine_real_restart(const unsigned char *code, int length);
9973+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9974
9975 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9976 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
9977diff -urNp linux-2.6.32.45/arch/x86/include/asm/rwsem.h linux-2.6.32.45/arch/x86/include/asm/rwsem.h
9978--- linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
9979+++ linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
9980@@ -118,6 +118,14 @@ static inline void __down_read(struct rw
9981 {
9982 asm volatile("# beginning down_read\n\t"
9983 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9984+
9985+#ifdef CONFIG_PAX_REFCOUNT
9986+ "jno 0f\n"
9987+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
9988+ "int $4\n0:\n"
9989+ _ASM_EXTABLE(0b, 0b)
9990+#endif
9991+
9992 /* adds 0x00000001, returns the old value */
9993 " jns 1f\n"
9994 " call call_rwsem_down_read_failed\n"
9995@@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
9996 "1:\n\t"
9997 " mov %1,%2\n\t"
9998 " add %3,%2\n\t"
9999+
10000+#ifdef CONFIG_PAX_REFCOUNT
10001+ "jno 0f\n"
10002+ "sub %3,%2\n"
10003+ "int $4\n0:\n"
10004+ _ASM_EXTABLE(0b, 0b)
10005+#endif
10006+
10007 " jle 2f\n\t"
10008 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10009 " jnz 1b\n\t"
10010@@ -160,6 +176,14 @@ static inline void __down_write_nested(s
10011 tmp = RWSEM_ACTIVE_WRITE_BIAS;
10012 asm volatile("# beginning down_write\n\t"
10013 LOCK_PREFIX " xadd %1,(%2)\n\t"
10014+
10015+#ifdef CONFIG_PAX_REFCOUNT
10016+ "jno 0f\n"
10017+ "mov %1,(%2)\n"
10018+ "int $4\n0:\n"
10019+ _ASM_EXTABLE(0b, 0b)
10020+#endif
10021+
10022 /* subtract 0x0000ffff, returns the old value */
10023 " test %1,%1\n\t"
10024 /* was the count 0 before? */
10025@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10026 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10027 asm volatile("# beginning __up_read\n\t"
10028 LOCK_PREFIX " xadd %1,(%2)\n\t"
10029+
10030+#ifdef CONFIG_PAX_REFCOUNT
10031+ "jno 0f\n"
10032+ "mov %1,(%2)\n"
10033+ "int $4\n0:\n"
10034+ _ASM_EXTABLE(0b, 0b)
10035+#endif
10036+
10037 /* subtracts 1, returns the old value */
10038 " jns 1f\n\t"
10039 " call call_rwsem_wake\n"
10040@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10041 rwsem_count_t tmp;
10042 asm volatile("# beginning __up_write\n\t"
10043 LOCK_PREFIX " xadd %1,(%2)\n\t"
10044+
10045+#ifdef CONFIG_PAX_REFCOUNT
10046+ "jno 0f\n"
10047+ "mov %1,(%2)\n"
10048+ "int $4\n0:\n"
10049+ _ASM_EXTABLE(0b, 0b)
10050+#endif
10051+
10052 /* tries to transition
10053 0xffff0001 -> 0x00000000 */
10054 " jz 1f\n"
10055@@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10056 {
10057 asm volatile("# beginning __downgrade_write\n\t"
10058 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10059+
10060+#ifdef CONFIG_PAX_REFCOUNT
10061+ "jno 0f\n"
10062+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10063+ "int $4\n0:\n"
10064+ _ASM_EXTABLE(0b, 0b)
10065+#endif
10066+
10067 /*
10068 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10069 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10070@@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10071 static inline void rwsem_atomic_add(rwsem_count_t delta,
10072 struct rw_semaphore *sem)
10073 {
10074- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10075+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10076+
10077+#ifdef CONFIG_PAX_REFCOUNT
10078+ "jno 0f\n"
10079+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10080+ "int $4\n0:\n"
10081+ _ASM_EXTABLE(0b, 0b)
10082+#endif
10083+
10084 : "+m" (sem->count)
10085 : "er" (delta));
10086 }
10087@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10088 {
10089 rwsem_count_t tmp = delta;
10090
10091- asm volatile(LOCK_PREFIX "xadd %0,%1"
10092+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10093+
10094+#ifdef CONFIG_PAX_REFCOUNT
10095+ "jno 0f\n"
10096+ "mov %0,%1\n"
10097+ "int $4\n0:\n"
10098+ _ASM_EXTABLE(0b, 0b)
10099+#endif
10100+
10101 : "+r" (tmp), "+m" (sem->count)
10102 : : "memory");
10103
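
Every PAX_REFCOUNT block in this file (and again in the spinlock.h hunks further down) has the same shape: perform the locked arithmetic, "jno" over the fix-up if the sign did not overflow, otherwise undo the operation and raise int $4 so the overflow is reported instead of silently wrapping. A portable C model of the detect-and-back-out idea (this sketch checks with __builtin_add_overflow before committing, rather than undoing afterwards as the asm does):

#include <limits.h>
#include <stdio.h>

/* Reject an increment that would wrap the signed counter, leaving it
 * untouched; the kernel hunks do the equivalent in inline asm and deliver
 * an overflow exception instead of returning an error. */
static int checked_add(int *counter, int delta)
{
        int newval;

        if (__builtin_add_overflow(*counter, delta, &newval))
                return -1;              /* overflow detected, *counter unchanged */
        *counter = newval;
        return 0;
}

int main(void)
{
        int count = INT_MAX - 1;

        checked_add(&count, 1);                         /* fine: reaches INT_MAX */
        if (checked_add(&count, 1))                     /* would wrap: rejected */
                printf("overflow caught, count still %d\n", count);
        return 0;
}
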
10104diff -urNp linux-2.6.32.45/arch/x86/include/asm/segment.h linux-2.6.32.45/arch/x86/include/asm/segment.h
10105--- linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10106+++ linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10107@@ -62,8 +62,8 @@
10108 * 26 - ESPFIX small SS
10109 * 27 - per-cpu [ offset to per-cpu data area ]
10110 * 28 - stack_canary-20 [ for stack protector ]
10111- * 29 - unused
10112- * 30 - unused
10113+ * 29 - PCI BIOS CS
10114+ * 30 - PCI BIOS DS
10115 * 31 - TSS for double fault handler
10116 */
10117 #define GDT_ENTRY_TLS_MIN 6
10118@@ -77,6 +77,8 @@
10119
10120 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10121
10122+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10123+
10124 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10125
10126 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10127@@ -88,7 +90,7 @@
10128 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10129 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10130
10131-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10132+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10133 #ifdef CONFIG_SMP
10134 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10135 #else
10136@@ -102,6 +104,12 @@
10137 #define __KERNEL_STACK_CANARY 0
10138 #endif
10139
10140+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10141+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10142+
10143+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10144+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10145+
10146 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10147
10148 /*
10149@@ -139,7 +147,7 @@
10150 */
10151
10152 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10153-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10154+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10155
10156
10157 #else
10158@@ -163,6 +171,8 @@
10159 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10160 #define __USER32_DS __USER_DS
10161
10162+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10163+
10164 #define GDT_ENTRY_TSS 8 /* needs two entries */
10165 #define GDT_ENTRY_LDT 10 /* needs two entries */
10166 #define GDT_ENTRY_TLS_MIN 12
10167@@ -183,6 +193,7 @@
10168 #endif
10169
10170 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10171+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10172 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10173 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10174 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
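
The SEGMENT_IS_PNP_CODE() fix above compares the whole selector with only the RPL bits masked off (0xFFFC is ~3), so exactly the two PNP BIOS code selectors match at any privilege level. A sketch of that test with made-up GDT indices (18 and 19 are placeholders, not values from this patch):

#include <stdio.h>

/* x86 segment selector layout: index<<3 | TI<<2 | RPL. */
static unsigned sel(unsigned index, unsigned rpl) { return (index << 3) | rpl; }

/* Illustrative GDT indices -- placeholders only. */
#define PNP_CS32_IDX 18
#define PNP_CS16_IDX 19
#define PNP_CS32 sel(PNP_CS32_IDX, 0)
#define PNP_CS16 sel(PNP_CS16_IDX, 0)

/* The corrected test: mask off the RPL bits, then compare the full selector. */
#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCu) == PNP_CS32 || ((x) & 0xFFFCu) == PNP_CS16)

int main(void)
{
        printf("PNP_CS32 rpl0: %d\n", SEGMENT_IS_PNP_CODE(sel(PNP_CS32_IDX, 0)));   /* 1 */
        printf("PNP_CS16 rpl3: %d\n", SEGMENT_IS_PNP_CODE(sel(PNP_CS16_IDX, 3)));   /* 1 */
        printf("KERNEL_CS:     %d\n", SEGMENT_IS_PNP_CODE(sel(12, 0)));             /* 0 */
        return 0;
}
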
10175diff -urNp linux-2.6.32.45/arch/x86/include/asm/smp.h linux-2.6.32.45/arch/x86/include/asm/smp.h
10176--- linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10177+++ linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-08-05 20:33:55.000000000 -0400
10178@@ -24,7 +24,7 @@ extern unsigned int num_processors;
10179 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10180 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10181 DECLARE_PER_CPU(u16, cpu_llc_id);
10182-DECLARE_PER_CPU(int, cpu_number);
10183+DECLARE_PER_CPU(unsigned int, cpu_number);
10184
10185 static inline struct cpumask *cpu_sibling_mask(int cpu)
10186 {
10187@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10188 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10189
10190 /* Static state in head.S used to set up a CPU */
10191-extern struct {
10192- void *sp;
10193- unsigned short ss;
10194-} stack_start;
10195+extern unsigned long stack_start; /* Initial stack pointer address */
10196
10197 struct smp_ops {
10198 void (*smp_prepare_boot_cpu)(void);
10199@@ -60,7 +57,7 @@ struct smp_ops {
10200
10201 void (*send_call_func_ipi)(const struct cpumask *mask);
10202 void (*send_call_func_single_ipi)(int cpu);
10203-};
10204+} __no_const;
10205
10206 /* Globals due to paravirt */
10207 extern void set_cpu_sibling_map(int cpu);
10208@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10209 extern int safe_smp_processor_id(void);
10210
10211 #elif defined(CONFIG_X86_64_SMP)
10212-#define raw_smp_processor_id() (percpu_read(cpu_number))
10213-
10214-#define stack_smp_processor_id() \
10215-({ \
10216- struct thread_info *ti; \
10217- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10218- ti->cpu; \
10219-})
10220+#define raw_smp_processor_id() (percpu_read(cpu_number))
10221+#define stack_smp_processor_id() raw_smp_processor_id()
10222 #define safe_smp_processor_id() smp_processor_id()
10223
10224 #endif
10225diff -urNp linux-2.6.32.45/arch/x86/include/asm/spinlock.h linux-2.6.32.45/arch/x86/include/asm/spinlock.h
10226--- linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10227+++ linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10228@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10229 static inline void __raw_read_lock(raw_rwlock_t *rw)
10230 {
10231 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10232+
10233+#ifdef CONFIG_PAX_REFCOUNT
10234+ "jno 0f\n"
10235+ LOCK_PREFIX " addl $1,(%0)\n"
10236+ "int $4\n0:\n"
10237+ _ASM_EXTABLE(0b, 0b)
10238+#endif
10239+
10240 "jns 1f\n"
10241 "call __read_lock_failed\n\t"
10242 "1:\n"
10243@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10244 static inline void __raw_write_lock(raw_rwlock_t *rw)
10245 {
10246 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10247+
10248+#ifdef CONFIG_PAX_REFCOUNT
10249+ "jno 0f\n"
10250+ LOCK_PREFIX " addl %1,(%0)\n"
10251+ "int $4\n0:\n"
10252+ _ASM_EXTABLE(0b, 0b)
10253+#endif
10254+
10255 "jz 1f\n"
10256 "call __write_lock_failed\n\t"
10257 "1:\n"
10258@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10259
10260 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10261 {
10262- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10263+ asm volatile(LOCK_PREFIX "incl %0\n"
10264+
10265+#ifdef CONFIG_PAX_REFCOUNT
10266+ "jno 0f\n"
10267+ LOCK_PREFIX "decl %0\n"
10268+ "int $4\n0:\n"
10269+ _ASM_EXTABLE(0b, 0b)
10270+#endif
10271+
10272+ :"+m" (rw->lock) : : "memory");
10273 }
10274
10275 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10276 {
10277- asm volatile(LOCK_PREFIX "addl %1, %0"
10278+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
10279+
10280+#ifdef CONFIG_PAX_REFCOUNT
10281+ "jno 0f\n"
10282+ LOCK_PREFIX "subl %1, %0\n"
10283+ "int $4\n0:\n"
10284+ _ASM_EXTABLE(0b, 0b)
10285+#endif
10286+
10287 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10288 }
10289
10290diff -urNp linux-2.6.32.45/arch/x86/include/asm/stackprotector.h linux-2.6.32.45/arch/x86/include/asm/stackprotector.h
10291--- linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10292+++ linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10293@@ -48,7 +48,7 @@
10294 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10295 */
10296 #define GDT_STACK_CANARY_INIT \
10297- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10298+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10299
10300 /*
10301 * Initialize the stackprotector canary value.
10302@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10303
10304 static inline void load_stack_canary_segment(void)
10305 {
10306-#ifdef CONFIG_X86_32
10307+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10308 asm volatile ("mov %0, %%gs" : : "r" (0));
10309 #endif
10310 }
10311diff -urNp linux-2.6.32.45/arch/x86/include/asm/system.h linux-2.6.32.45/arch/x86/include/asm/system.h
10312--- linux-2.6.32.45/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10313+++ linux-2.6.32.45/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10314@@ -132,7 +132,7 @@ do { \
10315 "thread_return:\n\t" \
10316 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10317 __switch_canary \
10318- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10319+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10320 "movq %%rax,%%rdi\n\t" \
10321 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10322 "jnz ret_from_fork\n\t" \
10323@@ -143,7 +143,7 @@ do { \
10324 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10325 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10326 [_tif_fork] "i" (_TIF_FORK), \
10327- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10328+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
10329 [current_task] "m" (per_cpu_var(current_task)) \
10330 __switch_canary_iparam \
10331 : "memory", "cc" __EXTRA_CLOBBER)
10332@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10333 {
10334 unsigned long __limit;
10335 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10336- return __limit + 1;
10337+ return __limit;
10338 }
10339
10340 static inline void native_clts(void)
10341@@ -340,12 +340,12 @@ void enable_hlt(void);
10342
10343 void cpu_idle_wait(void);
10344
10345-extern unsigned long arch_align_stack(unsigned long sp);
10346+#define arch_align_stack(x) ((x) & ~0xfUL)
10347 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10348
10349 void default_idle(void);
10350
10351-void stop_this_cpu(void *dummy);
10352+void stop_this_cpu(void *dummy) __noreturn;
10353
10354 /*
10355 * Force strict CPU ordering.
10356diff -urNp linux-2.6.32.45/arch/x86/include/asm/thread_info.h linux-2.6.32.45/arch/x86/include/asm/thread_info.h
10357--- linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10358+++ linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10359@@ -10,6 +10,7 @@
10360 #include <linux/compiler.h>
10361 #include <asm/page.h>
10362 #include <asm/types.h>
10363+#include <asm/percpu.h>
10364
10365 /*
10366 * low level task data that entry.S needs immediate access to
10367@@ -24,7 +25,6 @@ struct exec_domain;
10368 #include <asm/atomic.h>
10369
10370 struct thread_info {
10371- struct task_struct *task; /* main task structure */
10372 struct exec_domain *exec_domain; /* execution domain */
10373 __u32 flags; /* low level flags */
10374 __u32 status; /* thread synchronous flags */
10375@@ -34,18 +34,12 @@ struct thread_info {
10376 mm_segment_t addr_limit;
10377 struct restart_block restart_block;
10378 void __user *sysenter_return;
10379-#ifdef CONFIG_X86_32
10380- unsigned long previous_esp; /* ESP of the previous stack in
10381- case of nested (IRQ) stacks
10382- */
10383- __u8 supervisor_stack[0];
10384-#endif
10385+ unsigned long lowest_stack;
10386 int uaccess_err;
10387 };
10388
10389-#define INIT_THREAD_INFO(tsk) \
10390+#define INIT_THREAD_INFO \
10391 { \
10392- .task = &tsk, \
10393 .exec_domain = &default_exec_domain, \
10394 .flags = 0, \
10395 .cpu = 0, \
10396@@ -56,7 +50,7 @@ struct thread_info {
10397 }, \
10398 }
10399
10400-#define init_thread_info (init_thread_union.thread_info)
10401+#define init_thread_info (init_thread_union.stack)
10402 #define init_stack (init_thread_union.stack)
10403
10404 #else /* !__ASSEMBLY__ */
10405@@ -163,6 +157,23 @@ struct thread_info {
10406 #define alloc_thread_info(tsk) \
10407 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10408
10409+#ifdef __ASSEMBLY__
10410+/* how to get the thread information struct from ASM */
10411+#define GET_THREAD_INFO(reg) \
10412+ mov PER_CPU_VAR(current_tinfo), reg
10413+
10414+/* use this one if reg already contains %esp */
10415+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10416+#else
10417+/* how to get the thread information struct from C */
10418+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10419+
10420+static __always_inline struct thread_info *current_thread_info(void)
10421+{
10422+ return percpu_read_stable(current_tinfo);
10423+}
10424+#endif
10425+
10426 #ifdef CONFIG_X86_32
10427
10428 #define STACK_WARN (THREAD_SIZE/8)
10429@@ -173,35 +184,13 @@ struct thread_info {
10430 */
10431 #ifndef __ASSEMBLY__
10432
10433-
10434 /* how to get the current stack pointer from C */
10435 register unsigned long current_stack_pointer asm("esp") __used;
10436
10437-/* how to get the thread information struct from C */
10438-static inline struct thread_info *current_thread_info(void)
10439-{
10440- return (struct thread_info *)
10441- (current_stack_pointer & ~(THREAD_SIZE - 1));
10442-}
10443-
10444-#else /* !__ASSEMBLY__ */
10445-
10446-/* how to get the thread information struct from ASM */
10447-#define GET_THREAD_INFO(reg) \
10448- movl $-THREAD_SIZE, reg; \
10449- andl %esp, reg
10450-
10451-/* use this one if reg already contains %esp */
10452-#define GET_THREAD_INFO_WITH_ESP(reg) \
10453- andl $-THREAD_SIZE, reg
10454-
10455 #endif
10456
10457 #else /* X86_32 */
10458
10459-#include <asm/percpu.h>
10460-#define KERNEL_STACK_OFFSET (5*8)
10461-
10462 /*
10463 * macros/functions for gaining access to the thread information structure
10464 * preempt_count needs to be 1 initially, until the scheduler is functional.
10465@@ -209,21 +198,8 @@ static inline struct thread_info *curren
10466 #ifndef __ASSEMBLY__
10467 DECLARE_PER_CPU(unsigned long, kernel_stack);
10468
10469-static inline struct thread_info *current_thread_info(void)
10470-{
10471- struct thread_info *ti;
10472- ti = (void *)(percpu_read_stable(kernel_stack) +
10473- KERNEL_STACK_OFFSET - THREAD_SIZE);
10474- return ti;
10475-}
10476-
10477-#else /* !__ASSEMBLY__ */
10478-
10479-/* how to get the thread information struct from ASM */
10480-#define GET_THREAD_INFO(reg) \
10481- movq PER_CPU_VAR(kernel_stack),reg ; \
10482- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10483-
10484+/* how to get the current stack pointer from C */
10485+register unsigned long current_stack_pointer asm("rsp") __used;
10486 #endif
10487
10488 #endif /* !X86_32 */
10489@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10490 extern void free_thread_info(struct thread_info *ti);
10491 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10492 #define arch_task_cache_init arch_task_cache_init
10493+
10494+#define __HAVE_THREAD_FUNCTIONS
10495+#define task_thread_info(task) (&(task)->tinfo)
10496+#define task_stack_page(task) ((task)->stack)
10497+#define setup_thread_stack(p, org) do {} while (0)
10498+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10499+
10500+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10501+extern struct task_struct *alloc_task_struct(void);
10502+extern void free_task_struct(struct task_struct *);
10503+
10504 #endif
10505 #endif /* _ASM_X86_THREAD_INFO_H */
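
The block removed here is the classic derivation of current_thread_info(): thread_info used to sit at the bottom of the kernel stack, so masking any in-stack address with ~(THREAD_SIZE - 1) recovered it. This patch switches to a per-CPU current_tinfo pointer instead, which is why the mask-based GET_THREAD_INFO macros go away. For reference, the old arithmetic, assuming 8 KiB stacks (THREAD_SIZE is configuration-dependent):

#include <stdio.h>

#define THREAD_SIZE 8192UL      /* assumed: 8 KiB kernel stacks */

/* The derivation this hunk removes: round an in-stack address down to the
 * start of the stack, where thread_info used to live. */
static unsigned long thread_info_of(unsigned long sp)
{
        return sp & ~(THREAD_SIZE - 1);
}

int main(void)
{
        unsigned long sp = 0xF61ABE30UL;        /* hypothetical in-stack address */

        printf("sp=%#lx -> thread_info=%#lx\n", sp, thread_info_of(sp));
        return 0;
}
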
10506diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h
10507--- linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10508+++ linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10509@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10510 static __always_inline unsigned long __must_check
10511 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10512 {
10513+ pax_track_stack();
10514+
10515+ if ((long)n < 0)
10516+ return n;
10517+
10518 if (__builtin_constant_p(n)) {
10519 unsigned long ret;
10520
10521@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10522 return ret;
10523 }
10524 }
10525+ if (!__builtin_constant_p(n))
10526+ check_object_size(from, n, true);
10527 return __copy_to_user_ll(to, from, n);
10528 }
10529
10530@@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10531 __copy_to_user(void __user *to, const void *from, unsigned long n)
10532 {
10533 might_fault();
10534+
10535 return __copy_to_user_inatomic(to, from, n);
10536 }
10537
10538 static __always_inline unsigned long
10539 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10540 {
10541+ if ((long)n < 0)
10542+ return n;
10543+
10544 /* Avoid zeroing the tail if the copy fails..
10545 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10546 * but as the zeroing behaviour is only significant when n is not
10547@@ -138,6 +149,12 @@ static __always_inline unsigned long
10548 __copy_from_user(void *to, const void __user *from, unsigned long n)
10549 {
10550 might_fault();
10551+
10552+ pax_track_stack();
10553+
10554+ if ((long)n < 0)
10555+ return n;
10556+
10557 if (__builtin_constant_p(n)) {
10558 unsigned long ret;
10559
10560@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10561 return ret;
10562 }
10563 }
10564+ if (!__builtin_constant_p(n))
10565+ check_object_size(to, n, false);
10566 return __copy_from_user_ll(to, from, n);
10567 }
10568
10569@@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10570 const void __user *from, unsigned long n)
10571 {
10572 might_fault();
10573+
10574+ if ((long)n < 0)
10575+ return n;
10576+
10577 if (__builtin_constant_p(n)) {
10578 unsigned long ret;
10579
10580@@ -182,14 +205,62 @@ static __always_inline unsigned long
10581 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10582 unsigned long n)
10583 {
10584- return __copy_from_user_ll_nocache_nozero(to, from, n);
10585+ if ((long)n < 0)
10586+ return n;
10587+
10588+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10589+}
10590+
10591+/**
10592+ * copy_to_user: - Copy a block of data into user space.
10593+ * @to: Destination address, in user space.
10594+ * @from: Source address, in kernel space.
10595+ * @n: Number of bytes to copy.
10596+ *
10597+ * Context: User context only. This function may sleep.
10598+ *
10599+ * Copy data from kernel space to user space.
10600+ *
10601+ * Returns number of bytes that could not be copied.
10602+ * On success, this will be zero.
10603+ */
10604+static __always_inline unsigned long __must_check
10605+copy_to_user(void __user *to, const void *from, unsigned long n)
10606+{
10607+ if (access_ok(VERIFY_WRITE, to, n))
10608+ n = __copy_to_user(to, from, n);
10609+ return n;
10610+}
10611+
10612+/**
10613+ * copy_from_user: - Copy a block of data from user space.
10614+ * @to: Destination address, in kernel space.
10615+ * @from: Source address, in user space.
10616+ * @n: Number of bytes to copy.
10617+ *
10618+ * Context: User context only. This function may sleep.
10619+ *
10620+ * Copy data from user space to kernel space.
10621+ *
10622+ * Returns number of bytes that could not be copied.
10623+ * On success, this will be zero.
10624+ *
10625+ * If some data could not be copied, this function will pad the copied
10626+ * data to the requested size using zero bytes.
10627+ */
10628+static __always_inline unsigned long __must_check
10629+copy_from_user(void *to, const void __user *from, unsigned long n)
10630+{
10631+ if (access_ok(VERIFY_READ, from, n))
10632+ n = __copy_from_user(to, from, n);
10633+ else if ((long)n > 0) {
10634+ if (!__builtin_constant_p(n))
10635+ check_object_size(to, n, false);
10636+ memset(to, 0, n);
10637+ }
10638+ return n;
10639 }
10640
10641-unsigned long __must_check copy_to_user(void __user *to,
10642- const void *from, unsigned long n);
10643-unsigned long __must_check copy_from_user(void *to,
10644- const void __user *from,
10645- unsigned long n);
10646 long __must_check strncpy_from_user(char *dst, const char __user *src,
10647 long count);
10648 long __must_check __strncpy_from_user(char *dst,
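
The copy_to_user()/copy_from_user() wrappers that this hunk inlines keep the usual kernel contract: the return value is the number of bytes that could not be copied, a rejected source buffer is zero-filled, and the added (long)n < 0 guards bounce absurd lengths before they reach the low-level copy routines. A much-simplified user-space model of that contract (model_copy_from_user and its access_ok flag are stand-ins, not kernel API):

#include <stdio.h>
#include <string.h>

/* Returns the number of bytes NOT copied; zero-fills the destination when
 * the source is rejected; bounces lengths with the top bit set. */
static unsigned long model_copy_from_user(void *to, const void *from,
                                          unsigned long n, int access_ok)
{
        if ((long)n < 0)                /* absurd length, e.g. (size_t)-1 */
                return n;
        if (access_ok) {
                memcpy(to, from, n);    /* stands in for __copy_from_user() */
                return 0;
        }
        memset(to, 0, n);               /* failed: pad with zeroes */
        return n;
}

int main(void)
{
        char src[8] = "secret!", dst[8];

        printf("ok:   left=%lu\n", model_copy_from_user(dst, src, sizeof(src), 1));
        printf("bad:  left=%lu dst[0]=%d\n",
               model_copy_from_user(dst, src, sizeof(src), 0), dst[0]);
        printf("huge: left=%lu\n", model_copy_from_user(dst, src, (unsigned long)-1, 1));
        return 0;
}
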
10649diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h
10650--- linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10651+++ linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10652@@ -9,6 +9,9 @@
10653 #include <linux/prefetch.h>
10654 #include <linux/lockdep.h>
10655 #include <asm/page.h>
10656+#include <asm/pgtable.h>
10657+
10658+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10659
10660 /*
10661 * Copy To/From Userspace
10662@@ -19,113 +22,203 @@ __must_check unsigned long
10663 copy_user_generic(void *to, const void *from, unsigned len);
10664
10665 __must_check unsigned long
10666-copy_to_user(void __user *to, const void *from, unsigned len);
10667-__must_check unsigned long
10668-copy_from_user(void *to, const void __user *from, unsigned len);
10669-__must_check unsigned long
10670 copy_in_user(void __user *to, const void __user *from, unsigned len);
10671
10672 static __always_inline __must_check
10673-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10674+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10675 {
10676- int ret = 0;
10677+ unsigned ret = 0;
10678
10679 might_fault();
10680- if (!__builtin_constant_p(size))
10681- return copy_user_generic(dst, (__force void *)src, size);
10682+
10683+ if ((int)size < 0)
10684+ return size;
10685+
10686+#ifdef CONFIG_PAX_MEMORY_UDEREF
10687+ if (!__access_ok(VERIFY_READ, src, size))
10688+ return size;
10689+#endif
10690+
10691+ if (!__builtin_constant_p(size)) {
10692+ check_object_size(dst, size, false);
10693+
10694+#ifdef CONFIG_PAX_MEMORY_UDEREF
10695+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10696+ src += PAX_USER_SHADOW_BASE;
10697+#endif
10698+
10699+ return copy_user_generic(dst, (__force const void *)src, size);
10700+ }
10701 switch (size) {
10702- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10703+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10704 ret, "b", "b", "=q", 1);
10705 return ret;
10706- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10707+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10708 ret, "w", "w", "=r", 2);
10709 return ret;
10710- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10711+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10712 ret, "l", "k", "=r", 4);
10713 return ret;
10714- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10715+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10716 ret, "q", "", "=r", 8);
10717 return ret;
10718 case 10:
10719- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10720+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10721 ret, "q", "", "=r", 10);
10722 if (unlikely(ret))
10723 return ret;
10724 __get_user_asm(*(u16 *)(8 + (char *)dst),
10725- (u16 __user *)(8 + (char __user *)src),
10726+ (const u16 __user *)(8 + (const char __user *)src),
10727 ret, "w", "w", "=r", 2);
10728 return ret;
10729 case 16:
10730- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10731+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10732 ret, "q", "", "=r", 16);
10733 if (unlikely(ret))
10734 return ret;
10735 __get_user_asm(*(u64 *)(8 + (char *)dst),
10736- (u64 __user *)(8 + (char __user *)src),
10737+ (const u64 __user *)(8 + (const char __user *)src),
10738 ret, "q", "", "=r", 8);
10739 return ret;
10740 default:
10741- return copy_user_generic(dst, (__force void *)src, size);
10742+
10743+#ifdef CONFIG_PAX_MEMORY_UDEREF
10744+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10745+ src += PAX_USER_SHADOW_BASE;
10746+#endif
10747+
10748+ return copy_user_generic(dst, (__force const void *)src, size);
10749 }
10750 }
10751
10752 static __always_inline __must_check
10753-int __copy_to_user(void __user *dst, const void *src, unsigned size)
10754+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10755 {
10756- int ret = 0;
10757+ unsigned ret = 0;
10758
10759 might_fault();
10760- if (!__builtin_constant_p(size))
10761+
10762+ pax_track_stack();
10763+
10764+ if ((int)size < 0)
10765+ return size;
10766+
10767+#ifdef CONFIG_PAX_MEMORY_UDEREF
10768+ if (!__access_ok(VERIFY_WRITE, dst, size))
10769+ return size;
10770+#endif
10771+
10772+ if (!__builtin_constant_p(size)) {
10773+ check_object_size(src, size, true);
10774+
10775+#ifdef CONFIG_PAX_MEMORY_UDEREF
10776+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10777+ dst += PAX_USER_SHADOW_BASE;
10778+#endif
10779+
10780 return copy_user_generic((__force void *)dst, src, size);
10781+ }
10782 switch (size) {
10783- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10784+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10785 ret, "b", "b", "iq", 1);
10786 return ret;
10787- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10788+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10789 ret, "w", "w", "ir", 2);
10790 return ret;
10791- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10792+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10793 ret, "l", "k", "ir", 4);
10794 return ret;
10795- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10796+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10797 ret, "q", "", "er", 8);
10798 return ret;
10799 case 10:
10800- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10801+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10802 ret, "q", "", "er", 10);
10803 if (unlikely(ret))
10804 return ret;
10805 asm("":::"memory");
10806- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10807+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10808 ret, "w", "w", "ir", 2);
10809 return ret;
10810 case 16:
10811- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10812+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10813 ret, "q", "", "er", 16);
10814 if (unlikely(ret))
10815 return ret;
10816 asm("":::"memory");
10817- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10818+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10819 ret, "q", "", "er", 8);
10820 return ret;
10821 default:
10822+
10823+#ifdef CONFIG_PAX_MEMORY_UDEREF
10824+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10825+ dst += PAX_USER_SHADOW_BASE;
10826+#endif
10827+
10828 return copy_user_generic((__force void *)dst, src, size);
10829 }
10830 }
10831
10832 static __always_inline __must_check
10833-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10834+unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10835+{
10836+ if (access_ok(VERIFY_WRITE, to, len))
10837+ len = __copy_to_user(to, from, len);
10838+ return len;
10839+}
10840+
10841+static __always_inline __must_check
10842+unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10843+{
10844+ if ((int)len < 0)
10845+ return len;
10846+
10847+ if (access_ok(VERIFY_READ, from, len))
10848+ len = __copy_from_user(to, from, len);
10849+ else if ((int)len > 0) {
10850+ if (!__builtin_constant_p(len))
10851+ check_object_size(to, len, false);
10852+ memset(to, 0, len);
10853+ }
10854+ return len;
10855+}
10856+
10857+static __always_inline __must_check
10858+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10859 {
10860- int ret = 0;
10861+ unsigned ret = 0;
10862
10863 might_fault();
10864- if (!__builtin_constant_p(size))
10865+
10866+ pax_track_stack();
10867+
10868+ if ((int)size < 0)
10869+ return size;
10870+
10871+#ifdef CONFIG_PAX_MEMORY_UDEREF
10872+ if (!__access_ok(VERIFY_READ, src, size))
10873+ return size;
10874+ if (!__access_ok(VERIFY_WRITE, dst, size))
10875+ return size;
10876+#endif
10877+
10878+ if (!__builtin_constant_p(size)) {
10879+
10880+#ifdef CONFIG_PAX_MEMORY_UDEREF
10881+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10882+ src += PAX_USER_SHADOW_BASE;
10883+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10884+ dst += PAX_USER_SHADOW_BASE;
10885+#endif
10886+
10887 return copy_user_generic((__force void *)dst,
10888- (__force void *)src, size);
10889+ (__force const void *)src, size);
10890+ }
10891 switch (size) {
10892 case 1: {
10893 u8 tmp;
10894- __get_user_asm(tmp, (u8 __user *)src,
10895+ __get_user_asm(tmp, (const u8 __user *)src,
10896 ret, "b", "b", "=q", 1);
10897 if (likely(!ret))
10898 __put_user_asm(tmp, (u8 __user *)dst,
10899@@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10900 }
10901 case 2: {
10902 u16 tmp;
10903- __get_user_asm(tmp, (u16 __user *)src,
10904+ __get_user_asm(tmp, (const u16 __user *)src,
10905 ret, "w", "w", "=r", 2);
10906 if (likely(!ret))
10907 __put_user_asm(tmp, (u16 __user *)dst,
10908@@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10909
10910 case 4: {
10911 u32 tmp;
10912- __get_user_asm(tmp, (u32 __user *)src,
10913+ __get_user_asm(tmp, (const u32 __user *)src,
10914 ret, "l", "k", "=r", 4);
10915 if (likely(!ret))
10916 __put_user_asm(tmp, (u32 __user *)dst,
10917@@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10918 }
10919 case 8: {
10920 u64 tmp;
10921- __get_user_asm(tmp, (u64 __user *)src,
10922+ __get_user_asm(tmp, (const u64 __user *)src,
10923 ret, "q", "", "=r", 8);
10924 if (likely(!ret))
10925 __put_user_asm(tmp, (u64 __user *)dst,
10926@@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10927 return ret;
10928 }
10929 default:
10930+
10931+#ifdef CONFIG_PAX_MEMORY_UDEREF
10932+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10933+ src += PAX_USER_SHADOW_BASE;
10934+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10935+ dst += PAX_USER_SHADOW_BASE;
10936+#endif
10937+
10938 return copy_user_generic((__force void *)dst,
10939- (__force void *)src, size);
10940+ (__force const void *)src, size);
10941 }
10942 }
10943
10944@@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10945 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10946 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10947
10948-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10949- unsigned size);
10950+static __must_check __always_inline unsigned long
10951+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10952+{
10953+ pax_track_stack();
10954+
10955+ if ((int)size < 0)
10956+ return size;
10957
10958-static __must_check __always_inline int
10959+#ifdef CONFIG_PAX_MEMORY_UDEREF
10960+ if (!__access_ok(VERIFY_READ, src, size))
10961+ return size;
10962+
10963+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10964+ src += PAX_USER_SHADOW_BASE;
10965+#endif
10966+
10967+ return copy_user_generic(dst, (__force const void *)src, size);
10968+}
10969+
10970+static __must_check __always_inline unsigned long
10971 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10972 {
10973+ if ((int)size < 0)
10974+ return size;
10975+
10976+#ifdef CONFIG_PAX_MEMORY_UDEREF
10977+ if (!__access_ok(VERIFY_WRITE, dst, size))
10978+ return size;
10979+
10980+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10981+ dst += PAX_USER_SHADOW_BASE;
10982+#endif
10983+
10984 return copy_user_generic((__force void *)dst, src, size);
10985 }
10986
10987-extern long __copy_user_nocache(void *dst, const void __user *src,
10988+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10989 unsigned size, int zerorest);
10990
10991-static inline int
10992-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10993+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10994 {
10995 might_sleep();
10996+
10997+ if ((int)size < 0)
10998+ return size;
10999+
11000+#ifdef CONFIG_PAX_MEMORY_UDEREF
11001+ if (!__access_ok(VERIFY_READ, src, size))
11002+ return size;
11003+#endif
11004+
11005 return __copy_user_nocache(dst, src, size, 1);
11006 }
11007
11008-static inline int
11009-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11010+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11011 unsigned size)
11012 {
11013+ if ((int)size < 0)
11014+ return size;
11015+
11016+#ifdef CONFIG_PAX_MEMORY_UDEREF
11017+ if (!__access_ok(VERIFY_READ, src, size))
11018+ return size;
11019+#endif
11020+
11021 return __copy_user_nocache(dst, src, size, 0);
11022 }
11023
11024-unsigned long
11025+extern unsigned long
11026 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11027
11028 #endif /* _ASM_X86_UACCESS_64_H */
11029diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess.h linux-2.6.32.45/arch/x86/include/asm/uaccess.h
11030--- linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
11031+++ linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
11032@@ -8,12 +8,15 @@
11033 #include <linux/thread_info.h>
11034 #include <linux/prefetch.h>
11035 #include <linux/string.h>
11036+#include <linux/sched.h>
11037 #include <asm/asm.h>
11038 #include <asm/page.h>
11039
11040 #define VERIFY_READ 0
11041 #define VERIFY_WRITE 1
11042
11043+extern void check_object_size(const void *ptr, unsigned long n, bool to);
11044+
11045 /*
11046 * The fs value determines whether argument validity checking should be
11047 * performed or not. If get_fs() == USER_DS, checking is performed, with
11048@@ -29,7 +32,12 @@
11049
11050 #define get_ds() (KERNEL_DS)
11051 #define get_fs() (current_thread_info()->addr_limit)
11052+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11053+void __set_fs(mm_segment_t x);
11054+void set_fs(mm_segment_t x);
11055+#else
11056 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11057+#endif
11058
11059 #define segment_eq(a, b) ((a).seg == (b).seg)
11060
11061@@ -77,7 +85,33 @@
11062 * checks that the pointer is in the user space range - after calling
11063 * this function, memory access functions may still return -EFAULT.
11064 */
11065-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11066+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11067+#define access_ok(type, addr, size) \
11068+({ \
11069+ long __size = size; \
11070+ unsigned long __addr = (unsigned long)addr; \
11071+ unsigned long __addr_ao = __addr & PAGE_MASK; \
11072+ unsigned long __end_ao = __addr + __size - 1; \
11073+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11074+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11075+ while(__addr_ao <= __end_ao) { \
11076+ char __c_ao; \
11077+ __addr_ao += PAGE_SIZE; \
11078+ if (__size > PAGE_SIZE) \
11079+ cond_resched(); \
11080+ if (__get_user(__c_ao, (char __user *)__addr)) \
11081+ break; \
11082+ if (type != VERIFY_WRITE) { \
11083+ __addr = __addr_ao; \
11084+ continue; \
11085+ } \
11086+ if (__put_user(__c_ao, (char __user *)__addr)) \
11087+ break; \
11088+ __addr = __addr_ao; \
11089+ } \
11090+ } \
11091+ __ret_ao; \
11092+})
11093
11094 /*
11095 * The exception table consists of pairs of addresses: the first is the
11096@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11097 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11098 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11099
11100-
11101+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11102+#define __copyuser_seg "gs;"
11103+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11104+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11105+#else
11106+#define __copyuser_seg
11107+#define __COPYUSER_SET_ES
11108+#define __COPYUSER_RESTORE_ES
11109+#endif
11110
11111 #ifdef CONFIG_X86_32
11112 #define __put_user_asm_u64(x, addr, err, errret) \
11113- asm volatile("1: movl %%eax,0(%2)\n" \
11114- "2: movl %%edx,4(%2)\n" \
11115+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11116+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11117 "3:\n" \
11118 ".section .fixup,\"ax\"\n" \
11119 "4: movl %3,%0\n" \
11120@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11121 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11122
11123 #define __put_user_asm_ex_u64(x, addr) \
11124- asm volatile("1: movl %%eax,0(%1)\n" \
11125- "2: movl %%edx,4(%1)\n" \
11126+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11127+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11128 "3:\n" \
11129 _ASM_EXTABLE(1b, 2b - 1b) \
11130 _ASM_EXTABLE(2b, 3b - 2b) \
11131@@ -374,7 +416,7 @@ do { \
11132 } while (0)
11133
11134 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11135- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11136+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11137 "2:\n" \
11138 ".section .fixup,\"ax\"\n" \
11139 "3: mov %3,%0\n" \
11140@@ -382,7 +424,7 @@ do { \
11141 " jmp 2b\n" \
11142 ".previous\n" \
11143 _ASM_EXTABLE(1b, 3b) \
11144- : "=r" (err), ltype(x) \
11145+ : "=r" (err), ltype (x) \
11146 : "m" (__m(addr)), "i" (errret), "0" (err))
11147
11148 #define __get_user_size_ex(x, ptr, size) \
11149@@ -407,7 +449,7 @@ do { \
11150 } while (0)
11151
11152 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11153- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11154+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11155 "2:\n" \
11156 _ASM_EXTABLE(1b, 2b - 1b) \
11157 : ltype(x) : "m" (__m(addr)))
11158@@ -424,13 +466,24 @@ do { \
11159 int __gu_err; \
11160 unsigned long __gu_val; \
11161 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11162- (x) = (__force __typeof__(*(ptr)))__gu_val; \
11163+ (x) = (__typeof__(*(ptr)))__gu_val; \
11164 __gu_err; \
11165 })
11166
11167 /* FIXME: this hack is definitely wrong -AK */
11168 struct __large_struct { unsigned long buf[100]; };
11169-#define __m(x) (*(struct __large_struct __user *)(x))
11170+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11171+#define ____m(x) \
11172+({ \
11173+ unsigned long ____x = (unsigned long)(x); \
11174+ if (____x < PAX_USER_SHADOW_BASE) \
11175+ ____x += PAX_USER_SHADOW_BASE; \
11176+ (void __user *)____x; \
11177+})
11178+#else
11179+#define ____m(x) (x)
11180+#endif
11181+#define __m(x) (*(struct __large_struct __user *)____m(x))
11182
11183 /*
11184 * Tell gcc we read from memory instead of writing: this is because
11185@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11186 * aliasing issues.
11187 */
11188 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11189- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11190+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11191 "2:\n" \
11192 ".section .fixup,\"ax\"\n" \
11193 "3: mov %3,%0\n" \
11194@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11195 ".previous\n" \
11196 _ASM_EXTABLE(1b, 3b) \
11197 : "=r"(err) \
11198- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11199+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11200
11201 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11202- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11203+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11204 "2:\n" \
11205 _ASM_EXTABLE(1b, 2b - 1b) \
11206 : : ltype(x), "m" (__m(addr)))
11207@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11208 * On error, the variable @x is set to zero.
11209 */
11210
11211+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11212+#define __get_user(x, ptr) get_user((x), (ptr))
11213+#else
11214 #define __get_user(x, ptr) \
11215 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11216+#endif
11217
11218 /**
11219 * __put_user: - Write a simple value into user space, with less checking.
11220@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11221 * Returns zero on success, or -EFAULT on error.
11222 */
11223
11224+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11225+#define __put_user(x, ptr) put_user((x), (ptr))
11226+#else
11227 #define __put_user(x, ptr) \
11228 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11229+#endif
11230
11231 #define __get_user_unaligned __get_user
11232 #define __put_user_unaligned __put_user
11233@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11234 #define get_user_ex(x, ptr) do { \
11235 unsigned long __gue_val; \
11236 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11237- (x) = (__force __typeof__(*(ptr)))__gue_val; \
11238+ (x) = (__typeof__(*(ptr)))__gue_val; \
11239 } while (0)
11240
11241 #ifdef CONFIG_X86_WP_WORKS_OK
11242@@ -567,6 +628,7 @@ extern struct movsl_mask {
11243
11244 #define ARCH_HAS_NOCACHE_UACCESS 1
11245
11246+#define ARCH_HAS_SORT_EXTABLE
11247 #ifdef CONFIG_X86_32
11248 # include "uaccess_32.h"
11249 #else
11250diff -urNp linux-2.6.32.45/arch/x86/include/asm/vgtod.h linux-2.6.32.45/arch/x86/include/asm/vgtod.h
11251--- linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11252+++ linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11253@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11254 int sysctl_enabled;
11255 struct timezone sys_tz;
11256 struct { /* extract of a clocksource struct */
11257+ char name[8];
11258 cycle_t (*vread)(void);
11259 cycle_t cycle_last;
11260 cycle_t mask;
11261diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi.h linux-2.6.32.45/arch/x86/include/asm/vmi.h
11262--- linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11263+++ linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11264@@ -191,6 +191,7 @@ struct vrom_header {
11265 u8 reserved[96]; /* Reserved for headers */
11266 char vmi_init[8]; /* VMI_Init jump point */
11267 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11268+ char rom_data[8048]; /* rest of the option ROM */
11269 } __attribute__((packed));
11270
11271 struct pnp_header {
11272diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi_time.h linux-2.6.32.45/arch/x86/include/asm/vmi_time.h
11273--- linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-03-27 14:31:47.000000000 -0400
11274+++ linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-08-05 20:33:55.000000000 -0400
11275@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
11276 int (*wallclock_updated)(void);
11277 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
11278 void (*cancel_alarm)(u32 flags);
11279-} vmi_timer_ops;
11280+} __no_const vmi_timer_ops;
11281
11282 /* Prototypes */
11283 extern void __init vmi_time_init(void);
11284diff -urNp linux-2.6.32.45/arch/x86/include/asm/vsyscall.h linux-2.6.32.45/arch/x86/include/asm/vsyscall.h
11285--- linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11286+++ linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11287@@ -15,9 +15,10 @@ enum vsyscall_num {
11288
11289 #ifdef __KERNEL__
11290 #include <linux/seqlock.h>
11291+#include <linux/getcpu.h>
11292+#include <linux/time.h>
11293
11294 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11295-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11296
11297 /* Definitions for CONFIG_GENERIC_TIME definitions */
11298 #define __section_vsyscall_gtod_data __attribute__ \
11299@@ -31,7 +32,6 @@ enum vsyscall_num {
11300 #define VGETCPU_LSL 2
11301
11302 extern int __vgetcpu_mode;
11303-extern volatile unsigned long __jiffies;
11304
11305 /* kernel space (writeable) */
11306 extern int vgetcpu_mode;
11307@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11308
11309 extern void map_vsyscall(void);
11310
11311+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11312+extern time_t vtime(time_t *t);
11313+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11314 #endif /* __KERNEL__ */
11315
11316 #endif /* _ASM_X86_VSYSCALL_H */
11317diff -urNp linux-2.6.32.45/arch/x86/include/asm/x86_init.h linux-2.6.32.45/arch/x86/include/asm/x86_init.h
11318--- linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-03-27 14:31:47.000000000 -0400
11319+++ linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-08-05 20:33:55.000000000 -0400
11320@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11321 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11322 void (*find_smp_config)(unsigned int reserve);
11323 void (*get_smp_config)(unsigned int early);
11324-};
11325+} __no_const;
11326
11327 /**
11328 * struct x86_init_resources - platform specific resource related ops
11329@@ -42,7 +42,7 @@ struct x86_init_resources {
11330 void (*probe_roms)(void);
11331 void (*reserve_resources)(void);
11332 char *(*memory_setup)(void);
11333-};
11334+} __no_const;
11335
11336 /**
11337 * struct x86_init_irqs - platform specific interrupt setup
11338@@ -55,7 +55,7 @@ struct x86_init_irqs {
11339 void (*pre_vector_init)(void);
11340 void (*intr_init)(void);
11341 void (*trap_init)(void);
11342-};
11343+} __no_const;
11344
11345 /**
11346 * struct x86_init_oem - oem platform specific customizing functions
11347@@ -65,7 +65,7 @@ struct x86_init_irqs {
11348 struct x86_init_oem {
11349 void (*arch_setup)(void);
11350 void (*banner)(void);
11351-};
11352+} __no_const;
11353
11354 /**
11355 * struct x86_init_paging - platform specific paging functions
11356@@ -75,7 +75,7 @@ struct x86_init_oem {
11357 struct x86_init_paging {
11358 void (*pagetable_setup_start)(pgd_t *base);
11359 void (*pagetable_setup_done)(pgd_t *base);
11360-};
11361+} __no_const;
11362
11363 /**
11364 * struct x86_init_timers - platform specific timer setup
11365@@ -88,7 +88,7 @@ struct x86_init_timers {
11366 void (*setup_percpu_clockev)(void);
11367 void (*tsc_pre_init)(void);
11368 void (*timer_init)(void);
11369-};
11370+} __no_const;
11371
11372 /**
11373 * struct x86_init_ops - functions for platform specific setup
11374@@ -101,7 +101,7 @@ struct x86_init_ops {
11375 struct x86_init_oem oem;
11376 struct x86_init_paging paging;
11377 struct x86_init_timers timers;
11378-};
11379+} __no_const;
11380
11381 /**
11382 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11383@@ -109,7 +109,7 @@ struct x86_init_ops {
11384 */
11385 struct x86_cpuinit_ops {
11386 void (*setup_percpu_clockev)(void);
11387-};
11388+} __no_const;
11389
11390 /**
11391 * struct x86_platform_ops - platform specific runtime functions
11392@@ -121,7 +121,7 @@ struct x86_platform_ops {
11393 unsigned long (*calibrate_tsc)(void);
11394 unsigned long (*get_wallclock)(void);
11395 int (*set_wallclock)(unsigned long nowtime);
11396-};
11397+} __no_const;
11398
11399 extern struct x86_init_ops x86_init;
11400 extern struct x86_cpuinit_ops x86_cpuinit;
11401diff -urNp linux-2.6.32.45/arch/x86/include/asm/xsave.h linux-2.6.32.45/arch/x86/include/asm/xsave.h
11402--- linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11403+++ linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11404@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11405 static inline int xsave_user(struct xsave_struct __user *buf)
11406 {
11407 int err;
11408+
11409+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11410+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11411+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11412+#endif
11413+
11414 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11415 "2:\n"
11416 ".section .fixup,\"ax\"\n"
11417@@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11418 u32 lmask = mask;
11419 u32 hmask = mask >> 32;
11420
11421+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11422+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11423+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11424+#endif
11425+
11426 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11427 "2:\n"
11428 ".section .fixup,\"ax\"\n"
11429diff -urNp linux-2.6.32.45/arch/x86/Kconfig linux-2.6.32.45/arch/x86/Kconfig
11430--- linux-2.6.32.45/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11431+++ linux-2.6.32.45/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11432@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11433
11434 config X86_32_LAZY_GS
11435 def_bool y
11436- depends on X86_32 && !CC_STACKPROTECTOR
11437+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11438
11439 config KTIME_SCALAR
11440 def_bool X86_32
11441@@ -1008,7 +1008,7 @@ choice
11442
11443 config NOHIGHMEM
11444 bool "off"
11445- depends on !X86_NUMAQ
11446+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11447 ---help---
11448 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11449 However, the address space of 32-bit x86 processors is only 4
11450@@ -1045,7 +1045,7 @@ config NOHIGHMEM
11451
11452 config HIGHMEM4G
11453 bool "4GB"
11454- depends on !X86_NUMAQ
11455+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11456 ---help---
11457 Select this if you have a 32-bit processor and between 1 and 4
11458 gigabytes of physical RAM.
11459@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11460 hex
11461 default 0xB0000000 if VMSPLIT_3G_OPT
11462 default 0x80000000 if VMSPLIT_2G
11463- default 0x78000000 if VMSPLIT_2G_OPT
11464+ default 0x70000000 if VMSPLIT_2G_OPT
11465 default 0x40000000 if VMSPLIT_1G
11466 default 0xC0000000
11467 depends on X86_32
11468@@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11469
11470 config EFI
11471 bool "EFI runtime service support"
11472- depends on ACPI
11473+ depends on ACPI && !PAX_KERNEXEC
11474 ---help---
11475 This enables the kernel to use EFI runtime services that are
11476 available (such as the EFI variable services).
11477@@ -1460,6 +1460,7 @@ config SECCOMP
11478
11479 config CC_STACKPROTECTOR
11480 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11481+ depends on X86_64 || !PAX_MEMORY_UDEREF
11482 ---help---
11483 This option turns on the -fstack-protector GCC feature. This
11484 feature puts, at the beginning of functions, a canary value on
11485@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11486 config PHYSICAL_START
11487 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11488 default "0x1000000"
11489+ range 0x400000 0x40000000
11490 ---help---
11491 This gives the physical address where the kernel is loaded.
11492
11493@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11494 hex
11495 prompt "Alignment value to which kernel should be aligned" if X86_32
11496 default "0x1000000"
11497+ range 0x400000 0x1000000 if PAX_KERNEXEC
11498 range 0x2000 0x1000000
11499 ---help---
11500 This value puts the alignment restrictions on physical address
11501@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11502 Say N if you want to disable CPU hotplug.
11503
11504 config COMPAT_VDSO
11505- def_bool y
11506+ def_bool n
11507 prompt "Compat VDSO support"
11508 depends on X86_32 || IA32_EMULATION
11509+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11510 ---help---
11511 Map the 32-bit VDSO to the predictable old-style address too.
11512 ---help---
11513diff -urNp linux-2.6.32.45/arch/x86/Kconfig.cpu linux-2.6.32.45/arch/x86/Kconfig.cpu
11514--- linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11515+++ linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11516@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11517
11518 config X86_F00F_BUG
11519 def_bool y
11520- depends on M586MMX || M586TSC || M586 || M486 || M386
11521+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11522
11523 config X86_WP_WORKS_OK
11524 def_bool y
11525@@ -360,7 +360,7 @@ config X86_POPAD_OK
11526
11527 config X86_ALIGNMENT_16
11528 def_bool y
11529- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11530+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11531
11532 config X86_INTEL_USERCOPY
11533 def_bool y
11534@@ -406,7 +406,7 @@ config X86_CMPXCHG64
11535 # generates cmov.
11536 config X86_CMOV
11537 def_bool y
11538- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11539+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11540
11541 config X86_MINIMUM_CPU_FAMILY
11542 int
11543diff -urNp linux-2.6.32.45/arch/x86/Kconfig.debug linux-2.6.32.45/arch/x86/Kconfig.debug
11544--- linux-2.6.32.45/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11545+++ linux-2.6.32.45/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11546@@ -99,7 +99,7 @@ config X86_PTDUMP
11547 config DEBUG_RODATA
11548 bool "Write protect kernel read-only data structures"
11549 default y
11550- depends on DEBUG_KERNEL
11551+ depends on DEBUG_KERNEL && BROKEN
11552 ---help---
11553 Mark the kernel read-only data as write-protected in the pagetables,
11554 in order to catch accidental (and incorrect) writes to such const
11555diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile
11556--- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-03-27 14:31:47.000000000 -0400
11557+++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-08-07 14:38:58.000000000 -0400
11558@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
11559 $(call cc-option, -fno-stack-protector) \
11560 $(call cc-option, -mpreferred-stack-boundary=2)
11561 KBUILD_CFLAGS += $(call cc-option, -m32)
11562+ifdef CONSTIFY_PLUGIN
11563+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11564+endif
11565 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11566 GCOV_PROFILE := n
11567
11568diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S
11569--- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11570+++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11571@@ -91,6 +91,9 @@ _start:
11572 /* Do any other stuff... */
11573
11574 #ifndef CONFIG_64BIT
11575+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
11576+ call verify_cpu
11577+
11578 /* This could also be done in C code... */
11579 movl pmode_cr3, %eax
11580 movl %eax, %cr3
11581@@ -104,7 +107,7 @@ _start:
11582 movl %eax, %ecx
11583 orl %edx, %ecx
11584 jz 1f
11585- movl $0xc0000080, %ecx
11586+ mov $MSR_EFER, %ecx
11587 wrmsr
11588 1:
11589
11590@@ -114,6 +117,7 @@ _start:
11591 movl pmode_cr0, %eax
11592 movl %eax, %cr0
11593 jmp pmode_return
11594+# include "../../verify_cpu.S"
11595 #else
11596 pushw $0
11597 pushw trampoline_segment
11598diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c
11599--- linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11600+++ linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11601@@ -11,11 +11,12 @@
11602 #include <linux/cpumask.h>
11603 #include <asm/segment.h>
11604 #include <asm/desc.h>
11605+#include <asm/e820.h>
11606
11607 #include "realmode/wakeup.h"
11608 #include "sleep.h"
11609
11610-unsigned long acpi_wakeup_address;
11611+unsigned long acpi_wakeup_address = 0x2000;
11612 unsigned long acpi_realmode_flags;
11613
11614 /* address in low memory of the wakeup routine. */
11615@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11616 #else /* CONFIG_64BIT */
11617 header->trampoline_segment = setup_trampoline() >> 4;
11618 #ifdef CONFIG_SMP
11619- stack_start.sp = temp_stack + sizeof(temp_stack);
11620+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11621+
11622+ pax_open_kernel();
11623 early_gdt_descr.address =
11624 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11625+ pax_close_kernel();
11626+
11627 initial_gs = per_cpu_offset(smp_processor_id());
11628 #endif
11629 initial_code = (unsigned long)wakeup_long64;
11630@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11631 return;
11632 }
11633
11634- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11635-
11636- if (!acpi_realmode) {
11637- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11638- return;
11639- }
11640-
11641- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11642+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11643+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);
11644 }
11645
11646
11647diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S
11648--- linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11649+++ linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11650@@ -30,13 +30,11 @@ wakeup_pmode_return:
11651 # and restore the stack ... but you need gdt for this to work
11652 movl saved_context_esp, %esp
11653
11654- movl %cs:saved_magic, %eax
11655- cmpl $0x12345678, %eax
11656+ cmpl $0x12345678, saved_magic
11657 jne bogus_magic
11658
11659 # jump to place where we left off
11660- movl saved_eip, %eax
11661- jmp *%eax
11662+ jmp *(saved_eip)
11663
11664 bogus_magic:
11665 jmp bogus_magic
11666diff -urNp linux-2.6.32.45/arch/x86/kernel/alternative.c linux-2.6.32.45/arch/x86/kernel/alternative.c
11667--- linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11668+++ linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11669@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11670
11671 BUG_ON(p->len > MAX_PATCH_LEN);
11672 /* prep the buffer with the original instructions */
11673- memcpy(insnbuf, p->instr, p->len);
11674+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11675 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11676 (unsigned long)p->instr, p->len);
11677
11678@@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11679 if (smp_alt_once)
11680 free_init_pages("SMP alternatives",
11681 (unsigned long)__smp_locks,
11682- (unsigned long)__smp_locks_end);
11683+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11684
11685 restart_nmi();
11686 }
11687@@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11688 * instructions. And on the local CPU you need to be protected again NMI or MCE
11689 * handlers seeing an inconsistent instruction while you patch.
11690 */
11691-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11692+static void *__kprobes text_poke_early(void *addr, const void *opcode,
11693 size_t len)
11694 {
11695 unsigned long flags;
11696 local_irq_save(flags);
11697- memcpy(addr, opcode, len);
11698+
11699+ pax_open_kernel();
11700+ memcpy(ktla_ktva(addr), opcode, len);
11701 sync_core();
11702+ pax_close_kernel();
11703+
11704 local_irq_restore(flags);
11705 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11706 that causes hangs on some VIA CPUs. */
11707@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11708 */
11709 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11710 {
11711- unsigned long flags;
11712- char *vaddr;
11713+ unsigned char *vaddr = ktla_ktva(addr);
11714 struct page *pages[2];
11715- int i;
11716+ size_t i;
11717
11718 if (!core_kernel_text((unsigned long)addr)) {
11719- pages[0] = vmalloc_to_page(addr);
11720- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11721+ pages[0] = vmalloc_to_page(vaddr);
11722+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11723 } else {
11724- pages[0] = virt_to_page(addr);
11725+ pages[0] = virt_to_page(vaddr);
11726 WARN_ON(!PageReserved(pages[0]));
11727- pages[1] = virt_to_page(addr + PAGE_SIZE);
11728+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11729 }
11730 BUG_ON(!pages[0]);
11731- local_irq_save(flags);
11732- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11733- if (pages[1])
11734- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11735- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11736- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11737- clear_fixmap(FIX_TEXT_POKE0);
11738- if (pages[1])
11739- clear_fixmap(FIX_TEXT_POKE1);
11740- local_flush_tlb();
11741- sync_core();
11742- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11743- that causes hangs on some VIA CPUs. */
11744+ text_poke_early(addr, opcode, len);
11745 for (i = 0; i < len; i++)
11746- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11747- local_irq_restore(flags);
11748+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11749 return addr;
11750 }
11751diff -urNp linux-2.6.32.45/arch/x86/kernel/amd_iommu.c linux-2.6.32.45/arch/x86/kernel/amd_iommu.c
11752--- linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11753+++ linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11754@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11755 }
11756 }
11757
11758-static struct dma_map_ops amd_iommu_dma_ops = {
11759+static const struct dma_map_ops amd_iommu_dma_ops = {
11760 .alloc_coherent = alloc_coherent,
11761 .free_coherent = free_coherent,
11762 .map_page = map_page,
11763diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/apic.c linux-2.6.32.45/arch/x86/kernel/apic/apic.c
11764--- linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11765+++ linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11766@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11767 apic_write(APIC_ESR, 0);
11768 v1 = apic_read(APIC_ESR);
11769 ack_APIC_irq();
11770- atomic_inc(&irq_err_count);
11771+ atomic_inc_unchecked(&irq_err_count);
11772
11773 /*
11774 * Here is what the APIC error bits mean:
11775@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11776 u16 *bios_cpu_apicid;
11777 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11778
11779+ pax_track_stack();
11780+
11781 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11782 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11783
11784diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c
11785--- linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11786+++ linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11787@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11788 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11789 GFP_ATOMIC);
11790 if (!ioapic_entries)
11791- return 0;
11792+ return NULL;
11793
11794 for (apic = 0; apic < nr_ioapics; apic++) {
11795 ioapic_entries[apic] =
11796@@ -733,7 +733,7 @@ nomem:
11797 kfree(ioapic_entries[apic]);
11798 kfree(ioapic_entries);
11799
11800- return 0;
11801+ return NULL;
11802 }
11803
11804 /*
11805@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11806 }
11807 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11808
11809-void lock_vector_lock(void)
11810+void lock_vector_lock(void) __acquires(vector_lock)
11811 {
11812 /* Used to the online set of cpus does not change
11813 * during assign_irq_vector.
11814@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11815 spin_lock(&vector_lock);
11816 }
11817
11818-void unlock_vector_lock(void)
11819+void unlock_vector_lock(void) __releases(vector_lock)
11820 {
11821 spin_unlock(&vector_lock);
11822 }
11823@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11824 ack_APIC_irq();
11825 }
11826
11827-atomic_t irq_mis_count;
11828+atomic_unchecked_t irq_mis_count;
11829
11830 static void ack_apic_level(unsigned int irq)
11831 {
11832@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11833
11834 /* Tail end of version 0x11 I/O APIC bug workaround */
11835 if (!(v & (1 << (i & 0x1f)))) {
11836- atomic_inc(&irq_mis_count);
11837+ atomic_inc_unchecked(&irq_mis_count);
11838 spin_lock(&ioapic_lock);
11839 __mask_and_edge_IO_APIC_irq(cfg);
11840 __unmask_and_level_IO_APIC_irq(cfg);
11841diff -urNp linux-2.6.32.45/arch/x86/kernel/apm_32.c linux-2.6.32.45/arch/x86/kernel/apm_32.c
11842--- linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11843+++ linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11844@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11845 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11846 * even though they are called in protected mode.
11847 */
11848-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11849+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11850 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11851
11852 static const char driver_version[] = "1.16ac"; /* no spaces */
11853@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11854 BUG_ON(cpu != 0);
11855 gdt = get_cpu_gdt_table(cpu);
11856 save_desc_40 = gdt[0x40 / 8];
11857+
11858+ pax_open_kernel();
11859 gdt[0x40 / 8] = bad_bios_desc;
11860+ pax_close_kernel();
11861
11862 apm_irq_save(flags);
11863 APM_DO_SAVE_SEGS;
11864@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11865 &call->esi);
11866 APM_DO_RESTORE_SEGS;
11867 apm_irq_restore(flags);
11868+
11869+ pax_open_kernel();
11870 gdt[0x40 / 8] = save_desc_40;
11871+ pax_close_kernel();
11872+
11873 put_cpu();
11874
11875 return call->eax & 0xff;
11876@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11877 BUG_ON(cpu != 0);
11878 gdt = get_cpu_gdt_table(cpu);
11879 save_desc_40 = gdt[0x40 / 8];
11880+
11881+ pax_open_kernel();
11882 gdt[0x40 / 8] = bad_bios_desc;
11883+ pax_close_kernel();
11884
11885 apm_irq_save(flags);
11886 APM_DO_SAVE_SEGS;
11887@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11888 &call->eax);
11889 APM_DO_RESTORE_SEGS;
11890 apm_irq_restore(flags);
11891+
11892+ pax_open_kernel();
11893 gdt[0x40 / 8] = save_desc_40;
11894+ pax_close_kernel();
11895+
11896 put_cpu();
11897 return error;
11898 }
11899@@ -975,7 +989,7 @@ recalc:
11900
11901 static void apm_power_off(void)
11902 {
11903- unsigned char po_bios_call[] = {
11904+ const unsigned char po_bios_call[] = {
11905 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11906 0x8e, 0xd0, /* movw ax,ss */
11907 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11908@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11909 * code to that CPU.
11910 */
11911 gdt = get_cpu_gdt_table(0);
11912+
11913+ pax_open_kernel();
11914 set_desc_base(&gdt[APM_CS >> 3],
11915 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11916 set_desc_base(&gdt[APM_CS_16 >> 3],
11917 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11918 set_desc_base(&gdt[APM_DS >> 3],
11919 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11920+ pax_close_kernel();
11921
11922 proc_create("apm", 0, NULL, &apm_file_ops);
11923
11924diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c
11925--- linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11926+++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11927@@ -51,7 +51,6 @@ void foo(void)
11928 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11929 BLANK();
11930
11931- OFFSET(TI_task, thread_info, task);
11932 OFFSET(TI_exec_domain, thread_info, exec_domain);
11933 OFFSET(TI_flags, thread_info, flags);
11934 OFFSET(TI_status, thread_info, status);
11935@@ -60,6 +59,8 @@ void foo(void)
11936 OFFSET(TI_restart_block, thread_info, restart_block);
11937 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11938 OFFSET(TI_cpu, thread_info, cpu);
11939+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11940+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11941 BLANK();
11942
11943 OFFSET(GDS_size, desc_ptr, size);
11944@@ -99,6 +100,7 @@ void foo(void)
11945
11946 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11947 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11948+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11949 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11950 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11951 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11952@@ -115,6 +117,11 @@ void foo(void)
11953 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11954 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11955 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11956+
11957+#ifdef CONFIG_PAX_KERNEXEC
11958+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11959+#endif
11960+
11961 #endif
11962
11963 #ifdef CONFIG_XEN
11964diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c
11965--- linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11966+++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11967@@ -44,6 +44,8 @@ int main(void)
11968 ENTRY(addr_limit);
11969 ENTRY(preempt_count);
11970 ENTRY(status);
11971+ ENTRY(lowest_stack);
11972+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11973 #ifdef CONFIG_IA32_EMULATION
11974 ENTRY(sysenter_return);
11975 #endif
11976@@ -63,6 +65,18 @@ int main(void)
11977 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11978 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
11979 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11980+
11981+#ifdef CONFIG_PAX_KERNEXEC
11982+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11983+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11984+#endif
11985+
11986+#ifdef CONFIG_PAX_MEMORY_UDEREF
11987+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11988+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11989+ OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
11990+#endif
11991+
11992 #endif
11993
11994
11995@@ -115,6 +129,7 @@ int main(void)
11996 ENTRY(cr8);
11997 BLANK();
11998 #undef ENTRY
11999+ DEFINE(TSS_size, sizeof(struct tss_struct));
12000 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
12001 BLANK();
12002 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
12003@@ -130,6 +145,7 @@ int main(void)
12004
12005 BLANK();
12006 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12007+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12008 #ifdef CONFIG_XEN
12009 BLANK();
12010 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12011diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/amd.c
12012--- linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
12013+++ linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
12014@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
12015 unsigned int size)
12016 {
12017 /* AMD errata T13 (order #21922) */
12018- if ((c->x86 == 6)) {
12019+ if (c->x86 == 6) {
12020 /* Duron Rev A0 */
12021 if (c->x86_model == 3 && c->x86_mask == 0)
12022 size = 64;
12023diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/common.c linux-2.6.32.45/arch/x86/kernel/cpu/common.c
12024--- linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
12025+++ linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
12026@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
12027
12028 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12029
12030-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12031-#ifdef CONFIG_X86_64
12032- /*
12033- * We need valid kernel segments for data and code in long mode too
12034- * IRET will check the segment types kkeil 2000/10/28
12035- * Also sysret mandates a special GDT layout
12036- *
12037- * TLS descriptors are currently at a different place compared to i386.
12038- * Hopefully nobody expects them at a fixed place (Wine?)
12039- */
12040- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12041- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12042- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12043- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12044- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12045- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12046-#else
12047- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12048- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12049- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12050- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12051- /*
12052- * Segments used for calling PnP BIOS have byte granularity.
12053- * They code segments and data segments have fixed 64k limits,
12054- * the transfer segment sizes are set at run time.
12055- */
12056- /* 32-bit code */
12057- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12058- /* 16-bit code */
12059- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12060- /* 16-bit data */
12061- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12062- /* 16-bit data */
12063- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12064- /* 16-bit data */
12065- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12066- /*
12067- * The APM segments have byte granularity and their bases
12068- * are set at run time. All have 64k limits.
12069- */
12070- /* 32-bit code */
12071- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12072- /* 16-bit code */
12073- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12074- /* data */
12075- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12076-
12077- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12078- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12079- GDT_STACK_CANARY_INIT
12080-#endif
12081-} };
12082-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12083-
12084 static int __init x86_xsave_setup(char *s)
12085 {
12086 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12087@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
12088 {
12089 struct desc_ptr gdt_descr;
12090
12091- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12092+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12093 gdt_descr.size = GDT_SIZE - 1;
12094 load_gdt(&gdt_descr);
12095 /* Reload the per-cpu base */
12096@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
12097 /* Filter out anything that depends on CPUID levels we don't have */
12098 filter_cpuid_features(c, true);
12099
12100+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
12101+ setup_clear_cpu_cap(X86_FEATURE_SEP);
12102+#endif
12103+
12104 /* If the model name is still unset, do table lookup. */
12105 if (!c->x86_model_id[0]) {
12106 const char *p;
12107@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
12108 }
12109 __setup("clearcpuid=", setup_disablecpuid);
12110
12111+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12112+EXPORT_PER_CPU_SYMBOL(current_tinfo);
12113+
12114 #ifdef CONFIG_X86_64
12115 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12116
12117@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
12118 EXPORT_PER_CPU_SYMBOL(current_task);
12119
12120 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12121- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12122+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12123 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12124
12125 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12126@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
12127 {
12128 memset(regs, 0, sizeof(struct pt_regs));
12129 regs->fs = __KERNEL_PERCPU;
12130- regs->gs = __KERNEL_STACK_CANARY;
12131+ savesegment(gs, regs->gs);
12132
12133 return regs;
12134 }
12135@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
12136 int i;
12137
12138 cpu = stack_smp_processor_id();
12139- t = &per_cpu(init_tss, cpu);
12140+ t = init_tss + cpu;
12141 orig_ist = &per_cpu(orig_ist, cpu);
12142
12143 #ifdef CONFIG_NUMA
12144@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12145 switch_to_new_gdt(cpu);
12146 loadsegment(fs, 0);
12147
12148- load_idt((const struct desc_ptr *)&idt_descr);
12149+ load_idt(&idt_descr);
12150
12151 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12152 syscall_init();
12153@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12154 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12155 barrier();
12156
12157- check_efer();
12158 if (cpu != 0)
12159 enable_x2apic();
12160
12161@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12162 {
12163 int cpu = smp_processor_id();
12164 struct task_struct *curr = current;
12165- struct tss_struct *t = &per_cpu(init_tss, cpu);
12166+ struct tss_struct *t = init_tss + cpu;
12167 struct thread_struct *thread = &curr->thread;
12168
12169 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12170diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel.c linux-2.6.32.45/arch/x86/kernel/cpu/intel.c
12171--- linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12172+++ linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12173@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12174 * Update the IDT descriptor and reload the IDT so that
12175 * it uses the read-only mapped virtual address.
12176 */
12177- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12178+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12179 load_idt(&idt_descr);
12180 }
12181 #endif
12182diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c
12183--- linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12184+++ linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12185@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12186 return ret;
12187 }
12188
12189-static struct sysfs_ops sysfs_ops = {
12190+static const struct sysfs_ops sysfs_ops = {
12191 .show = show,
12192 .store = store,
12193 };
12194diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/Makefile linux-2.6.32.45/arch/x86/kernel/cpu/Makefile
12195--- linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12196+++ linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12197@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12198 CFLAGS_REMOVE_common.o = -pg
12199 endif
12200
12201-# Make sure load_percpu_segment has no stackprotector
12202-nostackp := $(call cc-option, -fno-stack-protector)
12203-CFLAGS_common.o := $(nostackp)
12204-
12205 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12206 obj-y += proc.o capflags.o powerflags.o common.o
12207 obj-y += vmware.o hypervisor.o sched.o
12208diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c
12209--- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12210+++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12211@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12212 return ret;
12213 }
12214
12215-static struct sysfs_ops threshold_ops = {
12216+static const struct sysfs_ops threshold_ops = {
12217 .show = show,
12218 .store = store,
12219 };
12220diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c
12221--- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12222+++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12223@@ -43,6 +43,7 @@
12224 #include <asm/ipi.h>
12225 #include <asm/mce.h>
12226 #include <asm/msr.h>
12227+#include <asm/local.h>
12228
12229 #include "mce-internal.h"
12230
12231@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12232 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12233 m->cs, m->ip);
12234
12235- if (m->cs == __KERNEL_CS)
12236+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12237 print_symbol("{%s}", m->ip);
12238 pr_cont("\n");
12239 }
12240@@ -221,10 +222,10 @@ static void print_mce_tail(void)
12241
12242 #define PANIC_TIMEOUT 5 /* 5 seconds */
12243
12244-static atomic_t mce_paniced;
12245+static atomic_unchecked_t mce_paniced;
12246
12247 static int fake_panic;
12248-static atomic_t mce_fake_paniced;
12249+static atomic_unchecked_t mce_fake_paniced;
12250
12251 /* Panic in progress. Enable interrupts and wait for final IPI */
12252 static void wait_for_panic(void)
12253@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12254 /*
12255 * Make sure only one CPU runs in machine check panic
12256 */
12257- if (atomic_inc_return(&mce_paniced) > 1)
12258+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12259 wait_for_panic();
12260 barrier();
12261
12262@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12263 console_verbose();
12264 } else {
12265 /* Don't log too much for fake panic */
12266- if (atomic_inc_return(&mce_fake_paniced) > 1)
12267+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12268 return;
12269 }
12270 print_mce_head();
12271@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12272 * might have been modified by someone else.
12273 */
12274 rmb();
12275- if (atomic_read(&mce_paniced))
12276+ if (atomic_read_unchecked(&mce_paniced))
12277 wait_for_panic();
12278 if (!monarch_timeout)
12279 goto out;
12280@@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12281 */
12282
12283 static DEFINE_SPINLOCK(mce_state_lock);
12284-static int open_count; /* #times opened */
12285+static local_t open_count; /* #times opened */
12286 static int open_exclu; /* already open exclusive? */
12287
12288 static int mce_open(struct inode *inode, struct file *file)
12289 {
12290 spin_lock(&mce_state_lock);
12291
12292- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12293+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12294 spin_unlock(&mce_state_lock);
12295
12296 return -EBUSY;
12297@@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12298
12299 if (file->f_flags & O_EXCL)
12300 open_exclu = 1;
12301- open_count++;
12302+ local_inc(&open_count);
12303
12304 spin_unlock(&mce_state_lock);
12305
12306@@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12307 {
12308 spin_lock(&mce_state_lock);
12309
12310- open_count--;
12311+ local_dec(&open_count);
12312 open_exclu = 0;
12313
12314 spin_unlock(&mce_state_lock);
12315@@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12316 static void mce_reset(void)
12317 {
12318 cpu_missing = 0;
12319- atomic_set(&mce_fake_paniced, 0);
12320+ atomic_set_unchecked(&mce_fake_paniced, 0);
12321 atomic_set(&mce_executing, 0);
12322 atomic_set(&mce_callin, 0);
12323 atomic_set(&global_nwo, 0);
12324diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c
12325--- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-03-27 14:31:47.000000000 -0400
12326+++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:33:55.000000000 -0400
12327@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *fi
12328 static int inject_init(void)
12329 {
12330 printk(KERN_INFO "Machine check injector initialized\n");
12331- mce_chrdev_ops.write = mce_write;
12332+ pax_open_kernel();
12333+ *(void **)&mce_chrdev_ops.write = mce_write;
12334+ pax_close_kernel();
12335 register_die_notifier(&mce_raise_nb);
12336 return 0;
12337 }
12338diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c
12339--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12340+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12341@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12342 return 0;
12343 }
12344
12345-static struct mtrr_ops amd_mtrr_ops = {
12346+static const struct mtrr_ops amd_mtrr_ops = {
12347 .vendor = X86_VENDOR_AMD,
12348 .set = amd_set_mtrr,
12349 .get = amd_get_mtrr,
12350diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c
12351--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12352+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12353@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12354 return 0;
12355 }
12356
12357-static struct mtrr_ops centaur_mtrr_ops = {
12358+static const struct mtrr_ops centaur_mtrr_ops = {
12359 .vendor = X86_VENDOR_CENTAUR,
12360 .set = centaur_set_mcr,
12361 .get = centaur_get_mcr,
12362diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c
12363--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12364+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12365@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12366 post_set();
12367 }
12368
12369-static struct mtrr_ops cyrix_mtrr_ops = {
12370+static const struct mtrr_ops cyrix_mtrr_ops = {
12371 .vendor = X86_VENDOR_CYRIX,
12372 .set_all = cyrix_set_all,
12373 .set = cyrix_set_arr,
12374diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c
12375--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12376+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12377@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12378 /*
12379 * Generic structure...
12380 */
12381-struct mtrr_ops generic_mtrr_ops = {
12382+const struct mtrr_ops generic_mtrr_ops = {
12383 .use_intel_if = 1,
12384 .set_all = generic_set_all,
12385 .get = generic_get_mtrr,
12386diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c
12387--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12388+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12389@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12390 u64 size_or_mask, size_and_mask;
12391 static bool mtrr_aps_delayed_init;
12392
12393-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12394+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12395
12396-struct mtrr_ops *mtrr_if;
12397+const struct mtrr_ops *mtrr_if;
12398
12399 static void set_mtrr(unsigned int reg, unsigned long base,
12400 unsigned long size, mtrr_type type);
12401
12402-void set_mtrr_ops(struct mtrr_ops *ops)
12403+void set_mtrr_ops(const struct mtrr_ops *ops)
12404 {
12405 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12406 mtrr_ops[ops->vendor] = ops;
12407diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h
12408--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12409+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12410@@ -12,19 +12,19 @@
12411 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12412
12413 struct mtrr_ops {
12414- u32 vendor;
12415- u32 use_intel_if;
12416- void (*set)(unsigned int reg, unsigned long base,
12417+ const u32 vendor;
12418+ const u32 use_intel_if;
12419+ void (* const set)(unsigned int reg, unsigned long base,
12420 unsigned long size, mtrr_type type);
12421- void (*set_all)(void);
12422+ void (* const set_all)(void);
12423
12424- void (*get)(unsigned int reg, unsigned long *base,
12425+ void (* const get)(unsigned int reg, unsigned long *base,
12426 unsigned long *size, mtrr_type *type);
12427- int (*get_free_region)(unsigned long base, unsigned long size,
12428+ int (* const get_free_region)(unsigned long base, unsigned long size,
12429 int replace_reg);
12430- int (*validate_add_page)(unsigned long base, unsigned long size,
12431+ int (* const validate_add_page)(unsigned long base, unsigned long size,
12432 unsigned int type);
12433- int (*have_wrcomb)(void);
12434+ int (* const have_wrcomb)(void);
12435 };
12436
12437 extern int generic_get_free_region(unsigned long base, unsigned long size,
12438@@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12439 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12440 unsigned int type);
12441
12442-extern struct mtrr_ops generic_mtrr_ops;
12443+extern const struct mtrr_ops generic_mtrr_ops;
12444
12445 extern int positive_have_wrcomb(void);
12446
12447@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12448 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12449 void get_mtrr_state(void);
12450
12451-extern void set_mtrr_ops(struct mtrr_ops *ops);
12452+extern void set_mtrr_ops(const struct mtrr_ops *ops);
12453
12454 extern u64 size_or_mask, size_and_mask;
12455-extern struct mtrr_ops *mtrr_if;
12456+extern const struct mtrr_ops *mtrr_if;
12457
12458 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12459 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
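
The MTRR hunks above are one instance of the patch's broad constification work: the vendor ops tables (cyrix_mtrr_ops, generic_mtrr_ops) become const objects, every function pointer inside struct mtrr_ops becomes a const member, and the mtrr_ops[] registry is tagged __read_only, so these hook tables can sit in read-only memory and can no longer be retargeted at runtime. A minimal standalone C sketch of the same pattern (invented names, plain userspace C, not code taken from the patch):

#include <stdio.h>

struct demo_ops {
    void (* const set)(int reg);   /* the pointer itself is immutable */
    int  (* const get)(void);
};

static void demo_set(int reg) { printf("set %d\n", reg); }
static int  demo_get(void)    { return 42; }

static const struct demo_ops demo = {
    .set = demo_set,
    .get = demo_get,
};

int main(void)
{
    demo.set(1);
    printf("get -> %d\n", demo.get());
    /* demo.set = NULL;  would not compile: assignment of read-only member */
    return 0;
}

With both the members and the object declared const, overwriting a hook such as demo.set is a compile-time error, and the table can be placed in a read-only section.
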
12460diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c
12461--- linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12462+++ linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12463@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12464
12465 /* Interface defining a CPU specific perfctr watchdog */
12466 struct wd_ops {
12467- int (*reserve)(void);
12468- void (*unreserve)(void);
12469- int (*setup)(unsigned nmi_hz);
12470- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12471- void (*stop)(void);
12472+ int (* const reserve)(void);
12473+ void (* const unreserve)(void);
12474+ int (* const setup)(unsigned nmi_hz);
12475+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12476+ void (* const stop)(void);
12477 unsigned perfctr;
12478 unsigned evntsel;
12479 u64 checkbit;
12480@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12481 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12482 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12483
12484+/* cannot be const */
12485 static struct wd_ops intel_arch_wd_ops;
12486
12487 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12488@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12489 return 1;
12490 }
12491
12492+/* cannot be const */
12493 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12494 .reserve = single_msr_reserve,
12495 .unreserve = single_msr_unreserve,
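
By contrast, struct wd_ops gets const function-pointer members here, but intel_arch_wd_ops itself only receives the new "cannot be const" comment: parts of that particular instance are filled in at runtime once the CPU's perfmon has been probed, so it has to stay writable while the fully static per-vendor instances do not. A hypothetical standalone sketch of that distinction (names and values invented for illustration):

#include <stdio.h>

struct probe_ops {
    int (*setup)(unsigned hz);
    unsigned perfctr;            /* only known after probing the hardware */
};

static int generic_setup(unsigned hz) { return hz > 0; }

/* not const: .perfctr is written once the probe has run */
static struct probe_ops runtime_ops = {
    .setup = generic_setup,
};

int main(void)
{
    runtime_ops.perfctr = 0xc1;  /* made-up probe result */
    printf("setup ok: %d, perfctr %#x\n",
           runtime_ops.setup(1000), runtime_ops.perfctr);
    return 0;
}
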
12496diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c
12497--- linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12498+++ linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12499@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12500 * count to the generic event atomically:
12501 */
12502 again:
12503- prev_raw_count = atomic64_read(&hwc->prev_count);
12504+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12505 rdmsrl(hwc->event_base + idx, new_raw_count);
12506
12507- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12508+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12509 new_raw_count) != prev_raw_count)
12510 goto again;
12511
12512@@ -741,7 +741,7 @@ again:
12513 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12514 delta >>= shift;
12515
12516- atomic64_add(delta, &event->count);
12517+ atomic64_add_unchecked(delta, &event->count);
12518 atomic64_sub(delta, &hwc->period_left);
12519
12520 return new_raw_count;
12521@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12522 * The hw event starts counting from this event offset,
12523 * mark it to be able to extra future deltas:
12524 */
12525- atomic64_set(&hwc->prev_count, (u64)-left);
12526+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12527
12528 err = checking_wrmsrl(hwc->event_base + idx,
12529 (u64)(-left) & x86_pmu.event_mask);
12530@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12531 break;
12532
12533 callchain_store(entry, frame.return_address);
12534- fp = frame.next_frame;
12535+ fp = (__force const void __user *)frame.next_frame;
12536 }
12537 }
12538
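
The perf_event.c hunks move the hwc->prev_count and event->count arithmetic from atomic64_* to the patch's atomic64_*_unchecked variants. Under PaX's overflow-checking atomics, the plain operations are instrumented to catch signed overflow; free-running hardware counter values and their deltas are expected to wrap, so they are opted out through the unchecked type. A rough userspace model of the distinction (assumption: __builtin_add_overflow stands in for the instrumented kernel atomics; this is not the kernel API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int64_t checked_add(int64_t a, int64_t b)
{
    int64_t r;
    if (__builtin_add_overflow(a, b, &r)) {  /* the checked path traps here */
        fprintf(stderr, "counter overflow detected\n");
        abort();
    }
    return r;
}

static int64_t unchecked_add(int64_t a, int64_t b)
{
    return (int64_t)((uint64_t)a + (uint64_t)b);  /* wrap-around is intentional */
}

int main(void)
{
    printf("%lld\n", (long long)unchecked_add(INT64_MAX, 1)); /* wraps quietly */
    printf("%lld\n", (long long)checked_add(1, 2));           /* fine */
    return 0;
}
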
12539diff -urNp linux-2.6.32.45/arch/x86/kernel/crash.c linux-2.6.32.45/arch/x86/kernel/crash.c
12540--- linux-2.6.32.45/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12541+++ linux-2.6.32.45/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12542@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12543 regs = args->regs;
12544
12545 #ifdef CONFIG_X86_32
12546- if (!user_mode_vm(regs)) {
12547+ if (!user_mode(regs)) {
12548 crash_fixup_ss_esp(&fixed_regs, regs);
12549 regs = &fixed_regs;
12550 }
12551diff -urNp linux-2.6.32.45/arch/x86/kernel/doublefault_32.c linux-2.6.32.45/arch/x86/kernel/doublefault_32.c
12552--- linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12553+++ linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12554@@ -11,7 +11,7 @@
12555
12556 #define DOUBLEFAULT_STACKSIZE (1024)
12557 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12558-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12559+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12560
12561 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12562
12563@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12564 unsigned long gdt, tss;
12565
12566 store_gdt(&gdt_desc);
12567- gdt = gdt_desc.address;
12568+ gdt = (unsigned long)gdt_desc.address;
12569
12570 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12571
12572@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12573 /* 0x2 bit is always set */
12574 .flags = X86_EFLAGS_SF | 0x2,
12575 .sp = STACK_START,
12576- .es = __USER_DS,
12577+ .es = __KERNEL_DS,
12578 .cs = __KERNEL_CS,
12579 .ss = __KERNEL_DS,
12580- .ds = __USER_DS,
12581+ .ds = __KERNEL_DS,
12582 .fs = __KERNEL_PERCPU,
12583
12584 .__cr3 = __pa_nodebug(swapper_pg_dir),
12585diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c
12586--- linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12587+++ linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12588@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12589 #endif
12590
12591 for (;;) {
12592- struct thread_info *context;
12593+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12594+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12595
12596- context = (struct thread_info *)
12597- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12598- bp = print_context_stack(context, stack, bp, ops,
12599- data, NULL, &graph);
12600-
12601- stack = (unsigned long *)context->previous_esp;
12602- if (!stack)
12603+ if (stack_start == task_stack_page(task))
12604 break;
12605+ stack = *(unsigned long **)stack_start;
12606 if (ops->stack(data, "IRQ") < 0)
12607 break;
12608 touch_nmi_watchdog();
12609@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12610 * When in-kernel, we also print out the stack and code at the
12611 * time of the fault..
12612 */
12613- if (!user_mode_vm(regs)) {
12614+ if (!user_mode(regs)) {
12615 unsigned int code_prologue = code_bytes * 43 / 64;
12616 unsigned int code_len = code_bytes;
12617 unsigned char c;
12618 u8 *ip;
12619+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12620
12621 printk(KERN_EMERG "Stack:\n");
12622 show_stack_log_lvl(NULL, regs, &regs->sp,
12623@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12624
12625 printk(KERN_EMERG "Code: ");
12626
12627- ip = (u8 *)regs->ip - code_prologue;
12628+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12629 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12630 /* try starting at IP */
12631- ip = (u8 *)regs->ip;
12632+ ip = (u8 *)regs->ip + cs_base;
12633 code_len = code_len - code_prologue + 1;
12634 }
12635 for (i = 0; i < code_len; i++, ip++) {
12636@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12637 printk(" Bad EIP value.");
12638 break;
12639 }
12640- if (ip == (u8 *)regs->ip)
12641+ if (ip == (u8 *)regs->ip + cs_base)
12642 printk("<%02x> ", c);
12643 else
12644 printk("%02x ", c);
12645@@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12646 {
12647 unsigned short ud2;
12648
12649+ ip = ktla_ktva(ip);
12650 if (ip < PAGE_OFFSET)
12651 return 0;
12652 if (probe_kernel_address((unsigned short *)ip, ud2))
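
The rewritten dump_trace() loop above no longer follows thread_info->previous_esp; it derives the base of the current stack by masking the stack pointer with ~(THREAD_SIZE-1) and stops once that base equals task_stack_page(task). The masking only works because THREAD_SIZE is a power of two and kernel stacks are THREAD_SIZE-aligned. A standalone illustration of the computation (8 KiB is an assumed value; the real THREAD_SIZE depends on the configuration):

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL   /* assumption: 8 KiB, power of two */

static void *stack_base(const void *sp)
{
    return (void *)((uintptr_t)sp & ~(THREAD_SIZE - 1));
}

int main(void)
{
    static unsigned char stack[THREAD_SIZE]
        __attribute__((aligned(THREAD_SIZE)));
    void *somewhere_inside = &stack[1234];

    printf("base=%p computed=%p\n", (void *)stack, stack_base(somewhere_inside));
    return 0;
}
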
12653diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c
12654--- linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12655+++ linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12656@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12657 unsigned long *irq_stack_end =
12658 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12659 unsigned used = 0;
12660- struct thread_info *tinfo;
12661 int graph = 0;
12662+ void *stack_start;
12663
12664 if (!task)
12665 task = current;
12666@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12667 * current stack address. If the stacks consist of nested
12668 * exceptions
12669 */
12670- tinfo = task_thread_info(task);
12671 for (;;) {
12672 char *id;
12673 unsigned long *estack_end;
12674+
12675 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12676 &used, &id);
12677
12678@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12679 if (ops->stack(data, id) < 0)
12680 break;
12681
12682- bp = print_context_stack(tinfo, stack, bp, ops,
12683+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12684 data, estack_end, &graph);
12685 ops->stack(data, "<EOE>");
12686 /*
12687@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12688 if (stack >= irq_stack && stack < irq_stack_end) {
12689 if (ops->stack(data, "IRQ") < 0)
12690 break;
12691- bp = print_context_stack(tinfo, stack, bp,
12692+ bp = print_context_stack(task, irq_stack, stack, bp,
12693 ops, data, irq_stack_end, &graph);
12694 /*
12695 * We link to the next stack (which would be
12696@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12697 /*
12698 * This handles the process stack:
12699 */
12700- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12701+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12702+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12703 put_cpu();
12704 }
12705 EXPORT_SYMBOL(dump_trace);
12706diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.c linux-2.6.32.45/arch/x86/kernel/dumpstack.c
12707--- linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12708+++ linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12709@@ -2,6 +2,9 @@
12710 * Copyright (C) 1991, 1992 Linus Torvalds
12711 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12712 */
12713+#ifdef CONFIG_GRKERNSEC_HIDESYM
12714+#define __INCLUDED_BY_HIDESYM 1
12715+#endif
12716 #include <linux/kallsyms.h>
12717 #include <linux/kprobes.h>
12718 #include <linux/uaccess.h>
12719@@ -28,7 +31,7 @@ static int die_counter;
12720
12721 void printk_address(unsigned long address, int reliable)
12722 {
12723- printk(" [<%p>] %s%pS\n", (void *) address,
12724+ printk(" [<%p>] %s%pA\n", (void *) address,
12725 reliable ? "" : "? ", (void *) address);
12726 }
12727
12728@@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12729 static void
12730 print_ftrace_graph_addr(unsigned long addr, void *data,
12731 const struct stacktrace_ops *ops,
12732- struct thread_info *tinfo, int *graph)
12733+ struct task_struct *task, int *graph)
12734 {
12735- struct task_struct *task = tinfo->task;
12736 unsigned long ret_addr;
12737 int index = task->curr_ret_stack;
12738
12739@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12740 static inline void
12741 print_ftrace_graph_addr(unsigned long addr, void *data,
12742 const struct stacktrace_ops *ops,
12743- struct thread_info *tinfo, int *graph)
12744+ struct task_struct *task, int *graph)
12745 { }
12746 #endif
12747
12748@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12749 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12750 */
12751
12752-static inline int valid_stack_ptr(struct thread_info *tinfo,
12753- void *p, unsigned int size, void *end)
12754+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12755 {
12756- void *t = tinfo;
12757 if (end) {
12758 if (p < end && p >= (end-THREAD_SIZE))
12759 return 1;
12760@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12761 }
12762
12763 unsigned long
12764-print_context_stack(struct thread_info *tinfo,
12765+print_context_stack(struct task_struct *task, void *stack_start,
12766 unsigned long *stack, unsigned long bp,
12767 const struct stacktrace_ops *ops, void *data,
12768 unsigned long *end, int *graph)
12769 {
12770 struct stack_frame *frame = (struct stack_frame *)bp;
12771
12772- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12773+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12774 unsigned long addr;
12775
12776 addr = *stack;
12777@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12778 } else {
12779 ops->address(data, addr, 0);
12780 }
12781- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12782+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12783 }
12784 stack++;
12785 }
12786@@ -180,7 +180,7 @@ void dump_stack(void)
12787 #endif
12788
12789 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12790- current->pid, current->comm, print_tainted(),
12791+ task_pid_nr(current), current->comm, print_tainted(),
12792 init_utsname()->release,
12793 (int)strcspn(init_utsname()->version, " "),
12794 init_utsname()->version);
12795@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12796 return flags;
12797 }
12798
12799+extern void gr_handle_kernel_exploit(void);
12800+
12801 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12802 {
12803 if (regs && kexec_should_crash(current))
12804@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12805 panic("Fatal exception in interrupt");
12806 if (panic_on_oops)
12807 panic("Fatal exception");
12808- do_exit(signr);
12809+
12810+ gr_handle_kernel_exploit();
12811+
12812+ do_group_exit(signr);
12813 }
12814
12815 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12816@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12817 unsigned long flags = oops_begin();
12818 int sig = SIGSEGV;
12819
12820- if (!user_mode_vm(regs))
12821+ if (!user_mode(regs))
12822 report_bug(regs->ip, regs);
12823
12824 if (__die(str, regs, err))
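
Two behavioural changes in dumpstack.c are worth calling out: printk_address() switches from %pS to the patch's %pA specifier, which appears tied to the GRKERNSEC_HIDESYM support hinted at by the new __INCLUDED_BY_HIDESYM define, and the oops path now calls gr_handle_kernel_exploit(), grsecurity's hook for reacting to process-triggered kernel faults, before ending the task with do_group_exit(signr) instead of do_exit(signr), so the whole thread group dies rather than just the oopsing thread. A loose userspace analogy for that last point (pthreads stand in for the threads of one task group; this is an analogy, not the kernel code):

/* build with: cc -pthread thread_exit_demo.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void *worker(void *arg)
{
    (void)arg;
    sleep(1);
    puts("worker still alive");   /* printed only in the pthread_exit() case */
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
#ifdef WHOLE_GROUP
    exit(1);             /* analogue of do_group_exit(): worker never prints */
#else
    pthread_exit(NULL);  /* analogue of do_exit(): the worker keeps running */
#endif
}

Built as-is, the worker's message appears because pthread_exit() ends only the main thread; defining WHOLE_GROUP switches to exit(), which takes the worker down as well, the do_group_exit() behaviour the patch wants after an oops.
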
12825diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.h linux-2.6.32.45/arch/x86/kernel/dumpstack.h
12826--- linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12827+++ linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12828@@ -15,7 +15,7 @@
12829 #endif
12830
12831 extern unsigned long
12832-print_context_stack(struct thread_info *tinfo,
12833+print_context_stack(struct task_struct *task, void *stack_start,
12834 unsigned long *stack, unsigned long bp,
12835 const struct stacktrace_ops *ops, void *data,
12836 unsigned long *end, int *graph);
12837diff -urNp linux-2.6.32.45/arch/x86/kernel/e820.c linux-2.6.32.45/arch/x86/kernel/e820.c
12838--- linux-2.6.32.45/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12839+++ linux-2.6.32.45/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12840@@ -733,7 +733,7 @@ struct early_res {
12841 };
12842 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12843 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12844- {}
12845+ { 0, 0, {0}, 0 }
12846 };
12847
12848 static int __init find_overlapped_early(u64 start, u64 end)
12849diff -urNp linux-2.6.32.45/arch/x86/kernel/early_printk.c linux-2.6.32.45/arch/x86/kernel/early_printk.c
12850--- linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12851+++ linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12852@@ -7,6 +7,7 @@
12853 #include <linux/pci_regs.h>
12854 #include <linux/pci_ids.h>
12855 #include <linux/errno.h>
12856+#include <linux/sched.h>
12857 #include <asm/io.h>
12858 #include <asm/processor.h>
12859 #include <asm/fcntl.h>
12860@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12861 int n;
12862 va_list ap;
12863
12864+ pax_track_stack();
12865+
12866 va_start(ap, fmt);
12867 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12868 early_console->write(early_console, buf, n);
12869diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_32.c linux-2.6.32.45/arch/x86/kernel/efi_32.c
12870--- linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12871+++ linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12872@@ -38,70 +38,38 @@
12873 */
12874
12875 static unsigned long efi_rt_eflags;
12876-static pgd_t efi_bak_pg_dir_pointer[2];
12877+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12878
12879-void efi_call_phys_prelog(void)
12880+void __init efi_call_phys_prelog(void)
12881 {
12882- unsigned long cr4;
12883- unsigned long temp;
12884 struct desc_ptr gdt_descr;
12885
12886 local_irq_save(efi_rt_eflags);
12887
12888- /*
12889- * If I don't have PAE, I should just duplicate two entries in page
12890- * directory. If I have PAE, I just need to duplicate one entry in
12891- * page directory.
12892- */
12893- cr4 = read_cr4_safe();
12894
12895- if (cr4 & X86_CR4_PAE) {
12896- efi_bak_pg_dir_pointer[0].pgd =
12897- swapper_pg_dir[pgd_index(0)].pgd;
12898- swapper_pg_dir[0].pgd =
12899- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12900- } else {
12901- efi_bak_pg_dir_pointer[0].pgd =
12902- swapper_pg_dir[pgd_index(0)].pgd;
12903- efi_bak_pg_dir_pointer[1].pgd =
12904- swapper_pg_dir[pgd_index(0x400000)].pgd;
12905- swapper_pg_dir[pgd_index(0)].pgd =
12906- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12907- temp = PAGE_OFFSET + 0x400000;
12908- swapper_pg_dir[pgd_index(0x400000)].pgd =
12909- swapper_pg_dir[pgd_index(temp)].pgd;
12910- }
12911+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12912+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12913+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12914
12915 /*
12916 * After the lock is released, the original page table is restored.
12917 */
12918 __flush_tlb_all();
12919
12920- gdt_descr.address = __pa(get_cpu_gdt_table(0));
12921+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12922 gdt_descr.size = GDT_SIZE - 1;
12923 load_gdt(&gdt_descr);
12924 }
12925
12926-void efi_call_phys_epilog(void)
12927+void __init efi_call_phys_epilog(void)
12928 {
12929- unsigned long cr4;
12930 struct desc_ptr gdt_descr;
12931
12932- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12933+ gdt_descr.address = get_cpu_gdt_table(0);
12934 gdt_descr.size = GDT_SIZE - 1;
12935 load_gdt(&gdt_descr);
12936
12937- cr4 = read_cr4_safe();
12938-
12939- if (cr4 & X86_CR4_PAE) {
12940- swapper_pg_dir[pgd_index(0)].pgd =
12941- efi_bak_pg_dir_pointer[0].pgd;
12942- } else {
12943- swapper_pg_dir[pgd_index(0)].pgd =
12944- efi_bak_pg_dir_pointer[0].pgd;
12945- swapper_pg_dir[pgd_index(0x400000)].pgd =
12946- efi_bak_pg_dir_pointer[1].pgd;
12947- }
12948+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12949
12950 /*
12951 * After the lock is released, the original page table is restored.
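
The EFI prelog/epilog rewrite above folds the separate PAE and non-PAE cases into two clone_pgd_range() calls: the prelog saves the low PGD slots and copies the kernel mappings over them so the firmware can be called through flat, physical-address mappings, and the epilog copies the saved slots back. clone_pgd_range() in this kernel is essentially a memcpy over pgd entries, so the flow can be modelled in a few lines (simplified: one count is used where the patch distinguishes KERNEL_PGD_PTRS from KERNEL_PGD_BOUNDARY, and the sizes are made up):

#include <string.h>
#include <stdio.h>

typedef struct { unsigned long pgd; } pgd_t;

#define PTRS_PER_PGD        1024   /* illustration only */
#define KERNEL_PGD_BOUNDARY  768
#define NR_SAVED            (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

static pgd_t live[PTRS_PER_PGD];   /* stands in for swapper_pg_dir */
static pgd_t saved[NR_SAVED];      /* stands in for efi_bak_pg_dir_pointer */

/* same idea as the kernel helper: copy a run of pgd slots */
static void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
{
    memcpy(dst, src, count * sizeof(pgd_t));
}

int main(void)
{
    live[KERNEL_PGD_BOUNDARY].pgd = 0xdead1000;  /* pretend kernel mapping */

    /* prelog: save the low slots, then mirror the kernel mappings into them */
    clone_pgd_range(saved, live, NR_SAVED);
    clone_pgd_range(live, live + KERNEL_PGD_BOUNDARY, NR_SAVED);
    printf("during EFI call: slot 0 = %lx\n", live[0].pgd);

    /* epilog: put the saved low slots back */
    clone_pgd_range(live, saved, NR_SAVED);
    printf("after restore:   slot 0 = %lx\n", live[0].pgd);
    return 0;
}
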
12952diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S
12953--- linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12954+++ linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12955@@ -6,6 +6,7 @@
12956 */
12957
12958 #include <linux/linkage.h>
12959+#include <linux/init.h>
12960 #include <asm/page_types.h>
12961
12962 /*
12963@@ -20,7 +21,7 @@
12964 * service functions will comply with gcc calling convention, too.
12965 */
12966
12967-.text
12968+__INIT
12969 ENTRY(efi_call_phys)
12970 /*
12971 * 0. The function can only be called in Linux kernel. So CS has been
12972@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
12973 * The mapping of lower virtual memory has been created in prelog and
12974 * epilog.
12975 */
12976- movl $1f, %edx
12977- subl $__PAGE_OFFSET, %edx
12978- jmp *%edx
12979+ jmp 1f-__PAGE_OFFSET
12980 1:
12981
12982 /*
12983@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
12984 * parameter 2, ..., param n. To make things easy, we save the return
12985 * address of efi_call_phys in a global variable.
12986 */
12987- popl %edx
12988- movl %edx, saved_return_addr
12989- /* get the function pointer into ECX*/
12990- popl %ecx
12991- movl %ecx, efi_rt_function_ptr
12992- movl $2f, %edx
12993- subl $__PAGE_OFFSET, %edx
12994- pushl %edx
12995+ popl (saved_return_addr)
12996+ popl (efi_rt_function_ptr)
12997
12998 /*
12999 * 3. Clear PG bit in %CR0.
13000@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
13001 /*
13002 * 5. Call the physical function.
13003 */
13004- jmp *%ecx
13005+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
13006
13007-2:
13008 /*
13009 * 6. After EFI runtime service returns, control will return to
13010 * following instruction. We'd better readjust stack pointer first.
13011@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
13012 movl %cr0, %edx
13013 orl $0x80000000, %edx
13014 movl %edx, %cr0
13015- jmp 1f
13016-1:
13017+
13018 /*
13019 * 8. Now restore the virtual mode from flat mode by
13020 * adding EIP with PAGE_OFFSET.
13021 */
13022- movl $1f, %edx
13023- jmp *%edx
13024+ jmp 1f+__PAGE_OFFSET
13025 1:
13026
13027 /*
13028 * 9. Balance the stack. And because EAX contain the return value,
13029 * we'd better not clobber it.
13030 */
13031- leal efi_rt_function_ptr, %edx
13032- movl (%edx), %ecx
13033- pushl %ecx
13034+ pushl (efi_rt_function_ptr)
13035
13036 /*
13037- * 10. Push the saved return address onto the stack and return.
13038+ * 10. Return to the saved return address.
13039 */
13040- leal saved_return_addr, %edx
13041- movl (%edx), %ecx
13042- pushl %ecx
13043- ret
13044+ jmpl *(saved_return_addr)
13045 ENDPROC(efi_call_phys)
13046 .previous
13047
13048-.data
13049+__INITDATA
13050 saved_return_addr:
13051 .long 0
13052 efi_rt_function_ptr:
13053diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_32.S linux-2.6.32.45/arch/x86/kernel/entry_32.S
13054--- linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
13055+++ linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
13056@@ -185,13 +185,146 @@
13057 /*CFI_REL_OFFSET gs, PT_GS*/
13058 .endm
13059 .macro SET_KERNEL_GS reg
13060+
13061+#ifdef CONFIG_CC_STACKPROTECTOR
13062 movl $(__KERNEL_STACK_CANARY), \reg
13063+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13064+ movl $(__USER_DS), \reg
13065+#else
13066+ xorl \reg, \reg
13067+#endif
13068+
13069 movl \reg, %gs
13070 .endm
13071
13072 #endif /* CONFIG_X86_32_LAZY_GS */
13073
13074-.macro SAVE_ALL
13075+.macro pax_enter_kernel
13076+#ifdef CONFIG_PAX_KERNEXEC
13077+ call pax_enter_kernel
13078+#endif
13079+.endm
13080+
13081+.macro pax_exit_kernel
13082+#ifdef CONFIG_PAX_KERNEXEC
13083+ call pax_exit_kernel
13084+#endif
13085+.endm
13086+
13087+#ifdef CONFIG_PAX_KERNEXEC
13088+ENTRY(pax_enter_kernel)
13089+#ifdef CONFIG_PARAVIRT
13090+ pushl %eax
13091+ pushl %ecx
13092+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13093+ mov %eax, %esi
13094+#else
13095+ mov %cr0, %esi
13096+#endif
13097+ bts $16, %esi
13098+ jnc 1f
13099+ mov %cs, %esi
13100+ cmp $__KERNEL_CS, %esi
13101+ jz 3f
13102+ ljmp $__KERNEL_CS, $3f
13103+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13104+2:
13105+#ifdef CONFIG_PARAVIRT
13106+ mov %esi, %eax
13107+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13108+#else
13109+ mov %esi, %cr0
13110+#endif
13111+3:
13112+#ifdef CONFIG_PARAVIRT
13113+ popl %ecx
13114+ popl %eax
13115+#endif
13116+ ret
13117+ENDPROC(pax_enter_kernel)
13118+
13119+ENTRY(pax_exit_kernel)
13120+#ifdef CONFIG_PARAVIRT
13121+ pushl %eax
13122+ pushl %ecx
13123+#endif
13124+ mov %cs, %esi
13125+ cmp $__KERNEXEC_KERNEL_CS, %esi
13126+ jnz 2f
13127+#ifdef CONFIG_PARAVIRT
13128+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13129+ mov %eax, %esi
13130+#else
13131+ mov %cr0, %esi
13132+#endif
13133+ btr $16, %esi
13134+ ljmp $__KERNEL_CS, $1f
13135+1:
13136+#ifdef CONFIG_PARAVIRT
13137+ mov %esi, %eax
13138+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13139+#else
13140+ mov %esi, %cr0
13141+#endif
13142+2:
13143+#ifdef CONFIG_PARAVIRT
13144+ popl %ecx
13145+ popl %eax
13146+#endif
13147+ ret
13148+ENDPROC(pax_exit_kernel)
13149+#endif
13150+
13151+.macro pax_erase_kstack
13152+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13153+ call pax_erase_kstack
13154+#endif
13155+.endm
13156+
13157+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13158+/*
13159+ * ebp: thread_info
13160+ * ecx, edx: can be clobbered
13161+ */
13162+ENTRY(pax_erase_kstack)
13163+ pushl %edi
13164+ pushl %eax
13165+
13166+ mov TI_lowest_stack(%ebp), %edi
13167+ mov $-0xBEEF, %eax
13168+ std
13169+
13170+1: mov %edi, %ecx
13171+ and $THREAD_SIZE_asm - 1, %ecx
13172+ shr $2, %ecx
13173+ repne scasl
13174+ jecxz 2f
13175+
13176+ cmp $2*16, %ecx
13177+ jc 2f
13178+
13179+ mov $2*16, %ecx
13180+ repe scasl
13181+ jecxz 2f
13182+ jne 1b
13183+
13184+2: cld
13185+ mov %esp, %ecx
13186+ sub %edi, %ecx
13187+ shr $2, %ecx
13188+ rep stosl
13189+
13190+ mov TI_task_thread_sp0(%ebp), %edi
13191+ sub $128, %edi
13192+ mov %edi, TI_lowest_stack(%ebp)
13193+
13194+ popl %eax
13195+ popl %edi
13196+ ret
13197+ENDPROC(pax_erase_kstack)
13198+#endif
13199+
13200+.macro __SAVE_ALL _DS
13201 cld
13202 PUSH_GS
13203 pushl %fs
13204@@ -224,7 +357,7 @@
13205 pushl %ebx
13206 CFI_ADJUST_CFA_OFFSET 4
13207 CFI_REL_OFFSET ebx, 0
13208- movl $(__USER_DS), %edx
13209+ movl $\_DS, %edx
13210 movl %edx, %ds
13211 movl %edx, %es
13212 movl $(__KERNEL_PERCPU), %edx
13213@@ -232,6 +365,15 @@
13214 SET_KERNEL_GS %edx
13215 .endm
13216
13217+.macro SAVE_ALL
13218+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13219+ __SAVE_ALL __KERNEL_DS
13220+ pax_enter_kernel
13221+#else
13222+ __SAVE_ALL __USER_DS
13223+#endif
13224+.endm
13225+
13226 .macro RESTORE_INT_REGS
13227 popl %ebx
13228 CFI_ADJUST_CFA_OFFSET -4
13229@@ -352,7 +494,15 @@ check_userspace:
13230 movb PT_CS(%esp), %al
13231 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13232 cmpl $USER_RPL, %eax
13233+
13234+#ifdef CONFIG_PAX_KERNEXEC
13235+ jae resume_userspace
13236+
13237+ PAX_EXIT_KERNEL
13238+ jmp resume_kernel
13239+#else
13240 jb resume_kernel # not returning to v8086 or userspace
13241+#endif
13242
13243 ENTRY(resume_userspace)
13244 LOCKDEP_SYS_EXIT
13245@@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13246 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13247 # int/exception return?
13248 jne work_pending
13249- jmp restore_all
13250+ jmp restore_all_pax
13251 END(ret_from_exception)
13252
13253 #ifdef CONFIG_PREEMPT
13254@@ -414,25 +564,36 @@ sysenter_past_esp:
13255 /*CFI_REL_OFFSET cs, 0*/
13256 /*
13257 * Push current_thread_info()->sysenter_return to the stack.
13258- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13259- * pushed above; +8 corresponds to copy_thread's esp0 setting.
13260 */
13261- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13262+ pushl $0
13263 CFI_ADJUST_CFA_OFFSET 4
13264 CFI_REL_OFFSET eip, 0
13265
13266 pushl %eax
13267 CFI_ADJUST_CFA_OFFSET 4
13268 SAVE_ALL
13269+ GET_THREAD_INFO(%ebp)
13270+ movl TI_sysenter_return(%ebp),%ebp
13271+ movl %ebp,PT_EIP(%esp)
13272 ENABLE_INTERRUPTS(CLBR_NONE)
13273
13274 /*
13275 * Load the potential sixth argument from user stack.
13276 * Careful about security.
13277 */
13278+ movl PT_OLDESP(%esp),%ebp
13279+
13280+#ifdef CONFIG_PAX_MEMORY_UDEREF
13281+ mov PT_OLDSS(%esp),%ds
13282+1: movl %ds:(%ebp),%ebp
13283+ push %ss
13284+ pop %ds
13285+#else
13286 cmpl $__PAGE_OFFSET-3,%ebp
13287 jae syscall_fault
13288 1: movl (%ebp),%ebp
13289+#endif
13290+
13291 movl %ebp,PT_EBP(%esp)
13292 .section __ex_table,"a"
13293 .align 4
13294@@ -455,12 +616,23 @@ sysenter_do_call:
13295 testl $_TIF_ALLWORK_MASK, %ecx
13296 jne sysexit_audit
13297 sysenter_exit:
13298+
13299+#ifdef CONFIG_PAX_RANDKSTACK
13300+ pushl_cfi %eax
13301+ call pax_randomize_kstack
13302+ popl_cfi %eax
13303+#endif
13304+
13305+ pax_erase_kstack
13306+
13307 /* if something modifies registers it must also disable sysexit */
13308 movl PT_EIP(%esp), %edx
13309 movl PT_OLDESP(%esp), %ecx
13310 xorl %ebp,%ebp
13311 TRACE_IRQS_ON
13312 1: mov PT_FS(%esp), %fs
13313+2: mov PT_DS(%esp), %ds
13314+3: mov PT_ES(%esp), %es
13315 PTGS_TO_GS
13316 ENABLE_INTERRUPTS_SYSEXIT
13317
13318@@ -477,6 +649,9 @@ sysenter_audit:
13319 movl %eax,%edx /* 2nd arg: syscall number */
13320 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13321 call audit_syscall_entry
13322+
13323+ pax_erase_kstack
13324+
13325 pushl %ebx
13326 CFI_ADJUST_CFA_OFFSET 4
13327 movl PT_EAX(%esp),%eax /* reload syscall number */
13328@@ -504,11 +679,17 @@ sysexit_audit:
13329
13330 CFI_ENDPROC
13331 .pushsection .fixup,"ax"
13332-2: movl $0,PT_FS(%esp)
13333+4: movl $0,PT_FS(%esp)
13334+ jmp 1b
13335+5: movl $0,PT_DS(%esp)
13336+ jmp 1b
13337+6: movl $0,PT_ES(%esp)
13338 jmp 1b
13339 .section __ex_table,"a"
13340 .align 4
13341- .long 1b,2b
13342+ .long 1b,4b
13343+ .long 2b,5b
13344+ .long 3b,6b
13345 .popsection
13346 PTGS_TO_GS_EX
13347 ENDPROC(ia32_sysenter_target)
13348@@ -538,6 +719,14 @@ syscall_exit:
13349 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13350 jne syscall_exit_work
13351
13352+restore_all_pax:
13353+
13354+#ifdef CONFIG_PAX_RANDKSTACK
13355+ call pax_randomize_kstack
13356+#endif
13357+
13358+ pax_erase_kstack
13359+
13360 restore_all:
13361 TRACE_IRQS_IRET
13362 restore_all_notrace:
13363@@ -602,7 +791,13 @@ ldt_ss:
13364 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13365 mov %dx, %ax /* eax: new kernel esp */
13366 sub %eax, %edx /* offset (low word is 0) */
13367- PER_CPU(gdt_page, %ebx)
13368+#ifdef CONFIG_SMP
13369+ movl PER_CPU_VAR(cpu_number), %ebx
13370+ shll $PAGE_SHIFT_asm, %ebx
13371+ addl $cpu_gdt_table, %ebx
13372+#else
13373+ movl $cpu_gdt_table, %ebx
13374+#endif
13375 shr $16, %edx
13376 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13377 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13378@@ -636,31 +831,25 @@ work_resched:
13379 movl TI_flags(%ebp), %ecx
13380 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13381 # than syscall tracing?
13382- jz restore_all
13383+ jz restore_all_pax
13384 testb $_TIF_NEED_RESCHED, %cl
13385 jnz work_resched
13386
13387 work_notifysig: # deal with pending signals and
13388 # notify-resume requests
13389+ movl %esp, %eax
13390 #ifdef CONFIG_VM86
13391 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13392- movl %esp, %eax
13393- jne work_notifysig_v86 # returning to kernel-space or
13394+ jz 1f # returning to kernel-space or
13395 # vm86-space
13396- xorl %edx, %edx
13397- call do_notify_resume
13398- jmp resume_userspace_sig
13399
13400- ALIGN
13401-work_notifysig_v86:
13402 pushl %ecx # save ti_flags for do_notify_resume
13403 CFI_ADJUST_CFA_OFFSET 4
13404 call save_v86_state # %eax contains pt_regs pointer
13405 popl %ecx
13406 CFI_ADJUST_CFA_OFFSET -4
13407 movl %eax, %esp
13408-#else
13409- movl %esp, %eax
13410+1:
13411 #endif
13412 xorl %edx, %edx
13413 call do_notify_resume
13414@@ -673,6 +862,9 @@ syscall_trace_entry:
13415 movl $-ENOSYS,PT_EAX(%esp)
13416 movl %esp, %eax
13417 call syscall_trace_enter
13418+
13419+ pax_erase_kstack
13420+
13421 /* What it returned is what we'll actually use. */
13422 cmpl $(nr_syscalls), %eax
13423 jnae syscall_call
13424@@ -695,6 +887,10 @@ END(syscall_exit_work)
13425
13426 RING0_INT_FRAME # can't unwind into user space anyway
13427 syscall_fault:
13428+#ifdef CONFIG_PAX_MEMORY_UDEREF
13429+ push %ss
13430+ pop %ds
13431+#endif
13432 GET_THREAD_INFO(%ebp)
13433 movl $-EFAULT,PT_EAX(%esp)
13434 jmp resume_userspace
13435@@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13436 PTREGSCALL(vm86)
13437 PTREGSCALL(vm86old)
13438
13439+ ALIGN;
13440+ENTRY(kernel_execve)
13441+ push %ebp
13442+ sub $PT_OLDSS+4,%esp
13443+ push %edi
13444+ push %ecx
13445+ push %eax
13446+ lea 3*4(%esp),%edi
13447+ mov $PT_OLDSS/4+1,%ecx
13448+ xorl %eax,%eax
13449+ rep stosl
13450+ pop %eax
13451+ pop %ecx
13452+ pop %edi
13453+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13454+ mov %eax,PT_EBX(%esp)
13455+ mov %edx,PT_ECX(%esp)
13456+ mov %ecx,PT_EDX(%esp)
13457+ mov %esp,%eax
13458+ call sys_execve
13459+ GET_THREAD_INFO(%ebp)
13460+ test %eax,%eax
13461+ jz syscall_exit
13462+ add $PT_OLDSS+4,%esp
13463+ pop %ebp
13464+ ret
13465+
13466 .macro FIXUP_ESPFIX_STACK
13467 /*
13468 * Switch back for ESPFIX stack to the normal zerobased stack
13469@@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13470 * normal stack and adjusts ESP with the matching offset.
13471 */
13472 /* fixup the stack */
13473- PER_CPU(gdt_page, %ebx)
13474+#ifdef CONFIG_SMP
13475+ movl PER_CPU_VAR(cpu_number), %ebx
13476+ shll $PAGE_SHIFT_asm, %ebx
13477+ addl $cpu_gdt_table, %ebx
13478+#else
13479+ movl $cpu_gdt_table, %ebx
13480+#endif
13481 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13482 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13483 shl $16, %eax
13484@@ -1198,7 +1427,6 @@ return_to_handler:
13485 ret
13486 #endif
13487
13488-.section .rodata,"a"
13489 #include "syscall_table_32.S"
13490
13491 syscall_table_size=(.-sys_call_table)
13492@@ -1255,9 +1483,12 @@ error_code:
13493 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13494 REG_TO_PTGS %ecx
13495 SET_KERNEL_GS %ecx
13496- movl $(__USER_DS), %ecx
13497+ movl $(__KERNEL_DS), %ecx
13498 movl %ecx, %ds
13499 movl %ecx, %es
13500+
13501+ pax_enter_kernel
13502+
13503 TRACE_IRQS_OFF
13504 movl %esp,%eax # pt_regs pointer
13505 call *%edi
13506@@ -1351,6 +1582,9 @@ nmi_stack_correct:
13507 xorl %edx,%edx # zero error code
13508 movl %esp,%eax # pt_regs pointer
13509 call do_nmi
13510+
13511+ pax_exit_kernel
13512+
13513 jmp restore_all_notrace
13514 CFI_ENDPROC
13515
13516@@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13517 FIXUP_ESPFIX_STACK # %eax == %esp
13518 xorl %edx,%edx # zero error code
13519 call do_nmi
13520+
13521+ pax_exit_kernel
13522+
13523 RESTORE_REGS
13524 lss 12+4(%esp), %esp # back to espfix stack
13525 CFI_ADJUST_CFA_OFFSET -24
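
The entry_32.S changes thread the PaX hooks through every kernel entry and exit path: SAVE_ALL loads __KERNEL_DS instead of __USER_DS when KERNEXEC/UDEREF are enabled and calls pax_enter_kernel, the sysenter and syscall exit paths go through pax_randomize_kstack and pax_erase_kstack, and the espfix code indexes cpu_gdt_table directly rather than the gdt_page per-cpu variable. The STACKLEAK routine pax_erase_kstack scans down from the recorded lowest stack point for a run of the -0xBEEF poison value and then refills everything between that point and the current stack pointer with the poison, so data left behind by deep call chains cannot leak into later syscalls. A rough C model of the idea (this stands in for the assembly above, it is not a translation of it):

#include <stdint.h>
#include <stdio.h>

#define STACK_WORDS 1024
#define POISON ((uint64_t)-0xBEEF)   /* the -0xBEEF poison value from the asm */

/* index 0 is the deepest (lowest) address of the pretend kernel stack */
static uint64_t stack[STACK_WORDS];
static size_t lowest_used;           /* plays the role of TI_lowest_stack */

static void erase_kstack(size_t sp)  /* sp: index of the current stack pointer */
{
    /* wipe the region below the live frames that deeper calls dirtied */
    for (size_t i = lowest_used; i < sp; i++)
        stack[i] = POISON;
    lowest_used = STACK_WORDS - 16;  /* reset near the top, like the asm does */
}

int main(void)
{
    for (size_t i = 0; i < STACK_WORDS; i++)
        stack[i] = POISON;           /* a fresh stack starts fully poisoned */

    stack[100] = 0x41414141;         /* pretend a deep syscall left data behind */
    lowest_used = 100;

    erase_kstack(900);               /* on the way back to userspace, wipe it */
    printf("stack[100] = %#llx\n", (unsigned long long)stack[100]);
    return 0;
}
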
13526diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_64.S linux-2.6.32.45/arch/x86/kernel/entry_64.S
13527--- linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13528+++ linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13529@@ -53,6 +53,7 @@
13530 #include <asm/paravirt.h>
13531 #include <asm/ftrace.h>
13532 #include <asm/percpu.h>
13533+#include <asm/pgtable.h>
13534
13535 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13536 #include <linux/elf-em.h>
13537@@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13538 ENDPROC(native_usergs_sysret64)
13539 #endif /* CONFIG_PARAVIRT */
13540
13541+ .macro ljmpq sel, off
13542+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13543+ .byte 0x48; ljmp *1234f(%rip)
13544+ .pushsection .rodata
13545+ .align 16
13546+ 1234: .quad \off; .word \sel
13547+ .popsection
13548+#else
13549+ pushq $\sel
13550+ pushq $\off
13551+ lretq
13552+#endif
13553+ .endm
13554+
13555+ .macro pax_enter_kernel
13556+#ifdef CONFIG_PAX_KERNEXEC
13557+ call pax_enter_kernel
13558+#endif
13559+ .endm
13560+
13561+ .macro pax_exit_kernel
13562+#ifdef CONFIG_PAX_KERNEXEC
13563+ call pax_exit_kernel
13564+#endif
13565+ .endm
13566+
13567+#ifdef CONFIG_PAX_KERNEXEC
13568+ENTRY(pax_enter_kernel)
13569+ pushq %rdi
13570+
13571+#ifdef CONFIG_PARAVIRT
13572+ PV_SAVE_REGS(CLBR_RDI)
13573+#endif
13574+
13575+ GET_CR0_INTO_RDI
13576+ bts $16,%rdi
13577+ jnc 1f
13578+ mov %cs,%edi
13579+ cmp $__KERNEL_CS,%edi
13580+ jz 3f
13581+ ljmpq __KERNEL_CS,3f
13582+1: ljmpq __KERNEXEC_KERNEL_CS,2f
13583+2: SET_RDI_INTO_CR0
13584+3:
13585+
13586+#ifdef CONFIG_PARAVIRT
13587+ PV_RESTORE_REGS(CLBR_RDI)
13588+#endif
13589+
13590+ popq %rdi
13591+ retq
13592+ENDPROC(pax_enter_kernel)
13593+
13594+ENTRY(pax_exit_kernel)
13595+ pushq %rdi
13596+
13597+#ifdef CONFIG_PARAVIRT
13598+ PV_SAVE_REGS(CLBR_RDI)
13599+#endif
13600+
13601+ mov %cs,%rdi
13602+ cmp $__KERNEXEC_KERNEL_CS,%edi
13603+ jnz 2f
13604+ GET_CR0_INTO_RDI
13605+ btr $16,%rdi
13606+ ljmpq __KERNEL_CS,1f
13607+1: SET_RDI_INTO_CR0
13608+2:
13609+
13610+#ifdef CONFIG_PARAVIRT
13611+ PV_RESTORE_REGS(CLBR_RDI);
13612+#endif
13613+
13614+ popq %rdi
13615+ retq
13616+ENDPROC(pax_exit_kernel)
13617+#endif
13618+
13619+ .macro pax_enter_kernel_user
13620+#ifdef CONFIG_PAX_MEMORY_UDEREF
13621+ call pax_enter_kernel_user
13622+#endif
13623+ .endm
13624+
13625+ .macro pax_exit_kernel_user
13626+#ifdef CONFIG_PAX_MEMORY_UDEREF
13627+ call pax_exit_kernel_user
13628+#endif
13629+#ifdef CONFIG_PAX_RANDKSTACK
13630+ push %rax
13631+ call pax_randomize_kstack
13632+ pop %rax
13633+#endif
13634+ pax_erase_kstack
13635+ .endm
13636+
13637+#ifdef CONFIG_PAX_MEMORY_UDEREF
13638+ENTRY(pax_enter_kernel_user)
13639+ pushq %rdi
13640+ pushq %rbx
13641+
13642+#ifdef CONFIG_PARAVIRT
13643+ PV_SAVE_REGS(CLBR_RDI)
13644+#endif
13645+
13646+ GET_CR3_INTO_RDI
13647+ mov %rdi,%rbx
13648+ add $__START_KERNEL_map,%rbx
13649+ sub phys_base(%rip),%rbx
13650+
13651+#ifdef CONFIG_PARAVIRT
13652+ pushq %rdi
13653+ cmpl $0, pv_info+PARAVIRT_enabled
13654+ jz 1f
13655+ i = 0
13656+ .rept USER_PGD_PTRS
13657+ mov i*8(%rbx),%rsi
13658+ mov $0,%sil
13659+ lea i*8(%rbx),%rdi
13660+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13661+ i = i + 1
13662+ .endr
13663+ jmp 2f
13664+1:
13665+#endif
13666+
13667+ i = 0
13668+ .rept USER_PGD_PTRS
13669+ movb $0,i*8(%rbx)
13670+ i = i + 1
13671+ .endr
13672+
13673+#ifdef CONFIG_PARAVIRT
13674+2: popq %rdi
13675+#endif
13676+ SET_RDI_INTO_CR3
13677+
13678+#ifdef CONFIG_PAX_KERNEXEC
13679+ GET_CR0_INTO_RDI
13680+ bts $16,%rdi
13681+ SET_RDI_INTO_CR0
13682+#endif
13683+
13684+#ifdef CONFIG_PARAVIRT
13685+ PV_RESTORE_REGS(CLBR_RDI)
13686+#endif
13687+
13688+ popq %rbx
13689+ popq %rdi
13690+ retq
13691+ENDPROC(pax_enter_kernel_user)
13692+
13693+ENTRY(pax_exit_kernel_user)
13694+ push %rdi
13695+
13696+#ifdef CONFIG_PARAVIRT
13697+ pushq %rbx
13698+ PV_SAVE_REGS(CLBR_RDI)
13699+#endif
13700+
13701+#ifdef CONFIG_PAX_KERNEXEC
13702+ GET_CR0_INTO_RDI
13703+ btr $16,%rdi
13704+ SET_RDI_INTO_CR0
13705+#endif
13706+
13707+ GET_CR3_INTO_RDI
13708+ add $__START_KERNEL_map,%rdi
13709+ sub phys_base(%rip),%rdi
13710+
13711+#ifdef CONFIG_PARAVIRT
13712+ cmpl $0, pv_info+PARAVIRT_enabled
13713+ jz 1f
13714+ mov %rdi,%rbx
13715+ i = 0
13716+ .rept USER_PGD_PTRS
13717+ mov i*8(%rbx),%rsi
13718+ mov $0x67,%sil
13719+ lea i*8(%rbx),%rdi
13720+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13721+ i = i + 1
13722+ .endr
13723+ jmp 2f
13724+1:
13725+#endif
13726+
13727+ i = 0
13728+ .rept USER_PGD_PTRS
13729+ movb $0x67,i*8(%rdi)
13730+ i = i + 1
13731+ .endr
13732+
13733+#ifdef CONFIG_PARAVIRT
13734+2: PV_RESTORE_REGS(CLBR_RDI)
13735+ popq %rbx
13736+#endif
13737+
13738+ popq %rdi
13739+ retq
13740+ENDPROC(pax_exit_kernel_user)
13741+#endif
13742+
13743+.macro pax_erase_kstack
13744+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13745+ call pax_erase_kstack
13746+#endif
13747+.endm
13748+
13749+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13750+/*
13751+ * r10: thread_info
13752+ * rcx, rdx: can be clobbered
13753+ */
13754+ENTRY(pax_erase_kstack)
13755+ pushq %rdi
13756+ pushq %rax
13757+
13758+ GET_THREAD_INFO(%r10)
13759+ mov TI_lowest_stack(%r10), %rdi
13760+ mov $-0xBEEF, %rax
13761+ std
13762+
13763+1: mov %edi, %ecx
13764+ and $THREAD_SIZE_asm - 1, %ecx
13765+ shr $3, %ecx
13766+ repne scasq
13767+ jecxz 2f
13768+
13769+ cmp $2*8, %ecx
13770+ jc 2f
13771+
13772+ mov $2*8, %ecx
13773+ repe scasq
13774+ jecxz 2f
13775+ jne 1b
13776+
13777+2: cld
13778+ mov %esp, %ecx
13779+ sub %edi, %ecx
13780+ shr $3, %ecx
13781+ rep stosq
13782+
13783+ mov TI_task_thread_sp0(%r10), %rdi
13784+ sub $256, %rdi
13785+ mov %rdi, TI_lowest_stack(%r10)
13786+
13787+ popq %rax
13788+ popq %rdi
13789+ ret
13790+ENDPROC(pax_erase_kstack)
13791+#endif
13792
13793 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13794 #ifdef CONFIG_TRACE_IRQFLAGS
13795@@ -317,7 +569,7 @@ ENTRY(save_args)
13796 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13797 movq_cfi rbp, 8 /* push %rbp */
13798 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13799- testl $3, CS(%rdi)
13800+ testb $3, CS(%rdi)
13801 je 1f
13802 SWAPGS
13803 /*
13804@@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13805
13806 RESTORE_REST
13807
13808- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13809+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13810 je int_ret_from_sys_call
13811
13812 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13813@@ -455,7 +707,7 @@ END(ret_from_fork)
13814 ENTRY(system_call)
13815 CFI_STARTPROC simple
13816 CFI_SIGNAL_FRAME
13817- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13818+ CFI_DEF_CFA rsp,0
13819 CFI_REGISTER rip,rcx
13820 /*CFI_REGISTER rflags,r11*/
13821 SWAPGS_UNSAFE_STACK
13822@@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13823
13824 movq %rsp,PER_CPU_VAR(old_rsp)
13825 movq PER_CPU_VAR(kernel_stack),%rsp
13826+ pax_enter_kernel_user
13827 /*
13828 * No need to follow this irqs off/on section - it's straight
13829 * and short:
13830 */
13831 ENABLE_INTERRUPTS(CLBR_NONE)
13832- SAVE_ARGS 8,1
13833+ SAVE_ARGS 8*6,1
13834 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13835 movq %rcx,RIP-ARGOFFSET(%rsp)
13836 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13837@@ -502,6 +755,7 @@ sysret_check:
13838 andl %edi,%edx
13839 jnz sysret_careful
13840 CFI_REMEMBER_STATE
13841+ pax_exit_kernel_user
13842 /*
13843 * sysretq will re-enable interrupts:
13844 */
13845@@ -562,6 +816,9 @@ auditsys:
13846 movq %rax,%rsi /* 2nd arg: syscall number */
13847 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13848 call audit_syscall_entry
13849+
13850+ pax_erase_kstack
13851+
13852 LOAD_ARGS 0 /* reload call-clobbered registers */
13853 jmp system_call_fastpath
13854
13855@@ -592,6 +849,9 @@ tracesys:
13856 FIXUP_TOP_OF_STACK %rdi
13857 movq %rsp,%rdi
13858 call syscall_trace_enter
13859+
13860+ pax_erase_kstack
13861+
13862 /*
13863 * Reload arg registers from stack in case ptrace changed them.
13864 * We don't reload %rax because syscall_trace_enter() returned
13865@@ -613,7 +873,7 @@ tracesys:
13866 GLOBAL(int_ret_from_sys_call)
13867 DISABLE_INTERRUPTS(CLBR_NONE)
13868 TRACE_IRQS_OFF
13869- testl $3,CS-ARGOFFSET(%rsp)
13870+ testb $3,CS-ARGOFFSET(%rsp)
13871 je retint_restore_args
13872 movl $_TIF_ALLWORK_MASK,%edi
13873 /* edi: mask to check */
13874@@ -800,6 +1060,16 @@ END(interrupt)
13875 CFI_ADJUST_CFA_OFFSET 10*8
13876 call save_args
13877 PARTIAL_FRAME 0
13878+#ifdef CONFIG_PAX_MEMORY_UDEREF
13879+ testb $3, CS(%rdi)
13880+ jnz 1f
13881+ pax_enter_kernel
13882+ jmp 2f
13883+1: pax_enter_kernel_user
13884+2:
13885+#else
13886+ pax_enter_kernel
13887+#endif
13888 call \func
13889 .endm
13890
13891@@ -822,7 +1092,7 @@ ret_from_intr:
13892 CFI_ADJUST_CFA_OFFSET -8
13893 exit_intr:
13894 GET_THREAD_INFO(%rcx)
13895- testl $3,CS-ARGOFFSET(%rsp)
13896+ testb $3,CS-ARGOFFSET(%rsp)
13897 je retint_kernel
13898
13899 /* Interrupt came from user space */
13900@@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13901 * The iretq could re-enable interrupts:
13902 */
13903 DISABLE_INTERRUPTS(CLBR_ANY)
13904+ pax_exit_kernel_user
13905 TRACE_IRQS_IRETQ
13906 SWAPGS
13907 jmp restore_args
13908
13909 retint_restore_args: /* return to kernel space */
13910 DISABLE_INTERRUPTS(CLBR_ANY)
13911+ pax_exit_kernel
13912 /*
13913 * The iretq could re-enable interrupts:
13914 */
13915@@ -1032,6 +1304,16 @@ ENTRY(\sym)
13916 CFI_ADJUST_CFA_OFFSET 15*8
13917 call error_entry
13918 DEFAULT_FRAME 0
13919+#ifdef CONFIG_PAX_MEMORY_UDEREF
13920+ testb $3, CS(%rsp)
13921+ jnz 1f
13922+ pax_enter_kernel
13923+ jmp 2f
13924+1: pax_enter_kernel_user
13925+2:
13926+#else
13927+ pax_enter_kernel
13928+#endif
13929 movq %rsp,%rdi /* pt_regs pointer */
13930 xorl %esi,%esi /* no error code */
13931 call \do_sym
13932@@ -1049,6 +1331,16 @@ ENTRY(\sym)
13933 subq $15*8, %rsp
13934 call save_paranoid
13935 TRACE_IRQS_OFF
13936+#ifdef CONFIG_PAX_MEMORY_UDEREF
13937+ testb $3, CS(%rsp)
13938+ jnz 1f
13939+ pax_enter_kernel
13940+ jmp 2f
13941+1: pax_enter_kernel_user
13942+2:
13943+#else
13944+ pax_enter_kernel
13945+#endif
13946 movq %rsp,%rdi /* pt_regs pointer */
13947 xorl %esi,%esi /* no error code */
13948 call \do_sym
13949@@ -1066,9 +1358,24 @@ ENTRY(\sym)
13950 subq $15*8, %rsp
13951 call save_paranoid
13952 TRACE_IRQS_OFF
13953+#ifdef CONFIG_PAX_MEMORY_UDEREF
13954+ testb $3, CS(%rsp)
13955+ jnz 1f
13956+ pax_enter_kernel
13957+ jmp 2f
13958+1: pax_enter_kernel_user
13959+2:
13960+#else
13961+ pax_enter_kernel
13962+#endif
13963 movq %rsp,%rdi /* pt_regs pointer */
13964 xorl %esi,%esi /* no error code */
13965- PER_CPU(init_tss, %rbp)
13966+#ifdef CONFIG_SMP
13967+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
13968+ lea init_tss(%rbp), %rbp
13969+#else
13970+ lea init_tss(%rip), %rbp
13971+#endif
13972 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13973 call \do_sym
13974 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13975@@ -1085,6 +1392,16 @@ ENTRY(\sym)
13976 CFI_ADJUST_CFA_OFFSET 15*8
13977 call error_entry
13978 DEFAULT_FRAME 0
13979+#ifdef CONFIG_PAX_MEMORY_UDEREF
13980+ testb $3, CS(%rsp)
13981+ jnz 1f
13982+ pax_enter_kernel
13983+ jmp 2f
13984+1: pax_enter_kernel_user
13985+2:
13986+#else
13987+ pax_enter_kernel
13988+#endif
13989 movq %rsp,%rdi /* pt_regs pointer */
13990 movq ORIG_RAX(%rsp),%rsi /* get error code */
13991 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13992@@ -1104,6 +1421,16 @@ ENTRY(\sym)
13993 call save_paranoid
13994 DEFAULT_FRAME 0
13995 TRACE_IRQS_OFF
13996+#ifdef CONFIG_PAX_MEMORY_UDEREF
13997+ testb $3, CS(%rsp)
13998+ jnz 1f
13999+ pax_enter_kernel
14000+ jmp 2f
14001+1: pax_enter_kernel_user
14002+2:
14003+#else
14004+ pax_enter_kernel
14005+#endif
14006 movq %rsp,%rdi /* pt_regs pointer */
14007 movq ORIG_RAX(%rsp),%rsi /* get error code */
14008 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14009@@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
14010 TRACE_IRQS_OFF
14011 testl %ebx,%ebx /* swapgs needed? */
14012 jnz paranoid_restore
14013- testl $3,CS(%rsp)
14014+ testb $3,CS(%rsp)
14015 jnz paranoid_userspace
14016+#ifdef CONFIG_PAX_MEMORY_UDEREF
14017+ pax_exit_kernel
14018+ TRACE_IRQS_IRETQ 0
14019+ SWAPGS_UNSAFE_STACK
14020+ RESTORE_ALL 8
14021+ jmp irq_return
14022+#endif
14023 paranoid_swapgs:
14024+#ifdef CONFIG_PAX_MEMORY_UDEREF
14025+ pax_exit_kernel_user
14026+#else
14027+ pax_exit_kernel
14028+#endif
14029 TRACE_IRQS_IRETQ 0
14030 SWAPGS_UNSAFE_STACK
14031 RESTORE_ALL 8
14032 jmp irq_return
14033 paranoid_restore:
14034+ pax_exit_kernel
14035 TRACE_IRQS_IRETQ 0
14036 RESTORE_ALL 8
14037 jmp irq_return
14038@@ -1470,7 +1810,7 @@ ENTRY(error_entry)
14039 movq_cfi r14, R14+8
14040 movq_cfi r15, R15+8
14041 xorl %ebx,%ebx
14042- testl $3,CS+8(%rsp)
14043+ testb $3,CS+8(%rsp)
14044 je error_kernelspace
14045 error_swapgs:
14046 SWAPGS
14047@@ -1529,6 +1869,16 @@ ENTRY(nmi)
14048 CFI_ADJUST_CFA_OFFSET 15*8
14049 call save_paranoid
14050 DEFAULT_FRAME 0
14051+#ifdef CONFIG_PAX_MEMORY_UDEREF
14052+ testb $3, CS(%rsp)
14053+ jnz 1f
14054+ pax_enter_kernel
14055+ jmp 2f
14056+1: pax_enter_kernel_user
14057+2:
14058+#else
14059+ pax_enter_kernel
14060+#endif
14061 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14062 movq %rsp,%rdi
14063 movq $-1,%rsi
14064@@ -1539,11 +1889,25 @@ ENTRY(nmi)
14065 DISABLE_INTERRUPTS(CLBR_NONE)
14066 testl %ebx,%ebx /* swapgs needed? */
14067 jnz nmi_restore
14068- testl $3,CS(%rsp)
14069+ testb $3,CS(%rsp)
14070 jnz nmi_userspace
14071+#ifdef CONFIG_PAX_MEMORY_UDEREF
14072+ pax_exit_kernel
14073+ SWAPGS_UNSAFE_STACK
14074+ RESTORE_ALL 8
14075+ jmp irq_return
14076+#endif
14077 nmi_swapgs:
14078+#ifdef CONFIG_PAX_MEMORY_UDEREF
14079+ pax_exit_kernel_user
14080+#else
14081+ pax_exit_kernel
14082+#endif
14083 SWAPGS_UNSAFE_STACK
14084+ RESTORE_ALL 8
14085+ jmp irq_return
14086 nmi_restore:
14087+ pax_exit_kernel
14088 RESTORE_ALL 8
14089 jmp irq_return
14090 nmi_userspace:
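
entry_64.S gains the 64-bit counterparts of the same hooks, plus pax_enter_kernel_user/pax_exit_kernel_user for UDEREF: on entry from userspace the low byte of each of the USER_PGD_PTRS page-global-directory entries is zeroed, clearing the Present bit so user mappings disappear while the kernel runs, and on the way back out each entry gets 0x67 written back (Present | RW | User | Accessed | Dirty), matching the movb $0 and movb $0x67 instructions above. A hedged standalone model of that toggle (plain arrays, little-endian byte order assumed, sizes invented):

#include <stdint.h>
#include <stdio.h>

#define USER_PGD_PTRS 256   /* assumption: lower quarter of a 1024-entry table */

static uint64_t pgd[1024];

static void enter_kernel_user(void)
{
    for (int i = 0; i < USER_PGD_PTRS; i++)
        ((uint8_t *)&pgd[i])[0] = 0x00;  /* clear Present: user mappings vanish */
}

static void exit_kernel_user(void)
{
    for (int i = 0; i < USER_PGD_PTRS; i++)
        ((uint8_t *)&pgd[i])[0] = 0x67;  /* Present|RW|User|Accessed|Dirty */
}

int main(void)
{
    pgd[0] = 0x123456067ULL;             /* pretend a live user mapping */
    enter_kernel_user();
    printf("in kernel:  %#llx\n", (unsigned long long)pgd[0]);
    exit_kernel_user();
    printf("back out:   %#llx\n", (unsigned long long)pgd[0]);
    return 0;
}

An accidental kernel dereference of a user pointer therefore faults while the entries are disabled, which is the point of UDEREF.
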
14091diff -urNp linux-2.6.32.45/arch/x86/kernel/ftrace.c linux-2.6.32.45/arch/x86/kernel/ftrace.c
14092--- linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
14093+++ linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
14094@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
14095 static void *mod_code_newcode; /* holds the text to write to the IP */
14096
14097 static unsigned nmi_wait_count;
14098-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14099+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14100
14101 int ftrace_arch_read_dyn_info(char *buf, int size)
14102 {
14103@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
14104
14105 r = snprintf(buf, size, "%u %u",
14106 nmi_wait_count,
14107- atomic_read(&nmi_update_count));
14108+ atomic_read_unchecked(&nmi_update_count));
14109 return r;
14110 }
14111
14112@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
14113 {
14114 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14115 smp_rmb();
14116+ pax_open_kernel();
14117 ftrace_mod_code();
14118- atomic_inc(&nmi_update_count);
14119+ pax_close_kernel();
14120+ atomic_inc_unchecked(&nmi_update_count);
14121 }
14122 /* Must have previous changes seen before executions */
14123 smp_mb();
14124@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
14125
14126
14127
14128-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
14129+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
14130
14131 static unsigned char *ftrace_nop_replace(void)
14132 {
14133@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
14134 {
14135 unsigned char replaced[MCOUNT_INSN_SIZE];
14136
14137+ ip = ktla_ktva(ip);
14138+
14139 /*
14140 * Note: Due to modules and __init, code can
14141 * disappear and change, we need to protect against faulting
14142@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
14143 unsigned char old[MCOUNT_INSN_SIZE], *new;
14144 int ret;
14145
14146- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14147+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14148 new = ftrace_call_replace(ip, (unsigned long)func);
14149 ret = ftrace_modify_code(ip, old, new);
14150
14151@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
14152 switch (faulted) {
14153 case 0:
14154 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
14155- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
14156+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14157 break;
14158 case 1:
14159 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14160- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14161+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14162 break;
14163 case 2:
14164 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14165- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14166+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14167 break;
14168 }
14169
14170@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14171 {
14172 unsigned char code[MCOUNT_INSN_SIZE];
14173
14174+ ip = ktla_ktva(ip);
14175+
14176 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14177 return -EFAULT;
14178
14179diff -urNp linux-2.6.32.45/arch/x86/kernel/head32.c linux-2.6.32.45/arch/x86/kernel/head32.c
14180--- linux-2.6.32.45/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14181+++ linux-2.6.32.45/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14182@@ -16,6 +16,7 @@
14183 #include <asm/apic.h>
14184 #include <asm/io_apic.h>
14185 #include <asm/bios_ebda.h>
14186+#include <asm/boot.h>
14187
14188 static void __init i386_default_early_setup(void)
14189 {
14190@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14191 {
14192 reserve_trampoline_memory();
14193
14194- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14195+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14196
14197 #ifdef CONFIG_BLK_DEV_INITRD
14198 /* Reserve INITRD */
14199diff -urNp linux-2.6.32.45/arch/x86/kernel/head_32.S linux-2.6.32.45/arch/x86/kernel/head_32.S
14200--- linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14201+++ linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14202@@ -19,10 +19,17 @@
14203 #include <asm/setup.h>
14204 #include <asm/processor-flags.h>
14205 #include <asm/percpu.h>
14206+#include <asm/msr-index.h>
14207
14208 /* Physical address */
14209 #define pa(X) ((X) - __PAGE_OFFSET)
14210
14211+#ifdef CONFIG_PAX_KERNEXEC
14212+#define ta(X) (X)
14213+#else
14214+#define ta(X) ((X) - __PAGE_OFFSET)
14215+#endif
14216+
14217 /*
14218 * References to members of the new_cpu_data structure.
14219 */
14220@@ -52,11 +59,7 @@
14221 * and small than max_low_pfn, otherwise will waste some page table entries
14222 */
14223
14224-#if PTRS_PER_PMD > 1
14225-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14226-#else
14227-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14228-#endif
14229+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14230
14231 /* Enough space to fit pagetables for the low memory linear map */
14232 MAPPING_BEYOND_END = \
14233@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14234 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14235
14236 /*
14237+ * Real beginning of normal "text" segment
14238+ */
14239+ENTRY(stext)
14240+ENTRY(_stext)
14241+
14242+/*
14243 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14244 * %esi points to the real-mode code as a 32-bit pointer.
14245 * CS and DS must be 4 GB flat segments, but we don't depend on
14246@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14247 * can.
14248 */
14249 __HEAD
14250+
14251+#ifdef CONFIG_PAX_KERNEXEC
14252+ jmp startup_32
14253+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14254+.fill PAGE_SIZE-5,1,0xcc
14255+#endif
14256+
14257 ENTRY(startup_32)
14258+ movl pa(stack_start),%ecx
14259+
14260 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14261 us to not reload segments */
14262 testb $(1<<6), BP_loadflags(%esi)
14263@@ -95,7 +113,60 @@ ENTRY(startup_32)
14264 movl %eax,%es
14265 movl %eax,%fs
14266 movl %eax,%gs
14267+ movl %eax,%ss
14268 2:
14269+ leal -__PAGE_OFFSET(%ecx),%esp
14270+
14271+#ifdef CONFIG_SMP
14272+ movl $pa(cpu_gdt_table),%edi
14273+ movl $__per_cpu_load,%eax
14274+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14275+ rorl $16,%eax
14276+ movb %al,__KERNEL_PERCPU + 4(%edi)
14277+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14278+ movl $__per_cpu_end - 1,%eax
14279+ subl $__per_cpu_start,%eax
14280+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14281+#endif
14282+
14283+#ifdef CONFIG_PAX_MEMORY_UDEREF
14284+ movl $NR_CPUS,%ecx
14285+ movl $pa(cpu_gdt_table),%edi
14286+1:
14287+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14288+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14289+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14290+ addl $PAGE_SIZE_asm,%edi
14291+ loop 1b
14292+#endif
14293+
14294+#ifdef CONFIG_PAX_KERNEXEC
14295+ movl $pa(boot_gdt),%edi
14296+ movl $__LOAD_PHYSICAL_ADDR,%eax
14297+ movw %ax,__BOOT_CS + 2(%edi)
14298+ rorl $16,%eax
14299+ movb %al,__BOOT_CS + 4(%edi)
14300+ movb %ah,__BOOT_CS + 7(%edi)
14301+ rorl $16,%eax
14302+
14303+ ljmp $(__BOOT_CS),$1f
14304+1:
14305+
14306+ movl $NR_CPUS,%ecx
14307+ movl $pa(cpu_gdt_table),%edi
14308+ addl $__PAGE_OFFSET,%eax
14309+1:
14310+ movw %ax,__KERNEL_CS + 2(%edi)
14311+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14312+ rorl $16,%eax
14313+ movb %al,__KERNEL_CS + 4(%edi)
14314+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14315+ movb %ah,__KERNEL_CS + 7(%edi)
14316+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14317+ rorl $16,%eax
14318+ addl $PAGE_SIZE_asm,%edi
14319+ loop 1b
14320+#endif
14321
14322 /*
14323 * Clear BSS first so that there are no surprises...
14324@@ -140,9 +211,7 @@ ENTRY(startup_32)
14325 cmpl $num_subarch_entries, %eax
14326 jae bad_subarch
14327
14328- movl pa(subarch_entries)(,%eax,4), %eax
14329- subl $__PAGE_OFFSET, %eax
14330- jmp *%eax
14331+ jmp *pa(subarch_entries)(,%eax,4)
14332
14333 bad_subarch:
14334 WEAK(lguest_entry)
14335@@ -154,10 +223,10 @@ WEAK(xen_entry)
14336 __INITDATA
14337
14338 subarch_entries:
14339- .long default_entry /* normal x86/PC */
14340- .long lguest_entry /* lguest hypervisor */
14341- .long xen_entry /* Xen hypervisor */
14342- .long default_entry /* Moorestown MID */
14343+ .long ta(default_entry) /* normal x86/PC */
14344+ .long ta(lguest_entry) /* lguest hypervisor */
14345+ .long ta(xen_entry) /* Xen hypervisor */
14346+ .long ta(default_entry) /* Moorestown MID */
14347 num_subarch_entries = (. - subarch_entries) / 4
14348 .previous
14349 #endif /* CONFIG_PARAVIRT */
14350@@ -218,8 +287,11 @@ default_entry:
14351 movl %eax, pa(max_pfn_mapped)
14352
14353 /* Do early initialization of the fixmap area */
14354- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14355- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14356+#ifdef CONFIG_COMPAT_VDSO
14357+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14358+#else
14359+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14360+#endif
14361 #else /* Not PAE */
14362
14363 page_pde_offset = (__PAGE_OFFSET >> 20);
14364@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14365 movl %eax, pa(max_pfn_mapped)
14366
14367 /* Do early initialization of the fixmap area */
14368- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14369- movl %eax,pa(swapper_pg_dir+0xffc)
14370+#ifdef CONFIG_COMPAT_VDSO
14371+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14372+#else
14373+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14374+#endif
14375 #endif
14376 jmp 3f
14377 /*
14378@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14379 movl %eax,%es
14380 movl %eax,%fs
14381 movl %eax,%gs
14382+ movl pa(stack_start),%ecx
14383+ movl %eax,%ss
14384+ leal -__PAGE_OFFSET(%ecx),%esp
14385 #endif /* CONFIG_SMP */
14386 3:
14387
14388@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14389 orl %edx,%eax
14390 movl %eax,%cr4
14391
14392+#ifdef CONFIG_X86_PAE
14393 btl $5, %eax # check if PAE is enabled
14394 jnc 6f
14395
14396@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14397 cpuid
14398 cmpl $0x80000000, %eax
14399 jbe 6f
14400+
14401+ /* Clear bogus XD_DISABLE bits */
14402+ call verify_cpu
14403+
14404 mov $0x80000001, %eax
14405 cpuid
14406 /* Execute Disable bit supported? */
14407@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14408 jnc 6f
14409
14410 /* Setup EFER (Extended Feature Enable Register) */
14411- movl $0xc0000080, %ecx
14412+ movl $MSR_EFER, %ecx
14413 rdmsr
14414
14415 btsl $11, %eax
14416 /* Make changes effective */
14417 wrmsr
14418
14419+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14420+ movl $1,pa(nx_enabled)
14421+#endif
14422+
14423 6:
14424
14425 /*
14426@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14427 movl %eax,%cr0 /* ..and set paging (PG) bit */
14428 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14429 1:
14430- /* Set up the stack pointer */
14431- lss stack_start,%esp
14432+ /* Shift the stack pointer to a virtual address */
14433+ addl $__PAGE_OFFSET, %esp
14434
14435 /*
14436 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14437@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14438
14439 #ifdef CONFIG_SMP
14440 cmpb $0, ready
14441- jz 1f /* Initial CPU cleans BSS */
14442- jmp checkCPUtype
14443-1:
14444+ jnz checkCPUtype
14445 #endif /* CONFIG_SMP */
14446
14447 /*
14448@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14449 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14450 movl %eax,%ss # after changing gdt.
14451
14452- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14453+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14454 movl %eax,%ds
14455 movl %eax,%es
14456
14457@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14458 */
14459 cmpb $0,ready
14460 jne 1f
14461- movl $per_cpu__gdt_page,%eax
14462+ movl $cpu_gdt_table,%eax
14463 movl $per_cpu__stack_canary,%ecx
14464+#ifdef CONFIG_SMP
14465+ addl $__per_cpu_load,%ecx
14466+#endif
14467 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14468 shrl $16, %ecx
14469 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14470 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14471 1:
14472-#endif
14473 movl $(__KERNEL_STACK_CANARY),%eax
14474+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14475+ movl $(__USER_DS),%eax
14476+#else
14477+ xorl %eax,%eax
14478+#endif
14479 movl %eax,%gs
14480
14481 xorl %eax,%eax # Clear LDT
14482@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14483
14484 cld # gcc2 wants the direction flag cleared at all times
14485 pushl $0 # fake return address for unwinder
14486-#ifdef CONFIG_SMP
14487- movb ready, %cl
14488 movb $1, ready
14489- cmpb $0,%cl # the first CPU calls start_kernel
14490- je 1f
14491- movl (stack_start), %esp
14492-1:
14493-#endif /* CONFIG_SMP */
14494 jmp *(initial_code)
14495
14496 /*
14497@@ -546,22 +631,22 @@ early_page_fault:
14498 jmp early_fault
14499
14500 early_fault:
14501- cld
14502 #ifdef CONFIG_PRINTK
14503+ cmpl $1,%ss:early_recursion_flag
14504+ je hlt_loop
14505+ incl %ss:early_recursion_flag
14506+ cld
14507 pusha
14508 movl $(__KERNEL_DS),%eax
14509 movl %eax,%ds
14510 movl %eax,%es
14511- cmpl $2,early_recursion_flag
14512- je hlt_loop
14513- incl early_recursion_flag
14514 movl %cr2,%eax
14515 pushl %eax
14516 pushl %edx /* trapno */
14517 pushl $fault_msg
14518 call printk
14519+; call dump_stack
14520 #endif
14521- call dump_stack
14522 hlt_loop:
14523 hlt
14524 jmp hlt_loop
14525@@ -569,8 +654,11 @@ hlt_loop:
14526 /* This is the default interrupt "handler" :-) */
14527 ALIGN
14528 ignore_int:
14529- cld
14530 #ifdef CONFIG_PRINTK
14531+ cmpl $2,%ss:early_recursion_flag
14532+ je hlt_loop
14533+ incl %ss:early_recursion_flag
14534+ cld
14535 pushl %eax
14536 pushl %ecx
14537 pushl %edx
14538@@ -579,9 +667,6 @@ ignore_int:
14539 movl $(__KERNEL_DS),%eax
14540 movl %eax,%ds
14541 movl %eax,%es
14542- cmpl $2,early_recursion_flag
14543- je hlt_loop
14544- incl early_recursion_flag
14545 pushl 16(%esp)
14546 pushl 24(%esp)
14547 pushl 32(%esp)
14548@@ -600,6 +685,8 @@ ignore_int:
14549 #endif
14550 iret
14551
14552+#include "verify_cpu.S"
14553+
14554 __REFDATA
14555 .align 4
14556 ENTRY(initial_code)
14557@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14558 /*
14559 * BSS section
14560 */
14561-__PAGE_ALIGNED_BSS
14562- .align PAGE_SIZE_asm
14563 #ifdef CONFIG_X86_PAE
14564+.section .swapper_pg_pmd,"a",@progbits
14565 swapper_pg_pmd:
14566 .fill 1024*KPMDS,4,0
14567 #else
14568+.section .swapper_pg_dir,"a",@progbits
14569 ENTRY(swapper_pg_dir)
14570 .fill 1024,4,0
14571 #endif
14572+.section .swapper_pg_fixmap,"a",@progbits
14573 swapper_pg_fixmap:
14574 .fill 1024,4,0
14575 #ifdef CONFIG_X86_TRAMPOLINE
14576+.section .trampoline_pg_dir,"a",@progbits
14577 ENTRY(trampoline_pg_dir)
14578+#ifdef CONFIG_X86_PAE
14579+ .fill 4,8,0
14580+#else
14581 .fill 1024,4,0
14582 #endif
14583+#endif
14584+
14585+.section .empty_zero_page,"a",@progbits
14586 ENTRY(empty_zero_page)
14587 .fill 4096,1,0
14588
14589 /*
14590+ * The IDT has to be page-aligned to simplify the Pentium
14591+ * F0 0F bug workaround.. We have a special link segment
14592+ * for this.
14593+ */
14594+.section .idt,"a",@progbits
14595+ENTRY(idt_table)
14596+ .fill 256,8,0
14597+
14598+/*
14599 * This starts the data section.
14600 */
14601 #ifdef CONFIG_X86_PAE
14602-__PAGE_ALIGNED_DATA
14603- /* Page-aligned for the benefit of paravirt? */
14604- .align PAGE_SIZE_asm
14605+.section .swapper_pg_dir,"a",@progbits
14606+
14607 ENTRY(swapper_pg_dir)
14608 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14609 # if KPMDS == 3
14610@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14611 # error "Kernel PMDs should be 1, 2 or 3"
14612 # endif
14613 .align PAGE_SIZE_asm /* needs to be page-sized too */
14614+
14615+#ifdef CONFIG_PAX_PER_CPU_PGD
14616+ENTRY(cpu_pgd)
14617+ .rept NR_CPUS
14618+ .fill 4,8,0
14619+ .endr
14620+#endif
14621+
14622 #endif
14623
14624 .data
14625+.balign 4
14626 ENTRY(stack_start)
14627- .long init_thread_union+THREAD_SIZE
14628- .long __BOOT_DS
14629+ .long init_thread_union+THREAD_SIZE-8
14630
14631 ready: .byte 0
14632
14633+.section .rodata,"a",@progbits
14634 early_recursion_flag:
14635 .long 0
14636
14637@@ -697,7 +809,7 @@ fault_msg:
14638 .word 0 # 32 bit align gdt_desc.address
14639 boot_gdt_descr:
14640 .word __BOOT_DS+7
14641- .long boot_gdt - __PAGE_OFFSET
14642+ .long pa(boot_gdt)
14643
14644 .word 0 # 32-bit align idt_desc.address
14645 idt_descr:
14646@@ -708,7 +820,7 @@ idt_descr:
14647 .word 0 # 32 bit align gdt_desc.address
14648 ENTRY(early_gdt_descr)
14649 .word GDT_ENTRIES*8-1
14650- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14651+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14652
14653 /*
14654 * The boot_gdt must mirror the equivalent in setup.S and is
14655@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14656 .align L1_CACHE_BYTES
14657 ENTRY(boot_gdt)
14658 .fill GDT_ENTRY_BOOT_CS,8,0
14659- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14660- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14661+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14662+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14663+
14664+ .align PAGE_SIZE_asm
14665+ENTRY(cpu_gdt_table)
14666+ .rept NR_CPUS
14667+ .quad 0x0000000000000000 /* NULL descriptor */
14668+ .quad 0x0000000000000000 /* 0x0b reserved */
14669+ .quad 0x0000000000000000 /* 0x13 reserved */
14670+ .quad 0x0000000000000000 /* 0x1b reserved */
14671+
14672+#ifdef CONFIG_PAX_KERNEXEC
14673+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14674+#else
14675+ .quad 0x0000000000000000 /* 0x20 unused */
14676+#endif
14677+
14678+ .quad 0x0000000000000000 /* 0x28 unused */
14679+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14680+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14681+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14682+ .quad 0x0000000000000000 /* 0x4b reserved */
14683+ .quad 0x0000000000000000 /* 0x53 reserved */
14684+ .quad 0x0000000000000000 /* 0x5b reserved */
14685+
14686+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14687+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14688+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14689+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14690+
14691+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14692+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14693+
14694+ /*
14695+ * Segments used for calling PnP BIOS have byte granularity.
14696+ * The code segments and data segments have fixed 64k limits,
14697+ * the transfer segment sizes are set at run time.
14698+ */
14699+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14700+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14701+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14702+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14703+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14704+
14705+ /*
14706+ * The APM segments have byte granularity and their bases
14707+ * are set at run time. All have 64k limits.
14708+ */
14709+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14710+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14711+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14712+
14713+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14714+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14715+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14716+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14717+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14718+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14719+
14720+ /* Be sure this is zeroed to avoid false validations in Xen */
14721+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14722+ .endr
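Note on the head_32.S hunks above: the code juggles two views of the same addresses. pa(X) subtracts __PAGE_OFFSET to get a physical address usable before paging is enabled, while the added ta(X) macro leaves the address untouched under CONFIG_PAX_KERNEXEC (where the shifted code segment already compensates) and otherwise falls back to the same subtraction. A minimal user-space C sketch of that arithmetic follows; the __PAGE_OFFSET value and the sample text address are assumptions for illustration only.

#include <stdio.h>

#define PAGE_OFFSET 0xC0000000UL   /* assumed 3G/1G split; real kernels may configure this differently */

/* virtual -> physical, the same subtraction pa(X) performs before paging is up */
static unsigned long pa(unsigned long vaddr)
{
	return vaddr - PAGE_OFFSET;
}

int main(void)
{
	unsigned long text = 0xC1000000UL;  /* hypothetical kernel text address */
	printf("virt %#lx -> phys %#lx\n", text, pa(text));
	return 0;
}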
14723diff -urNp linux-2.6.32.45/arch/x86/kernel/head_64.S linux-2.6.32.45/arch/x86/kernel/head_64.S
14724--- linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14725+++ linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14726@@ -19,6 +19,7 @@
14727 #include <asm/cache.h>
14728 #include <asm/processor-flags.h>
14729 #include <asm/percpu.h>
14730+#include <asm/cpufeature.h>
14731
14732 #ifdef CONFIG_PARAVIRT
14733 #include <asm/asm-offsets.h>
14734@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14735 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14736 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14737 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14738+L4_VMALLOC_START = pgd_index(VMALLOC_START)
14739+L3_VMALLOC_START = pud_index(VMALLOC_START)
14740+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14741+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14742
14743 .text
14744 __HEAD
14745@@ -85,35 +90,22 @@ startup_64:
14746 */
14747 addq %rbp, init_level4_pgt + 0(%rip)
14748 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14749+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14750+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14751 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14752
14753 addq %rbp, level3_ident_pgt + 0(%rip)
14754+#ifndef CONFIG_XEN
14755+ addq %rbp, level3_ident_pgt + 8(%rip)
14756+#endif
14757
14758- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14759- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14760+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14761
14762- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14763+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14764+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14765
14766- /* Add an Identity mapping if I am above 1G */
14767- leaq _text(%rip), %rdi
14768- andq $PMD_PAGE_MASK, %rdi
14769-
14770- movq %rdi, %rax
14771- shrq $PUD_SHIFT, %rax
14772- andq $(PTRS_PER_PUD - 1), %rax
14773- jz ident_complete
14774-
14775- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14776- leaq level3_ident_pgt(%rip), %rbx
14777- movq %rdx, 0(%rbx, %rax, 8)
14778-
14779- movq %rdi, %rax
14780- shrq $PMD_SHIFT, %rax
14781- andq $(PTRS_PER_PMD - 1), %rax
14782- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14783- leaq level2_spare_pgt(%rip), %rbx
14784- movq %rdx, 0(%rbx, %rax, 8)
14785-ident_complete:
14786+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14787+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14788
14789 /*
14790 * Fixup the kernel text+data virtual addresses. Note that
14791@@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14792 * after the boot processor executes this code.
14793 */
14794
14795- /* Enable PAE mode and PGE */
14796- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14797+ /* Enable PAE mode and PSE/PGE */
14798+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14799 movq %rax, %cr4
14800
14801 /* Setup early boot stage 4 level pagetables. */
14802@@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14803 movl $MSR_EFER, %ecx
14804 rdmsr
14805 btsl $_EFER_SCE, %eax /* Enable System Call */
14806- btl $20,%edi /* No Execute supported? */
14807+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14808 jnc 1f
14809 btsl $_EFER_NX, %eax
14810+ leaq init_level4_pgt(%rip), %rdi
14811+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14812+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14813+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14814 1: wrmsr /* Make changes effective */
14815
14816 /* Setup cr0 */
14817@@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14818 .quad x86_64_start_kernel
14819 ENTRY(initial_gs)
14820 .quad INIT_PER_CPU_VAR(irq_stack_union)
14821- __FINITDATA
14822
14823 ENTRY(stack_start)
14824 .quad init_thread_union+THREAD_SIZE-8
14825 .word 0
14826+ __FINITDATA
14827
14828 bad_address:
14829 jmp bad_address
14830
14831- .section ".init.text","ax"
14832+ __INIT
14833 #ifdef CONFIG_EARLY_PRINTK
14834 .globl early_idt_handlers
14835 early_idt_handlers:
14836@@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14837 #endif /* EARLY_PRINTK */
14838 1: hlt
14839 jmp 1b
14840+ .previous
14841
14842 #ifdef CONFIG_EARLY_PRINTK
14843+ __INITDATA
14844 early_recursion_flag:
14845 .long 0
14846+ .previous
14847
14848+ .section .rodata,"a",@progbits
14849 early_idt_msg:
14850 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14851 early_idt_ripmsg:
14852 .asciz "RIP %s\n"
14853-#endif /* CONFIG_EARLY_PRINTK */
14854 .previous
14855+#endif /* CONFIG_EARLY_PRINTK */
14856
14857+ .section .rodata,"a",@progbits
14858 #define NEXT_PAGE(name) \
14859 .balign PAGE_SIZE; \
14860 ENTRY(name)
14861@@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14862 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14863 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14864 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14865+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
14866+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14867+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14868+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14869 .org init_level4_pgt + L4_START_KERNEL*8, 0
14870 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14871 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14872
14873+#ifdef CONFIG_PAX_PER_CPU_PGD
14874+NEXT_PAGE(cpu_pgd)
14875+ .rept NR_CPUS
14876+ .fill 512,8,0
14877+ .endr
14878+#endif
14879+
14880 NEXT_PAGE(level3_ident_pgt)
14881 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14882+#ifdef CONFIG_XEN
14883 .fill 511,8,0
14884+#else
14885+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14886+ .fill 510,8,0
14887+#endif
14888+
14889+NEXT_PAGE(level3_vmalloc_pgt)
14890+ .fill 512,8,0
14891+
14892+NEXT_PAGE(level3_vmemmap_pgt)
14893+ .fill L3_VMEMMAP_START,8,0
14894+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14895
14896 NEXT_PAGE(level3_kernel_pgt)
14897 .fill L3_START_KERNEL,8,0
14898@@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14899 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14900 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14901
14902+NEXT_PAGE(level2_vmemmap_pgt)
14903+ .fill 512,8,0
14904+
14905 NEXT_PAGE(level2_fixmap_pgt)
14906- .fill 506,8,0
14907- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14908- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14909- .fill 5,8,0
14910+ .fill 507,8,0
14911+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14912+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14913+ .fill 4,8,0
14914
14915-NEXT_PAGE(level1_fixmap_pgt)
14916+NEXT_PAGE(level1_vsyscall_pgt)
14917 .fill 512,8,0
14918
14919-NEXT_PAGE(level2_ident_pgt)
14920- /* Since I easily can, map the first 1G.
14921+ /* Since I easily can, map the first 2G.
14922 * Don't set NX because code runs from these pages.
14923 */
14924- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14925+NEXT_PAGE(level2_ident_pgt)
14926+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14927
14928 NEXT_PAGE(level2_kernel_pgt)
14929 /*
14930@@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14931 * If you want to increase this then increase MODULES_VADDR
14932 * too.)
14933 */
14934- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14935- KERNEL_IMAGE_SIZE/PMD_SIZE)
14936-
14937-NEXT_PAGE(level2_spare_pgt)
14938- .fill 512, 8, 0
14939+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14940
14941 #undef PMDS
14942 #undef NEXT_PAGE
14943
14944- .data
14945+ .align PAGE_SIZE
14946+ENTRY(cpu_gdt_table)
14947+ .rept NR_CPUS
14948+ .quad 0x0000000000000000 /* NULL descriptor */
14949+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14950+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
14951+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
14952+ .quad 0x00cffb000000ffff /* __USER32_CS */
14953+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14954+ .quad 0x00affb000000ffff /* __USER_CS */
14955+
14956+#ifdef CONFIG_PAX_KERNEXEC
14957+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14958+#else
14959+ .quad 0x0 /* unused */
14960+#endif
14961+
14962+ .quad 0,0 /* TSS */
14963+ .quad 0,0 /* LDT */
14964+ .quad 0,0,0 /* three TLS descriptors */
14965+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
14966+ /* asm/segment.h:GDT_ENTRIES must match this */
14967+
14968+ /* zero the remaining page */
14969+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14970+ .endr
14971+
14972 .align 16
14973 .globl early_gdt_descr
14974 early_gdt_descr:
14975 .word GDT_ENTRIES*8-1
14976 early_gdt_descr_base:
14977- .quad INIT_PER_CPU_VAR(gdt_page)
14978+ .quad cpu_gdt_table
14979
14980 ENTRY(phys_base)
14981 /* This must match the first entry in level2_kernel_pgt */
14982 .quad 0x0000000000000000
14983
14984 #include "../../x86/xen/xen-head.S"
14985-
14986- .section .bss, "aw", @nobits
14987+
14988+ .section .rodata,"a",@progbits
14989 .align L1_CACHE_BYTES
14990 ENTRY(idt_table)
14991- .skip IDT_ENTRIES * 16
14992+ .fill 512,8,0
14993
14994 __PAGE_ALIGNED_BSS
14995 .align PAGE_SIZE
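Note on the head_64.S hunks above: once EFER.NX is confirmed, the patch sets the no-execute bit on the direct-map, vmalloc and vmemmap top-level entries (the btsq $_PAGE_BIT_NX lines). The sketch below shows the same bit manipulation on a 64-bit page-table entry value in plain C; bit 63 is the architectural NX bit, but the helper name and the sample entry value are illustrative, not kernel API.

#include <stdint.h>
#include <stdio.h>

#define PAGE_BIT_NX 63  /* architectural no-execute bit in a 64-bit paging entry */

static uint64_t set_nx(uint64_t entry)
{
	return entry | (1ULL << PAGE_BIT_NX);
}

int main(void)
{
	uint64_t pgd_entry = 0x1000003ULL;  /* hypothetical present+writable entry */
	printf("%#llx -> %#llx\n",
	       (unsigned long long)pgd_entry,
	       (unsigned long long)set_nx(pgd_entry));
	return 0;
}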
14996diff -urNp linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c
14997--- linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
14998+++ linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
14999@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15000 EXPORT_SYMBOL(cmpxchg8b_emu);
15001 #endif
15002
15003+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15004+
15005 /* Networking helper routines. */
15006 EXPORT_SYMBOL(csum_partial_copy_generic);
15007+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15008+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15009
15010 EXPORT_SYMBOL(__get_user_1);
15011 EXPORT_SYMBOL(__get_user_2);
15012@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15013
15014 EXPORT_SYMBOL(csum_partial);
15015 EXPORT_SYMBOL(empty_zero_page);
15016+
15017+#ifdef CONFIG_PAX_KERNEXEC
15018+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15019+#endif
15020diff -urNp linux-2.6.32.45/arch/x86/kernel/i8259.c linux-2.6.32.45/arch/x86/kernel/i8259.c
15021--- linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
15022+++ linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
15023@@ -208,7 +208,7 @@ spurious_8259A_irq:
15024 "spurious 8259A interrupt: IRQ%d.\n", irq);
15025 spurious_irq_mask |= irqmask;
15026 }
15027- atomic_inc(&irq_err_count);
15028+ atomic_inc_unchecked(&irq_err_count);
15029 /*
15030 * Theoretically we do not have to handle this IRQ,
15031 * but in Linux this does not cause problems and is
15032diff -urNp linux-2.6.32.45/arch/x86/kernel/init_task.c linux-2.6.32.45/arch/x86/kernel/init_task.c
15033--- linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
15034+++ linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
15035@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
15036 * way process stacks are handled. This is done by having a special
15037 * "init_task" linker map entry..
15038 */
15039-union thread_union init_thread_union __init_task_data =
15040- { INIT_THREAD_INFO(init_task) };
15041+union thread_union init_thread_union __init_task_data;
15042
15043 /*
15044 * Initial task structure.
15045@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15046 * section. Since TSS's are completely CPU-local, we want them
15047 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15048 */
15049-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15050-
15051+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15052+EXPORT_SYMBOL(init_tss);
15053diff -urNp linux-2.6.32.45/arch/x86/kernel/ioport.c linux-2.6.32.45/arch/x86/kernel/ioport.c
15054--- linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
15055+++ linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
15056@@ -6,6 +6,7 @@
15057 #include <linux/sched.h>
15058 #include <linux/kernel.h>
15059 #include <linux/capability.h>
15060+#include <linux/security.h>
15061 #include <linux/errno.h>
15062 #include <linux/types.h>
15063 #include <linux/ioport.h>
15064@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
15065
15066 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15067 return -EINVAL;
15068+#ifdef CONFIG_GRKERNSEC_IO
15069+ if (turn_on && grsec_disable_privio) {
15070+ gr_handle_ioperm();
15071+ return -EPERM;
15072+ }
15073+#endif
15074 if (turn_on && !capable(CAP_SYS_RAWIO))
15075 return -EPERM;
15076
15077@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
15078 * because the ->io_bitmap_max value must match the bitmap
15079 * contents:
15080 */
15081- tss = &per_cpu(init_tss, get_cpu());
15082+ tss = init_tss + get_cpu();
15083
15084 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
15085
15086@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
15087 return -EINVAL;
15088 /* Trying to gain more privileges? */
15089 if (level > old) {
15090+#ifdef CONFIG_GRKERNSEC_IO
15091+ if (grsec_disable_privio) {
15092+ gr_handle_iopl();
15093+ return -EPERM;
15094+ }
15095+#endif
15096 if (!capable(CAP_SYS_RAWIO))
15097 return -EPERM;
15098 }
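Note on the ioport.c hunks above: with CONFIG_GRKERNSEC_IO, ioperm()/iopl() are refused even for CAP_SYS_RAWIO holders when privileged I/O is disabled, and the attempt is reported through gr_handle_ioperm()/gr_handle_iopl() before the usual capability check runs. A hedged user-space sketch of that gating order; the flag and logging helper here are stand-ins, not the grsecurity symbols.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool disable_privio = true;     /* stand-in for the grsec "disable privileged I/O" knob */
static bool has_cap_sys_rawio = true;  /* stand-in for capable(CAP_SYS_RAWIO) */

static void log_denied_io(void) { fprintf(stderr, "denied raw I/O request\n"); }

static int sys_ioperm_like(bool turn_on)
{
	if (turn_on && disable_privio) {   /* policy check comes first */
		log_denied_io();
		return -EPERM;
	}
	if (turn_on && !has_cap_sys_rawio) /* then the ordinary capability check */
		return -EPERM;
	return 0;
}

int main(void)
{
	printf("ioperm-like call: %d\n", sys_ioperm_like(true));
	return 0;
}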
15099diff -urNp linux-2.6.32.45/arch/x86/kernel/irq_32.c linux-2.6.32.45/arch/x86/kernel/irq_32.c
15100--- linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
15101+++ linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
15102@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
15103 __asm__ __volatile__("andl %%esp,%0" :
15104 "=r" (sp) : "0" (THREAD_SIZE - 1));
15105
15106- return sp < (sizeof(struct thread_info) + STACK_WARN);
15107+ return sp < STACK_WARN;
15108 }
15109
15110 static void print_stack_overflow(void)
15111@@ -54,9 +54,9 @@ static inline void print_stack_overflow(
15112 * per-CPU IRQ handling contexts (thread information and stack)
15113 */
15114 union irq_ctx {
15115- struct thread_info tinfo;
15116- u32 stack[THREAD_SIZE/sizeof(u32)];
15117-} __attribute__((aligned(PAGE_SIZE)));
15118+ unsigned long previous_esp;
15119+ u32 stack[THREAD_SIZE/sizeof(u32)];
15120+} __attribute__((aligned(THREAD_SIZE)));
15121
15122 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15123 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
15124@@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
15125 static inline int
15126 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15127 {
15128- union irq_ctx *curctx, *irqctx;
15129+ union irq_ctx *irqctx;
15130 u32 *isp, arg1, arg2;
15131
15132- curctx = (union irq_ctx *) current_thread_info();
15133 irqctx = __get_cpu_var(hardirq_ctx);
15134
15135 /*
15136@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
15137 * handler) we can't do that and just have to keep using the
15138 * current stack (which is the irq stack already after all)
15139 */
15140- if (unlikely(curctx == irqctx))
15141+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15142 return 0;
15143
15144 /* build the stack frame on the IRQ stack */
15145- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15146- irqctx->tinfo.task = curctx->tinfo.task;
15147- irqctx->tinfo.previous_esp = current_stack_pointer;
15148+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15149+ irqctx->previous_esp = current_stack_pointer;
15150
15151- /*
15152- * Copy the softirq bits in preempt_count so that the
15153- * softirq checks work in the hardirq context.
15154- */
15155- irqctx->tinfo.preempt_count =
15156- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15157- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15158+#ifdef CONFIG_PAX_MEMORY_UDEREF
15159+ __set_fs(MAKE_MM_SEG(0));
15160+#endif
15161
15162 if (unlikely(overflow))
15163 call_on_stack(print_stack_overflow, isp);
15164@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15165 : "0" (irq), "1" (desc), "2" (isp),
15166 "D" (desc->handle_irq)
15167 : "memory", "cc", "ecx");
15168+
15169+#ifdef CONFIG_PAX_MEMORY_UDEREF
15170+ __set_fs(current_thread_info()->addr_limit);
15171+#endif
15172+
15173 return 1;
15174 }
15175
15176@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15177 */
15178 void __cpuinit irq_ctx_init(int cpu)
15179 {
15180- union irq_ctx *irqctx;
15181-
15182 if (per_cpu(hardirq_ctx, cpu))
15183 return;
15184
15185- irqctx = &per_cpu(hardirq_stack, cpu);
15186- irqctx->tinfo.task = NULL;
15187- irqctx->tinfo.exec_domain = NULL;
15188- irqctx->tinfo.cpu = cpu;
15189- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15190- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15191-
15192- per_cpu(hardirq_ctx, cpu) = irqctx;
15193-
15194- irqctx = &per_cpu(softirq_stack, cpu);
15195- irqctx->tinfo.task = NULL;
15196- irqctx->tinfo.exec_domain = NULL;
15197- irqctx->tinfo.cpu = cpu;
15198- irqctx->tinfo.preempt_count = 0;
15199- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15200-
15201- per_cpu(softirq_ctx, cpu) = irqctx;
15202+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15203+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15204
15205 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15206 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15207@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15208 asmlinkage void do_softirq(void)
15209 {
15210 unsigned long flags;
15211- struct thread_info *curctx;
15212 union irq_ctx *irqctx;
15213 u32 *isp;
15214
15215@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15216 local_irq_save(flags);
15217
15218 if (local_softirq_pending()) {
15219- curctx = current_thread_info();
15220 irqctx = __get_cpu_var(softirq_ctx);
15221- irqctx->tinfo.task = curctx->task;
15222- irqctx->tinfo.previous_esp = current_stack_pointer;
15223+ irqctx->previous_esp = current_stack_pointer;
15224
15225 /* build the stack frame on the softirq stack */
15226- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15227+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15228+
15229+#ifdef CONFIG_PAX_MEMORY_UDEREF
15230+ __set_fs(MAKE_MM_SEG(0));
15231+#endif
15232
15233 call_on_stack(__do_softirq, isp);
15234+
15235+#ifdef CONFIG_PAX_MEMORY_UDEREF
15236+ __set_fs(current_thread_info()->addr_limit);
15237+#endif
15238+
15239 /*
15240 * Shouldnt happen, we returned above if in_interrupt():
15241 */
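Note on the irq_32.c hunks above: the per-CPU IRQ context no longer embeds a thread_info; it keeps only previous_esp, and "am I already on the IRQ stack?" becomes an unsigned pointer-distance test instead of a thread_info comparison. A small sketch of that containment test; the names and the THREAD_SIZE value are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL  /* assumed 8K stacks, as on common 32-bit configs */

/* true if sp already lies inside the THREAD_SIZE-sized IRQ stack at irqctx;
   unsigned wraparound also classifies sp below irqctx as "outside" */
static int on_irq_stack(uintptr_t sp, uintptr_t irqctx)
{
	return sp - irqctx < THREAD_SIZE;
}

int main(void)
{
	uintptr_t irqctx = 0xC2000000UL;               /* hypothetical IRQ stack base */
	printf("%d %d\n",
	       on_irq_stack(irqctx + 0x100, irqctx),   /* inside  -> 1 */
	       on_irq_stack(irqctx + 0x4000, irqctx)); /* outside -> 0 */
	return 0;
}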
15242diff -urNp linux-2.6.32.45/arch/x86/kernel/irq.c linux-2.6.32.45/arch/x86/kernel/irq.c
15243--- linux-2.6.32.45/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15244+++ linux-2.6.32.45/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15245@@ -15,7 +15,7 @@
15246 #include <asm/mce.h>
15247 #include <asm/hw_irq.h>
15248
15249-atomic_t irq_err_count;
15250+atomic_unchecked_t irq_err_count;
15251
15252 /* Function pointer for generic interrupt vector handling */
15253 void (*generic_interrupt_extension)(void) = NULL;
15254@@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15255 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15256 seq_printf(p, " Machine check polls\n");
15257 #endif
15258- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15259+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15260 #if defined(CONFIG_X86_IO_APIC)
15261- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15262+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15263 #endif
15264 return 0;
15265 }
15266@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15267
15268 u64 arch_irq_stat(void)
15269 {
15270- u64 sum = atomic_read(&irq_err_count);
15271+ u64 sum = atomic_read_unchecked(&irq_err_count);
15272
15273 #ifdef CONFIG_X86_IO_APIC
15274- sum += atomic_read(&irq_mis_count);
15275+ sum += atomic_read_unchecked(&irq_mis_count);
15276 #endif
15277 return sum;
15278 }
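Note on the i8259.c and irq.c hunks above: the atomic_inc_unchecked()/atomic_read_unchecked() conversions follow from PaX's reference-counter overflow protection, under which plain atomic_t becomes overflow-checked; pure statistics counters such as irq_err_count and irq_mis_count, which may legitimately wrap, are therefore switched to the "unchecked" variant. A rough single-threaded sketch of the distinction; the types and helpers below are illustrative, not the kernel's.

#include <assert.h>
#include <limits.h>
#include <stdio.h>

typedef struct { int counter; } atomic_checked_t;
typedef struct { int counter; } atomic_unchecked_t;

static void atomic_inc_checked(atomic_checked_t *v)
{
	assert(v->counter != INT_MAX);  /* checked variant refuses to overflow */
	v->counter++;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	v->counter++;                   /* statistics counter: wrapping is tolerated */
}

int main(void)
{
	atomic_unchecked_t err_count = { 0 };
	atomic_checked_t refs = { 0 };

	atomic_inc_unchecked(&err_count);
	atomic_inc_checked(&refs);
	printf("err_count=%d refs=%d\n", err_count.counter, refs.counter);
	return 0;
}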
15279diff -urNp linux-2.6.32.45/arch/x86/kernel/kgdb.c linux-2.6.32.45/arch/x86/kernel/kgdb.c
15280--- linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15281+++ linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15282@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15283
15284 /* clear the trace bit */
15285 linux_regs->flags &= ~X86_EFLAGS_TF;
15286- atomic_set(&kgdb_cpu_doing_single_step, -1);
15287+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15288
15289 /* set the trace bit if we're stepping */
15290 if (remcomInBuffer[0] == 's') {
15291 linux_regs->flags |= X86_EFLAGS_TF;
15292 kgdb_single_step = 1;
15293- atomic_set(&kgdb_cpu_doing_single_step,
15294+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15295 raw_smp_processor_id());
15296 }
15297
15298@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15299 break;
15300
15301 case DIE_DEBUG:
15302- if (atomic_read(&kgdb_cpu_doing_single_step) ==
15303+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15304 raw_smp_processor_id()) {
15305 if (user_mode(regs))
15306 return single_step_cont(regs, args);
15307@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15308 return instruction_pointer(regs);
15309 }
15310
15311-struct kgdb_arch arch_kgdb_ops = {
15312+const struct kgdb_arch arch_kgdb_ops = {
15313 /* Breakpoint instruction: */
15314 .gdb_bpt_instr = { 0xcc },
15315 .flags = KGDB_HW_BREAKPOINT,
15316diff -urNp linux-2.6.32.45/arch/x86/kernel/kprobes.c linux-2.6.32.45/arch/x86/kernel/kprobes.c
15317--- linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15318+++ linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15319@@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15320 char op;
15321 s32 raddr;
15322 } __attribute__((packed)) * jop;
15323- jop = (struct __arch_jmp_op *)from;
15324+
15325+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15326+
15327+ pax_open_kernel();
15328 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15329 jop->op = RELATIVEJUMP_INSTRUCTION;
15330+ pax_close_kernel();
15331 }
15332
15333 /*
15334@@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15335 kprobe_opcode_t opcode;
15336 kprobe_opcode_t *orig_opcodes = opcodes;
15337
15338- if (search_exception_tables((unsigned long)opcodes))
15339+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15340 return 0; /* Page fault may occur on this address. */
15341
15342 retry:
15343@@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15344 disp = (u8 *) p->addr + *((s32 *) insn) -
15345 (u8 *) p->ainsn.insn;
15346 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15347+ pax_open_kernel();
15348 *(s32 *)insn = (s32) disp;
15349+ pax_close_kernel();
15350 }
15351 }
15352 #endif
15353@@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15354
15355 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15356 {
15357- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15358+ pax_open_kernel();
15359+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15360+ pax_close_kernel();
15361
15362 fix_riprel(p);
15363
15364- if (can_boost(p->addr))
15365+ if (can_boost(ktla_ktva(p->addr)))
15366 p->ainsn.boostable = 0;
15367 else
15368 p->ainsn.boostable = -1;
15369
15370- p->opcode = *p->addr;
15371+ p->opcode = *(ktla_ktva(p->addr));
15372 }
15373
15374 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15375@@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15376 if (p->opcode == BREAKPOINT_INSTRUCTION)
15377 regs->ip = (unsigned long)p->addr;
15378 else
15379- regs->ip = (unsigned long)p->ainsn.insn;
15380+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15381 }
15382
15383 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15384@@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15385 if (p->ainsn.boostable == 1 && !p->post_handler) {
15386 /* Boost up -- we can execute copied instructions directly */
15387 reset_current_kprobe();
15388- regs->ip = (unsigned long)p->ainsn.insn;
15389+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15390 preempt_enable_no_resched();
15391 return;
15392 }
15393@@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15394 struct kprobe_ctlblk *kcb;
15395
15396 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15397- if (*addr != BREAKPOINT_INSTRUCTION) {
15398+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15399 /*
15400 * The breakpoint instruction was removed right
15401 * after we hit it. Another cpu has removed
15402@@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15403 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15404 {
15405 unsigned long *tos = stack_addr(regs);
15406- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15407+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15408 unsigned long orig_ip = (unsigned long)p->addr;
15409 kprobe_opcode_t *insn = p->ainsn.insn;
15410
15411@@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15412 struct die_args *args = data;
15413 int ret = NOTIFY_DONE;
15414
15415- if (args->regs && user_mode_vm(args->regs))
15416+ if (args->regs && user_mode(args->regs))
15417 return ret;
15418
15419 switch (val) {
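Note on the kgdb.c and kprobes.c hunks above: both subsystems patch kernel text at runtime, so under KERNEXEC every such write is wrapped in pax_open_kernel()/pax_close_kernel() (which temporarily permit writing otherwise read-only kernel memory, however that is implemented), and addresses are translated between the shifted kernel-text view and the ordinary linear view with ktla_ktva()/ktva_ktla(). The sketch below models that translation as a constant offset purely for illustration; the offset value and the open/close mechanism are assumptions, not the PaX implementation.

#include <stdint.h>
#include <stdio.h>

#define KERNEXEC_DELTA 0x10000000UL  /* hypothetical shift between text and linear views */

static uintptr_t ktla_ktva(uintptr_t addr) { return addr + KERNEXEC_DELTA; }
static uintptr_t ktva_ktla(uintptr_t addr) { return addr - KERNEXEC_DELTA; }

int main(void)
{
	uintptr_t probe = 0xC1234560UL;  /* hypothetical probe address */
	uintptr_t alias = ktla_ktva(probe);

	printf("text %#lx <-> linear %#lx (round trip %#lx)\n",
	       (unsigned long)probe, (unsigned long)alias,
	       (unsigned long)ktva_ktla(alias));
	return 0;
}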
15420diff -urNp linux-2.6.32.45/arch/x86/kernel/ldt.c linux-2.6.32.45/arch/x86/kernel/ldt.c
15421--- linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15422+++ linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15423@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15424 if (reload) {
15425 #ifdef CONFIG_SMP
15426 preempt_disable();
15427- load_LDT(pc);
15428+ load_LDT_nolock(pc);
15429 if (!cpumask_equal(mm_cpumask(current->mm),
15430 cpumask_of(smp_processor_id())))
15431 smp_call_function(flush_ldt, current->mm, 1);
15432 preempt_enable();
15433 #else
15434- load_LDT(pc);
15435+ load_LDT_nolock(pc);
15436 #endif
15437 }
15438 if (oldsize) {
15439@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15440 return err;
15441
15442 for (i = 0; i < old->size; i++)
15443- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15444+ write_ldt_entry(new->ldt, i, old->ldt + i);
15445 return 0;
15446 }
15447
15448@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15449 retval = copy_ldt(&mm->context, &old_mm->context);
15450 mutex_unlock(&old_mm->context.lock);
15451 }
15452+
15453+ if (tsk == current) {
15454+ mm->context.vdso = 0;
15455+
15456+#ifdef CONFIG_X86_32
15457+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15458+ mm->context.user_cs_base = 0UL;
15459+ mm->context.user_cs_limit = ~0UL;
15460+
15461+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15462+ cpus_clear(mm->context.cpu_user_cs_mask);
15463+#endif
15464+
15465+#endif
15466+#endif
15467+
15468+ }
15469+
15470 return retval;
15471 }
15472
15473@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15474 }
15475 }
15476
15477+#ifdef CONFIG_PAX_SEGMEXEC
15478+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15479+ error = -EINVAL;
15480+ goto out_unlock;
15481+ }
15482+#endif
15483+
15484 fill_ldt(&ldt, &ldt_info);
15485 if (oldmode)
15486 ldt.avl = 0;
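Note on the ldt.c hunk above: under SEGMEXEC, user-supplied LDT code descriptors are rejected in write_ldt(), since an arbitrary code segment would bypass the segmentation-based non-executable split. A tiny sketch of that check; the flag parameter is a stand-in, while the MODIFY_LDT_CONTENTS_CODE value matches the kernel's uapi definition.

#include <errno.h>
#include <stdio.h>

#define MODIFY_LDT_CONTENTS_CODE 2  /* as defined in asm/ldt.h */

static int write_ldt_check(int segmexec_enabled, int contents)
{
	/* reject code descriptors when segmentation-based NX is active */
	if (segmexec_enabled && (contents & MODIFY_LDT_CONTENTS_CODE))
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d %d\n",
	       write_ldt_check(1, MODIFY_LDT_CONTENTS_CODE),   /* rejected */
	       write_ldt_check(0, MODIFY_LDT_CONTENTS_CODE));  /* allowed  */
	return 0;
}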
15487diff -urNp linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c
15488--- linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15489+++ linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15490@@ -26,7 +26,7 @@
15491 #include <asm/system.h>
15492 #include <asm/cacheflush.h>
15493
15494-static void set_idt(void *newidt, __u16 limit)
15495+static void set_idt(struct desc_struct *newidt, __u16 limit)
15496 {
15497 struct desc_ptr curidt;
15498
15499@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15500 }
15501
15502
15503-static void set_gdt(void *newgdt, __u16 limit)
15504+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15505 {
15506 struct desc_ptr curgdt;
15507
15508@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15509 }
15510
15511 control_page = page_address(image->control_code_page);
15512- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15513+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15514
15515 relocate_kernel_ptr = control_page;
15516 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15517diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_amd.c linux-2.6.32.45/arch/x86/kernel/microcode_amd.c
15518--- linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15519+++ linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15520@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15521 uci->mc = NULL;
15522 }
15523
15524-static struct microcode_ops microcode_amd_ops = {
15525+static const struct microcode_ops microcode_amd_ops = {
15526 .request_microcode_user = request_microcode_user,
15527 .request_microcode_fw = request_microcode_fw,
15528 .collect_cpu_info = collect_cpu_info_amd,
15529@@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15530 .microcode_fini_cpu = microcode_fini_cpu_amd,
15531 };
15532
15533-struct microcode_ops * __init init_amd_microcode(void)
15534+const struct microcode_ops * __init init_amd_microcode(void)
15535 {
15536 return &microcode_amd_ops;
15537 }
15538diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_core.c linux-2.6.32.45/arch/x86/kernel/microcode_core.c
15539--- linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15540+++ linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15541@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15542
15543 #define MICROCODE_VERSION "2.00"
15544
15545-static struct microcode_ops *microcode_ops;
15546+static const struct microcode_ops *microcode_ops;
15547
15548 /*
15549 * Synchronization.
15550diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_intel.c linux-2.6.32.45/arch/x86/kernel/microcode_intel.c
15551--- linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15552+++ linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15553@@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15554
15555 static int get_ucode_user(void *to, const void *from, size_t n)
15556 {
15557- return copy_from_user(to, from, n);
15558+ return copy_from_user(to, (__force const void __user *)from, n);
15559 }
15560
15561 static enum ucode_state
15562 request_microcode_user(int cpu, const void __user *buf, size_t size)
15563 {
15564- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15565+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15566 }
15567
15568 static void microcode_fini_cpu(int cpu)
15569@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15570 uci->mc = NULL;
15571 }
15572
15573-static struct microcode_ops microcode_intel_ops = {
15574+static const struct microcode_ops microcode_intel_ops = {
15575 .request_microcode_user = request_microcode_user,
15576 .request_microcode_fw = request_microcode_fw,
15577 .collect_cpu_info = collect_cpu_info,
15578@@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15579 .microcode_fini_cpu = microcode_fini_cpu,
15580 };
15581
15582-struct microcode_ops * __init init_intel_microcode(void)
15583+const struct microcode_ops * __init init_intel_microcode(void)
15584 {
15585 return &microcode_intel_ops;
15586 }
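Note on the microcode hunks above (and the kgdb_arch and dma_map_ops conversions elsewhere in this section): they all apply the same constification pattern, declaring tables of function pointers const when they are never modified after initialization, so the linker places them in read-only memory and they cannot be retargeted at runtime. A tiny self-contained sketch of the pattern with made-up names:

#include <stdio.h>

struct ops {
	int (*collect_info)(int cpu);
};

static int collect_info_impl(int cpu) { return cpu; }

/* const: the table lands in .rodata and the pointers cannot be overwritten */
static const struct ops example_ops = {
	.collect_info = collect_info_impl,
};

int main(void)
{
	printf("%d\n", example_ops.collect_info(3));
	return 0;
}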
15587diff -urNp linux-2.6.32.45/arch/x86/kernel/module.c linux-2.6.32.45/arch/x86/kernel/module.c
15588--- linux-2.6.32.45/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15589+++ linux-2.6.32.45/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15590@@ -34,7 +34,7 @@
15591 #define DEBUGP(fmt...)
15592 #endif
15593
15594-void *module_alloc(unsigned long size)
15595+static void *__module_alloc(unsigned long size, pgprot_t prot)
15596 {
15597 struct vm_struct *area;
15598
15599@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15600 if (!area)
15601 return NULL;
15602
15603- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15604- PAGE_KERNEL_EXEC);
15605+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15606+}
15607+
15608+void *module_alloc(unsigned long size)
15609+{
15610+
15611+#ifdef CONFIG_PAX_KERNEXEC
15612+ return __module_alloc(size, PAGE_KERNEL);
15613+#else
15614+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15615+#endif
15616+
15617 }
15618
15619 /* Free memory returned from module_alloc */
15620@@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15621 vfree(module_region);
15622 }
15623
15624+#ifdef CONFIG_PAX_KERNEXEC
15625+#ifdef CONFIG_X86_32
15626+void *module_alloc_exec(unsigned long size)
15627+{
15628+ struct vm_struct *area;
15629+
15630+ if (size == 0)
15631+ return NULL;
15632+
15633+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15634+ return area ? area->addr : NULL;
15635+}
15636+EXPORT_SYMBOL(module_alloc_exec);
15637+
15638+void module_free_exec(struct module *mod, void *module_region)
15639+{
15640+ vunmap(module_region);
15641+}
15642+EXPORT_SYMBOL(module_free_exec);
15643+#else
15644+void module_free_exec(struct module *mod, void *module_region)
15645+{
15646+ module_free(mod, module_region);
15647+}
15648+EXPORT_SYMBOL(module_free_exec);
15649+
15650+void *module_alloc_exec(unsigned long size)
15651+{
15652+ return __module_alloc(size, PAGE_KERNEL_RX);
15653+}
15654+EXPORT_SYMBOL(module_alloc_exec);
15655+#endif
15656+#endif
15657+
15658 /* We don't need anything special. */
15659 int module_frob_arch_sections(Elf_Ehdr *hdr,
15660 Elf_Shdr *sechdrs,
15661@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15662 unsigned int i;
15663 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15664 Elf32_Sym *sym;
15665- uint32_t *location;
15666+ uint32_t *plocation, location;
15667
15668 DEBUGP("Applying relocate section %u to %u\n", relsec,
15669 sechdrs[relsec].sh_info);
15670 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15671 /* This is where to make the change */
15672- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15673- + rel[i].r_offset;
15674+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15675+ location = (uint32_t)plocation;
15676+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15677+ plocation = ktla_ktva((void *)plocation);
15678 /* This is the symbol it is referring to. Note that all
15679 undefined symbols have been resolved. */
15680 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15681@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15682 switch (ELF32_R_TYPE(rel[i].r_info)) {
15683 case R_386_32:
15684 /* We add the value into the location given */
15685- *location += sym->st_value;
15686+ pax_open_kernel();
15687+ *plocation += sym->st_value;
15688+ pax_close_kernel();
15689 break;
15690 case R_386_PC32:
15691 /* Add the value, subtract its postition */
15692- *location += sym->st_value - (uint32_t)location;
15693+ pax_open_kernel();
15694+ *plocation += sym->st_value - location;
15695+ pax_close_kernel();
15696 break;
15697 default:
15698 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15699@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15700 case R_X86_64_NONE:
15701 break;
15702 case R_X86_64_64:
15703+ pax_open_kernel();
15704 *(u64 *)loc = val;
15705+ pax_close_kernel();
15706 break;
15707 case R_X86_64_32:
15708+ pax_open_kernel();
15709 *(u32 *)loc = val;
15710+ pax_close_kernel();
15711 if (val != *(u32 *)loc)
15712 goto overflow;
15713 break;
15714 case R_X86_64_32S:
15715+ pax_open_kernel();
15716 *(s32 *)loc = val;
15717+ pax_close_kernel();
15718 if ((s64)val != *(s32 *)loc)
15719 goto overflow;
15720 break;
15721 case R_X86_64_PC32:
15722 val -= (u64)loc;
15723+ pax_open_kernel();
15724 *(u32 *)loc = val;
15725+ pax_close_kernel();
15726+
15727 #if 0
15728 if ((s64)val != *(s32 *)loc)
15729 goto overflow;
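Note on the module.c hunks above: under KERNEXEC, module memory is split, with module_alloc() returning non-executable PAGE_KERNEL memory for data and a separate module_alloc_exec() providing the executable region (PAGE_KERNEL_RX on 64-bit); relocations into that region are applied inside pax_open_kernel()/pax_close_kernel(). The following is a hedged user-space analogue of the W^X policy using mmap()/mprotect() on Linux, an illustration of the idea rather than the kernel allocator.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t sz = 4096;

	/* data region: writable, never executable */
	void *data = mmap(NULL, sz, PROT_READ | PROT_WRITE,
	                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* code region: writable while "relocations" are applied, then flipped to RX */
	void *code = mmap(NULL, sz, PROT_READ | PROT_WRITE,
	                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (data == MAP_FAILED || code == MAP_FAILED)
		return 1;

	/* ... copy and patch code here, the analogue of apply_relocate ... */

	mprotect(code, sz, PROT_READ | PROT_EXEC);  /* drop write before execution */
	printf("data %p (rw), code %p (rx)\n", data, code);

	munmap(data, sz);
	munmap(code, sz);
	return 0;
}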
15730diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt.c linux-2.6.32.45/arch/x86/kernel/paravirt.c
15731--- linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15732+++ linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-08-05 20:33:55.000000000 -0400
15733@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15734 {
15735 return x;
15736 }
15737+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15738+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15739+#endif
15740
15741 void __init default_banner(void)
15742 {
15743@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15744 * corresponding structure. */
15745 static void *get_call_destination(u8 type)
15746 {
15747- struct paravirt_patch_template tmpl = {
15748+ const struct paravirt_patch_template tmpl = {
15749 .pv_init_ops = pv_init_ops,
15750 .pv_time_ops = pv_time_ops,
15751 .pv_cpu_ops = pv_cpu_ops,
15752@@ -133,6 +136,8 @@ static void *get_call_destination(u8 typ
15753 .pv_lock_ops = pv_lock_ops,
15754 #endif
15755 };
15756+
15757+ pax_track_stack();
15758 return *((void **)&tmpl + type);
15759 }
15760
15761@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
15762 if (opfunc == NULL)
15763 /* If there's no function, patch it with a ud2a (BUG) */
15764 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15765- else if (opfunc == _paravirt_nop)
15766+ else if (opfunc == (void *)_paravirt_nop)
15767 /* If the operation is a nop, then nop the callsite */
15768 ret = paravirt_patch_nop();
15769
15770 /* identity functions just return their single argument */
15771- else if (opfunc == _paravirt_ident_32)
15772+ else if (opfunc == (void *)_paravirt_ident_32)
15773 ret = paravirt_patch_ident_32(insnbuf, len);
15774- else if (opfunc == _paravirt_ident_64)
15775+ else if (opfunc == (void *)_paravirt_ident_64)
15776+ ret = paravirt_patch_ident_64(insnbuf, len);
15777+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15778+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
15779 ret = paravirt_patch_ident_64(insnbuf, len);
15780+#endif
15781
15782 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15783 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
15784@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
15785 if (insn_len > len || start == NULL)
15786 insn_len = len;
15787 else
15788- memcpy(insnbuf, start, insn_len);
15789+ memcpy(insnbuf, ktla_ktva(start), insn_len);
15790
15791 return insn_len;
15792 }
15793@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
15794 preempt_enable();
15795 }
15796
15797-struct pv_info pv_info = {
15798+struct pv_info pv_info __read_only = {
15799 .name = "bare hardware",
15800 .paravirt_enabled = 0,
15801 .kernel_rpl = 0,
15802 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15803 };
15804
15805-struct pv_init_ops pv_init_ops = {
15806+struct pv_init_ops pv_init_ops __read_only = {
15807 .patch = native_patch,
15808 };
15809
15810-struct pv_time_ops pv_time_ops = {
15811+struct pv_time_ops pv_time_ops __read_only = {
15812 .sched_clock = native_sched_clock,
15813 };
15814
15815-struct pv_irq_ops pv_irq_ops = {
15816+struct pv_irq_ops pv_irq_ops __read_only = {
15817 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15818 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15819 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15820@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
15821 #endif
15822 };
15823
15824-struct pv_cpu_ops pv_cpu_ops = {
15825+struct pv_cpu_ops pv_cpu_ops __read_only = {
15826 .cpuid = native_cpuid,
15827 .get_debugreg = native_get_debugreg,
15828 .set_debugreg = native_set_debugreg,
15829@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
15830 .end_context_switch = paravirt_nop,
15831 };
15832
15833-struct pv_apic_ops pv_apic_ops = {
15834+struct pv_apic_ops pv_apic_ops __read_only = {
15835 #ifdef CONFIG_X86_LOCAL_APIC
15836 .startup_ipi_hook = paravirt_nop,
15837 #endif
15838 };
15839
15840-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
15841+#ifdef CONFIG_X86_32
15842+#ifdef CONFIG_X86_PAE
15843+/* 64-bit pagetable entries */
15844+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
15845+#else
15846 /* 32-bit pagetable entries */
15847 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
15848+#endif
15849 #else
15850 /* 64-bit pagetable entries */
15851 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15852 #endif
15853
15854-struct pv_mmu_ops pv_mmu_ops = {
15855+struct pv_mmu_ops pv_mmu_ops __read_only = {
15856
15857 .read_cr2 = native_read_cr2,
15858 .write_cr2 = native_write_cr2,
15859@@ -467,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15860 },
15861
15862 .set_fixmap = native_set_fixmap,
15863+
15864+#ifdef CONFIG_PAX_KERNEXEC
15865+ .pax_open_kernel = native_pax_open_kernel,
15866+ .pax_close_kernel = native_pax_close_kernel,
15867+#endif
15868+
15869 };
15870
15871 EXPORT_SYMBOL_GPL(pv_time_ops);
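Note on the paravirt.c hunks above: the pv_*_ops dispatch tables are tagged __read_only so they cannot be rewritten after boot, the same hardening rationale as the const ops conversions elsewhere in this patch. As a rough illustration only, a structure can be steered into a dedicated data section with a section attribute; the section name below is hypothetical, and the runtime step that actually write-protects it is a PaX specific not shown here.

#include <stdio.h>

struct pv_ops_like {
	unsigned long (*read_cr2)(void);
};

static unsigned long fake_read_cr2(void) { return 0; }

/* hypothetical section; in the real patch __read_only chooses the section and
 * the kernel marks it read-only once initialization is finished */
static struct pv_ops_like ops
	__attribute__((section(".data.example_read_only"))) = {
	.read_cr2 = fake_read_cr2,
};

int main(void)
{
	printf("%lu\n", ops.read_cr2());
	return 0;
}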
15872diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c
15873--- linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15874+++ linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15875@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15876 __raw_spin_lock(lock);
15877 }
15878
15879-struct pv_lock_ops pv_lock_ops = {
15880+struct pv_lock_ops pv_lock_ops __read_only = {
15881 #ifdef CONFIG_SMP
15882 .spin_is_locked = __ticket_spin_is_locked,
15883 .spin_is_contended = __ticket_spin_is_contended,
15884diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c
15885--- linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15886+++ linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15887@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15888 free_pages((unsigned long)vaddr, get_order(size));
15889 }
15890
15891-static struct dma_map_ops calgary_dma_ops = {
15892+static const struct dma_map_ops calgary_dma_ops = {
15893 .alloc_coherent = calgary_alloc_coherent,
15894 .free_coherent = calgary_free_coherent,
15895 .map_sg = calgary_map_sg,
15896diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-dma.c linux-2.6.32.45/arch/x86/kernel/pci-dma.c
15897--- linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15898+++ linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15899@@ -14,7 +14,7 @@
15900
15901 static int forbid_dac __read_mostly;
15902
15903-struct dma_map_ops *dma_ops;
15904+const struct dma_map_ops *dma_ops;
15905 EXPORT_SYMBOL(dma_ops);
15906
15907 static int iommu_sac_force __read_mostly;
15908@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15909
15910 int dma_supported(struct device *dev, u64 mask)
15911 {
15912- struct dma_map_ops *ops = get_dma_ops(dev);
15913+ const struct dma_map_ops *ops = get_dma_ops(dev);
15914
15915 #ifdef CONFIG_PCI
15916 if (mask > 0xffffffff && forbid_dac > 0) {
15917diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c
15918--- linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15919+++ linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15920@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15921 return -1;
15922 }
15923
15924-static struct dma_map_ops gart_dma_ops = {
15925+static const struct dma_map_ops gart_dma_ops = {
15926 .map_sg = gart_map_sg,
15927 .unmap_sg = gart_unmap_sg,
15928 .map_page = gart_map_page,
15929diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-nommu.c linux-2.6.32.45/arch/x86/kernel/pci-nommu.c
15930--- linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15931+++ linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15932@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15933 flush_write_buffers();
15934 }
15935
15936-struct dma_map_ops nommu_dma_ops = {
15937+const struct dma_map_ops nommu_dma_ops = {
15938 .alloc_coherent = dma_generic_alloc_coherent,
15939 .free_coherent = nommu_free_coherent,
15940 .map_sg = nommu_map_sg,
15941diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c
15942--- linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15943+++ linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15944@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15945 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15946 }
15947
15948-static struct dma_map_ops swiotlb_dma_ops = {
15949+static const struct dma_map_ops swiotlb_dma_ops = {
15950 .mapping_error = swiotlb_dma_mapping_error,
15951 .alloc_coherent = x86_swiotlb_alloc_coherent,
15952 .free_coherent = swiotlb_free_coherent,
15953diff -urNp linux-2.6.32.45/arch/x86/kernel/process_32.c linux-2.6.32.45/arch/x86/kernel/process_32.c
15954--- linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
15955+++ linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
15956@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15957 unsigned long thread_saved_pc(struct task_struct *tsk)
15958 {
15959 return ((unsigned long *)tsk->thread.sp)[3];
15960+//XXX return tsk->thread.eip;
15961 }
15962
15963 #ifndef CONFIG_SMP
15964@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15965 unsigned short ss, gs;
15966 const char *board;
15967
15968- if (user_mode_vm(regs)) {
15969+ if (user_mode(regs)) {
15970 sp = regs->sp;
15971 ss = regs->ss & 0xffff;
15972- gs = get_user_gs(regs);
15973 } else {
15974 sp = (unsigned long) (&regs->sp);
15975 savesegment(ss, ss);
15976- savesegment(gs, gs);
15977 }
15978+ gs = get_user_gs(regs);
15979
15980 printk("\n");
15981
15982@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
15983 regs.bx = (unsigned long) fn;
15984 regs.dx = (unsigned long) arg;
15985
15986- regs.ds = __USER_DS;
15987- regs.es = __USER_DS;
15988+ regs.ds = __KERNEL_DS;
15989+ regs.es = __KERNEL_DS;
15990 regs.fs = __KERNEL_PERCPU;
15991- regs.gs = __KERNEL_STACK_CANARY;
15992+ savesegment(gs, regs.gs);
15993 regs.orig_ax = -1;
15994 regs.ip = (unsigned long) kernel_thread_helper;
15995 regs.cs = __KERNEL_CS | get_kernel_rpl();
15996@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
15997 struct task_struct *tsk;
15998 int err;
15999
16000- childregs = task_pt_regs(p);
16001+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16002 *childregs = *regs;
16003 childregs->ax = 0;
16004 childregs->sp = sp;
16005
16006 p->thread.sp = (unsigned long) childregs;
16007 p->thread.sp0 = (unsigned long) (childregs+1);
16008+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16009
16010 p->thread.ip = (unsigned long) ret_from_fork;
16011
16012@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
16013 struct thread_struct *prev = &prev_p->thread,
16014 *next = &next_p->thread;
16015 int cpu = smp_processor_id();
16016- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16017+ struct tss_struct *tss = init_tss + cpu;
16018 bool preload_fpu;
16019
16020 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16021@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
16022 */
16023 lazy_save_gs(prev->gs);
16024
16025+#ifdef CONFIG_PAX_MEMORY_UDEREF
16026+ __set_fs(task_thread_info(next_p)->addr_limit);
16027+#endif
16028+
16029 /*
16030 * Load the per-thread Thread-Local Storage descriptor.
16031 */
16032@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
16033 */
16034 arch_end_context_switch(next_p);
16035
16036+ percpu_write(current_task, next_p);
16037+ percpu_write(current_tinfo, &next_p->tinfo);
16038+
16039 if (preload_fpu)
16040 __math_state_restore();
16041
16042@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
16043 if (prev->gs | next->gs)
16044 lazy_load_gs(next->gs);
16045
16046- percpu_write(current_task, next_p);
16047-
16048 return prev_p;
16049 }
16050
16051@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
16052 } while (count++ < 16);
16053 return 0;
16054 }
16055-
16056diff -urNp linux-2.6.32.45/arch/x86/kernel/process_64.c linux-2.6.32.45/arch/x86/kernel/process_64.c
16057--- linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
16058+++ linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
16059@@ -91,7 +91,7 @@ static void __exit_idle(void)
16060 void exit_idle(void)
16061 {
16062 /* idle loop has pid 0 */
16063- if (current->pid)
16064+ if (task_pid_nr(current))
16065 return;
16066 __exit_idle();
16067 }
16068@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
16069 if (!board)
16070 board = "";
16071 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
16072- current->pid, current->comm, print_tainted(),
16073+ task_pid_nr(current), current->comm, print_tainted(),
16074 init_utsname()->release,
16075 (int)strcspn(init_utsname()->version, " "),
16076 init_utsname()->version, board);
16077@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
16078 struct pt_regs *childregs;
16079 struct task_struct *me = current;
16080
16081- childregs = ((struct pt_regs *)
16082- (THREAD_SIZE + task_stack_page(p))) - 1;
16083+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16084 *childregs = *regs;
16085
16086 childregs->ax = 0;
16087@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
16088 p->thread.sp = (unsigned long) childregs;
16089 p->thread.sp0 = (unsigned long) (childregs+1);
16090 p->thread.usersp = me->thread.usersp;
16091+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16092
16093 set_tsk_thread_flag(p, TIF_FORK);
16094
16095@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
16096 struct thread_struct *prev = &prev_p->thread;
16097 struct thread_struct *next = &next_p->thread;
16098 int cpu = smp_processor_id();
16099- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16100+ struct tss_struct *tss = init_tss + cpu;
16101 unsigned fsindex, gsindex;
16102 bool preload_fpu;
16103
16104@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
16105 prev->usersp = percpu_read(old_rsp);
16106 percpu_write(old_rsp, next->usersp);
16107 percpu_write(current_task, next_p);
16108+ percpu_write(current_tinfo, &next_p->tinfo);
16109
16110- percpu_write(kernel_stack,
16111- (unsigned long)task_stack_page(next_p) +
16112- THREAD_SIZE - KERNEL_STACK_OFFSET);
16113+ percpu_write(kernel_stack, next->sp0);
16114
16115 /*
16116 * Now maybe reload the debug registers and handle I/O bitmaps
16117@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
16118 if (!p || p == current || p->state == TASK_RUNNING)
16119 return 0;
16120 stack = (unsigned long)task_stack_page(p);
16121- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16122+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16123 return 0;
16124 fp = *(u64 *)(p->thread.sp);
16125 do {
16126- if (fp < (unsigned long)stack ||
16127- fp >= (unsigned long)stack+THREAD_SIZE)
16128+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16129 return 0;
16130 ip = *(u64 *)(fp+8);
16131 if (!in_sched_functions(ip))
16132diff -urNp linux-2.6.32.45/arch/x86/kernel/process.c linux-2.6.32.45/arch/x86/kernel/process.c
16133--- linux-2.6.32.45/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
16134+++ linux-2.6.32.45/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
16135@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
16136
16137 void free_thread_info(struct thread_info *ti)
16138 {
16139- free_thread_xstate(ti->task);
16140 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16141 }
16142
16143+static struct kmem_cache *task_struct_cachep;
16144+
16145 void arch_task_cache_init(void)
16146 {
16147- task_xstate_cachep =
16148- kmem_cache_create("task_xstate", xstate_size,
16149+ /* create a slab on which task_structs can be allocated */
16150+ task_struct_cachep =
16151+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16152+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16153+
16154+ task_xstate_cachep =
16155+ kmem_cache_create("task_xstate", xstate_size,
16156 __alignof__(union thread_xstate),
16157- SLAB_PANIC | SLAB_NOTRACK, NULL);
16158+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16159+}
16160+
16161+struct task_struct *alloc_task_struct(void)
16162+{
16163+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16164+}
16165+
16166+void free_task_struct(struct task_struct *task)
16167+{
16168+ free_thread_xstate(task);
16169+ kmem_cache_free(task_struct_cachep, task);
16170 }
16171
16172 /*
16173@@ -73,7 +90,7 @@ void exit_thread(void)
16174 unsigned long *bp = t->io_bitmap_ptr;
16175
16176 if (bp) {
16177- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16178+ struct tss_struct *tss = init_tss + get_cpu();
16179
16180 t->io_bitmap_ptr = NULL;
16181 clear_thread_flag(TIF_IO_BITMAP);
16182@@ -93,6 +110,9 @@ void flush_thread(void)
16183
16184 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16185
16186+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16187+ loadsegment(gs, 0);
16188+#endif
16189 tsk->thread.debugreg0 = 0;
16190 tsk->thread.debugreg1 = 0;
16191 tsk->thread.debugreg2 = 0;
16192@@ -307,7 +327,7 @@ void default_idle(void)
16193 EXPORT_SYMBOL(default_idle);
16194 #endif
16195
16196-void stop_this_cpu(void *dummy)
16197+__noreturn void stop_this_cpu(void *dummy)
16198 {
16199 local_irq_disable();
16200 /*
16201@@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16202 }
16203 early_param("idle", idle_setup);
16204
16205-unsigned long arch_align_stack(unsigned long sp)
16206+#ifdef CONFIG_PAX_RANDKSTACK
16207+asmlinkage void pax_randomize_kstack(void)
16208 {
16209- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16210- sp -= get_random_int() % 8192;
16211- return sp & ~0xf;
16212-}
16213+ struct thread_struct *thread = &current->thread;
16214+ unsigned long time;
16215
16216-unsigned long arch_randomize_brk(struct mm_struct *mm)
16217-{
16218- unsigned long range_end = mm->brk + 0x02000000;
16219- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16220+ if (!randomize_va_space)
16221+ return;
16222+
16223+ rdtscl(time);
16224+
16225+ /* P4 seems to return a 0 LSB, ignore it */
16226+#ifdef CONFIG_MPENTIUM4
16227+ time &= 0x3EUL;
16228+ time <<= 2;
16229+#elif defined(CONFIG_X86_64)
16230+ time &= 0xFUL;
16231+ time <<= 4;
16232+#else
16233+ time &= 0x1FUL;
16234+ time <<= 3;
16235+#endif
16236+
16237+ thread->sp0 ^= time;
16238+ load_sp0(init_tss + smp_processor_id(), thread);
16239+
16240+#ifdef CONFIG_X86_64
16241+ percpu_write(kernel_stack, thread->sp0);
16242+#endif
16243 }
16244+#endif
16245
16246diff -urNp linux-2.6.32.45/arch/x86/kernel/ptrace.c linux-2.6.32.45/arch/x86/kernel/ptrace.c
16247--- linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16248+++ linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16249@@ -925,7 +925,7 @@ static const struct user_regset_view use
16250 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16251 {
16252 int ret;
16253- unsigned long __user *datap = (unsigned long __user *)data;
16254+ unsigned long __user *datap = (__force unsigned long __user *)data;
16255
16256 switch (request) {
16257 /* read the word at location addr in the USER area. */
16258@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16259 if (addr < 0)
16260 return -EIO;
16261 ret = do_get_thread_area(child, addr,
16262- (struct user_desc __user *) data);
16263+ (__force struct user_desc __user *) data);
16264 break;
16265
16266 case PTRACE_SET_THREAD_AREA:
16267 if (addr < 0)
16268 return -EIO;
16269 ret = do_set_thread_area(child, addr,
16270- (struct user_desc __user *) data, 0);
16271+ (__force struct user_desc __user *) data, 0);
16272 break;
16273 #endif
16274
16275@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16276 #ifdef CONFIG_X86_PTRACE_BTS
16277 case PTRACE_BTS_CONFIG:
16278 ret = ptrace_bts_config
16279- (child, data, (struct ptrace_bts_config __user *)addr);
16280+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16281 break;
16282
16283 case PTRACE_BTS_STATUS:
16284 ret = ptrace_bts_status
16285- (child, data, (struct ptrace_bts_config __user *)addr);
16286+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16287 break;
16288
16289 case PTRACE_BTS_SIZE:
16290@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16291
16292 case PTRACE_BTS_GET:
16293 ret = ptrace_bts_read_record
16294- (child, data, (struct bts_struct __user *) addr);
16295+ (child, data, (__force struct bts_struct __user *) addr);
16296 break;
16297
16298 case PTRACE_BTS_CLEAR:
16299@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16300
16301 case PTRACE_BTS_DRAIN:
16302 ret = ptrace_bts_drain
16303- (child, data, (struct bts_struct __user *) addr);
16304+ (child, data, (__force struct bts_struct __user *) addr);
16305 break;
16306 #endif /* CONFIG_X86_PTRACE_BTS */
16307
16308@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16309 info.si_code = si_code;
16310
16311 /* User-mode ip? */
16312- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16313+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16314
16315 /* Send us the fake SIGTRAP */
16316 force_sig_info(SIGTRAP, &info, tsk);
16317@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16318 * We must return the syscall number to actually look up in the table.
16319 * This can be -1L to skip running any syscall at all.
16320 */
16321-asmregparm long syscall_trace_enter(struct pt_regs *regs)
16322+long syscall_trace_enter(struct pt_regs *regs)
16323 {
16324 long ret = 0;
16325
16326@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16327 return ret ?: regs->orig_ax;
16328 }
16329
16330-asmregparm void syscall_trace_leave(struct pt_regs *regs)
16331+void syscall_trace_leave(struct pt_regs *regs)
16332 {
16333 if (unlikely(current->audit_context))
16334 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16335diff -urNp linux-2.6.32.45/arch/x86/kernel/reboot.c linux-2.6.32.45/arch/x86/kernel/reboot.c
16336--- linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:35:28.000000000 -0400
16337+++ linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:33:59.000000000 -0400
16338@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16339 EXPORT_SYMBOL(pm_power_off);
16340
16341 static const struct desc_ptr no_idt = {};
16342-static int reboot_mode;
16343+static unsigned short reboot_mode;
16344 enum reboot_type reboot_type = BOOT_KBD;
16345 int reboot_force;
16346
16347@@ -292,12 +292,12 @@ core_initcall(reboot_init);
16348 controller to pulse the CPU reset line, which is more thorough, but
16349 doesn't work with at least one type of 486 motherboard. It is easy
16350 to stop this code working; hence the copious comments. */
16351-static const unsigned long long
16352-real_mode_gdt_entries [3] =
16353+static struct desc_struct
16354+real_mode_gdt_entries [3] __read_only =
16355 {
16356- 0x0000000000000000ULL, /* Null descriptor */
16357- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16358- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16359+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16360+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16361+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16362 };
16363
16364 static const struct desc_ptr
16365@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16366 * specified by the code and length parameters.
16367 * We assume that length will aways be less that 100!
16368 */
16369-void machine_real_restart(const unsigned char *code, int length)
16370+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16371 {
16372 local_irq_disable();
16373
16374@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16375 /* Remap the kernel at virtual address zero, as well as offset zero
16376 from the kernel segment. This assumes the kernel segment starts at
16377 virtual address PAGE_OFFSET. */
16378- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16379- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16380+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16381+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16382
16383 /*
16384 * Use `swapper_pg_dir' as our page directory.
16385@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16386 boot)". This seems like a fairly standard thing that gets set by
16387 REBOOT.COM programs, and the previous reset routine did this
16388 too. */
16389- *((unsigned short *)0x472) = reboot_mode;
16390+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16391
16392 /* For the switch to real mode, copy some code to low memory. It has
16393 to be in the first 64k because it is running in 16-bit mode, and it
16394 has to have the same physical and virtual address, because it turns
16395 off paging. Copy it near the end of the first page, out of the way
16396 of BIOS variables. */
16397- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16398- real_mode_switch, sizeof (real_mode_switch));
16399- memcpy((void *)(0x1000 - 100), code, length);
16400+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16401+ memcpy(__va(0x1000 - 100), code, length);
16402
16403 /* Set up the IDT for real mode. */
16404 load_idt(&real_mode_idt);
16405@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16406 __asm__ __volatile__ ("ljmp $0x0008,%0"
16407 :
16408 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16409+ do { } while (1);
16410 }
16411 #ifdef CONFIG_APM_MODULE
16412 EXPORT_SYMBOL(machine_real_restart);
16413@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_f
16414 {
16415 }
16416
16417-static void native_machine_emergency_restart(void)
16418+__noreturn static void native_machine_emergency_restart(void)
16419 {
16420 int i;
16421
16422@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
16423 #endif
16424 }
16425
16426-static void __machine_emergency_restart(int emergency)
16427+static __noreturn void __machine_emergency_restart(int emergency)
16428 {
16429 reboot_emergency = emergency;
16430 machine_ops.emergency_restart();
16431 }
16432
16433-static void native_machine_restart(char *__unused)
16434+static __noreturn void native_machine_restart(char *__unused)
16435 {
16436 printk("machine restart\n");
16437
16438@@ -674,7 +674,7 @@ static void native_machine_restart(char
16439 __machine_emergency_restart(0);
16440 }
16441
16442-static void native_machine_halt(void)
16443+static __noreturn void native_machine_halt(void)
16444 {
16445 /* stop other cpus and apics */
16446 machine_shutdown();
16447@@ -685,7 +685,7 @@ static void native_machine_halt(void)
16448 stop_this_cpu(NULL);
16449 }
16450
16451-static void native_machine_power_off(void)
16452+__noreturn static void native_machine_power_off(void)
16453 {
16454 if (pm_power_off) {
16455 if (!reboot_force)
16456@@ -694,6 +694,7 @@ static void native_machine_power_off(voi
16457 }
16458 /* a fallback in case there is no PM info available */
16459 tboot_shutdown(TB_SHUTDOWN_HALT);
16460+ do { } while (1);
16461 }
16462
16463 struct machine_ops machine_ops = {
16464diff -urNp linux-2.6.32.45/arch/x86/kernel/setup.c linux-2.6.32.45/arch/x86/kernel/setup.c
16465--- linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16466+++ linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16467@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16468
16469 if (!boot_params.hdr.root_flags)
16470 root_mountflags &= ~MS_RDONLY;
16471- init_mm.start_code = (unsigned long) _text;
16472- init_mm.end_code = (unsigned long) _etext;
16473+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16474+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16475 init_mm.end_data = (unsigned long) _edata;
16476 init_mm.brk = _brk_end;
16477
16478- code_resource.start = virt_to_phys(_text);
16479- code_resource.end = virt_to_phys(_etext)-1;
16480- data_resource.start = virt_to_phys(_etext);
16481+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16482+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16483+ data_resource.start = virt_to_phys(_sdata);
16484 data_resource.end = virt_to_phys(_edata)-1;
16485 bss_resource.start = virt_to_phys(&__bss_start);
16486 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16487diff -urNp linux-2.6.32.45/arch/x86/kernel/setup_percpu.c linux-2.6.32.45/arch/x86/kernel/setup_percpu.c
16488--- linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16489+++ linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16490@@ -25,19 +25,17 @@
16491 # define DBG(x...)
16492 #endif
16493
16494-DEFINE_PER_CPU(int, cpu_number);
16495+#ifdef CONFIG_SMP
16496+DEFINE_PER_CPU(unsigned int, cpu_number);
16497 EXPORT_PER_CPU_SYMBOL(cpu_number);
16498+#endif
16499
16500-#ifdef CONFIG_X86_64
16501 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16502-#else
16503-#define BOOT_PERCPU_OFFSET 0
16504-#endif
16505
16506 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16507 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16508
16509-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16510+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16511 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16512 };
16513 EXPORT_SYMBOL(__per_cpu_offset);
16514@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16515 {
16516 #ifdef CONFIG_X86_32
16517 struct desc_struct gdt;
16518+ unsigned long base = per_cpu_offset(cpu);
16519
16520- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16521- 0x2 | DESCTYPE_S, 0x8);
16522- gdt.s = 1;
16523+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16524+ 0x83 | DESCTYPE_S, 0xC);
16525 write_gdt_entry(get_cpu_gdt_table(cpu),
16526 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16527 #endif
16528@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16529 /* alrighty, percpu areas up and running */
16530 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16531 for_each_possible_cpu(cpu) {
16532+#ifdef CONFIG_CC_STACKPROTECTOR
16533+#ifdef CONFIG_X86_32
16534+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16535+#endif
16536+#endif
16537 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16538 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16539 per_cpu(cpu_number, cpu) = cpu;
16540@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16541 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16542 #endif
16543 #endif
16544+#ifdef CONFIG_CC_STACKPROTECTOR
16545+#ifdef CONFIG_X86_32
16546+ if (!cpu)
16547+ per_cpu(stack_canary.canary, cpu) = canary;
16548+#endif
16549+#endif
16550 /*
16551 * Up to this point, the boot CPU has been using .data.init
16552 * area. Reload any changed state for the boot CPU.
16553diff -urNp linux-2.6.32.45/arch/x86/kernel/signal.c linux-2.6.32.45/arch/x86/kernel/signal.c
16554--- linux-2.6.32.45/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16555+++ linux-2.6.32.45/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16556@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16557 * Align the stack pointer according to the i386 ABI,
16558 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16559 */
16560- sp = ((sp + 4) & -16ul) - 4;
16561+ sp = ((sp - 12) & -16ul) - 4;
16562 #else /* !CONFIG_X86_32 */
16563 sp = round_down(sp, 16) - 8;
16564 #endif
16565@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16566 * Return an always-bogus address instead so we will die with SIGSEGV.
16567 */
16568 if (onsigstack && !likely(on_sig_stack(sp)))
16569- return (void __user *)-1L;
16570+ return (__force void __user *)-1L;
16571
16572 /* save i387 state */
16573 if (used_math() && save_i387_xstate(*fpstate) < 0)
16574- return (void __user *)-1L;
16575+ return (__force void __user *)-1L;
16576
16577 return (void __user *)sp;
16578 }
16579@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16580 }
16581
16582 if (current->mm->context.vdso)
16583- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16584+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16585 else
16586- restorer = &frame->retcode;
16587+ restorer = (void __user *)&frame->retcode;
16588 if (ka->sa.sa_flags & SA_RESTORER)
16589 restorer = ka->sa.sa_restorer;
16590
16591@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16592 * reasons and because gdb uses it as a signature to notice
16593 * signal handler stack frames.
16594 */
16595- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16596+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16597
16598 if (err)
16599 return -EFAULT;
16600@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16601 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16602
16603 /* Set up to return from userspace. */
16604- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16605+ if (current->mm->context.vdso)
16606+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16607+ else
16608+ restorer = (void __user *)&frame->retcode;
16609 if (ka->sa.sa_flags & SA_RESTORER)
16610 restorer = ka->sa.sa_restorer;
16611 put_user_ex(restorer, &frame->pretcode);
16612@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16613 * reasons and because gdb uses it as a signature to notice
16614 * signal handler stack frames.
16615 */
16616- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16617+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16618 } put_user_catch(err);
16619
16620 if (err)
16621@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16622 int signr;
16623 sigset_t *oldset;
16624
16625+ pax_track_stack();
16626+
16627 /*
16628 * We want the common case to go fast, which is why we may in certain
16629 * cases get here from kernel mode. Just return without doing anything
16630@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16631 * X86_32: vm86 regs switched out by assembly code before reaching
16632 * here, so testing against kernel CS suffices.
16633 */
16634- if (!user_mode(regs))
16635+ if (!user_mode_novm(regs))
16636 return;
16637
16638 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16639diff -urNp linux-2.6.32.45/arch/x86/kernel/smpboot.c linux-2.6.32.45/arch/x86/kernel/smpboot.c
16640--- linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16641+++ linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16642@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16643 */
16644 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16645
16646-void cpu_hotplug_driver_lock()
16647+void cpu_hotplug_driver_lock(void)
16648 {
16649- mutex_lock(&x86_cpu_hotplug_driver_mutex);
16650+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
16651 }
16652
16653-void cpu_hotplug_driver_unlock()
16654+void cpu_hotplug_driver_unlock(void)
16655 {
16656- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16657+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16658 }
16659
16660 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16661@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16662 * target processor state.
16663 */
16664 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16665- (unsigned long)stack_start.sp);
16666+ stack_start);
16667
16668 /*
16669 * Run STARTUP IPI loop.
16670@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16671 set_idle_for_cpu(cpu, c_idle.idle);
16672 do_rest:
16673 per_cpu(current_task, cpu) = c_idle.idle;
16674+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16675 #ifdef CONFIG_X86_32
16676 /* Stack for startup_32 can be just as for start_secondary onwards */
16677 irq_ctx_init(cpu);
16678@@ -750,13 +751,15 @@ do_rest:
16679 #else
16680 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16681 initial_gs = per_cpu_offset(cpu);
16682- per_cpu(kernel_stack, cpu) =
16683- (unsigned long)task_stack_page(c_idle.idle) -
16684- KERNEL_STACK_OFFSET + THREAD_SIZE;
16685+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16686 #endif
16687+
16688+ pax_open_kernel();
16689 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16690+ pax_close_kernel();
16691+
16692 initial_code = (unsigned long)start_secondary;
16693- stack_start.sp = (void *) c_idle.idle->thread.sp;
16694+ stack_start = c_idle.idle->thread.sp;
16695
16696 /* start_ip had better be page-aligned! */
16697 start_ip = setup_trampoline();
16698@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16699
16700 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16701
16702+#ifdef CONFIG_PAX_PER_CPU_PGD
16703+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16704+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16705+ KERNEL_PGD_PTRS);
16706+#endif
16707+
16708 err = do_boot_cpu(apicid, cpu);
16709
16710 if (err) {
16711diff -urNp linux-2.6.32.45/arch/x86/kernel/step.c linux-2.6.32.45/arch/x86/kernel/step.c
16712--- linux-2.6.32.45/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16713+++ linux-2.6.32.45/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16714@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16715 struct desc_struct *desc;
16716 unsigned long base;
16717
16718- seg &= ~7UL;
16719+ seg >>= 3;
16720
16721 mutex_lock(&child->mm->context.lock);
16722- if (unlikely((seg >> 3) >= child->mm->context.size))
16723+ if (unlikely(seg >= child->mm->context.size))
16724 addr = -1L; /* bogus selector, access would fault */
16725 else {
16726 desc = child->mm->context.ldt + seg;
16727@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16728 addr += base;
16729 }
16730 mutex_unlock(&child->mm->context.lock);
16731- }
16732+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16733+ addr = ktla_ktva(addr);
16734
16735 return addr;
16736 }
16737@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16738 unsigned char opcode[15];
16739 unsigned long addr = convert_ip_to_linear(child, regs);
16740
16741+ if (addr == -EINVAL)
16742+ return 0;
16743+
16744 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16745 for (i = 0; i < copied; i++) {
16746 switch (opcode[i]) {
16747@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16748
16749 #ifdef CONFIG_X86_64
16750 case 0x40 ... 0x4f:
16751- if (regs->cs != __USER_CS)
16752+ if ((regs->cs & 0xffff) != __USER_CS)
16753 /* 32-bit mode: register increment */
16754 return 0;
16755 /* 64-bit mode: REX prefix */
16756diff -urNp linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S
16757--- linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16758+++ linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16759@@ -1,3 +1,4 @@
16760+.section .rodata,"a",@progbits
16761 ENTRY(sys_call_table)
16762 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16763 .long sys_exit
16764diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c
16765--- linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16766+++ linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16767@@ -24,6 +24,21 @@
16768
16769 #include <asm/syscalls.h>
16770
16771+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16772+{
16773+ unsigned long pax_task_size = TASK_SIZE;
16774+
16775+#ifdef CONFIG_PAX_SEGMEXEC
16776+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16777+ pax_task_size = SEGMEXEC_TASK_SIZE;
16778+#endif
16779+
16780+ if (len > pax_task_size || addr > pax_task_size - len)
16781+ return -EINVAL;
16782+
16783+ return 0;
16784+}
16785+
16786 /*
16787 * Perform the select(nd, in, out, ex, tv) and mmap() system
16788 * calls. Linux/i386 didn't use to be able to handle more than
16789@@ -58,6 +73,212 @@ out:
16790 return err;
16791 }
16792
16793+unsigned long
16794+arch_get_unmapped_area(struct file *filp, unsigned long addr,
16795+ unsigned long len, unsigned long pgoff, unsigned long flags)
16796+{
16797+ struct mm_struct *mm = current->mm;
16798+ struct vm_area_struct *vma;
16799+ unsigned long start_addr, pax_task_size = TASK_SIZE;
16800+
16801+#ifdef CONFIG_PAX_SEGMEXEC
16802+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16803+ pax_task_size = SEGMEXEC_TASK_SIZE;
16804+#endif
16805+
16806+ pax_task_size -= PAGE_SIZE;
16807+
16808+ if (len > pax_task_size)
16809+ return -ENOMEM;
16810+
16811+ if (flags & MAP_FIXED)
16812+ return addr;
16813+
16814+#ifdef CONFIG_PAX_RANDMMAP
16815+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16816+#endif
16817+
16818+ if (addr) {
16819+ addr = PAGE_ALIGN(addr);
16820+ if (pax_task_size - len >= addr) {
16821+ vma = find_vma(mm, addr);
16822+ if (check_heap_stack_gap(vma, addr, len))
16823+ return addr;
16824+ }
16825+ }
16826+ if (len > mm->cached_hole_size) {
16827+ start_addr = addr = mm->free_area_cache;
16828+ } else {
16829+ start_addr = addr = mm->mmap_base;
16830+ mm->cached_hole_size = 0;
16831+ }
16832+
16833+#ifdef CONFIG_PAX_PAGEEXEC
16834+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16835+ start_addr = 0x00110000UL;
16836+
16837+#ifdef CONFIG_PAX_RANDMMAP
16838+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16839+ start_addr += mm->delta_mmap & 0x03FFF000UL;
16840+#endif
16841+
16842+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16843+ start_addr = addr = mm->mmap_base;
16844+ else
16845+ addr = start_addr;
16846+ }
16847+#endif
16848+
16849+full_search:
16850+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16851+ /* At this point: (!vma || addr < vma->vm_end). */
16852+ if (pax_task_size - len < addr) {
16853+ /*
16854+ * Start a new search - just in case we missed
16855+ * some holes.
16856+ */
16857+ if (start_addr != mm->mmap_base) {
16858+ start_addr = addr = mm->mmap_base;
16859+ mm->cached_hole_size = 0;
16860+ goto full_search;
16861+ }
16862+ return -ENOMEM;
16863+ }
16864+ if (check_heap_stack_gap(vma, addr, len))
16865+ break;
16866+ if (addr + mm->cached_hole_size < vma->vm_start)
16867+ mm->cached_hole_size = vma->vm_start - addr;
16868+ addr = vma->vm_end;
16869+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
16870+ start_addr = addr = mm->mmap_base;
16871+ mm->cached_hole_size = 0;
16872+ goto full_search;
16873+ }
16874+ }
16875+
16876+ /*
16877+ * Remember the place where we stopped the search:
16878+ */
16879+ mm->free_area_cache = addr + len;
16880+ return addr;
16881+}
16882+
16883+unsigned long
16884+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16885+ const unsigned long len, const unsigned long pgoff,
16886+ const unsigned long flags)
16887+{
16888+ struct vm_area_struct *vma;
16889+ struct mm_struct *mm = current->mm;
16890+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16891+
16892+#ifdef CONFIG_PAX_SEGMEXEC
16893+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16894+ pax_task_size = SEGMEXEC_TASK_SIZE;
16895+#endif
16896+
16897+ pax_task_size -= PAGE_SIZE;
16898+
16899+ /* requested length too big for entire address space */
16900+ if (len > pax_task_size)
16901+ return -ENOMEM;
16902+
16903+ if (flags & MAP_FIXED)
16904+ return addr;
16905+
16906+#ifdef CONFIG_PAX_PAGEEXEC
16907+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16908+ goto bottomup;
16909+#endif
16910+
16911+#ifdef CONFIG_PAX_RANDMMAP
16912+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16913+#endif
16914+
16915+ /* requesting a specific address */
16916+ if (addr) {
16917+ addr = PAGE_ALIGN(addr);
16918+ if (pax_task_size - len >= addr) {
16919+ vma = find_vma(mm, addr);
16920+ if (check_heap_stack_gap(vma, addr, len))
16921+ return addr;
16922+ }
16923+ }
16924+
16925+ /* check if free_area_cache is useful for us */
16926+ if (len <= mm->cached_hole_size) {
16927+ mm->cached_hole_size = 0;
16928+ mm->free_area_cache = mm->mmap_base;
16929+ }
16930+
16931+ /* either no address requested or can't fit in requested address hole */
16932+ addr = mm->free_area_cache;
16933+
16934+ /* make sure it can fit in the remaining address space */
16935+ if (addr > len) {
16936+ vma = find_vma(mm, addr-len);
16937+ if (check_heap_stack_gap(vma, addr - len, len))
16938+ /* remember the address as a hint for next time */
16939+ return (mm->free_area_cache = addr-len);
16940+ }
16941+
16942+ if (mm->mmap_base < len)
16943+ goto bottomup;
16944+
16945+ addr = mm->mmap_base-len;
16946+
16947+ do {
16948+ /*
16949+ * Lookup failure means no vma is above this address,
16950+ * else if new region fits below vma->vm_start,
16951+ * return with success:
16952+ */
16953+ vma = find_vma(mm, addr);
16954+ if (check_heap_stack_gap(vma, addr, len))
16955+ /* remember the address as a hint for next time */
16956+ return (mm->free_area_cache = addr);
16957+
16958+ /* remember the largest hole we saw so far */
16959+ if (addr + mm->cached_hole_size < vma->vm_start)
16960+ mm->cached_hole_size = vma->vm_start - addr;
16961+
16962+ /* try just below the current vma->vm_start */
16963+ addr = skip_heap_stack_gap(vma, len);
16964+ } while (!IS_ERR_VALUE(addr));
16965+
16966+bottomup:
16967+ /*
16968+ * A failed mmap() very likely causes application failure,
16969+ * so fall back to the bottom-up function here. This scenario
16970+ * can happen with large stack limits and large mmap()
16971+ * allocations.
16972+ */
16973+
16974+#ifdef CONFIG_PAX_SEGMEXEC
16975+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16976+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16977+ else
16978+#endif
16979+
16980+ mm->mmap_base = TASK_UNMAPPED_BASE;
16981+
16982+#ifdef CONFIG_PAX_RANDMMAP
16983+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16984+ mm->mmap_base += mm->delta_mmap;
16985+#endif
16986+
16987+ mm->free_area_cache = mm->mmap_base;
16988+ mm->cached_hole_size = ~0UL;
16989+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16990+ /*
16991+ * Restore the topdown base:
16992+ */
16993+ mm->mmap_base = base;
16994+ mm->free_area_cache = base;
16995+ mm->cached_hole_size = ~0UL;
16996+
16997+ return addr;
16998+}
16999
17000 struct sel_arg_struct {
17001 unsigned long n;
17002@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
17003 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
17004 case SEMTIMEDOP:
17005 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
17006- (const struct timespec __user *)fifth);
17007+ (__force const struct timespec __user *)fifth);
17008
17009 case SEMGET:
17010 return sys_semget(first, second, third);
17011@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
17012 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
17013 if (ret)
17014 return ret;
17015- return put_user(raddr, (ulong __user *) third);
17016+ return put_user(raddr, (__force ulong __user *) third);
17017 }
17018 case 1: /* iBCS2 emulator entry point */
17019 if (!segment_eq(get_fs(), get_ds()))
17020@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
17021
17022 return error;
17023 }
17024-
17025-
17026-/*
17027- * Do a system call from kernel instead of calling sys_execve so we
17028- * end up with proper pt_regs.
17029- */
17030-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
17031-{
17032- long __res;
17033- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
17034- : "=a" (__res)
17035- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
17036- return __res;
17037-}
17038diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c
17039--- linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
17040+++ linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
17041@@ -32,8 +32,8 @@ out:
17042 return error;
17043 }
17044
17045-static void find_start_end(unsigned long flags, unsigned long *begin,
17046- unsigned long *end)
17047+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17048+ unsigned long *begin, unsigned long *end)
17049 {
17050 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17051 unsigned long new_begin;
17052@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
17053 *begin = new_begin;
17054 }
17055 } else {
17056- *begin = TASK_UNMAPPED_BASE;
17057+ *begin = mm->mmap_base;
17058 *end = TASK_SIZE;
17059 }
17060 }
17061@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
17062 if (flags & MAP_FIXED)
17063 return addr;
17064
17065- find_start_end(flags, &begin, &end);
17066+ find_start_end(mm, flags, &begin, &end);
17067
17068 if (len > end)
17069 return -ENOMEM;
17070
17071+#ifdef CONFIG_PAX_RANDMMAP
17072+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17073+#endif
17074+
17075 if (addr) {
17076 addr = PAGE_ALIGN(addr);
17077 vma = find_vma(mm, addr);
17078- if (end - len >= addr &&
17079- (!vma || addr + len <= vma->vm_start))
17080+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17081 return addr;
17082 }
17083 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17084@@ -106,7 +109,7 @@ full_search:
17085 }
17086 return -ENOMEM;
17087 }
17088- if (!vma || addr + len <= vma->vm_start) {
17089+ if (check_heap_stack_gap(vma, addr, len)) {
17090 /*
17091 * Remember the place where we stopped the search:
17092 */
17093@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
17094 {
17095 struct vm_area_struct *vma;
17096 struct mm_struct *mm = current->mm;
17097- unsigned long addr = addr0;
17098+ unsigned long base = mm->mmap_base, addr = addr0;
17099
17100 /* requested length too big for entire address space */
17101 if (len > TASK_SIZE)
17102@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
17103 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17104 goto bottomup;
17105
17106+#ifdef CONFIG_PAX_RANDMMAP
17107+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17108+#endif
17109+
17110 /* requesting a specific address */
17111 if (addr) {
17112 addr = PAGE_ALIGN(addr);
17113- vma = find_vma(mm, addr);
17114- if (TASK_SIZE - len >= addr &&
17115- (!vma || addr + len <= vma->vm_start))
17116- return addr;
17117+ if (TASK_SIZE - len >= addr) {
17118+ vma = find_vma(mm, addr);
17119+ if (check_heap_stack_gap(vma, addr, len))
17120+ return addr;
17121+ }
17122 }
17123
17124 /* check if free_area_cache is useful for us */
17125@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
17126 /* make sure it can fit in the remaining address space */
17127 if (addr > len) {
17128 vma = find_vma(mm, addr-len);
17129- if (!vma || addr <= vma->vm_start)
17130+ if (check_heap_stack_gap(vma, addr - len, len))
17131 /* remember the address as a hint for next time */
17132 return mm->free_area_cache = addr-len;
17133 }
17134@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
17135 * return with success:
17136 */
17137 vma = find_vma(mm, addr);
17138- if (!vma || addr+len <= vma->vm_start)
17139+ if (check_heap_stack_gap(vma, addr, len))
17140 /* remember the address as a hint for next time */
17141 return mm->free_area_cache = addr;
17142
17143@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
17144 mm->cached_hole_size = vma->vm_start - addr;
17145
17146 /* try just below the current vma->vm_start */
17147- addr = vma->vm_start-len;
17148- } while (len < vma->vm_start);
17149+ addr = skip_heap_stack_gap(vma, len);
17150+ } while (!IS_ERR_VALUE(addr));
17151
17152 bottomup:
17153 /*
17154@@ -198,13 +206,21 @@ bottomup:
17155 * can happen with large stack limits and large mmap()
17156 * allocations.
17157 */
17158+ mm->mmap_base = TASK_UNMAPPED_BASE;
17159+
17160+#ifdef CONFIG_PAX_RANDMMAP
17161+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17162+ mm->mmap_base += mm->delta_mmap;
17163+#endif
17164+
17165+ mm->free_area_cache = mm->mmap_base;
17166 mm->cached_hole_size = ~0UL;
17167- mm->free_area_cache = TASK_UNMAPPED_BASE;
17168 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17169 /*
17170 * Restore the topdown base:
17171 */
17172- mm->free_area_cache = mm->mmap_base;
17173+ mm->mmap_base = base;
17174+ mm->free_area_cache = base;
17175 mm->cached_hole_size = ~0UL;
17176
17177 return addr;
17178diff -urNp linux-2.6.32.45/arch/x86/kernel/tboot.c linux-2.6.32.45/arch/x86/kernel/tboot.c
17179--- linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
17180+++ linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
17181@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17182
17183 void tboot_shutdown(u32 shutdown_type)
17184 {
17185- void (*shutdown)(void);
17186+ void (* __noreturn shutdown)(void);
17187
17188 if (!tboot_enabled())
17189 return;
17190@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17191
17192 switch_to_tboot_pt();
17193
17194- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17195+ shutdown = (void *)tboot->shutdown_entry;
17196 shutdown();
17197
17198 /* should not reach here */
17199@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17200 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17201 }
17202
17203-static atomic_t ap_wfs_count;
17204+static atomic_unchecked_t ap_wfs_count;
17205
17206 static int tboot_wait_for_aps(int num_aps)
17207 {
17208@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17209 {
17210 switch (action) {
17211 case CPU_DYING:
17212- atomic_inc(&ap_wfs_count);
17213+ atomic_inc_unchecked(&ap_wfs_count);
17214 if (num_online_cpus() == 1)
17215- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17216+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17217 return NOTIFY_BAD;
17218 break;
17219 }
17220@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17221
17222 tboot_create_trampoline();
17223
17224- atomic_set(&ap_wfs_count, 0);
17225+ atomic_set_unchecked(&ap_wfs_count, 0);
17226 register_hotcpu_notifier(&tboot_cpu_notifier);
17227 return 0;
17228 }
17229diff -urNp linux-2.6.32.45/arch/x86/kernel/time.c linux-2.6.32.45/arch/x86/kernel/time.c
17230--- linux-2.6.32.45/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17231+++ linux-2.6.32.45/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17232@@ -26,17 +26,13 @@
17233 int timer_ack;
17234 #endif
17235
17236-#ifdef CONFIG_X86_64
17237-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17238-#endif
17239-
17240 unsigned long profile_pc(struct pt_regs *regs)
17241 {
17242 unsigned long pc = instruction_pointer(regs);
17243
17244- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17245+ if (!user_mode(regs) && in_lock_functions(pc)) {
17246 #ifdef CONFIG_FRAME_POINTER
17247- return *(unsigned long *)(regs->bp + sizeof(long));
17248+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17249 #else
17250 unsigned long *sp =
17251 (unsigned long *)kernel_stack_pointer(regs);
17252@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17253 * or above a saved flags. Eflags has bits 22-31 zero,
17254 * kernel addresses don't.
17255 */
17256+
17257+#ifdef CONFIG_PAX_KERNEXEC
17258+ return ktla_ktva(sp[0]);
17259+#else
17260 if (sp[0] >> 22)
17261 return sp[0];
17262 if (sp[1] >> 22)
17263 return sp[1];
17264 #endif
17265+
17266+#endif
17267 }
17268 return pc;
17269 }
17270diff -urNp linux-2.6.32.45/arch/x86/kernel/tls.c linux-2.6.32.45/arch/x86/kernel/tls.c
17271--- linux-2.6.32.45/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17272+++ linux-2.6.32.45/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17273@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17274 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17275 return -EINVAL;
17276
17277+#ifdef CONFIG_PAX_SEGMEXEC
17278+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17279+ return -EINVAL;
17280+#endif
17281+
17282 set_tls_desc(p, idx, &info, 1);
17283
17284 return 0;
17285diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_32.S linux-2.6.32.45/arch/x86/kernel/trampoline_32.S
17286--- linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17287+++ linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17288@@ -32,6 +32,12 @@
17289 #include <asm/segment.h>
17290 #include <asm/page_types.h>
17291
17292+#ifdef CONFIG_PAX_KERNEXEC
17293+#define ta(X) (X)
17294+#else
17295+#define ta(X) ((X) - __PAGE_OFFSET)
17296+#endif
17297+
17298 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17299 __CPUINITRODATA
17300 .code16
17301@@ -60,7 +66,7 @@ r_base = .
17302 inc %ax # protected mode (PE) bit
17303 lmsw %ax # into protected mode
17304 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17305- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17306+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17307
17308 # These need to be in the same 64K segment as the above;
17309 # hence we don't use the boot_gdt_descr defined in head.S
17310diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_64.S linux-2.6.32.45/arch/x86/kernel/trampoline_64.S
17311--- linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17312+++ linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17313@@ -91,7 +91,7 @@ startup_32:
17314 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17315 movl %eax, %ds
17316
17317- movl $X86_CR4_PAE, %eax
17318+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17319 movl %eax, %cr4 # Enable PAE mode
17320
17321 # Setup trampoline 4 level pagetables
17322@@ -127,7 +127,7 @@ startup_64:
17323 no_longmode:
17324 hlt
17325 jmp no_longmode
17326-#include "verify_cpu_64.S"
17327+#include "verify_cpu.S"
17328
17329 # Careful these need to be in the same 64K segment as the above;
17330 tidt:
17331@@ -138,7 +138,7 @@ tidt:
17332 # so the kernel can live anywhere
17333 .balign 4
17334 tgdt:
17335- .short tgdt_end - tgdt # gdt limit
17336+ .short tgdt_end - tgdt - 1 # gdt limit
17337 .long tgdt - r_base
17338 .short 0
17339 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17340diff -urNp linux-2.6.32.45/arch/x86/kernel/traps.c linux-2.6.32.45/arch/x86/kernel/traps.c
17341--- linux-2.6.32.45/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17342+++ linux-2.6.32.45/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17343@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17344
17345 /* Do we ignore FPU interrupts ? */
17346 char ignore_fpu_irq;
17347-
17348-/*
17349- * The IDT has to be page-aligned to simplify the Pentium
17350- * F0 0F bug workaround.
17351- */
17352-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17353 #endif
17354
17355 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17356@@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17357 static inline void
17358 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17359 {
17360- if (!user_mode_vm(regs))
17361+ if (!user_mode(regs))
17362 die(str, regs, err);
17363 }
17364 #endif
17365
17366 static void __kprobes
17367-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17368+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17369 long error_code, siginfo_t *info)
17370 {
17371 struct task_struct *tsk = current;
17372
17373 #ifdef CONFIG_X86_32
17374- if (regs->flags & X86_VM_MASK) {
17375+ if (v8086_mode(regs)) {
17376 /*
17377 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17378 * On nmi (interrupt 2), do_trap should not be called.
17379@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17380 }
17381 #endif
17382
17383- if (!user_mode(regs))
17384+ if (!user_mode_novm(regs))
17385 goto kernel_trap;
17386
17387 #ifdef CONFIG_X86_32
17388@@ -158,7 +152,7 @@ trap_signal:
17389 printk_ratelimit()) {
17390 printk(KERN_INFO
17391 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17392- tsk->comm, tsk->pid, str,
17393+ tsk->comm, task_pid_nr(tsk), str,
17394 regs->ip, regs->sp, error_code);
17395 print_vma_addr(" in ", regs->ip);
17396 printk("\n");
17397@@ -175,8 +169,20 @@ kernel_trap:
17398 if (!fixup_exception(regs)) {
17399 tsk->thread.error_code = error_code;
17400 tsk->thread.trap_no = trapnr;
17401+
17402+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17403+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17404+ str = "PAX: suspicious stack segment fault";
17405+#endif
17406+
17407 die(str, regs, error_code);
17408 }
17409+
17410+#ifdef CONFIG_PAX_REFCOUNT
17411+ if (trapnr == 4)
17412+ pax_report_refcount_overflow(regs);
17413+#endif
17414+
17415 return;
17416
17417 #ifdef CONFIG_X86_32
17418@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17419 conditional_sti(regs);
17420
17421 #ifdef CONFIG_X86_32
17422- if (regs->flags & X86_VM_MASK)
17423+ if (v8086_mode(regs))
17424 goto gp_in_vm86;
17425 #endif
17426
17427 tsk = current;
17428- if (!user_mode(regs))
17429+ if (!user_mode_novm(regs))
17430 goto gp_in_kernel;
17431
17432+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17433+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17434+ struct mm_struct *mm = tsk->mm;
17435+ unsigned long limit;
17436+
17437+ down_write(&mm->mmap_sem);
17438+ limit = mm->context.user_cs_limit;
17439+ if (limit < TASK_SIZE) {
17440+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17441+ up_write(&mm->mmap_sem);
17442+ return;
17443+ }
17444+ up_write(&mm->mmap_sem);
17445+ }
17446+#endif
17447+
17448 tsk->thread.error_code = error_code;
17449 tsk->thread.trap_no = 13;
17450
17451@@ -305,6 +327,13 @@ gp_in_kernel:
17452 if (notify_die(DIE_GPF, "general protection fault", regs,
17453 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17454 return;
17455+
17456+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17457+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17458+ die("PAX: suspicious general protection fault", regs, error_code);
17459+ else
17460+#endif
17461+
17462 die("general protection fault", regs, error_code);
17463 }
17464
17465@@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17466 dotraplinkage notrace __kprobes void
17467 do_nmi(struct pt_regs *regs, long error_code)
17468 {
17469+
17470+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17471+ if (!user_mode(regs)) {
17472+ unsigned long cs = regs->cs & 0xFFFF;
17473+ unsigned long ip = ktva_ktla(regs->ip);
17474+
17475+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17476+ regs->ip = ip;
17477+ }
17478+#endif
17479+
17480 nmi_enter();
17481
17482 inc_irq_stat(__nmi_count);
17483@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17484 }
17485
17486 #ifdef CONFIG_X86_32
17487- if (regs->flags & X86_VM_MASK)
17488+ if (v8086_mode(regs))
17489 goto debug_vm86;
17490 #endif
17491
17492@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17493 * kernel space (but re-enable TF when returning to user mode).
17494 */
17495 if (condition & DR_STEP) {
17496- if (!user_mode(regs))
17497+ if (!user_mode_novm(regs))
17498 goto clear_TF_reenable;
17499 }
17500
17501@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17502 * Handle strange cache flush from user space exception
17503 * in all other cases. This is undocumented behaviour.
17504 */
17505- if (regs->flags & X86_VM_MASK) {
17506+ if (v8086_mode(regs)) {
17507 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17508 return;
17509 }
17510@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17511 void __math_state_restore(void)
17512 {
17513 struct thread_info *thread = current_thread_info();
17514- struct task_struct *tsk = thread->task;
17515+ struct task_struct *tsk = current;
17516
17517 /*
17518 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17519@@ -825,8 +865,7 @@ void __math_state_restore(void)
17520 */
17521 asmlinkage void math_state_restore(void)
17522 {
17523- struct thread_info *thread = current_thread_info();
17524- struct task_struct *tsk = thread->task;
17525+ struct task_struct *tsk = current;
17526
17527 if (!tsk_used_math(tsk)) {
17528 local_irq_enable();
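
Annotation: the traps.c additions above all hinge on the same test — mask the saved CS to its low 16 bits and compare it against the two kernel code selectors, so a fault taken while running kernel text can be reported as "PAX: suspicious". A standalone sketch of that check, with made-up selector values standing in for the real __KERNEL_CS / __KERNEXEC_KERNEL_CS from <asm/segment.h>:

#include <stdio.h>

#define SAMPLE_KERNEL_CS          0x60   /* assumption: illustrative value only */
#define SAMPLE_KERNEXEC_KERNEL_CS 0xd0   /* assumption: illustrative value only */

static int is_kernel_text_selector(unsigned long cs)
{
	cs &= 0xFFFF;	/* keep only the selector bits, as the patch does */
	return cs == SAMPLE_KERNEL_CS || cs == SAMPLE_KERNEXEC_KERNEL_CS;
}

int main(void)
{
	printf("%d\n", is_kernel_text_selector(0x10060)); /* high bits ignored -> 1 */
	printf("%d\n", is_kernel_text_selector(0x73));    /* user-style selector -> 0 */
	return 0;
}
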
17529diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S
17530--- linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17531+++ linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17532@@ -1,105 +0,0 @@
17533-/*
17534- *
17535- * verify_cpu.S - Code for cpu long mode and SSE verification. This
17536- * code has been borrowed from boot/setup.S and was introduced by
17537- * Andi Kleen.
17538- *
17539- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17540- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17541- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17542- *
17543- * This source code is licensed under the GNU General Public License,
17544- * Version 2. See the file COPYING for more details.
17545- *
17546- * This is a common code for verification whether CPU supports
17547- * long mode and SSE or not. It is not called directly instead this
17548- * file is included at various places and compiled in that context.
17549- * Following are the current usage.
17550- *
17551- * This file is included by both 16bit and 32bit code.
17552- *
17553- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17554- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17555- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17556- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17557- *
17558- * verify_cpu, returns the status of cpu check in register %eax.
17559- * 0: Success 1: Failure
17560- *
17561- * The caller needs to check for the error code and take the action
17562- * appropriately. Either display a message or halt.
17563- */
17564-
17565-#include <asm/cpufeature.h>
17566-
17567-verify_cpu:
17568- pushfl # Save caller passed flags
17569- pushl $0 # Kill any dangerous flags
17570- popfl
17571-
17572- pushfl # standard way to check for cpuid
17573- popl %eax
17574- movl %eax,%ebx
17575- xorl $0x200000,%eax
17576- pushl %eax
17577- popfl
17578- pushfl
17579- popl %eax
17580- cmpl %eax,%ebx
17581- jz verify_cpu_no_longmode # cpu has no cpuid
17582-
17583- movl $0x0,%eax # See if cpuid 1 is implemented
17584- cpuid
17585- cmpl $0x1,%eax
17586- jb verify_cpu_no_longmode # no cpuid 1
17587-
17588- xor %di,%di
17589- cmpl $0x68747541,%ebx # AuthenticAMD
17590- jnz verify_cpu_noamd
17591- cmpl $0x69746e65,%edx
17592- jnz verify_cpu_noamd
17593- cmpl $0x444d4163,%ecx
17594- jnz verify_cpu_noamd
17595- mov $1,%di # cpu is from AMD
17596-
17597-verify_cpu_noamd:
17598- movl $0x1,%eax # Does the cpu have what it takes
17599- cpuid
17600- andl $REQUIRED_MASK0,%edx
17601- xorl $REQUIRED_MASK0,%edx
17602- jnz verify_cpu_no_longmode
17603-
17604- movl $0x80000000,%eax # See if extended cpuid is implemented
17605- cpuid
17606- cmpl $0x80000001,%eax
17607- jb verify_cpu_no_longmode # no extended cpuid
17608-
17609- movl $0x80000001,%eax # Does the cpu have what it takes
17610- cpuid
17611- andl $REQUIRED_MASK1,%edx
17612- xorl $REQUIRED_MASK1,%edx
17613- jnz verify_cpu_no_longmode
17614-
17615-verify_cpu_sse_test:
17616- movl $1,%eax
17617- cpuid
17618- andl $SSE_MASK,%edx
17619- cmpl $SSE_MASK,%edx
17620- je verify_cpu_sse_ok
17621- test %di,%di
17622- jz verify_cpu_no_longmode # only try to force SSE on AMD
17623- movl $0xc0010015,%ecx # HWCR
17624- rdmsr
17625- btr $15,%eax # enable SSE
17626- wrmsr
17627- xor %di,%di # don't loop
17628- jmp verify_cpu_sse_test # try again
17629-
17630-verify_cpu_no_longmode:
17631- popfl # Restore caller passed flags
17632- movl $1,%eax
17633- ret
17634-verify_cpu_sse_ok:
17635- popfl # Restore caller passed flags
17636- xorl %eax, %eax
17637- ret
17638diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu.S linux-2.6.32.45/arch/x86/kernel/verify_cpu.S
17639--- linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17640+++ linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17641@@ -0,0 +1,140 @@
17642+/*
17643+ *
17644+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
17645+ * code has been borrowed from boot/setup.S and was introduced by
17646+ * Andi Kleen.
17647+ *
17648+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17649+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17650+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17651+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17652+ *
17653+ * This source code is licensed under the GNU General Public License,
17654+ * Version 2. See the file COPYING for more details.
17655+ *
17656+ * This is a common code for verification whether CPU supports
17657+ * long mode and SSE or not. It is not called directly instead this
17658+ * file is included at various places and compiled in that context.
17659+ * This file is expected to run in 32bit code. Currently:
17660+ *
17661+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17662+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
17663+ * arch/x86/kernel/head_32.S: processor startup
17664+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17665+ *
17666+ * verify_cpu, returns the status of longmode and SSE in register %eax.
17667+ * 0: Success 1: Failure
17668+ *
17669+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17670+ *
17671+ * The caller needs to check for the error code and take the action
17672+ * appropriately. Either display a message or halt.
17673+ */
17674+
17675+#include <asm/cpufeature.h>
17676+#include <asm/msr-index.h>
17677+
17678+verify_cpu:
17679+ pushfl # Save caller passed flags
17680+ pushl $0 # Kill any dangerous flags
17681+ popfl
17682+
17683+ pushfl # standard way to check for cpuid
17684+ popl %eax
17685+ movl %eax,%ebx
17686+ xorl $0x200000,%eax
17687+ pushl %eax
17688+ popfl
17689+ pushfl
17690+ popl %eax
17691+ cmpl %eax,%ebx
17692+ jz verify_cpu_no_longmode # cpu has no cpuid
17693+
17694+ movl $0x0,%eax # See if cpuid 1 is implemented
17695+ cpuid
17696+ cmpl $0x1,%eax
17697+ jb verify_cpu_no_longmode # no cpuid 1
17698+
17699+ xor %di,%di
17700+ cmpl $0x68747541,%ebx # AuthenticAMD
17701+ jnz verify_cpu_noamd
17702+ cmpl $0x69746e65,%edx
17703+ jnz verify_cpu_noamd
17704+ cmpl $0x444d4163,%ecx
17705+ jnz verify_cpu_noamd
17706+ mov $1,%di # cpu is from AMD
17707+ jmp verify_cpu_check
17708+
17709+verify_cpu_noamd:
17710+ cmpl $0x756e6547,%ebx # GenuineIntel?
17711+ jnz verify_cpu_check
17712+ cmpl $0x49656e69,%edx
17713+ jnz verify_cpu_check
17714+ cmpl $0x6c65746e,%ecx
17715+ jnz verify_cpu_check
17716+
17717+ # only call IA32_MISC_ENABLE when:
17718+ # family > 6 || (family == 6 && model >= 0xd)
17719+ movl $0x1, %eax # check CPU family and model
17720+ cpuid
17721+ movl %eax, %ecx
17722+
17723+ andl $0x0ff00f00, %eax # mask family and extended family
17724+ shrl $8, %eax
17725+ cmpl $6, %eax
17726+ ja verify_cpu_clear_xd # family > 6, ok
17727+ jb verify_cpu_check # family < 6, skip
17728+
17729+ andl $0x000f00f0, %ecx # mask model and extended model
17730+ shrl $4, %ecx
17731+ cmpl $0xd, %ecx
17732+ jb verify_cpu_check # family == 6, model < 0xd, skip
17733+
17734+verify_cpu_clear_xd:
17735+ movl $MSR_IA32_MISC_ENABLE, %ecx
17736+ rdmsr
17737+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17738+ jnc verify_cpu_check # only write MSR if bit was changed
17739+ wrmsr
17740+
17741+verify_cpu_check:
17742+ movl $0x1,%eax # Does the cpu have what it takes
17743+ cpuid
17744+ andl $REQUIRED_MASK0,%edx
17745+ xorl $REQUIRED_MASK0,%edx
17746+ jnz verify_cpu_no_longmode
17747+
17748+ movl $0x80000000,%eax # See if extended cpuid is implemented
17749+ cpuid
17750+ cmpl $0x80000001,%eax
17751+ jb verify_cpu_no_longmode # no extended cpuid
17752+
17753+ movl $0x80000001,%eax # Does the cpu have what it takes
17754+ cpuid
17755+ andl $REQUIRED_MASK1,%edx
17756+ xorl $REQUIRED_MASK1,%edx
17757+ jnz verify_cpu_no_longmode
17758+
17759+verify_cpu_sse_test:
17760+ movl $1,%eax
17761+ cpuid
17762+ andl $SSE_MASK,%edx
17763+ cmpl $SSE_MASK,%edx
17764+ je verify_cpu_sse_ok
17765+ test %di,%di
17766+ jz verify_cpu_no_longmode # only try to force SSE on AMD
17767+ movl $MSR_K7_HWCR,%ecx
17768+ rdmsr
17769+ btr $15,%eax # enable SSE
17770+ wrmsr
17771+ xor %di,%di # don't loop
17772+ jmp verify_cpu_sse_test # try again
17773+
17774+verify_cpu_no_longmode:
17775+ popfl # Restore caller passed flags
17776+ movl $1,%eax
17777+ ret
17778+verify_cpu_sse_ok:
17779+ popfl # Restore caller passed flags
17780+ xorl %eax, %eax
17781+ ret
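
Annotation: the new verify_cpu.S only touches IA32_MISC_ENABLE's XD_DISABLE bit on Intel parts where family > 6, or family == 6 and model >= 0xd, using a simplified mask-and-shift of CPUID leaf 1 EAX. The same gate rendered in C — a sketch of the assembly's check, not the full SDM family/model decoding rules:

#include <stdio.h>

static int should_clear_xd_disable(unsigned int cpuid_1_eax)
{
	unsigned int family = (cpuid_1_eax & 0x0ff00f00) >> 8;  /* family + extended family */
	unsigned int model  = (cpuid_1_eax & 0x000f00f0) >> 4;  /* model + extended model */

	if (family > 6)
		return 1;
	if (family < 6)
		return 0;
	return model >= 0xd;
}

int main(void)
{
	printf("%d\n", should_clear_xd_disable(0x000106e5)); /* family 6, extended model -> 1 */
	printf("%d\n", should_clear_xd_disable(0x00000695)); /* family 6, model 9 -> 0 */
	return 0;
}
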
17782diff -urNp linux-2.6.32.45/arch/x86/kernel/vm86_32.c linux-2.6.32.45/arch/x86/kernel/vm86_32.c
17783--- linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17784+++ linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17785@@ -41,6 +41,7 @@
17786 #include <linux/ptrace.h>
17787 #include <linux/audit.h>
17788 #include <linux/stddef.h>
17789+#include <linux/grsecurity.h>
17790
17791 #include <asm/uaccess.h>
17792 #include <asm/io.h>
17793@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17794 do_exit(SIGSEGV);
17795 }
17796
17797- tss = &per_cpu(init_tss, get_cpu());
17798+ tss = init_tss + get_cpu();
17799 current->thread.sp0 = current->thread.saved_sp0;
17800 current->thread.sysenter_cs = __KERNEL_CS;
17801 load_sp0(tss, &current->thread);
17802@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17803 struct task_struct *tsk;
17804 int tmp, ret = -EPERM;
17805
17806+#ifdef CONFIG_GRKERNSEC_VM86
17807+ if (!capable(CAP_SYS_RAWIO)) {
17808+ gr_handle_vm86();
17809+ goto out;
17810+ }
17811+#endif
17812+
17813 tsk = current;
17814 if (tsk->thread.saved_sp0)
17815 goto out;
17816@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17817 int tmp, ret;
17818 struct vm86plus_struct __user *v86;
17819
17820+#ifdef CONFIG_GRKERNSEC_VM86
17821+ if (!capable(CAP_SYS_RAWIO)) {
17822+ gr_handle_vm86();
17823+ ret = -EPERM;
17824+ goto out;
17825+ }
17826+#endif
17827+
17828 tsk = current;
17829 switch (regs->bx) {
17830 case VM86_REQUEST_IRQ:
17831@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
17832 tsk->thread.saved_fs = info->regs32->fs;
17833 tsk->thread.saved_gs = get_user_gs(info->regs32);
17834
17835- tss = &per_cpu(init_tss, get_cpu());
17836+ tss = init_tss + get_cpu();
17837 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17838 if (cpu_has_sep)
17839 tsk->thread.sysenter_cs = 0;
17840@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17841 goto cannot_handle;
17842 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17843 goto cannot_handle;
17844- intr_ptr = (unsigned long __user *) (i << 2);
17845+ intr_ptr = (__force unsigned long __user *) (i << 2);
17846 if (get_user(segoffs, intr_ptr))
17847 goto cannot_handle;
17848 if ((segoffs >> 16) == BIOSSEG)
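
Annotation: both vm86 entry points above gain the same gate — without CAP_SYS_RAWIO the call is logged and rejected with -EPERM before any vm86 state is touched. A minimal sketch of that fail-closed pattern, with stand-ins replacing the kernel helpers so it compiles on its own:

#include <stdio.h>
#include <errno.h>

static int caller_has_rawio;			/* stand-in for capable(CAP_SYS_RAWIO) */

static void log_denied_vm86(void)		/* stand-in for gr_handle_vm86() */
{
	fprintf(stderr, "denied vm86 call for unprivileged caller\n");
}

static int sys_vm86_sketch(void)
{
	if (!caller_has_rawio) {
		log_denied_vm86();
		return -EPERM;			/* reject before doing any work */
	}
	/* ... real vm86 setup would follow here ... */
	return 0;
}

int main(void)
{
	printf("%d\n", sys_vm86_sketch());	/* -EPERM until the capability is granted */
	caller_has_rawio = 1;
	printf("%d\n", sys_vm86_sketch());	/* 0 */
	return 0;
}
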
17849diff -urNp linux-2.6.32.45/arch/x86/kernel/vmi_32.c linux-2.6.32.45/arch/x86/kernel/vmi_32.c
17850--- linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
17851+++ linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-08-05 20:33:55.000000000 -0400
17852@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
17853 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17854
17855 #define call_vrom_func(rom,func) \
17856- (((VROMFUNC *)(rom->func))())
17857+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
17858
17859 #define call_vrom_long_func(rom,func,arg) \
17860- (((VROMLONGFUNC *)(rom->func)) (arg))
17861+({\
17862+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17863+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17864+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17865+ __reloc;\
17866+})
17867
17868-static struct vrom_header *vmi_rom;
17869+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17870 static int disable_pge;
17871 static int disable_pse;
17872 static int disable_sep;
17873@@ -76,10 +81,10 @@ static struct {
17874 void (*set_initial_ap_state)(int, int);
17875 void (*halt)(void);
17876 void (*set_lazy_mode)(int mode);
17877-} vmi_ops;
17878+} __no_const vmi_ops __read_only;
17879
17880 /* Cached VMI operations */
17881-struct vmi_timer_ops vmi_timer_ops;
17882+struct vmi_timer_ops vmi_timer_ops __read_only;
17883
17884 /*
17885 * VMI patching routines.
17886@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17887 static inline void patch_offset(void *insnbuf,
17888 unsigned long ip, unsigned long dest)
17889 {
17890- *(unsigned long *)(insnbuf+1) = dest-ip-5;
17891+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
17892 }
17893
17894 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17895@@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17896 {
17897 u64 reloc;
17898 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17899+
17900 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17901 switch(rel->type) {
17902 case VMI_RELOCATION_CALL_REL:
17903@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17904
17905 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17906 {
17907- const pte_t pte = { .pte = 0 };
17908+ const pte_t pte = __pte(0ULL);
17909 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17910 }
17911
17912 static void vmi_pmd_clear(pmd_t *pmd)
17913 {
17914- const pte_t pte = { .pte = 0 };
17915+ const pte_t pte = __pte(0ULL);
17916 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17917 }
17918 #endif
17919@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17920 ap.ss = __KERNEL_DS;
17921 ap.esp = (unsigned long) start_esp;
17922
17923- ap.ds = __USER_DS;
17924- ap.es = __USER_DS;
17925+ ap.ds = __KERNEL_DS;
17926+ ap.es = __KERNEL_DS;
17927 ap.fs = __KERNEL_PERCPU;
17928- ap.gs = __KERNEL_STACK_CANARY;
17929+ savesegment(gs, ap.gs);
17930
17931 ap.eflags = 0;
17932
17933@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17934 paravirt_leave_lazy_mmu();
17935 }
17936
17937+#ifdef CONFIG_PAX_KERNEXEC
17938+static unsigned long vmi_pax_open_kernel(void)
17939+{
17940+ return 0;
17941+}
17942+
17943+static unsigned long vmi_pax_close_kernel(void)
17944+{
17945+ return 0;
17946+}
17947+#endif
17948+
17949 static inline int __init check_vmi_rom(struct vrom_header *rom)
17950 {
17951 struct pci_header *pci;
17952@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17953 return 0;
17954 if (rom->vrom_signature != VMI_SIGNATURE)
17955 return 0;
17956+ if (rom->rom_length * 512 > sizeof(*rom)) {
17957+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17958+ return 0;
17959+ }
17960 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17961 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17962 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17963@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17964 struct vrom_header *romstart;
17965 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17966 if (check_vmi_rom(romstart)) {
17967- vmi_rom = romstart;
17968+ vmi_rom = *romstart;
17969 return 1;
17970 }
17971 }
17972@@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
17973
17974 para_fill(pv_irq_ops.safe_halt, Halt);
17975
17976+#ifdef CONFIG_PAX_KERNEXEC
17977+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
17978+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
17979+#endif
17980+
17981 /*
17982 * Alternative instruction rewriting doesn't happen soon enough
17983 * to convert VMI_IRET to a call instead of a jump; so we have
17984@@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
17985
17986 void __init vmi_init(void)
17987 {
17988- if (!vmi_rom)
17989+ if (!vmi_rom.rom_signature)
17990 probe_vmi_rom();
17991 else
17992- check_vmi_rom(vmi_rom);
17993+ check_vmi_rom(&vmi_rom);
17994
17995 /* In case probing for or validating the ROM failed, basil */
17996- if (!vmi_rom)
17997+ if (!vmi_rom.rom_signature)
17998 return;
17999
18000- reserve_top_address(-vmi_rom->virtual_top);
18001+ reserve_top_address(-vmi_rom.virtual_top);
18002
18003 #ifdef CONFIG_X86_IO_APIC
18004 /* This is virtual hardware; timer routing is wired correctly */
18005@@ -874,7 +901,7 @@ void __init vmi_activate(void)
18006 {
18007 unsigned long flags;
18008
18009- if (!vmi_rom)
18010+ if (!vmi_rom.rom_signature)
18011 return;
18012
18013 local_irq_save(flags);
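
Annotation: instead of dereferencing a pointer into ISA space, the vmi_32.c change keeps a page-aligned copy of the ROM header in a dedicated ".vmi.rom" section (which the linker script change below gives its own segment). The GCC attributes involved look like the sketch here; the section name is taken from the hunk, the 4096-byte alignment and structure layout are illustrative stand-ins:

#include <stdio.h>

struct rom_header_sketch {
	unsigned short rom_signature;
	unsigned char  rom_length;		/* length in 512-byte units */
};

static struct rom_header_sketch rom_copy
	__attribute__((__section__(".vmi.rom"), __aligned__(4096)));

int main(void)
{
	/* the copy starts zeroed; probing would fill it in from *romstart */
	printf("aligned copy at %p, signature %#x\n",
	       (void *)&rom_copy, (unsigned int)rom_copy.rom_signature);
	return 0;
}
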
18014diff -urNp linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S
18015--- linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
18016+++ linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
18017@@ -26,6 +26,13 @@
18018 #include <asm/page_types.h>
18019 #include <asm/cache.h>
18020 #include <asm/boot.h>
18021+#include <asm/segment.h>
18022+
18023+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18024+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18025+#else
18026+#define __KERNEL_TEXT_OFFSET 0
18027+#endif
18028
18029 #undef i386 /* in case the preprocessor is a 32bit one */
18030
18031@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
18032 #ifdef CONFIG_X86_32
18033 OUTPUT_ARCH(i386)
18034 ENTRY(phys_startup_32)
18035-jiffies = jiffies_64;
18036 #else
18037 OUTPUT_ARCH(i386:x86-64)
18038 ENTRY(phys_startup_64)
18039-jiffies_64 = jiffies;
18040 #endif
18041
18042 PHDRS {
18043 text PT_LOAD FLAGS(5); /* R_E */
18044- data PT_LOAD FLAGS(7); /* RWE */
18045+#ifdef CONFIG_X86_32
18046+ module PT_LOAD FLAGS(5); /* R_E */
18047+#endif
18048+#ifdef CONFIG_XEN
18049+ rodata PT_LOAD FLAGS(5); /* R_E */
18050+#else
18051+ rodata PT_LOAD FLAGS(4); /* R__ */
18052+#endif
18053+ data PT_LOAD FLAGS(6); /* RW_ */
18054 #ifdef CONFIG_X86_64
18055 user PT_LOAD FLAGS(5); /* R_E */
18056+#endif
18057+ init.begin PT_LOAD FLAGS(6); /* RW_ */
18058 #ifdef CONFIG_SMP
18059 percpu PT_LOAD FLAGS(6); /* RW_ */
18060 #endif
18061+ text.init PT_LOAD FLAGS(5); /* R_E */
18062+ text.exit PT_LOAD FLAGS(5); /* R_E */
18063 init PT_LOAD FLAGS(7); /* RWE */
18064-#endif
18065 note PT_NOTE FLAGS(0); /* ___ */
18066 }
18067
18068 SECTIONS
18069 {
18070 #ifdef CONFIG_X86_32
18071- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18072- phys_startup_32 = startup_32 - LOAD_OFFSET;
18073+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18074 #else
18075- . = __START_KERNEL;
18076- phys_startup_64 = startup_64 - LOAD_OFFSET;
18077+ . = __START_KERNEL;
18078 #endif
18079
18080 /* Text and read-only data */
18081- .text : AT(ADDR(.text) - LOAD_OFFSET) {
18082- _text = .;
18083+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18084 /* bootstrapping code */
18085+#ifdef CONFIG_X86_32
18086+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18087+#else
18088+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18089+#endif
18090+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18091+ _text = .;
18092 HEAD_TEXT
18093 #ifdef CONFIG_X86_32
18094 . = ALIGN(PAGE_SIZE);
18095@@ -82,28 +102,71 @@ SECTIONS
18096 IRQENTRY_TEXT
18097 *(.fixup)
18098 *(.gnu.warning)
18099- /* End of text section */
18100- _etext = .;
18101 } :text = 0x9090
18102
18103- NOTES :text :note
18104+ . += __KERNEL_TEXT_OFFSET;
18105+
18106+#ifdef CONFIG_X86_32
18107+ . = ALIGN(PAGE_SIZE);
18108+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
18109+ *(.vmi.rom)
18110+ } :module
18111+
18112+ . = ALIGN(PAGE_SIZE);
18113+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18114+
18115+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18116+ MODULES_EXEC_VADDR = .;
18117+ BYTE(0)
18118+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18119+ . = ALIGN(HPAGE_SIZE);
18120+ MODULES_EXEC_END = . - 1;
18121+#endif
18122+
18123+ } :module
18124+#endif
18125
18126- EXCEPTION_TABLE(16) :text = 0x9090
18127+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18128+ /* End of text section */
18129+ _etext = . - __KERNEL_TEXT_OFFSET;
18130+ }
18131+
18132+#ifdef CONFIG_X86_32
18133+ . = ALIGN(PAGE_SIZE);
18134+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18135+ *(.idt)
18136+ . = ALIGN(PAGE_SIZE);
18137+ *(.empty_zero_page)
18138+ *(.swapper_pg_fixmap)
18139+ *(.swapper_pg_pmd)
18140+ *(.swapper_pg_dir)
18141+ *(.trampoline_pg_dir)
18142+ } :rodata
18143+#endif
18144+
18145+ . = ALIGN(PAGE_SIZE);
18146+ NOTES :rodata :note
18147+
18148+ EXCEPTION_TABLE(16) :rodata
18149
18150 RO_DATA(PAGE_SIZE)
18151
18152 /* Data */
18153 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18154+
18155+#ifdef CONFIG_PAX_KERNEXEC
18156+ . = ALIGN(HPAGE_SIZE);
18157+#else
18158+ . = ALIGN(PAGE_SIZE);
18159+#endif
18160+
18161 /* Start of data section */
18162 _sdata = .;
18163
18164 /* init_task */
18165 INIT_TASK_DATA(THREAD_SIZE)
18166
18167-#ifdef CONFIG_X86_32
18168- /* 32 bit has nosave before _edata */
18169 NOSAVE_DATA
18170-#endif
18171
18172 PAGE_ALIGNED_DATA(PAGE_SIZE)
18173
18174@@ -112,6 +175,8 @@ SECTIONS
18175 DATA_DATA
18176 CONSTRUCTORS
18177
18178+ jiffies = jiffies_64;
18179+
18180 /* rarely changed data like cpu maps */
18181 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18182
18183@@ -166,12 +231,6 @@ SECTIONS
18184 }
18185 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18186
18187- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18188- .jiffies : AT(VLOAD(.jiffies)) {
18189- *(.jiffies)
18190- }
18191- jiffies = VVIRT(.jiffies);
18192-
18193 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18194 *(.vsyscall_3)
18195 }
18196@@ -187,12 +246,19 @@ SECTIONS
18197 #endif /* CONFIG_X86_64 */
18198
18199 /* Init code and data - will be freed after init */
18200- . = ALIGN(PAGE_SIZE);
18201 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18202+ BYTE(0)
18203+
18204+#ifdef CONFIG_PAX_KERNEXEC
18205+ . = ALIGN(HPAGE_SIZE);
18206+#else
18207+ . = ALIGN(PAGE_SIZE);
18208+#endif
18209+
18210 __init_begin = .; /* paired with __init_end */
18211- }
18212+ } :init.begin
18213
18214-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18215+#ifdef CONFIG_SMP
18216 /*
18217 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18218 * output PHDR, so the next output section - .init.text - should
18219@@ -201,12 +267,27 @@ SECTIONS
18220 PERCPU_VADDR(0, :percpu)
18221 #endif
18222
18223- INIT_TEXT_SECTION(PAGE_SIZE)
18224-#ifdef CONFIG_X86_64
18225- :init
18226-#endif
18227+ . = ALIGN(PAGE_SIZE);
18228+ init_begin = .;
18229+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18230+ VMLINUX_SYMBOL(_sinittext) = .;
18231+ INIT_TEXT
18232+ VMLINUX_SYMBOL(_einittext) = .;
18233+ . = ALIGN(PAGE_SIZE);
18234+ } :text.init
18235
18236- INIT_DATA_SECTION(16)
18237+ /*
18238+ * .exit.text is discard at runtime, not link time, to deal with
18239+ * references from .altinstructions and .eh_frame
18240+ */
18241+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18242+ EXIT_TEXT
18243+ . = ALIGN(16);
18244+ } :text.exit
18245+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18246+
18247+ . = ALIGN(PAGE_SIZE);
18248+ INIT_DATA_SECTION(16) :init
18249
18250 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18251 __x86_cpu_dev_start = .;
18252@@ -232,19 +313,11 @@ SECTIONS
18253 *(.altinstr_replacement)
18254 }
18255
18256- /*
18257- * .exit.text is discard at runtime, not link time, to deal with
18258- * references from .altinstructions and .eh_frame
18259- */
18260- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18261- EXIT_TEXT
18262- }
18263-
18264 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18265 EXIT_DATA
18266 }
18267
18268-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18269+#ifndef CONFIG_SMP
18270 PERCPU(PAGE_SIZE)
18271 #endif
18272
18273@@ -267,12 +340,6 @@ SECTIONS
18274 . = ALIGN(PAGE_SIZE);
18275 }
18276
18277-#ifdef CONFIG_X86_64
18278- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18279- NOSAVE_DATA
18280- }
18281-#endif
18282-
18283 /* BSS */
18284 . = ALIGN(PAGE_SIZE);
18285 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18286@@ -288,6 +355,7 @@ SECTIONS
18287 __brk_base = .;
18288 . += 64 * 1024; /* 64k alignment slop space */
18289 *(.brk_reservation) /* areas brk users have reserved */
18290+ . = ALIGN(HPAGE_SIZE);
18291 __brk_limit = .;
18292 }
18293
18294@@ -316,13 +384,12 @@ SECTIONS
18295 * for the boot processor.
18296 */
18297 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18298-INIT_PER_CPU(gdt_page);
18299 INIT_PER_CPU(irq_stack_union);
18300
18301 /*
18302 * Build-time check on the image size:
18303 */
18304-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18305+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18306 "kernel image bigger than KERNEL_IMAGE_SIZE");
18307
18308 #ifdef CONFIG_SMP
18309diff -urNp linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c
18310--- linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18311+++ linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18312@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18313
18314 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18315 /* copy vsyscall data */
18316+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18317 vsyscall_gtod_data.clock.vread = clock->vread;
18318 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18319 vsyscall_gtod_data.clock.mask = clock->mask;
18320@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18321 We do this here because otherwise user space would do it on
18322 its own in a likely inferior way (no access to jiffies).
18323 If you don't like it pass NULL. */
18324- if (tcache && tcache->blob[0] == (j = __jiffies)) {
18325+ if (tcache && tcache->blob[0] == (j = jiffies)) {
18326 p = tcache->blob[1];
18327 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18328 /* Load per CPU data from RDTSCP */
18329diff -urNp linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c
18330--- linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18331+++ linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18332@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18333
18334 EXPORT_SYMBOL(copy_user_generic);
18335 EXPORT_SYMBOL(__copy_user_nocache);
18336-EXPORT_SYMBOL(copy_from_user);
18337-EXPORT_SYMBOL(copy_to_user);
18338 EXPORT_SYMBOL(__copy_from_user_inatomic);
18339
18340 EXPORT_SYMBOL(copy_page);
18341diff -urNp linux-2.6.32.45/arch/x86/kernel/xsave.c linux-2.6.32.45/arch/x86/kernel/xsave.c
18342--- linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18343+++ linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18344@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18345 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18346 return -1;
18347
18348- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18349+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18350 fx_sw_user->extended_size -
18351 FP_XSTATE_MAGIC2_SIZE));
18352 /*
18353@@ -196,7 +196,7 @@ fx_only:
18354 * the other extended state.
18355 */
18356 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18357- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18358+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18359 }
18360
18361 /*
18362@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18363 if (task_thread_info(tsk)->status & TS_XSAVE)
18364 err = restore_user_xstate(buf);
18365 else
18366- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18367+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
18368 buf);
18369 if (unlikely(err)) {
18370 /*
18371diff -urNp linux-2.6.32.45/arch/x86/kvm/emulate.c linux-2.6.32.45/arch/x86/kvm/emulate.c
18372--- linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18373+++ linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18374@@ -81,8 +81,8 @@
18375 #define Src2CL (1<<29)
18376 #define Src2ImmByte (2<<29)
18377 #define Src2One (3<<29)
18378-#define Src2Imm16 (4<<29)
18379-#define Src2Mask (7<<29)
18380+#define Src2Imm16 (4U<<29)
18381+#define Src2Mask (7U<<29)
18382
18383 enum {
18384 Group1_80, Group1_81, Group1_82, Group1_83,
18385@@ -411,6 +411,7 @@ static u32 group2_table[] = {
18386
18387 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18388 do { \
18389+ unsigned long _tmp; \
18390 __asm__ __volatile__ ( \
18391 _PRE_EFLAGS("0", "4", "2") \
18392 _op _suffix " %"_x"3,%1; " \
18393@@ -424,8 +425,6 @@ static u32 group2_table[] = {
18394 /* Raw emulation: instruction has two explicit operands. */
18395 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18396 do { \
18397- unsigned long _tmp; \
18398- \
18399 switch ((_dst).bytes) { \
18400 case 2: \
18401 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18402@@ -441,7 +440,6 @@ static u32 group2_table[] = {
18403
18404 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18405 do { \
18406- unsigned long _tmp; \
18407 switch ((_dst).bytes) { \
18408 case 1: \
18409 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
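
Annotation: the Src2Imm16/Src2Mask change adds a "U" because shifting a signed int so that the result would set bit 31 (4 << 29, 7 << 29) overflows int, which is undefined behaviour in C; with an unsigned literal the same masks are well defined. A quick standalone illustration:

#include <stdio.h>

#define SRC2_SHIFT 29
#define SRC2_IMM16 (4U << SRC2_SHIFT)	/* 0x80000000, well defined */
#define SRC2_MASK  (7U << SRC2_SHIFT)	/* 0xe0000000, well defined */

int main(void)
{
	unsigned int flags = SRC2_IMM16 | (1U << 3);

	printf("src2 field: %#x\n", flags & SRC2_MASK);	/* prints 0x80000000 */
	return 0;
}
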
18410diff -urNp linux-2.6.32.45/arch/x86/kvm/lapic.c linux-2.6.32.45/arch/x86/kvm/lapic.c
18411--- linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18412+++ linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18413@@ -52,7 +52,7 @@
18414 #define APIC_BUS_CYCLE_NS 1
18415
18416 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18417-#define apic_debug(fmt, arg...)
18418+#define apic_debug(fmt, arg...) do {} while (0)
18419
18420 #define APIC_LVT_NUM 6
18421 /* 14 is the version for Xeon and Pentium 8.4.8*/
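
Annotation: the lapic.c change expands the disabled apic_debug() to do {} while (0) so it still behaves like a single statement — it requires a terminating semicolon, leaves no bare ";" behind, and an if/else around it parses the same way whether the debug call is compiled in or out, without empty-body warnings. Minimal illustration:

#include <stdio.h>

#define debug_off(fmt, ...)	do {} while (0)

static void handle(int irq_pending)
{
	if (irq_pending)
		debug_off("irq %d pending\n", irq_pending);	/* compiles to nothing */
	else
		printf("idle\n");
}

int main(void)
{
	handle(0);	/* prints "idle" */
	handle(1);	/* prints nothing */
	return 0;
}
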
18422diff -urNp linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h
18423--- linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18424+++ linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18425@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18426 int level = PT_PAGE_TABLE_LEVEL;
18427 unsigned long mmu_seq;
18428
18429+ pax_track_stack();
18430+
18431 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18432 kvm_mmu_audit(vcpu, "pre page fault");
18433
18434diff -urNp linux-2.6.32.45/arch/x86/kvm/svm.c linux-2.6.32.45/arch/x86/kvm/svm.c
18435--- linux-2.6.32.45/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18436+++ linux-2.6.32.45/arch/x86/kvm/svm.c 2011-08-05 20:33:55.000000000 -0400
18437@@ -2485,7 +2485,11 @@ static void reload_tss(struct kvm_vcpu *
18438 int cpu = raw_smp_processor_id();
18439
18440 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18441+
18442+ pax_open_kernel();
18443 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18444+ pax_close_kernel();
18445+
18446 load_TR_desc();
18447 }
18448
18449@@ -2946,7 +2950,7 @@ static bool svm_gb_page_enable(void)
18450 return true;
18451 }
18452
18453-static struct kvm_x86_ops svm_x86_ops = {
18454+static const struct kvm_x86_ops svm_x86_ops = {
18455 .cpu_has_kvm_support = has_svm,
18456 .disabled_by_bios = is_disabled,
18457 .hardware_setup = svm_hardware_setup,
18458diff -urNp linux-2.6.32.45/arch/x86/kvm/vmx.c linux-2.6.32.45/arch/x86/kvm/vmx.c
18459--- linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18460+++ linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18461@@ -570,7 +570,11 @@ static void reload_tss(void)
18462
18463 kvm_get_gdt(&gdt);
18464 descs = (void *)gdt.base;
18465+
18466+ pax_open_kernel();
18467 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18468+ pax_close_kernel();
18469+
18470 load_TR_desc();
18471 }
18472
18473@@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18474 if (!cpu_has_vmx_flexpriority())
18475 flexpriority_enabled = 0;
18476
18477- if (!cpu_has_vmx_tpr_shadow())
18478- kvm_x86_ops->update_cr8_intercept = NULL;
18479+ if (!cpu_has_vmx_tpr_shadow()) {
18480+ pax_open_kernel();
18481+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18482+ pax_close_kernel();
18483+ }
18484
18485 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18486 kvm_disable_largepages();
18487@@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18488 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18489
18490 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18491- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18492+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18493 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18494 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18495 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18496@@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18497 "jmp .Lkvm_vmx_return \n\t"
18498 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18499 ".Lkvm_vmx_return: "
18500+
18501+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18502+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18503+ ".Lkvm_vmx_return2: "
18504+#endif
18505+
18506 /* Save guest registers, load host registers, keep flags */
18507 "xchg %0, (%%"R"sp) \n\t"
18508 "mov %%"R"ax, %c[rax](%0) \n\t"
18509@@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18510 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18511 #endif
18512 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18513+
18514+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18515+ ,[cs]"i"(__KERNEL_CS)
18516+#endif
18517+
18518 : "cc", "memory"
18519- , R"bx", R"di", R"si"
18520+ , R"ax", R"bx", R"di", R"si"
18521 #ifdef CONFIG_X86_64
18522 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18523 #endif
18524@@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18525 if (vmx->rmode.irq.pending)
18526 fixup_rmode_irq(vmx);
18527
18528- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18529+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18530+
18531+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18532+ loadsegment(fs, __KERNEL_PERCPU);
18533+#endif
18534+
18535+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18536+ __set_fs(current_thread_info()->addr_limit);
18537+#endif
18538+
18539 vmx->launched = 1;
18540
18541 vmx_complete_interrupts(vmx);
18542@@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18543 return false;
18544 }
18545
18546-static struct kvm_x86_ops vmx_x86_ops = {
18547+static const struct kvm_x86_ops vmx_x86_ops = {
18548 .cpu_has_kvm_support = cpu_has_kvm_support,
18549 .disabled_by_bios = vmx_disabled_by_bios,
18550 .hardware_setup = hardware_setup,
18551diff -urNp linux-2.6.32.45/arch/x86/kvm/x86.c linux-2.6.32.45/arch/x86/kvm/x86.c
18552--- linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18553+++ linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18554@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18555 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18556 struct kvm_cpuid_entry2 __user *entries);
18557
18558-struct kvm_x86_ops *kvm_x86_ops;
18559+const struct kvm_x86_ops *kvm_x86_ops;
18560 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18561
18562 int ignore_msrs = 0;
18563@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18564 struct kvm_cpuid2 *cpuid,
18565 struct kvm_cpuid_entry2 __user *entries)
18566 {
18567- int r;
18568+ int r, i;
18569
18570 r = -E2BIG;
18571 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18572 goto out;
18573 r = -EFAULT;
18574- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18575- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18576+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18577 goto out;
18578+ for (i = 0; i < cpuid->nent; ++i) {
18579+ struct kvm_cpuid_entry2 cpuid_entry;
18580+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18581+ goto out;
18582+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18583+ }
18584 vcpu->arch.cpuid_nent = cpuid->nent;
18585 kvm_apic_set_version(vcpu);
18586 return 0;
18587@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18588 struct kvm_cpuid2 *cpuid,
18589 struct kvm_cpuid_entry2 __user *entries)
18590 {
18591- int r;
18592+ int r, i;
18593
18594 vcpu_load(vcpu);
18595 r = -E2BIG;
18596 if (cpuid->nent < vcpu->arch.cpuid_nent)
18597 goto out;
18598 r = -EFAULT;
18599- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18600- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18601+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18602 goto out;
18603+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18604+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18605+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18606+ goto out;
18607+ }
18608 return 0;
18609
18610 out:
18611@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18612 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18613 struct kvm_interrupt *irq)
18614 {
18615- if (irq->irq < 0 || irq->irq >= 256)
18616+ if (irq->irq >= 256)
18617 return -EINVAL;
18618 if (irqchip_in_kernel(vcpu->kvm))
18619 return -ENXIO;
18620@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18621 .notifier_call = kvmclock_cpufreq_notifier
18622 };
18623
18624-int kvm_arch_init(void *opaque)
18625+int kvm_arch_init(const void *opaque)
18626 {
18627 int r, cpu;
18628- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18629+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18630
18631 if (kvm_x86_ops) {
18632 printk(KERN_ERR "kvm: already loaded the other module\n");
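
Annotation: the cpuid ioctl hunks replace one variable-sized copy_from_user()/copy_to_user() with a per-entry loop — the whole user range is validated once, then each entry goes through a fixed-size bounce buffer, so every individual copy has a compile-time-known size. A userspace sketch of the same shape, with memcpy standing in for __copy_from_user and the range check reduced to a stub:

#include <stdio.h>
#include <string.h>

struct cpuid_entry_sketch {
	unsigned int function, eax, ebx, ecx, edx;
};

#define MAX_ENTRIES 8				/* stands in for KVM_MAX_CPUID_ENTRIES */

static struct cpuid_entry_sketch dest[MAX_ENTRIES];

static int copy_entries(const struct cpuid_entry_sketch *user_src, unsigned int nent)
{
	unsigned int i;

	if (nent > MAX_ENTRIES)			/* -E2BIG in the real code */
		return -1;
	/* access_ok(VERIFY_READ, user_src, nent * sizeof(*user_src)) would go here */
	for (i = 0; i < nent; i++) {
		struct cpuid_entry_sketch tmp;

		memcpy(&tmp, &user_src[i], sizeof(tmp));	/* fixed-size copy */
		dest[i] = tmp;
	}
	return 0;
}

int main(void)
{
	struct cpuid_entry_sketch src[2] = { { .function = 0 }, { .function = 1 } };

	printf("%d\n", copy_entries(src, 2));	/* 0 on success */
	return 0;
}
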
18633diff -urNp linux-2.6.32.45/arch/x86/lguest/boot.c linux-2.6.32.45/arch/x86/lguest/boot.c
18634--- linux-2.6.32.45/arch/x86/lguest/boot.c 2011-03-27 14:31:47.000000000 -0400
18635+++ linux-2.6.32.45/arch/x86/lguest/boot.c 2011-08-05 20:33:55.000000000 -0400
18636@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vt
18637 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18638 * Launcher to reboot us.
18639 */
18640-static void lguest_restart(char *reason)
18641+static __noreturn void lguest_restart(char *reason)
18642 {
18643 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
18644+ BUG();
18645 }
18646
18647 /*G:050
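
Annotation: marking lguest_restart() __noreturn lets the compiler assume control never comes back from it, and the added BUG() enforces that assumption if the hypercall ever does return. The same pattern in plain C, with abort() standing in for BUG() and exit() for the shutdown hypercall:

#include <stdlib.h>

static void request_shutdown(const char *reason)
{
	(void)reason;
	exit(0);		/* stand-in for the shutdown hypercall */
}

static __attribute__((noreturn)) void restart_sketch(const char *reason)
{
	request_shutdown(reason);
	abort();		/* must not be reached; keeps the noreturn promise honest */
}

int main(void)
{
	restart_sketch("reboot requested");
}
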
18648diff -urNp linux-2.6.32.45/arch/x86/lib/atomic64_32.c linux-2.6.32.45/arch/x86/lib/atomic64_32.c
18649--- linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18650+++ linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18651@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18652 }
18653 EXPORT_SYMBOL(atomic64_cmpxchg);
18654
18655+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18656+{
18657+ return cmpxchg8b(&ptr->counter, old_val, new_val);
18658+}
18659+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18660+
18661 /**
18662 * atomic64_xchg - xchg atomic64 variable
18663 * @ptr: pointer to type atomic64_t
18664@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18665 EXPORT_SYMBOL(atomic64_xchg);
18666
18667 /**
18668+ * atomic64_xchg_unchecked - xchg atomic64 variable
18669+ * @ptr: pointer to type atomic64_unchecked_t
18670+ * @new_val: value to assign
18671+ *
18672+ * Atomically xchgs the value of @ptr to @new_val and returns
18673+ * the old value.
18674+ */
18675+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18676+{
18677+ /*
18678+ * Try first with a (possibly incorrect) assumption about
18679+ * what we have there. We'll do two loops most likely,
18680+ * but we'll get an ownership MESI transaction straight away
18681+ * instead of a read transaction followed by a
18682+ * flush-for-ownership transaction:
18683+ */
18684+ u64 old_val, real_val = 0;
18685+
18686+ do {
18687+ old_val = real_val;
18688+
18689+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18690+
18691+ } while (real_val != old_val);
18692+
18693+ return old_val;
18694+}
18695+EXPORT_SYMBOL(atomic64_xchg_unchecked);
18696+
18697+/**
18698 * atomic64_set - set atomic64 variable
18699 * @ptr: pointer to type atomic64_t
18700 * @new_val: value to assign
18701@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18702 EXPORT_SYMBOL(atomic64_set);
18703
18704 /**
18705-EXPORT_SYMBOL(atomic64_read);
18706+ * atomic64_unchecked_set - set atomic64 variable
18707+ * @ptr: pointer to type atomic64_unchecked_t
18708+ * @new_val: value to assign
18709+ *
18710+ * Atomically sets the value of @ptr to @new_val.
18711+ */
18712+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18713+{
18714+ atomic64_xchg_unchecked(ptr, new_val);
18715+}
18716+EXPORT_SYMBOL(atomic64_set_unchecked);
18717+
18718+/**
18719 * atomic64_add_return - add and return
18720 * @delta: integer value to add
18721 * @ptr: pointer to type atomic64_t
18722@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18723 }
18724 EXPORT_SYMBOL(atomic64_add_return);
18725
18726+/**
18727+ * atomic64_add_return_unchecked - add and return
18728+ * @delta: integer value to add
18729+ * @ptr: pointer to type atomic64_unchecked_t
18730+ *
18731+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
18732+ */
18733+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18734+{
18735+ /*
18736+ * Try first with a (possibly incorrect) assumption about
18737+ * what we have there. We'll do two loops most likely,
18738+ * but we'll get an ownership MESI transaction straight away
18739+ * instead of a read transaction followed by a
18740+ * flush-for-ownership transaction:
18741+ */
18742+ u64 old_val, new_val, real_val = 0;
18743+
18744+ do {
18745+ old_val = real_val;
18746+ new_val = old_val + delta;
18747+
18748+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18749+
18750+ } while (real_val != old_val);
18751+
18752+ return new_val;
18753+}
18754+EXPORT_SYMBOL(atomic64_add_return_unchecked);
18755+
18756 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
18757 {
18758 return atomic64_add_return(-delta, ptr);
18759 }
18760 EXPORT_SYMBOL(atomic64_sub_return);
18761
18762+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18763+{
18764+ return atomic64_add_return_unchecked(-delta, ptr);
18765+}
18766+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
18767+
18768 u64 atomic64_inc_return(atomic64_t *ptr)
18769 {
18770 return atomic64_add_return(1, ptr);
18771 }
18772 EXPORT_SYMBOL(atomic64_inc_return);
18773
18774+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
18775+{
18776+ return atomic64_add_return_unchecked(1, ptr);
18777+}
18778+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
18779+
18780 u64 atomic64_dec_return(atomic64_t *ptr)
18781 {
18782 return atomic64_sub_return(1, ptr);
18783 }
18784 EXPORT_SYMBOL(atomic64_dec_return);
18785
18786+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18787+{
18788+ return atomic64_sub_return_unchecked(1, ptr);
18789+}
18790+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18791+
18792 /**
18793 * atomic64_add - add integer to atomic64 variable
18794 * @delta: integer value to add
18795@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18796 EXPORT_SYMBOL(atomic64_add);
18797
18798 /**
18799+ * atomic64_add_unchecked - add integer to atomic64 variable
18800+ * @delta: integer value to add
18801+ * @ptr: pointer to type atomic64_unchecked_t
18802+ *
18803+ * Atomically adds @delta to @ptr.
18804+ */
18805+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18806+{
18807+ atomic64_add_return_unchecked(delta, ptr);
18808+}
18809+EXPORT_SYMBOL(atomic64_add_unchecked);
18810+
18811+/**
18812 * atomic64_sub - subtract the atomic64 variable
18813 * @delta: integer value to subtract
18814 * @ptr: pointer to type atomic64_t
18815@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18816 EXPORT_SYMBOL(atomic64_sub);
18817
18818 /**
18819+ * atomic64_sub_unchecked - subtract the atomic64 variable
18820+ * @delta: integer value to subtract
18821+ * @ptr: pointer to type atomic64_unchecked_t
18822+ *
18823+ * Atomically subtracts @delta from @ptr.
18824+ */
18825+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18826+{
18827+ atomic64_add_unchecked(-delta, ptr);
18828+}
18829+EXPORT_SYMBOL(atomic64_sub_unchecked);
18830+
18831+/**
18832 * atomic64_sub_and_test - subtract value from variable and test result
18833 * @delta: integer value to subtract
18834 * @ptr: pointer to type atomic64_t
18835@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
18836 EXPORT_SYMBOL(atomic64_inc);
18837
18838 /**
18839+ * atomic64_inc_unchecked - increment atomic64 variable
18840+ * @ptr: pointer to type atomic64_unchecked_t
18841+ *
18842+ * Atomically increments @ptr by 1.
18843+ */
18844+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
18845+{
18846+ atomic64_add_unchecked(1, ptr);
18847+}
18848+EXPORT_SYMBOL(atomic64_inc_unchecked);
18849+
18850+/**
18851 * atomic64_dec - decrement atomic64 variable
18852 * @ptr: pointer to type atomic64_t
18853 *
18854@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
18855 EXPORT_SYMBOL(atomic64_dec);
18856
18857 /**
18858+ * atomic64_dec_unchecked - decrement atomic64 variable
18859+ * @ptr: pointer to type atomic64_unchecked_t
18860+ *
18861+ * Atomically decrements @ptr by 1.
18862+ */
18863+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
18864+{
18865+ atomic64_sub_unchecked(1, ptr);
18866+}
18867+EXPORT_SYMBOL(atomic64_dec_unchecked);
18868+
18869+/**
18870 * atomic64_dec_and_test - decrement and test
18871 * @ptr: pointer to type atomic64_t
18872 *
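
Annotation: the *_unchecked helpers added above all reduce to the compare-and-swap retry loop described in the patch's own comments — guess the current value, attempt the exchange, and retry with the value actually observed until the swap sticks. The same loop on a plain uint64_t using GCC's __sync builtin:

#include <stdint.h>
#include <stdio.h>

static uint64_t add_return_sketch(uint64_t delta, uint64_t *ptr)
{
	uint64_t old_val, new_val, real_val = 0;

	do {
		old_val = real_val;		/* start from the last value we saw */
		new_val = old_val + delta;
		real_val = __sync_val_compare_and_swap(ptr, old_val, new_val);
	} while (real_val != old_val);		/* someone raced us: retry */

	return new_val;
}

int main(void)
{
	uint64_t counter = 40;

	printf("%llu\n", (unsigned long long)add_return_sketch(2, &counter));	/* 42 */
	return 0;
}
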
18873diff -urNp linux-2.6.32.45/arch/x86/lib/checksum_32.S linux-2.6.32.45/arch/x86/lib/checksum_32.S
18874--- linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18875+++ linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18876@@ -28,7 +28,8 @@
18877 #include <linux/linkage.h>
18878 #include <asm/dwarf2.h>
18879 #include <asm/errno.h>
18880-
18881+#include <asm/segment.h>
18882+
18883 /*
18884 * computes a partial checksum, e.g. for TCP/UDP fragments
18885 */
18886@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18887
18888 #define ARGBASE 16
18889 #define FP 12
18890-
18891-ENTRY(csum_partial_copy_generic)
18892+
18893+ENTRY(csum_partial_copy_generic_to_user)
18894 CFI_STARTPROC
18895+
18896+#ifdef CONFIG_PAX_MEMORY_UDEREF
18897+ pushl %gs
18898+ CFI_ADJUST_CFA_OFFSET 4
18899+ popl %es
18900+ CFI_ADJUST_CFA_OFFSET -4
18901+ jmp csum_partial_copy_generic
18902+#endif
18903+
18904+ENTRY(csum_partial_copy_generic_from_user)
18905+
18906+#ifdef CONFIG_PAX_MEMORY_UDEREF
18907+ pushl %gs
18908+ CFI_ADJUST_CFA_OFFSET 4
18909+ popl %ds
18910+ CFI_ADJUST_CFA_OFFSET -4
18911+#endif
18912+
18913+ENTRY(csum_partial_copy_generic)
18914 subl $4,%esp
18915 CFI_ADJUST_CFA_OFFSET 4
18916 pushl %edi
18917@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18918 jmp 4f
18919 SRC(1: movw (%esi), %bx )
18920 addl $2, %esi
18921-DST( movw %bx, (%edi) )
18922+DST( movw %bx, %es:(%edi) )
18923 addl $2, %edi
18924 addw %bx, %ax
18925 adcl $0, %eax
18926@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18927 SRC(1: movl (%esi), %ebx )
18928 SRC( movl 4(%esi), %edx )
18929 adcl %ebx, %eax
18930-DST( movl %ebx, (%edi) )
18931+DST( movl %ebx, %es:(%edi) )
18932 adcl %edx, %eax
18933-DST( movl %edx, 4(%edi) )
18934+DST( movl %edx, %es:4(%edi) )
18935
18936 SRC( movl 8(%esi), %ebx )
18937 SRC( movl 12(%esi), %edx )
18938 adcl %ebx, %eax
18939-DST( movl %ebx, 8(%edi) )
18940+DST( movl %ebx, %es:8(%edi) )
18941 adcl %edx, %eax
18942-DST( movl %edx, 12(%edi) )
18943+DST( movl %edx, %es:12(%edi) )
18944
18945 SRC( movl 16(%esi), %ebx )
18946 SRC( movl 20(%esi), %edx )
18947 adcl %ebx, %eax
18948-DST( movl %ebx, 16(%edi) )
18949+DST( movl %ebx, %es:16(%edi) )
18950 adcl %edx, %eax
18951-DST( movl %edx, 20(%edi) )
18952+DST( movl %edx, %es:20(%edi) )
18953
18954 SRC( movl 24(%esi), %ebx )
18955 SRC( movl 28(%esi), %edx )
18956 adcl %ebx, %eax
18957-DST( movl %ebx, 24(%edi) )
18958+DST( movl %ebx, %es:24(%edi) )
18959 adcl %edx, %eax
18960-DST( movl %edx, 28(%edi) )
18961+DST( movl %edx, %es:28(%edi) )
18962
18963 lea 32(%esi), %esi
18964 lea 32(%edi), %edi
18965@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18966 shrl $2, %edx # This clears CF
18967 SRC(3: movl (%esi), %ebx )
18968 adcl %ebx, %eax
18969-DST( movl %ebx, (%edi) )
18970+DST( movl %ebx, %es:(%edi) )
18971 lea 4(%esi), %esi
18972 lea 4(%edi), %edi
18973 dec %edx
18974@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
18975 jb 5f
18976 SRC( movw (%esi), %cx )
18977 leal 2(%esi), %esi
18978-DST( movw %cx, (%edi) )
18979+DST( movw %cx, %es:(%edi) )
18980 leal 2(%edi), %edi
18981 je 6f
18982 shll $16,%ecx
18983 SRC(5: movb (%esi), %cl )
18984-DST( movb %cl, (%edi) )
18985+DST( movb %cl, %es:(%edi) )
18986 6: addl %ecx, %eax
18987 adcl $0, %eax
18988 7:
18989@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
18990
18991 6001:
18992 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18993- movl $-EFAULT, (%ebx)
18994+ movl $-EFAULT, %ss:(%ebx)
18995
18996 # zero the complete destination - computing the rest
18997 # is too much work
18998@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
18999
19000 6002:
19001 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19002- movl $-EFAULT,(%ebx)
19003+ movl $-EFAULT,%ss:(%ebx)
19004 jmp 5000b
19005
19006 .previous
19007
19008+ pushl %ss
19009+ CFI_ADJUST_CFA_OFFSET 4
19010+ popl %ds
19011+ CFI_ADJUST_CFA_OFFSET -4
19012+ pushl %ss
19013+ CFI_ADJUST_CFA_OFFSET 4
19014+ popl %es
19015+ CFI_ADJUST_CFA_OFFSET -4
19016 popl %ebx
19017 CFI_ADJUST_CFA_OFFSET -4
19018 CFI_RESTORE ebx
19019@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
19020 CFI_ADJUST_CFA_OFFSET -4
19021 ret
19022 CFI_ENDPROC
19023-ENDPROC(csum_partial_copy_generic)
19024+ENDPROC(csum_partial_copy_generic_to_user)
19025
19026 #else
19027
19028 /* Version for PentiumII/PPro */
19029
19030 #define ROUND1(x) \
19031+ nop; nop; nop; \
19032 SRC(movl x(%esi), %ebx ) ; \
19033 addl %ebx, %eax ; \
19034- DST(movl %ebx, x(%edi) ) ;
19035+ DST(movl %ebx, %es:x(%edi)) ;
19036
19037 #define ROUND(x) \
19038+ nop; nop; nop; \
19039 SRC(movl x(%esi), %ebx ) ; \
19040 adcl %ebx, %eax ; \
19041- DST(movl %ebx, x(%edi) ) ;
19042+ DST(movl %ebx, %es:x(%edi)) ;
19043
19044 #define ARGBASE 12
19045-
19046-ENTRY(csum_partial_copy_generic)
19047+
19048+ENTRY(csum_partial_copy_generic_to_user)
19049 CFI_STARTPROC
19050+
19051+#ifdef CONFIG_PAX_MEMORY_UDEREF
19052+ pushl %gs
19053+ CFI_ADJUST_CFA_OFFSET 4
19054+ popl %es
19055+ CFI_ADJUST_CFA_OFFSET -4
19056+ jmp csum_partial_copy_generic
19057+#endif
19058+
19059+ENTRY(csum_partial_copy_generic_from_user)
19060+
19061+#ifdef CONFIG_PAX_MEMORY_UDEREF
19062+ pushl %gs
19063+ CFI_ADJUST_CFA_OFFSET 4
19064+ popl %ds
19065+ CFI_ADJUST_CFA_OFFSET -4
19066+#endif
19067+
19068+ENTRY(csum_partial_copy_generic)
19069 pushl %ebx
19070 CFI_ADJUST_CFA_OFFSET 4
19071 CFI_REL_OFFSET ebx, 0
19072@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
19073 subl %ebx, %edi
19074 lea -1(%esi),%edx
19075 andl $-32,%edx
19076- lea 3f(%ebx,%ebx), %ebx
19077+ lea 3f(%ebx,%ebx,2), %ebx
19078 testl %esi, %esi
19079 jmp *%ebx
19080 1: addl $64,%esi
19081@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
19082 jb 5f
19083 SRC( movw (%esi), %dx )
19084 leal 2(%esi), %esi
19085-DST( movw %dx, (%edi) )
19086+DST( movw %dx, %es:(%edi) )
19087 leal 2(%edi), %edi
19088 je 6f
19089 shll $16,%edx
19090 5:
19091 SRC( movb (%esi), %dl )
19092-DST( movb %dl, (%edi) )
19093+DST( movb %dl, %es:(%edi) )
19094 6: addl %edx, %eax
19095 adcl $0, %eax
19096 7:
19097 .section .fixup, "ax"
19098 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19099- movl $-EFAULT, (%ebx)
19100+ movl $-EFAULT, %ss:(%ebx)
19101 # zero the complete destination (computing the rest is too much work)
19102 movl ARGBASE+8(%esp),%edi # dst
19103 movl ARGBASE+12(%esp),%ecx # len
19104@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
19105 rep; stosb
19106 jmp 7b
19107 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19108- movl $-EFAULT, (%ebx)
19109+ movl $-EFAULT, %ss:(%ebx)
19110 jmp 7b
19111 .previous
19112
19113+#ifdef CONFIG_PAX_MEMORY_UDEREF
19114+ pushl %ss
19115+ CFI_ADJUST_CFA_OFFSET 4
19116+ popl %ds
19117+ CFI_ADJUST_CFA_OFFSET -4
19118+ pushl %ss
19119+ CFI_ADJUST_CFA_OFFSET 4
19120+ popl %es
19121+ CFI_ADJUST_CFA_OFFSET -4
19122+#endif
19123+
19124 popl %esi
19125 CFI_ADJUST_CFA_OFFSET -4
19126 CFI_RESTORE esi
19127@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
19128 CFI_RESTORE ebx
19129 ret
19130 CFI_ENDPROC
19131-ENDPROC(csum_partial_copy_generic)
19132+ENDPROC(csum_partial_copy_generic_to_user)
19133
19134 #undef ROUND
19135 #undef ROUND1
19136diff -urNp linux-2.6.32.45/arch/x86/lib/clear_page_64.S linux-2.6.32.45/arch/x86/lib/clear_page_64.S
19137--- linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
19138+++ linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
19139@@ -43,7 +43,7 @@ ENDPROC(clear_page)
19140
19141 #include <asm/cpufeature.h>
19142
19143- .section .altinstr_replacement,"ax"
19144+ .section .altinstr_replacement,"a"
19145 1: .byte 0xeb /* jmp <disp8> */
19146 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19147 2:
19148diff -urNp linux-2.6.32.45/arch/x86/lib/copy_page_64.S linux-2.6.32.45/arch/x86/lib/copy_page_64.S
19149--- linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
19150+++ linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
19151@@ -104,7 +104,7 @@ ENDPROC(copy_page)
19152
19153 #include <asm/cpufeature.h>
19154
19155- .section .altinstr_replacement,"ax"
19156+ .section .altinstr_replacement,"a"
19157 1: .byte 0xeb /* jmp <disp8> */
19158 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19159 2:
19160diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_64.S linux-2.6.32.45/arch/x86/lib/copy_user_64.S
19161--- linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
19162+++ linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
19163@@ -15,13 +15,14 @@
19164 #include <asm/asm-offsets.h>
19165 #include <asm/thread_info.h>
19166 #include <asm/cpufeature.h>
19167+#include <asm/pgtable.h>
19168
19169 .macro ALTERNATIVE_JUMP feature,orig,alt
19170 0:
19171 .byte 0xe9 /* 32bit jump */
19172 .long \orig-1f /* by default jump to orig */
19173 1:
19174- .section .altinstr_replacement,"ax"
19175+ .section .altinstr_replacement,"a"
19176 2: .byte 0xe9 /* near jump with 32bit immediate */
19177 .long \alt-1b /* offset */ /* or alternatively to alt */
19178 .previous
19179@@ -64,49 +65,19 @@
19180 #endif
19181 .endm
19182
19183-/* Standard copy_to_user with segment limit checking */
19184-ENTRY(copy_to_user)
19185- CFI_STARTPROC
19186- GET_THREAD_INFO(%rax)
19187- movq %rdi,%rcx
19188- addq %rdx,%rcx
19189- jc bad_to_user
19190- cmpq TI_addr_limit(%rax),%rcx
19191- ja bad_to_user
19192- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19193- CFI_ENDPROC
19194-ENDPROC(copy_to_user)
19195-
19196-/* Standard copy_from_user with segment limit checking */
19197-ENTRY(copy_from_user)
19198- CFI_STARTPROC
19199- GET_THREAD_INFO(%rax)
19200- movq %rsi,%rcx
19201- addq %rdx,%rcx
19202- jc bad_from_user
19203- cmpq TI_addr_limit(%rax),%rcx
19204- ja bad_from_user
19205- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19206- CFI_ENDPROC
19207-ENDPROC(copy_from_user)
19208-
19209 ENTRY(copy_user_generic)
19210 CFI_STARTPROC
19211 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19212 CFI_ENDPROC
19213 ENDPROC(copy_user_generic)
19214
19215-ENTRY(__copy_from_user_inatomic)
19216- CFI_STARTPROC
19217- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19218- CFI_ENDPROC
19219-ENDPROC(__copy_from_user_inatomic)
19220-
19221 .section .fixup,"ax"
19222 /* must zero dest */
19223 ENTRY(bad_from_user)
19224 bad_from_user:
19225 CFI_STARTPROC
19226+ testl %edx,%edx
19227+ js bad_to_user
19228 movl %edx,%ecx
19229 xorl %eax,%eax
19230 rep
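
Besides dropping the copy_to_user/copy_from_user and __copy_from_user_inatomic entry points (presumably reimplemented in C elsewhere in the patch), the copy_user_64.S hunk adds a testl %edx,%edx / js bad_to_user guard at the top of bad_from_user: if the remaining length is negative when a fault is taken, the fixup skips the destination-zeroing rep stos instead of wiping an enormous range of kernel memory, and returns through the plain bad_to_user path. A hedged C rendering of that fixup, with an invented helper name:

#include <string.h>

/* sketch_bad_from_user(): what the .fixup code above does after a fault
 * while copying from user space -- the name is invented, not a kernel symbol */
static unsigned long sketch_bad_from_user(void *dst, long remaining)
{
        if (remaining < 0)                 /* the added testl/js guard */
                return remaining;          /* behave like bad_to_user: no zeroing */
        memset(dst, 0, remaining);         /* zero the bytes that were not copied */
        return remaining;
}
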
19231diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S
19232--- linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19233+++ linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19234@@ -14,6 +14,7 @@
19235 #include <asm/current.h>
19236 #include <asm/asm-offsets.h>
19237 #include <asm/thread_info.h>
19238+#include <asm/pgtable.h>
19239
19240 .macro ALIGN_DESTINATION
19241 #ifdef FIX_ALIGNMENT
19242@@ -50,6 +51,15 @@
19243 */
19244 ENTRY(__copy_user_nocache)
19245 CFI_STARTPROC
19246+
19247+#ifdef CONFIG_PAX_MEMORY_UDEREF
19248+ mov $PAX_USER_SHADOW_BASE,%rcx
19249+ cmp %rcx,%rsi
19250+ jae 1f
19251+ add %rcx,%rsi
19252+1:
19253+#endif
19254+
19255 cmpl $8,%edx
19256 jb 20f /* less then 8 bytes, go to byte copy loop */
19257 ALIGN_DESTINATION
19258diff -urNp linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c
19259--- linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19260+++ linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19261@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19262 len -= 2;
19263 }
19264 }
19265+
19266+#ifdef CONFIG_PAX_MEMORY_UDEREF
19267+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19268+ src += PAX_USER_SHADOW_BASE;
19269+#endif
19270+
19271 isum = csum_partial_copy_generic((__force const void *)src,
19272 dst, len, isum, errp, NULL);
19273 if (unlikely(*errp))
19274@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19275 }
19276
19277 *errp = 0;
19278+
19279+#ifdef CONFIG_PAX_MEMORY_UDEREF
19280+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19281+ dst += PAX_USER_SHADOW_BASE;
19282+#endif
19283+
19284 return csum_partial_copy_generic(src, (void __force *)dst,
19285 len, isum, NULL, errp);
19286 }
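
The two csum-wrapper hunks repeat the UDEREF idiom already seen in copy_user_nocache_64.S above and used again in usercopy_64.c further down: a user pointer below PAX_USER_SHADOW_BASE is shifted into the shadow mapping before the low-level copy dereferences it. The patch open-codes the check at every site; a hypothetical helper capturing the idiom (the patch itself defines no such helper, and PAX_USER_SHADOW_BASE comes from its pgtable headers, so a placeholder value stands in here):

#ifndef PAX_USER_SHADOW_BASE
#define PAX_USER_SHADOW_BASE 0x100000000000UL  /* placeholder; the real value is defined elsewhere in the patch */
#endif

/* hypothetical helper, not present in the patch: relocate a user-range
 * address into the UDEREF shadow mapping if it is not already there */
static inline unsigned long pax_shadow_uaddr_sketch(unsigned long uaddr)
{
        if (uaddr < PAX_USER_SHADOW_BASE)
                uaddr += PAX_USER_SHADOW_BASE;
        return uaddr;
}
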
19287diff -urNp linux-2.6.32.45/arch/x86/lib/getuser.S linux-2.6.32.45/arch/x86/lib/getuser.S
19288--- linux-2.6.32.45/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19289+++ linux-2.6.32.45/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19290@@ -33,14 +33,35 @@
19291 #include <asm/asm-offsets.h>
19292 #include <asm/thread_info.h>
19293 #include <asm/asm.h>
19294+#include <asm/segment.h>
19295+#include <asm/pgtable.h>
19296+
19297+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19298+#define __copyuser_seg gs;
19299+#else
19300+#define __copyuser_seg
19301+#endif
19302
19303 .text
19304 ENTRY(__get_user_1)
19305 CFI_STARTPROC
19306+
19307+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19308 GET_THREAD_INFO(%_ASM_DX)
19309 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19310 jae bad_get_user
19311-1: movzb (%_ASM_AX),%edx
19312+
19313+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19314+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19315+ cmp %_ASM_DX,%_ASM_AX
19316+ jae 1234f
19317+ add %_ASM_DX,%_ASM_AX
19318+1234:
19319+#endif
19320+
19321+#endif
19322+
19323+1: __copyuser_seg movzb (%_ASM_AX),%edx
19324 xor %eax,%eax
19325 ret
19326 CFI_ENDPROC
19327@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19328 ENTRY(__get_user_2)
19329 CFI_STARTPROC
19330 add $1,%_ASM_AX
19331+
19332+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19333 jc bad_get_user
19334 GET_THREAD_INFO(%_ASM_DX)
19335 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19336 jae bad_get_user
19337-2: movzwl -1(%_ASM_AX),%edx
19338+
19339+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19340+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19341+ cmp %_ASM_DX,%_ASM_AX
19342+ jae 1234f
19343+ add %_ASM_DX,%_ASM_AX
19344+1234:
19345+#endif
19346+
19347+#endif
19348+
19349+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19350 xor %eax,%eax
19351 ret
19352 CFI_ENDPROC
19353@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19354 ENTRY(__get_user_4)
19355 CFI_STARTPROC
19356 add $3,%_ASM_AX
19357+
19358+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19359 jc bad_get_user
19360 GET_THREAD_INFO(%_ASM_DX)
19361 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19362 jae bad_get_user
19363-3: mov -3(%_ASM_AX),%edx
19364+
19365+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19366+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19367+ cmp %_ASM_DX,%_ASM_AX
19368+ jae 1234f
19369+ add %_ASM_DX,%_ASM_AX
19370+1234:
19371+#endif
19372+
19373+#endif
19374+
19375+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19376 xor %eax,%eax
19377 ret
19378 CFI_ENDPROC
19379@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19380 GET_THREAD_INFO(%_ASM_DX)
19381 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19382 jae bad_get_user
19383+
19384+#ifdef CONFIG_PAX_MEMORY_UDEREF
19385+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19386+ cmp %_ASM_DX,%_ASM_AX
19387+ jae 1234f
19388+ add %_ASM_DX,%_ASM_AX
19389+1234:
19390+#endif
19391+
19392 4: movq -7(%_ASM_AX),%_ASM_DX
19393 xor %eax,%eax
19394 ret
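
In the __get_user_N routines the two UDEREF strategies sit side by side: on x86-64 the address is relocated into the shadow mapping with the same compare-and-add pattern as above, while on i386 the load simply gains a gs segment override through __copyuser_seg and the software addr_limit comparison is compiled out, since the user segment selected by %gs (see the set_fs() rework in usercopy_32.c below) already bounds the access in hardware. A rough C-with-inline-asm sketch of such a gs-relative load, leaving out the exception-table entry the real code attaches:

/* rough sketch of the gs-relative byte load emitted for the i386 UDEREF
 * flavour of __get_user_1 above; real code adds an _ASM_EXTABLE record */
static inline unsigned int gs_load_byte_sketch(const void *uaddr)
{
        unsigned int val;

        asm volatile("movzbl %%gs:(%1), %0" : "=r" (val) : "r" (uaddr));
        return val;
}
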
19395diff -urNp linux-2.6.32.45/arch/x86/lib/memcpy_64.S linux-2.6.32.45/arch/x86/lib/memcpy_64.S
19396--- linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19397+++ linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19398@@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19399 * It is also a lot simpler. Use this when possible:
19400 */
19401
19402- .section .altinstr_replacement, "ax"
19403+ .section .altinstr_replacement, "a"
19404 1: .byte 0xeb /* jmp <disp8> */
19405 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19406 2:
19407diff -urNp linux-2.6.32.45/arch/x86/lib/memset_64.S linux-2.6.32.45/arch/x86/lib/memset_64.S
19408--- linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19409+++ linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19410@@ -118,7 +118,7 @@ ENDPROC(__memset)
19411
19412 #include <asm/cpufeature.h>
19413
19414- .section .altinstr_replacement,"ax"
19415+ .section .altinstr_replacement,"a"
19416 1: .byte 0xeb /* jmp <disp8> */
19417 .byte (memset_c - memset) - (2f - 1b) /* offset */
19418 2:
19419diff -urNp linux-2.6.32.45/arch/x86/lib/mmx_32.c linux-2.6.32.45/arch/x86/lib/mmx_32.c
19420--- linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19421+++ linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19422@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19423 {
19424 void *p;
19425 int i;
19426+ unsigned long cr0;
19427
19428 if (unlikely(in_interrupt()))
19429 return __memcpy(to, from, len);
19430@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19431 kernel_fpu_begin();
19432
19433 __asm__ __volatile__ (
19434- "1: prefetch (%0)\n" /* This set is 28 bytes */
19435- " prefetch 64(%0)\n"
19436- " prefetch 128(%0)\n"
19437- " prefetch 192(%0)\n"
19438- " prefetch 256(%0)\n"
19439+ "1: prefetch (%1)\n" /* This set is 28 bytes */
19440+ " prefetch 64(%1)\n"
19441+ " prefetch 128(%1)\n"
19442+ " prefetch 192(%1)\n"
19443+ " prefetch 256(%1)\n"
19444 "2: \n"
19445 ".section .fixup, \"ax\"\n"
19446- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19447+ "3: \n"
19448+
19449+#ifdef CONFIG_PAX_KERNEXEC
19450+ " movl %%cr0, %0\n"
19451+ " movl %0, %%eax\n"
19452+ " andl $0xFFFEFFFF, %%eax\n"
19453+ " movl %%eax, %%cr0\n"
19454+#endif
19455+
19456+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19457+
19458+#ifdef CONFIG_PAX_KERNEXEC
19459+ " movl %0, %%cr0\n"
19460+#endif
19461+
19462 " jmp 2b\n"
19463 ".previous\n"
19464 _ASM_EXTABLE(1b, 3b)
19465- : : "r" (from));
19466+ : "=&r" (cr0) : "r" (from) : "ax");
19467
19468 for ( ; i > 5; i--) {
19469 __asm__ __volatile__ (
19470- "1: prefetch 320(%0)\n"
19471- "2: movq (%0), %%mm0\n"
19472- " movq 8(%0), %%mm1\n"
19473- " movq 16(%0), %%mm2\n"
19474- " movq 24(%0), %%mm3\n"
19475- " movq %%mm0, (%1)\n"
19476- " movq %%mm1, 8(%1)\n"
19477- " movq %%mm2, 16(%1)\n"
19478- " movq %%mm3, 24(%1)\n"
19479- " movq 32(%0), %%mm0\n"
19480- " movq 40(%0), %%mm1\n"
19481- " movq 48(%0), %%mm2\n"
19482- " movq 56(%0), %%mm3\n"
19483- " movq %%mm0, 32(%1)\n"
19484- " movq %%mm1, 40(%1)\n"
19485- " movq %%mm2, 48(%1)\n"
19486- " movq %%mm3, 56(%1)\n"
19487+ "1: prefetch 320(%1)\n"
19488+ "2: movq (%1), %%mm0\n"
19489+ " movq 8(%1), %%mm1\n"
19490+ " movq 16(%1), %%mm2\n"
19491+ " movq 24(%1), %%mm3\n"
19492+ " movq %%mm0, (%2)\n"
19493+ " movq %%mm1, 8(%2)\n"
19494+ " movq %%mm2, 16(%2)\n"
19495+ " movq %%mm3, 24(%2)\n"
19496+ " movq 32(%1), %%mm0\n"
19497+ " movq 40(%1), %%mm1\n"
19498+ " movq 48(%1), %%mm2\n"
19499+ " movq 56(%1), %%mm3\n"
19500+ " movq %%mm0, 32(%2)\n"
19501+ " movq %%mm1, 40(%2)\n"
19502+ " movq %%mm2, 48(%2)\n"
19503+ " movq %%mm3, 56(%2)\n"
19504 ".section .fixup, \"ax\"\n"
19505- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19506+ "3:\n"
19507+
19508+#ifdef CONFIG_PAX_KERNEXEC
19509+ " movl %%cr0, %0\n"
19510+ " movl %0, %%eax\n"
19511+ " andl $0xFFFEFFFF, %%eax\n"
19512+ " movl %%eax, %%cr0\n"
19513+#endif
19514+
19515+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19516+
19517+#ifdef CONFIG_PAX_KERNEXEC
19518+ " movl %0, %%cr0\n"
19519+#endif
19520+
19521 " jmp 2b\n"
19522 ".previous\n"
19523 _ASM_EXTABLE(1b, 3b)
19524- : : "r" (from), "r" (to) : "memory");
19525+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19526
19527 from += 64;
19528 to += 64;
19529@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19530 static void fast_copy_page(void *to, void *from)
19531 {
19532 int i;
19533+ unsigned long cr0;
19534
19535 kernel_fpu_begin();
19536
19537@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19538 * but that is for later. -AV
19539 */
19540 __asm__ __volatile__(
19541- "1: prefetch (%0)\n"
19542- " prefetch 64(%0)\n"
19543- " prefetch 128(%0)\n"
19544- " prefetch 192(%0)\n"
19545- " prefetch 256(%0)\n"
19546+ "1: prefetch (%1)\n"
19547+ " prefetch 64(%1)\n"
19548+ " prefetch 128(%1)\n"
19549+ " prefetch 192(%1)\n"
19550+ " prefetch 256(%1)\n"
19551 "2: \n"
19552 ".section .fixup, \"ax\"\n"
19553- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19554+ "3: \n"
19555+
19556+#ifdef CONFIG_PAX_KERNEXEC
19557+ " movl %%cr0, %0\n"
19558+ " movl %0, %%eax\n"
19559+ " andl $0xFFFEFFFF, %%eax\n"
19560+ " movl %%eax, %%cr0\n"
19561+#endif
19562+
19563+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19564+
19565+#ifdef CONFIG_PAX_KERNEXEC
19566+ " movl %0, %%cr0\n"
19567+#endif
19568+
19569 " jmp 2b\n"
19570 ".previous\n"
19571- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19572+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19573
19574 for (i = 0; i < (4096-320)/64; i++) {
19575 __asm__ __volatile__ (
19576- "1: prefetch 320(%0)\n"
19577- "2: movq (%0), %%mm0\n"
19578- " movntq %%mm0, (%1)\n"
19579- " movq 8(%0), %%mm1\n"
19580- " movntq %%mm1, 8(%1)\n"
19581- " movq 16(%0), %%mm2\n"
19582- " movntq %%mm2, 16(%1)\n"
19583- " movq 24(%0), %%mm3\n"
19584- " movntq %%mm3, 24(%1)\n"
19585- " movq 32(%0), %%mm4\n"
19586- " movntq %%mm4, 32(%1)\n"
19587- " movq 40(%0), %%mm5\n"
19588- " movntq %%mm5, 40(%1)\n"
19589- " movq 48(%0), %%mm6\n"
19590- " movntq %%mm6, 48(%1)\n"
19591- " movq 56(%0), %%mm7\n"
19592- " movntq %%mm7, 56(%1)\n"
19593+ "1: prefetch 320(%1)\n"
19594+ "2: movq (%1), %%mm0\n"
19595+ " movntq %%mm0, (%2)\n"
19596+ " movq 8(%1), %%mm1\n"
19597+ " movntq %%mm1, 8(%2)\n"
19598+ " movq 16(%1), %%mm2\n"
19599+ " movntq %%mm2, 16(%2)\n"
19600+ " movq 24(%1), %%mm3\n"
19601+ " movntq %%mm3, 24(%2)\n"
19602+ " movq 32(%1), %%mm4\n"
19603+ " movntq %%mm4, 32(%2)\n"
19604+ " movq 40(%1), %%mm5\n"
19605+ " movntq %%mm5, 40(%2)\n"
19606+ " movq 48(%1), %%mm6\n"
19607+ " movntq %%mm6, 48(%2)\n"
19608+ " movq 56(%1), %%mm7\n"
19609+ " movntq %%mm7, 56(%2)\n"
19610 ".section .fixup, \"ax\"\n"
19611- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19612+ "3:\n"
19613+
19614+#ifdef CONFIG_PAX_KERNEXEC
19615+ " movl %%cr0, %0\n"
19616+ " movl %0, %%eax\n"
19617+ " andl $0xFFFEFFFF, %%eax\n"
19618+ " movl %%eax, %%cr0\n"
19619+#endif
19620+
19621+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19622+
19623+#ifdef CONFIG_PAX_KERNEXEC
19624+ " movl %0, %%cr0\n"
19625+#endif
19626+
19627 " jmp 2b\n"
19628 ".previous\n"
19629- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19630+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19631
19632 from += 64;
19633 to += 64;
19634@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19635 static void fast_copy_page(void *to, void *from)
19636 {
19637 int i;
19638+ unsigned long cr0;
19639
19640 kernel_fpu_begin();
19641
19642 __asm__ __volatile__ (
19643- "1: prefetch (%0)\n"
19644- " prefetch 64(%0)\n"
19645- " prefetch 128(%0)\n"
19646- " prefetch 192(%0)\n"
19647- " prefetch 256(%0)\n"
19648+ "1: prefetch (%1)\n"
19649+ " prefetch 64(%1)\n"
19650+ " prefetch 128(%1)\n"
19651+ " prefetch 192(%1)\n"
19652+ " prefetch 256(%1)\n"
19653 "2: \n"
19654 ".section .fixup, \"ax\"\n"
19655- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19656+ "3: \n"
19657+
19658+#ifdef CONFIG_PAX_KERNEXEC
19659+ " movl %%cr0, %0\n"
19660+ " movl %0, %%eax\n"
19661+ " andl $0xFFFEFFFF, %%eax\n"
19662+ " movl %%eax, %%cr0\n"
19663+#endif
19664+
19665+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19666+
19667+#ifdef CONFIG_PAX_KERNEXEC
19668+ " movl %0, %%cr0\n"
19669+#endif
19670+
19671 " jmp 2b\n"
19672 ".previous\n"
19673- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19674+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19675
19676 for (i = 0; i < 4096/64; i++) {
19677 __asm__ __volatile__ (
19678- "1: prefetch 320(%0)\n"
19679- "2: movq (%0), %%mm0\n"
19680- " movq 8(%0), %%mm1\n"
19681- " movq 16(%0), %%mm2\n"
19682- " movq 24(%0), %%mm3\n"
19683- " movq %%mm0, (%1)\n"
19684- " movq %%mm1, 8(%1)\n"
19685- " movq %%mm2, 16(%1)\n"
19686- " movq %%mm3, 24(%1)\n"
19687- " movq 32(%0), %%mm0\n"
19688- " movq 40(%0), %%mm1\n"
19689- " movq 48(%0), %%mm2\n"
19690- " movq 56(%0), %%mm3\n"
19691- " movq %%mm0, 32(%1)\n"
19692- " movq %%mm1, 40(%1)\n"
19693- " movq %%mm2, 48(%1)\n"
19694- " movq %%mm3, 56(%1)\n"
19695+ "1: prefetch 320(%1)\n"
19696+ "2: movq (%1), %%mm0\n"
19697+ " movq 8(%1), %%mm1\n"
19698+ " movq 16(%1), %%mm2\n"
19699+ " movq 24(%1), %%mm3\n"
19700+ " movq %%mm0, (%2)\n"
19701+ " movq %%mm1, 8(%2)\n"
19702+ " movq %%mm2, 16(%2)\n"
19703+ " movq %%mm3, 24(%2)\n"
19704+ " movq 32(%1), %%mm0\n"
19705+ " movq 40(%1), %%mm1\n"
19706+ " movq 48(%1), %%mm2\n"
19707+ " movq 56(%1), %%mm3\n"
19708+ " movq %%mm0, 32(%2)\n"
19709+ " movq %%mm1, 40(%2)\n"
19710+ " movq %%mm2, 48(%2)\n"
19711+ " movq %%mm3, 56(%2)\n"
19712 ".section .fixup, \"ax\"\n"
19713- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19714+ "3:\n"
19715+
19716+#ifdef CONFIG_PAX_KERNEXEC
19717+ " movl %%cr0, %0\n"
19718+ " movl %0, %%eax\n"
19719+ " andl $0xFFFEFFFF, %%eax\n"
19720+ " movl %%eax, %%cr0\n"
19721+#endif
19722+
19723+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19724+
19725+#ifdef CONFIG_PAX_KERNEXEC
19726+ " movl %0, %%cr0\n"
19727+#endif
19728+
19729 " jmp 2b\n"
19730 ".previous\n"
19731 _ASM_EXTABLE(1b, 3b)
19732- : : "r" (from), "r" (to) : "memory");
19733+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19734
19735 from += 64;
19736 to += 64;
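
Every fixup path in mmx_32.c gains the same KERNEXEC bracket around its self-patching movw store into kernel text: read %cr0 into a new output operand, clear the write-protect bit (WP is bit 16, hence the 0xFFFEFFFF mask), perform the store, then restore the saved value. That new "=&r" (cr0) output is also why the remaining operands shift from %0/%1 to %1/%2 and why "ax" joins the clobber lists. The patch's real primitives for this are pax_open_kernel()/pax_close_kernel(); a standalone sketch of just the CR0 bit-twiddling, with preemption and interrupt handling omitted:

/* illustration only: clear and restore CR0.WP around a write to
 * read-only kernel text; disabling preemption/interrupts is omitted */
static inline unsigned long kernexec_open_sketch(void)
{
        unsigned long cr0;

        asm volatile("mov %%cr0, %0" : "=r" (cr0));
        asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~0x10000UL)); /* WP = bit 16 */
        return cr0;
}

static inline void kernexec_close_sketch(unsigned long cr0)
{
        asm volatile("mov %0, %%cr0" : : "r" (cr0)); /* restore WP */
}
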
19737diff -urNp linux-2.6.32.45/arch/x86/lib/putuser.S linux-2.6.32.45/arch/x86/lib/putuser.S
19738--- linux-2.6.32.45/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19739+++ linux-2.6.32.45/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19740@@ -15,7 +15,8 @@
19741 #include <asm/thread_info.h>
19742 #include <asm/errno.h>
19743 #include <asm/asm.h>
19744-
19745+#include <asm/segment.h>
19746+#include <asm/pgtable.h>
19747
19748 /*
19749 * __put_user_X
19750@@ -29,52 +30,119 @@
19751 * as they get called from within inline assembly.
19752 */
19753
19754-#define ENTER CFI_STARTPROC ; \
19755- GET_THREAD_INFO(%_ASM_BX)
19756+#define ENTER CFI_STARTPROC
19757 #define EXIT ret ; \
19758 CFI_ENDPROC
19759
19760+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19761+#define _DEST %_ASM_CX,%_ASM_BX
19762+#else
19763+#define _DEST %_ASM_CX
19764+#endif
19765+
19766+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19767+#define __copyuser_seg gs;
19768+#else
19769+#define __copyuser_seg
19770+#endif
19771+
19772 .text
19773 ENTRY(__put_user_1)
19774 ENTER
19775+
19776+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19777+ GET_THREAD_INFO(%_ASM_BX)
19778 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
19779 jae bad_put_user
19780-1: movb %al,(%_ASM_CX)
19781+
19782+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19783+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19784+ cmp %_ASM_BX,%_ASM_CX
19785+ jb 1234f
19786+ xor %ebx,%ebx
19787+1234:
19788+#endif
19789+
19790+#endif
19791+
19792+1: __copyuser_seg movb %al,(_DEST)
19793 xor %eax,%eax
19794 EXIT
19795 ENDPROC(__put_user_1)
19796
19797 ENTRY(__put_user_2)
19798 ENTER
19799+
19800+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19801+ GET_THREAD_INFO(%_ASM_BX)
19802 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19803 sub $1,%_ASM_BX
19804 cmp %_ASM_BX,%_ASM_CX
19805 jae bad_put_user
19806-2: movw %ax,(%_ASM_CX)
19807+
19808+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19809+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19810+ cmp %_ASM_BX,%_ASM_CX
19811+ jb 1234f
19812+ xor %ebx,%ebx
19813+1234:
19814+#endif
19815+
19816+#endif
19817+
19818+2: __copyuser_seg movw %ax,(_DEST)
19819 xor %eax,%eax
19820 EXIT
19821 ENDPROC(__put_user_2)
19822
19823 ENTRY(__put_user_4)
19824 ENTER
19825+
19826+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19827+ GET_THREAD_INFO(%_ASM_BX)
19828 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19829 sub $3,%_ASM_BX
19830 cmp %_ASM_BX,%_ASM_CX
19831 jae bad_put_user
19832-3: movl %eax,(%_ASM_CX)
19833+
19834+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19835+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19836+ cmp %_ASM_BX,%_ASM_CX
19837+ jb 1234f
19838+ xor %ebx,%ebx
19839+1234:
19840+#endif
19841+
19842+#endif
19843+
19844+3: __copyuser_seg movl %eax,(_DEST)
19845 xor %eax,%eax
19846 EXIT
19847 ENDPROC(__put_user_4)
19848
19849 ENTRY(__put_user_8)
19850 ENTER
19851+
19852+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19853+ GET_THREAD_INFO(%_ASM_BX)
19854 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19855 sub $7,%_ASM_BX
19856 cmp %_ASM_BX,%_ASM_CX
19857 jae bad_put_user
19858-4: mov %_ASM_AX,(%_ASM_CX)
19859+
19860+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19861+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19862+ cmp %_ASM_BX,%_ASM_CX
19863+ jb 1234f
19864+ xor %ebx,%ebx
19865+1234:
19866+#endif
19867+
19868+#endif
19869+
19870+4: __copyuser_seg mov %_ASM_AX,(_DEST)
19871 #ifdef CONFIG_X86_32
19872-5: movl %edx,4(%_ASM_CX)
19873+5: __copyuser_seg movl %edx,4(_DEST)
19874 #endif
19875 xor %eax,%eax
19876 EXIT
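
__put_user_N uses a variation of the shadow-base fixup: instead of adding the offset to the pointer, %_ASM_BX is preloaded with PAX_USER_SHADOW_BASE, zeroed only when the target address is already at or above the shadow base, and the store then goes through (_DEST), which on x86-64 UDEREF expands to base-plus-index addressing (%_ASM_CX,%_ASM_BX), so no separate add is needed. Roughly, in C (helper name and the placeholder constant are inventions for the sketch):

#ifndef PAX_USER_SHADOW_BASE
#define PAX_USER_SHADOW_BASE 0x100000000000UL  /* placeholder; real value defined elsewhere in the patch */
#endif

/* rough C equivalent of the addressing trick in __put_user_4 above */
static inline void put_user_4_sketch(unsigned long uaddr, unsigned int val)
{
        unsigned long off = PAX_USER_SHADOW_BASE;  /* mov $PAX_USER_SHADOW_BASE,%_ASM_BX */

        if (uaddr >= off)                          /* cmp %_ASM_BX,%_ASM_CX; jb 1234f */
                off = 0;                           /* xor %ebx,%ebx */
        *(unsigned int *)(uaddr + off) = val;      /* movl %eax,(%_ASM_CX,%_ASM_BX) */
}
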
19877diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_32.c linux-2.6.32.45/arch/x86/lib/usercopy_32.c
19878--- linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19879+++ linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19880@@ -43,7 +43,7 @@ do { \
19881 __asm__ __volatile__( \
19882 " testl %1,%1\n" \
19883 " jz 2f\n" \
19884- "0: lodsb\n" \
19885+ "0: "__copyuser_seg"lodsb\n" \
19886 " stosb\n" \
19887 " testb %%al,%%al\n" \
19888 " jz 1f\n" \
19889@@ -128,10 +128,12 @@ do { \
19890 int __d0; \
19891 might_fault(); \
19892 __asm__ __volatile__( \
19893+ __COPYUSER_SET_ES \
19894 "0: rep; stosl\n" \
19895 " movl %2,%0\n" \
19896 "1: rep; stosb\n" \
19897 "2:\n" \
19898+ __COPYUSER_RESTORE_ES \
19899 ".section .fixup,\"ax\"\n" \
19900 "3: lea 0(%2,%0,4),%0\n" \
19901 " jmp 2b\n" \
19902@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19903 might_fault();
19904
19905 __asm__ __volatile__(
19906+ __COPYUSER_SET_ES
19907 " testl %0, %0\n"
19908 " jz 3f\n"
19909 " andl %0,%%ecx\n"
19910@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19911 " subl %%ecx,%0\n"
19912 " addl %0,%%eax\n"
19913 "1:\n"
19914+ __COPYUSER_RESTORE_ES
19915 ".section .fixup,\"ax\"\n"
19916 "2: xorl %%eax,%%eax\n"
19917 " jmp 1b\n"
19918@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19919
19920 #ifdef CONFIG_X86_INTEL_USERCOPY
19921 static unsigned long
19922-__copy_user_intel(void __user *to, const void *from, unsigned long size)
19923+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19924 {
19925 int d0, d1;
19926 __asm__ __volatile__(
19927@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19928 " .align 2,0x90\n"
19929 "3: movl 0(%4), %%eax\n"
19930 "4: movl 4(%4), %%edx\n"
19931- "5: movl %%eax, 0(%3)\n"
19932- "6: movl %%edx, 4(%3)\n"
19933+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19934+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19935 "7: movl 8(%4), %%eax\n"
19936 "8: movl 12(%4),%%edx\n"
19937- "9: movl %%eax, 8(%3)\n"
19938- "10: movl %%edx, 12(%3)\n"
19939+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19940+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19941 "11: movl 16(%4), %%eax\n"
19942 "12: movl 20(%4), %%edx\n"
19943- "13: movl %%eax, 16(%3)\n"
19944- "14: movl %%edx, 20(%3)\n"
19945+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19946+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19947 "15: movl 24(%4), %%eax\n"
19948 "16: movl 28(%4), %%edx\n"
19949- "17: movl %%eax, 24(%3)\n"
19950- "18: movl %%edx, 28(%3)\n"
19951+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19952+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19953 "19: movl 32(%4), %%eax\n"
19954 "20: movl 36(%4), %%edx\n"
19955- "21: movl %%eax, 32(%3)\n"
19956- "22: movl %%edx, 36(%3)\n"
19957+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19958+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19959 "23: movl 40(%4), %%eax\n"
19960 "24: movl 44(%4), %%edx\n"
19961- "25: movl %%eax, 40(%3)\n"
19962- "26: movl %%edx, 44(%3)\n"
19963+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19964+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19965 "27: movl 48(%4), %%eax\n"
19966 "28: movl 52(%4), %%edx\n"
19967- "29: movl %%eax, 48(%3)\n"
19968- "30: movl %%edx, 52(%3)\n"
19969+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19970+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19971 "31: movl 56(%4), %%eax\n"
19972 "32: movl 60(%4), %%edx\n"
19973- "33: movl %%eax, 56(%3)\n"
19974- "34: movl %%edx, 60(%3)\n"
19975+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19976+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19977 " addl $-64, %0\n"
19978 " addl $64, %4\n"
19979 " addl $64, %3\n"
19980@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19981 " shrl $2, %0\n"
19982 " andl $3, %%eax\n"
19983 " cld\n"
19984+ __COPYUSER_SET_ES
19985 "99: rep; movsl\n"
19986 "36: movl %%eax, %0\n"
19987 "37: rep; movsb\n"
19988 "100:\n"
19989+ __COPYUSER_RESTORE_ES
19990+ ".section .fixup,\"ax\"\n"
19991+ "101: lea 0(%%eax,%0,4),%0\n"
19992+ " jmp 100b\n"
19993+ ".previous\n"
19994+ ".section __ex_table,\"a\"\n"
19995+ " .align 4\n"
19996+ " .long 1b,100b\n"
19997+ " .long 2b,100b\n"
19998+ " .long 3b,100b\n"
19999+ " .long 4b,100b\n"
20000+ " .long 5b,100b\n"
20001+ " .long 6b,100b\n"
20002+ " .long 7b,100b\n"
20003+ " .long 8b,100b\n"
20004+ " .long 9b,100b\n"
20005+ " .long 10b,100b\n"
20006+ " .long 11b,100b\n"
20007+ " .long 12b,100b\n"
20008+ " .long 13b,100b\n"
20009+ " .long 14b,100b\n"
20010+ " .long 15b,100b\n"
20011+ " .long 16b,100b\n"
20012+ " .long 17b,100b\n"
20013+ " .long 18b,100b\n"
20014+ " .long 19b,100b\n"
20015+ " .long 20b,100b\n"
20016+ " .long 21b,100b\n"
20017+ " .long 22b,100b\n"
20018+ " .long 23b,100b\n"
20019+ " .long 24b,100b\n"
20020+ " .long 25b,100b\n"
20021+ " .long 26b,100b\n"
20022+ " .long 27b,100b\n"
20023+ " .long 28b,100b\n"
20024+ " .long 29b,100b\n"
20025+ " .long 30b,100b\n"
20026+ " .long 31b,100b\n"
20027+ " .long 32b,100b\n"
20028+ " .long 33b,100b\n"
20029+ " .long 34b,100b\n"
20030+ " .long 35b,100b\n"
20031+ " .long 36b,100b\n"
20032+ " .long 37b,100b\n"
20033+ " .long 99b,101b\n"
20034+ ".previous"
20035+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20036+ : "1"(to), "2"(from), "0"(size)
20037+ : "eax", "edx", "memory");
20038+ return size;
20039+}
20040+
20041+static unsigned long
20042+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20043+{
20044+ int d0, d1;
20045+ __asm__ __volatile__(
20046+ " .align 2,0x90\n"
20047+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20048+ " cmpl $67, %0\n"
20049+ " jbe 3f\n"
20050+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20051+ " .align 2,0x90\n"
20052+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20053+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20054+ "5: movl %%eax, 0(%3)\n"
20055+ "6: movl %%edx, 4(%3)\n"
20056+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20057+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20058+ "9: movl %%eax, 8(%3)\n"
20059+ "10: movl %%edx, 12(%3)\n"
20060+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20061+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20062+ "13: movl %%eax, 16(%3)\n"
20063+ "14: movl %%edx, 20(%3)\n"
20064+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20065+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20066+ "17: movl %%eax, 24(%3)\n"
20067+ "18: movl %%edx, 28(%3)\n"
20068+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20069+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20070+ "21: movl %%eax, 32(%3)\n"
20071+ "22: movl %%edx, 36(%3)\n"
20072+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20073+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20074+ "25: movl %%eax, 40(%3)\n"
20075+ "26: movl %%edx, 44(%3)\n"
20076+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20077+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20078+ "29: movl %%eax, 48(%3)\n"
20079+ "30: movl %%edx, 52(%3)\n"
20080+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20081+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20082+ "33: movl %%eax, 56(%3)\n"
20083+ "34: movl %%edx, 60(%3)\n"
20084+ " addl $-64, %0\n"
20085+ " addl $64, %4\n"
20086+ " addl $64, %3\n"
20087+ " cmpl $63, %0\n"
20088+ " ja 1b\n"
20089+ "35: movl %0, %%eax\n"
20090+ " shrl $2, %0\n"
20091+ " andl $3, %%eax\n"
20092+ " cld\n"
20093+ "99: rep; "__copyuser_seg" movsl\n"
20094+ "36: movl %%eax, %0\n"
20095+ "37: rep; "__copyuser_seg" movsb\n"
20096+ "100:\n"
20097 ".section .fixup,\"ax\"\n"
20098 "101: lea 0(%%eax,%0,4),%0\n"
20099 " jmp 100b\n"
20100@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
20101 int d0, d1;
20102 __asm__ __volatile__(
20103 " .align 2,0x90\n"
20104- "0: movl 32(%4), %%eax\n"
20105+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20106 " cmpl $67, %0\n"
20107 " jbe 2f\n"
20108- "1: movl 64(%4), %%eax\n"
20109+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20110 " .align 2,0x90\n"
20111- "2: movl 0(%4), %%eax\n"
20112- "21: movl 4(%4), %%edx\n"
20113+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20114+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20115 " movl %%eax, 0(%3)\n"
20116 " movl %%edx, 4(%3)\n"
20117- "3: movl 8(%4), %%eax\n"
20118- "31: movl 12(%4),%%edx\n"
20119+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20120+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20121 " movl %%eax, 8(%3)\n"
20122 " movl %%edx, 12(%3)\n"
20123- "4: movl 16(%4), %%eax\n"
20124- "41: movl 20(%4), %%edx\n"
20125+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20126+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20127 " movl %%eax, 16(%3)\n"
20128 " movl %%edx, 20(%3)\n"
20129- "10: movl 24(%4), %%eax\n"
20130- "51: movl 28(%4), %%edx\n"
20131+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20132+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20133 " movl %%eax, 24(%3)\n"
20134 " movl %%edx, 28(%3)\n"
20135- "11: movl 32(%4), %%eax\n"
20136- "61: movl 36(%4), %%edx\n"
20137+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20138+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20139 " movl %%eax, 32(%3)\n"
20140 " movl %%edx, 36(%3)\n"
20141- "12: movl 40(%4), %%eax\n"
20142- "71: movl 44(%4), %%edx\n"
20143+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20144+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20145 " movl %%eax, 40(%3)\n"
20146 " movl %%edx, 44(%3)\n"
20147- "13: movl 48(%4), %%eax\n"
20148- "81: movl 52(%4), %%edx\n"
20149+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20150+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20151 " movl %%eax, 48(%3)\n"
20152 " movl %%edx, 52(%3)\n"
20153- "14: movl 56(%4), %%eax\n"
20154- "91: movl 60(%4), %%edx\n"
20155+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20156+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20157 " movl %%eax, 56(%3)\n"
20158 " movl %%edx, 60(%3)\n"
20159 " addl $-64, %0\n"
20160@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
20161 " shrl $2, %0\n"
20162 " andl $3, %%eax\n"
20163 " cld\n"
20164- "6: rep; movsl\n"
20165+ "6: rep; "__copyuser_seg" movsl\n"
20166 " movl %%eax,%0\n"
20167- "7: rep; movsb\n"
20168+ "7: rep; "__copyuser_seg" movsb\n"
20169 "8:\n"
20170 ".section .fixup,\"ax\"\n"
20171 "9: lea 0(%%eax,%0,4),%0\n"
20172@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20173
20174 __asm__ __volatile__(
20175 " .align 2,0x90\n"
20176- "0: movl 32(%4), %%eax\n"
20177+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20178 " cmpl $67, %0\n"
20179 " jbe 2f\n"
20180- "1: movl 64(%4), %%eax\n"
20181+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20182 " .align 2,0x90\n"
20183- "2: movl 0(%4), %%eax\n"
20184- "21: movl 4(%4), %%edx\n"
20185+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20186+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20187 " movnti %%eax, 0(%3)\n"
20188 " movnti %%edx, 4(%3)\n"
20189- "3: movl 8(%4), %%eax\n"
20190- "31: movl 12(%4),%%edx\n"
20191+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20192+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20193 " movnti %%eax, 8(%3)\n"
20194 " movnti %%edx, 12(%3)\n"
20195- "4: movl 16(%4), %%eax\n"
20196- "41: movl 20(%4), %%edx\n"
20197+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20198+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20199 " movnti %%eax, 16(%3)\n"
20200 " movnti %%edx, 20(%3)\n"
20201- "10: movl 24(%4), %%eax\n"
20202- "51: movl 28(%4), %%edx\n"
20203+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20204+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20205 " movnti %%eax, 24(%3)\n"
20206 " movnti %%edx, 28(%3)\n"
20207- "11: movl 32(%4), %%eax\n"
20208- "61: movl 36(%4), %%edx\n"
20209+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20210+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20211 " movnti %%eax, 32(%3)\n"
20212 " movnti %%edx, 36(%3)\n"
20213- "12: movl 40(%4), %%eax\n"
20214- "71: movl 44(%4), %%edx\n"
20215+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20216+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20217 " movnti %%eax, 40(%3)\n"
20218 " movnti %%edx, 44(%3)\n"
20219- "13: movl 48(%4), %%eax\n"
20220- "81: movl 52(%4), %%edx\n"
20221+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20222+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20223 " movnti %%eax, 48(%3)\n"
20224 " movnti %%edx, 52(%3)\n"
20225- "14: movl 56(%4), %%eax\n"
20226- "91: movl 60(%4), %%edx\n"
20227+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20228+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20229 " movnti %%eax, 56(%3)\n"
20230 " movnti %%edx, 60(%3)\n"
20231 " addl $-64, %0\n"
20232@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20233 " shrl $2, %0\n"
20234 " andl $3, %%eax\n"
20235 " cld\n"
20236- "6: rep; movsl\n"
20237+ "6: rep; "__copyuser_seg" movsl\n"
20238 " movl %%eax,%0\n"
20239- "7: rep; movsb\n"
20240+ "7: rep; "__copyuser_seg" movsb\n"
20241 "8:\n"
20242 ".section .fixup,\"ax\"\n"
20243 "9: lea 0(%%eax,%0,4),%0\n"
20244@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20245
20246 __asm__ __volatile__(
20247 " .align 2,0x90\n"
20248- "0: movl 32(%4), %%eax\n"
20249+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20250 " cmpl $67, %0\n"
20251 " jbe 2f\n"
20252- "1: movl 64(%4), %%eax\n"
20253+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20254 " .align 2,0x90\n"
20255- "2: movl 0(%4), %%eax\n"
20256- "21: movl 4(%4), %%edx\n"
20257+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20258+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20259 " movnti %%eax, 0(%3)\n"
20260 " movnti %%edx, 4(%3)\n"
20261- "3: movl 8(%4), %%eax\n"
20262- "31: movl 12(%4),%%edx\n"
20263+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20264+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20265 " movnti %%eax, 8(%3)\n"
20266 " movnti %%edx, 12(%3)\n"
20267- "4: movl 16(%4), %%eax\n"
20268- "41: movl 20(%4), %%edx\n"
20269+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20270+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20271 " movnti %%eax, 16(%3)\n"
20272 " movnti %%edx, 20(%3)\n"
20273- "10: movl 24(%4), %%eax\n"
20274- "51: movl 28(%4), %%edx\n"
20275+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20276+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20277 " movnti %%eax, 24(%3)\n"
20278 " movnti %%edx, 28(%3)\n"
20279- "11: movl 32(%4), %%eax\n"
20280- "61: movl 36(%4), %%edx\n"
20281+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20282+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20283 " movnti %%eax, 32(%3)\n"
20284 " movnti %%edx, 36(%3)\n"
20285- "12: movl 40(%4), %%eax\n"
20286- "71: movl 44(%4), %%edx\n"
20287+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20288+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20289 " movnti %%eax, 40(%3)\n"
20290 " movnti %%edx, 44(%3)\n"
20291- "13: movl 48(%4), %%eax\n"
20292- "81: movl 52(%4), %%edx\n"
20293+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20294+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20295 " movnti %%eax, 48(%3)\n"
20296 " movnti %%edx, 52(%3)\n"
20297- "14: movl 56(%4), %%eax\n"
20298- "91: movl 60(%4), %%edx\n"
20299+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20300+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20301 " movnti %%eax, 56(%3)\n"
20302 " movnti %%edx, 60(%3)\n"
20303 " addl $-64, %0\n"
20304@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20305 " shrl $2, %0\n"
20306 " andl $3, %%eax\n"
20307 " cld\n"
20308- "6: rep; movsl\n"
20309+ "6: rep; "__copyuser_seg" movsl\n"
20310 " movl %%eax,%0\n"
20311- "7: rep; movsb\n"
20312+ "7: rep; "__copyuser_seg" movsb\n"
20313 "8:\n"
20314 ".section .fixup,\"ax\"\n"
20315 "9: lea 0(%%eax,%0,4),%0\n"
20316@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20317 */
20318 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20319 unsigned long size);
20320-unsigned long __copy_user_intel(void __user *to, const void *from,
20321+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20322+ unsigned long size);
20323+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20324 unsigned long size);
20325 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20326 const void __user *from, unsigned long size);
20327 #endif /* CONFIG_X86_INTEL_USERCOPY */
20328
20329 /* Generic arbitrary sized copy. */
20330-#define __copy_user(to, from, size) \
20331+#define __copy_user(to, from, size, prefix, set, restore) \
20332 do { \
20333 int __d0, __d1, __d2; \
20334 __asm__ __volatile__( \
20335+ set \
20336 " cmp $7,%0\n" \
20337 " jbe 1f\n" \
20338 " movl %1,%0\n" \
20339 " negl %0\n" \
20340 " andl $7,%0\n" \
20341 " subl %0,%3\n" \
20342- "4: rep; movsb\n" \
20343+ "4: rep; "prefix"movsb\n" \
20344 " movl %3,%0\n" \
20345 " shrl $2,%0\n" \
20346 " andl $3,%3\n" \
20347 " .align 2,0x90\n" \
20348- "0: rep; movsl\n" \
20349+ "0: rep; "prefix"movsl\n" \
20350 " movl %3,%0\n" \
20351- "1: rep; movsb\n" \
20352+ "1: rep; "prefix"movsb\n" \
20353 "2:\n" \
20354+ restore \
20355 ".section .fixup,\"ax\"\n" \
20356 "5: addl %3,%0\n" \
20357 " jmp 2b\n" \
20358@@ -682,14 +799,14 @@ do { \
20359 " negl %0\n" \
20360 " andl $7,%0\n" \
20361 " subl %0,%3\n" \
20362- "4: rep; movsb\n" \
20363+ "4: rep; "__copyuser_seg"movsb\n" \
20364 " movl %3,%0\n" \
20365 " shrl $2,%0\n" \
20366 " andl $3,%3\n" \
20367 " .align 2,0x90\n" \
20368- "0: rep; movsl\n" \
20369+ "0: rep; "__copyuser_seg"movsl\n" \
20370 " movl %3,%0\n" \
20371- "1: rep; movsb\n" \
20372+ "1: rep; "__copyuser_seg"movsb\n" \
20373 "2:\n" \
20374 ".section .fixup,\"ax\"\n" \
20375 "5: addl %3,%0\n" \
20376@@ -775,9 +892,9 @@ survive:
20377 }
20378 #endif
20379 if (movsl_is_ok(to, from, n))
20380- __copy_user(to, from, n);
20381+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20382 else
20383- n = __copy_user_intel(to, from, n);
20384+ n = __generic_copy_to_user_intel(to, from, n);
20385 return n;
20386 }
20387 EXPORT_SYMBOL(__copy_to_user_ll);
20388@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20389 unsigned long n)
20390 {
20391 if (movsl_is_ok(to, from, n))
20392- __copy_user(to, from, n);
20393+ __copy_user(to, from, n, __copyuser_seg, "", "");
20394 else
20395- n = __copy_user_intel((void __user *)to,
20396- (const void *)from, n);
20397+ n = __generic_copy_from_user_intel(to, from, n);
20398 return n;
20399 }
20400 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20401@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20402 if (n > 64 && cpu_has_xmm2)
20403 n = __copy_user_intel_nocache(to, from, n);
20404 else
20405- __copy_user(to, from, n);
20406+ __copy_user(to, from, n, __copyuser_seg, "", "");
20407 #else
20408- __copy_user(to, from, n);
20409+ __copy_user(to, from, n, __copyuser_seg, "", "");
20410 #endif
20411 return n;
20412 }
20413 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20414
20415-/**
20416- * copy_to_user: - Copy a block of data into user space.
20417- * @to: Destination address, in user space.
20418- * @from: Source address, in kernel space.
20419- * @n: Number of bytes to copy.
20420- *
20421- * Context: User context only. This function may sleep.
20422- *
20423- * Copy data from kernel space to user space.
20424- *
20425- * Returns number of bytes that could not be copied.
20426- * On success, this will be zero.
20427- */
20428-unsigned long
20429-copy_to_user(void __user *to, const void *from, unsigned long n)
20430+#ifdef CONFIG_PAX_MEMORY_UDEREF
20431+void __set_fs(mm_segment_t x)
20432 {
20433- if (access_ok(VERIFY_WRITE, to, n))
20434- n = __copy_to_user(to, from, n);
20435- return n;
20436+ switch (x.seg) {
20437+ case 0:
20438+ loadsegment(gs, 0);
20439+ break;
20440+ case TASK_SIZE_MAX:
20441+ loadsegment(gs, __USER_DS);
20442+ break;
20443+ case -1UL:
20444+ loadsegment(gs, __KERNEL_DS);
20445+ break;
20446+ default:
20447+ BUG();
20448+ }
20449+ return;
20450 }
20451-EXPORT_SYMBOL(copy_to_user);
20452+EXPORT_SYMBOL(__set_fs);
20453
20454-/**
20455- * copy_from_user: - Copy a block of data from user space.
20456- * @to: Destination address, in kernel space.
20457- * @from: Source address, in user space.
20458- * @n: Number of bytes to copy.
20459- *
20460- * Context: User context only. This function may sleep.
20461- *
20462- * Copy data from user space to kernel space.
20463- *
20464- * Returns number of bytes that could not be copied.
20465- * On success, this will be zero.
20466- *
20467- * If some data could not be copied, this function will pad the copied
20468- * data to the requested size using zero bytes.
20469- */
20470-unsigned long
20471-copy_from_user(void *to, const void __user *from, unsigned long n)
20472+void set_fs(mm_segment_t x)
20473 {
20474- if (access_ok(VERIFY_READ, from, n))
20475- n = __copy_from_user(to, from, n);
20476- else
20477- memset(to, 0, n);
20478- return n;
20479+ current_thread_info()->addr_limit = x;
20480+ __set_fs(x);
20481 }
20482-EXPORT_SYMBOL(copy_from_user);
20483+EXPORT_SYMBOL(set_fs);
20484+#endif
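
usercopy_32.c is where the i386 UDEREF split shows up in C: every user-side access in the hand-written and Intel-unrolled copy loops gets the __copyuser_seg string spliced into the inline asm, copy-to-user paths switch %es to the user segment around their rep movs via __COPYUSER_SET_ES/__COPYUSER_RESTORE_ES, and __copy_user() grows prefix/set/restore parameters so one macro serves both directions. The string form of __copyuser_seg is not defined in this section; presumably the patch's uaccess headers define it along the following lines, which is what makes the rewritten lines assemble to gs-prefixed instructions under UDEREF and to the original instructions otherwise:

/* assumption: the definition lives in the patch's uaccess headers, not here */
#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
#define __copyuser_seg "gs;"    /* user-side accesses go through %gs */
#else
#define __copyuser_seg ""       /* no override without UDEREF */
#endif

/* the call sites later in this file pair the new parameters up as:
 *   copy to user:   __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES)
 *   copy from user: __copy_user(to, from, n, __copyuser_seg, "", "")
 * i.e. stores to user space go through a temporarily switched %es,
 * loads from user space carry the gs: prefix */
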
20485diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_64.c linux-2.6.32.45/arch/x86/lib/usercopy_64.c
20486--- linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20487+++ linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20488@@ -42,6 +42,12 @@ long
20489 __strncpy_from_user(char *dst, const char __user *src, long count)
20490 {
20491 long res;
20492+
20493+#ifdef CONFIG_PAX_MEMORY_UDEREF
20494+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20495+ src += PAX_USER_SHADOW_BASE;
20496+#endif
20497+
20498 __do_strncpy_from_user(dst, src, count, res);
20499 return res;
20500 }
20501@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20502 {
20503 long __d0;
20504 might_fault();
20505+
20506+#ifdef CONFIG_PAX_MEMORY_UDEREF
20507+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20508+ addr += PAX_USER_SHADOW_BASE;
20509+#endif
20510+
20511 /* no memory constraint because it doesn't change any memory gcc knows
20512 about */
20513 asm volatile(
20514@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20515
20516 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20517 {
20518- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20519+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20520+
20521+#ifdef CONFIG_PAX_MEMORY_UDEREF
20522+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20523+ to += PAX_USER_SHADOW_BASE;
20524+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20525+ from += PAX_USER_SHADOW_BASE;
20526+#endif
20527+
20528 return copy_user_generic((__force void *)to, (__force void *)from, len);
20529- }
20530- return len;
20531+ }
20532+ return len;
20533 }
20534 EXPORT_SYMBOL(copy_in_user);
20535
20536diff -urNp linux-2.6.32.45/arch/x86/Makefile linux-2.6.32.45/arch/x86/Makefile
20537--- linux-2.6.32.45/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20538+++ linux-2.6.32.45/arch/x86/Makefile 2011-07-19 18:16:02.000000000 -0400
20539@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
20540 else
20541 BITS := 64
20542 UTS_MACHINE := x86_64
20543+ biarch := $(call cc-option,-m64)
20544 CHECKFLAGS += -D__x86_64__ -m64
20545
20546 KBUILD_AFLAGS += -m64
20547@@ -189,3 +190,12 @@ define archhelp
20548 echo ' FDARGS="..." arguments for the booted kernel'
20549 echo ' FDINITRD=file initrd for the booted kernel'
20550 endef
20551+
20552+define OLD_LD
20553+
20554+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20555+*** Please upgrade your binutils to 2.18 or newer
20556+endef
20557+
20558+archprepare:
20559+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20560diff -urNp linux-2.6.32.45/arch/x86/mm/extable.c linux-2.6.32.45/arch/x86/mm/extable.c
20561--- linux-2.6.32.45/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20562+++ linux-2.6.32.45/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20563@@ -1,14 +1,71 @@
20564 #include <linux/module.h>
20565 #include <linux/spinlock.h>
20566+#include <linux/sort.h>
20567 #include <asm/uaccess.h>
20568+#include <asm/pgtable.h>
20569
20570+/*
20571+ * The exception table needs to be sorted so that the binary
20572+ * search that we use to find entries in it works properly.
20573+ * This is used both for the kernel exception table and for
20574+ * the exception tables of modules that get loaded.
20575+ */
20576+static int cmp_ex(const void *a, const void *b)
20577+{
20578+ const struct exception_table_entry *x = a, *y = b;
20579+
20580+ /* avoid overflow */
20581+ if (x->insn > y->insn)
20582+ return 1;
20583+ if (x->insn < y->insn)
20584+ return -1;
20585+ return 0;
20586+}
20587+
20588+static void swap_ex(void *a, void *b, int size)
20589+{
20590+ struct exception_table_entry t, *x = a, *y = b;
20591+
20592+ t = *x;
20593+
20594+ pax_open_kernel();
20595+ *x = *y;
20596+ *y = t;
20597+ pax_close_kernel();
20598+}
20599+
20600+void sort_extable(struct exception_table_entry *start,
20601+ struct exception_table_entry *finish)
20602+{
20603+ sort(start, finish - start, sizeof(struct exception_table_entry),
20604+ cmp_ex, swap_ex);
20605+}
20606+
20607+#ifdef CONFIG_MODULES
20608+/*
20609+ * If the exception table is sorted, any referring to the module init
20610+ * will be at the beginning or the end.
20611+ */
20612+void trim_init_extable(struct module *m)
20613+{
20614+ /*trim the beginning*/
20615+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20616+ m->extable++;
20617+ m->num_exentries--;
20618+ }
20619+ /*trim the end*/
20620+ while (m->num_exentries &&
20621+ within_module_init(m->extable[m->num_exentries-1].insn, m))
20622+ m->num_exentries--;
20623+}
20624+#endif /* CONFIG_MODULES */
20625
20626 int fixup_exception(struct pt_regs *regs)
20627 {
20628 const struct exception_table_entry *fixup;
20629
20630 #ifdef CONFIG_PNPBIOS
20631- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20632+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20633 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20634 extern u32 pnp_bios_is_utter_crap;
20635 pnp_bios_is_utter_crap = 1;
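
The extable.c additions give x86 its own sort_extable()/swap_ex()/trim_init_extable(): entries are ordered by faulting-instruction address so the kernel's binary search over the table keeps working, trim_init_extable() drops entries pointing into a module's freed init section from either end, and the arch-specific swap_ex() exists so its writes can be wrapped in pax_open_kernel()/pax_close_kernel(), presumably because under KERNEXEC the table sits in read-only memory. For reference, the consumer that this ordering serves looks roughly like the following (the real search_extable() lives in lib/extable.c, outside this hunk):

/* rough shape of the lookup performed on the sorted table; field names
 * follow the kernel's struct exception_table_entry */
struct extable_entry_sketch {
        unsigned long insn;
        unsigned long fixup;
};

static const struct extable_entry_sketch *
search_extable_sketch(const struct extable_entry_sketch *first,
                      const struct extable_entry_sketch *last,
                      unsigned long faulting_ip)
{
        while (first <= last) {
                const struct extable_entry_sketch *mid = first + (last - first) / 2;

                if (mid->insn < faulting_ip)
                        first = mid + 1;
                else if (mid->insn > faulting_ip)
                        last = mid - 1;
                else
                        return mid;
        }
        return NULL;
}
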
20636diff -urNp linux-2.6.32.45/arch/x86/mm/fault.c linux-2.6.32.45/arch/x86/mm/fault.c
20637--- linux-2.6.32.45/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20638+++ linux-2.6.32.45/arch/x86/mm/fault.c 2011-06-06 17:35:16.000000000 -0400
20639@@ -11,10 +11,19 @@
20640 #include <linux/kprobes.h> /* __kprobes, ... */
20641 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20642 #include <linux/perf_event.h> /* perf_sw_event */
20643+#include <linux/unistd.h>
20644+#include <linux/compiler.h>
20645
20646 #include <asm/traps.h> /* dotraplinkage, ... */
20647 #include <asm/pgalloc.h> /* pgd_*(), ... */
20648 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20649+#include <asm/vsyscall.h>
20650+#include <asm/tlbflush.h>
20651+
20652+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20653+#include <asm/stacktrace.h>
20654+#include "../kernel/dumpstack.h"
20655+#endif
20656
20657 /*
20658 * Page fault error code bits:
20659@@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20660 int ret = 0;
20661
20662 /* kprobe_running() needs smp_processor_id() */
20663- if (kprobes_built_in() && !user_mode_vm(regs)) {
20664+ if (kprobes_built_in() && !user_mode(regs)) {
20665 preempt_disable();
20666 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20667 ret = 1;
20668@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20669 return !instr_lo || (instr_lo>>1) == 1;
20670 case 0x00:
20671 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20672- if (probe_kernel_address(instr, opcode))
20673+ if (user_mode(regs)) {
20674+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20675+ return 0;
20676+ } else if (probe_kernel_address(instr, opcode))
20677 return 0;
20678
20679 *prefetch = (instr_lo == 0xF) &&
20680@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20681 while (instr < max_instr) {
20682 unsigned char opcode;
20683
20684- if (probe_kernel_address(instr, opcode))
20685+ if (user_mode(regs)) {
20686+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20687+ break;
20688+ } else if (probe_kernel_address(instr, opcode))
20689 break;
20690
20691 instr++;
20692@@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20693 force_sig_info(si_signo, &info, tsk);
20694 }
20695
20696+#ifdef CONFIG_PAX_EMUTRAMP
20697+static int pax_handle_fetch_fault(struct pt_regs *regs);
20698+#endif
20699+
20700+#ifdef CONFIG_PAX_PAGEEXEC
20701+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20702+{
20703+ pgd_t *pgd;
20704+ pud_t *pud;
20705+ pmd_t *pmd;
20706+
20707+ pgd = pgd_offset(mm, address);
20708+ if (!pgd_present(*pgd))
20709+ return NULL;
20710+ pud = pud_offset(pgd, address);
20711+ if (!pud_present(*pud))
20712+ return NULL;
20713+ pmd = pmd_offset(pud, address);
20714+ if (!pmd_present(*pmd))
20715+ return NULL;
20716+ return pmd;
20717+}
20718+#endif
20719+
20720 DEFINE_SPINLOCK(pgd_lock);
20721 LIST_HEAD(pgd_list);
20722
20723@@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20724 address += PMD_SIZE) {
20725
20726 unsigned long flags;
20727+
20728+#ifdef CONFIG_PAX_PER_CPU_PGD
20729+ unsigned long cpu;
20730+#else
20731 struct page *page;
20732+#endif
20733
20734 spin_lock_irqsave(&pgd_lock, flags);
20735+
20736+#ifdef CONFIG_PAX_PER_CPU_PGD
20737+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20738+ pgd_t *pgd = get_cpu_pgd(cpu);
20739+#else
20740 list_for_each_entry(page, &pgd_list, lru) {
20741- if (!vmalloc_sync_one(page_address(page), address))
20742+ pgd_t *pgd = page_address(page);
20743+#endif
20744+
20745+ if (!vmalloc_sync_one(pgd, address))
20746 break;
20747 }
20748 spin_unlock_irqrestore(&pgd_lock, flags);
20749@@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20750 * an interrupt in the middle of a task switch..
20751 */
20752 pgd_paddr = read_cr3();
20753+
20754+#ifdef CONFIG_PAX_PER_CPU_PGD
20755+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20756+#endif
20757+
20758 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20759 if (!pmd_k)
20760 return -1;
20761@@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
20762
20763 const pgd_t *pgd_ref = pgd_offset_k(address);
20764 unsigned long flags;
20765+
20766+#ifdef CONFIG_PAX_PER_CPU_PGD
20767+ unsigned long cpu;
20768+#else
20769 struct page *page;
20770+#endif
20771
20772 if (pgd_none(*pgd_ref))
20773 continue;
20774
20775 spin_lock_irqsave(&pgd_lock, flags);
20776+
20777+#ifdef CONFIG_PAX_PER_CPU_PGD
20778+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20779+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
20780+#else
20781 list_for_each_entry(page, &pgd_list, lru) {
20782 pgd_t *pgd;
20783 pgd = (pgd_t *)page_address(page) + pgd_index(address);
20784+#endif
20785+
20786 if (pgd_none(*pgd))
20787 set_pgd(pgd, *pgd_ref);
20788 else
20789@@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
20790 * happen within a race in page table update. In the later
20791 * case just flush:
20792 */
20793+
20794+#ifdef CONFIG_PAX_PER_CPU_PGD
20795+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20796+ pgd = pgd_offset_cpu(smp_processor_id(), address);
20797+#else
20798 pgd = pgd_offset(current->active_mm, address);
20799+#endif
20800+
20801 pgd_ref = pgd_offset_k(address);
20802 if (pgd_none(*pgd_ref))
20803 return -1;
20804@@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20805 static int is_errata100(struct pt_regs *regs, unsigned long address)
20806 {
20807 #ifdef CONFIG_X86_64
20808- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20809+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20810 return 1;
20811 #endif
20812 return 0;
20813@@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20814 }
20815
20816 static const char nx_warning[] = KERN_CRIT
20817-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20818+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20819
20820 static void
20821 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20822@@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
20823 if (!oops_may_print())
20824 return;
20825
20826- if (error_code & PF_INSTR) {
20827+ if (nx_enabled && (error_code & PF_INSTR)) {
20828 unsigned int level;
20829
20830 pte_t *pte = lookup_address(address, &level);
20831
20832 if (pte && pte_present(*pte) && !pte_exec(*pte))
20833- printk(nx_warning, current_uid());
20834+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20835 }
20836
20837+#ifdef CONFIG_PAX_KERNEXEC
20838+ if (init_mm.start_code <= address && address < init_mm.end_code) {
20839+ if (current->signal->curr_ip)
20840+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20841+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20842+ else
20843+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20844+ current->comm, task_pid_nr(current), current_uid(), current_euid());
20845+ }
20846+#endif
20847+
20848 printk(KERN_ALERT "BUG: unable to handle kernel ");
20849 if (address < PAGE_SIZE)
20850 printk(KERN_CONT "NULL pointer dereference");
20851@@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
20852 unsigned long address, int si_code)
20853 {
20854 struct task_struct *tsk = current;
20855+ struct mm_struct *mm = tsk->mm;
20856+
20857+#ifdef CONFIG_X86_64
20858+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
20859+ if (regs->ip == (unsigned long)vgettimeofday) {
20860+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
20861+ return;
20862+ } else if (regs->ip == (unsigned long)vtime) {
20863+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
20864+ return;
20865+ } else if (regs->ip == (unsigned long)vgetcpu) {
20866+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
20867+ return;
20868+ }
20869+ }
20870+#endif
20871+
20872+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20873+ if (mm && (error_code & PF_USER)) {
20874+ unsigned long ip = regs->ip;
20875+
20876+ if (v8086_mode(regs))
20877+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20878+
20879+ /*
20880+ * It's possible to have interrupts off here:
20881+ */
20882+ local_irq_enable();
20883+
20884+#ifdef CONFIG_PAX_PAGEEXEC
20885+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20886+ ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20887+
20888+#ifdef CONFIG_PAX_EMUTRAMP
20889+ switch (pax_handle_fetch_fault(regs)) {
20890+ case 2:
20891+ return;
20892+ }
20893+#endif
20894+
20895+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20896+ do_group_exit(SIGKILL);
20897+ }
20898+#endif
20899+
20900+#ifdef CONFIG_PAX_SEGMEXEC
20901+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20902+
20903+#ifdef CONFIG_PAX_EMUTRAMP
20904+ switch (pax_handle_fetch_fault(regs)) {
20905+ case 2:
20906+ return;
20907+ }
20908+#endif
20909+
20910+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20911+ do_group_exit(SIGKILL);
20912+ }
20913+#endif
20914+
20915+ }
20916+#endif
20917
20918 /* User mode accesses just cause a SIGSEGV */
20919 if (error_code & PF_USER) {
20920@@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
20921 return 1;
20922 }
20923
20924+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20925+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20926+{
20927+ pte_t *pte;
20928+ pmd_t *pmd;
20929+ spinlock_t *ptl;
20930+ unsigned char pte_mask;
20931+
20932+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20933+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
20934+ return 0;
20935+
20936+ /* PaX: it's our fault, let's handle it if we can */
20937+
20938+ /* PaX: take a look at read faults before acquiring any locks */
20939+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20940+ /* instruction fetch attempt from a protected page in user mode */
20941+ up_read(&mm->mmap_sem);
20942+
20943+#ifdef CONFIG_PAX_EMUTRAMP
20944+ switch (pax_handle_fetch_fault(regs)) {
20945+ case 2:
20946+ return 1;
20947+ }
20948+#endif
20949+
20950+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20951+ do_group_exit(SIGKILL);
20952+ }
20953+
20954+ pmd = pax_get_pmd(mm, address);
20955+ if (unlikely(!pmd))
20956+ return 0;
20957+
20958+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20959+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20960+ pte_unmap_unlock(pte, ptl);
20961+ return 0;
20962+ }
20963+
20964+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20965+ /* write attempt to a protected page in user mode */
20966+ pte_unmap_unlock(pte, ptl);
20967+ return 0;
20968+ }
20969+
20970+#ifdef CONFIG_SMP
20971+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20972+#else
20973+ if (likely(address > get_limit(regs->cs)))
20974+#endif
20975+ {
20976+ set_pte(pte, pte_mkread(*pte));
20977+ __flush_tlb_one(address);
20978+ pte_unmap_unlock(pte, ptl);
20979+ up_read(&mm->mmap_sem);
20980+ return 1;
20981+ }
20982+
20983+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20984+
20985+ /*
20986+ * PaX: fill DTLB with user rights and retry
20987+ */
20988+ __asm__ __volatile__ (
20989+ "orb %2,(%1)\n"
20990+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20991+/*
20992+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20993+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20994+ * page fault when examined during a TLB load attempt. this is true not only
20995+ * for PTEs holding a non-present entry but also present entries that will
20996+ * raise a page fault (such as those set up by PaX, or the copy-on-write
20997+ * mechanism). in effect it means that we do *not* need to flush the TLBs
20998+ * for our target pages since their PTEs are simply not in the TLBs at all.
20999+
21000+ * the best thing in omitting it is that we gain around 15-20% speed in the
21001+ * fast path of the page fault handler and can get rid of tracing since we
21002+ * can no longer flush unintended entries.
21003+ */
21004+ "invlpg (%0)\n"
21005+#endif
21006+ __copyuser_seg"testb $0,(%0)\n"
21007+ "xorb %3,(%1)\n"
21008+ :
21009+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21010+ : "memory", "cc");
21011+ pte_unmap_unlock(pte, ptl);
21012+ up_read(&mm->mmap_sem);
21013+ return 1;
21014+}
21015+#endif
21016+
21017 /*
21018 * Handle a spurious fault caused by a stale TLB entry.
21019 *
21020@@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
21021 static inline int
21022 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
21023 {
21024+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21025+ return 1;
21026+
21027 if (write) {
21028 /* write, present and write, not present: */
21029 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21030@@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
21031 {
21032 struct vm_area_struct *vma;
21033 struct task_struct *tsk;
21034- unsigned long address;
21035 struct mm_struct *mm;
21036 int write;
21037 int fault;
21038
21039+ /* Get the faulting address: */
21040+ unsigned long address = read_cr2();
21041+
21042+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21043+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21044+ if (!search_exception_tables(regs->ip)) {
21045+ bad_area_nosemaphore(regs, error_code, address);
21046+ return;
21047+ }
21048+ if (address < PAX_USER_SHADOW_BASE) {
21049+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21050+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
21051+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21052+ } else
21053+ address -= PAX_USER_SHADOW_BASE;
21054+ }
21055+#endif
21056+
21057 tsk = current;
21058 mm = tsk->mm;
21059
21060- /* Get the faulting address: */
21061- address = read_cr2();
21062-
21063 /*
21064 * Detect and handle instructions that would cause a page fault for
21065 * both a tracked kernel page and a userspace page.
21066@@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
21067 * User-mode registers count as a user access even for any
21068 * potential system fault or CPU buglet:
21069 */
21070- if (user_mode_vm(regs)) {
21071+ if (user_mode(regs)) {
21072 local_irq_enable();
21073 error_code |= PF_USER;
21074 } else {
21075@@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
21076 might_sleep();
21077 }
21078
21079+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21080+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21081+ return;
21082+#endif
21083+
21084 vma = find_vma(mm, address);
21085 if (unlikely(!vma)) {
21086 bad_area(regs, error_code, address);
21087@@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
21088 bad_area(regs, error_code, address);
21089 return;
21090 }
21091- if (error_code & PF_USER) {
21092- /*
21093- * Accessing the stack below %sp is always a bug.
21094- * The large cushion allows instructions like enter
21095- * and pusha to work. ("enter $65535, $31" pushes
21096- * 32 pointers and then decrements %sp by 65535.)
21097- */
21098- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21099- bad_area(regs, error_code, address);
21100- return;
21101- }
21102+ /*
21103+ * Accessing the stack below %sp is always a bug.
21104+ * The large cushion allows instructions like enter
21105+ * and pusha to work. ("enter $65535, $31" pushes
21106+ * 32 pointers and then decrements %sp by 65535.)
21107+ */
21108+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21109+ bad_area(regs, error_code, address);
21110+ return;
21111+ }
21112+
21113+#ifdef CONFIG_PAX_SEGMEXEC
21114+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21115+ bad_area(regs, error_code, address);
21116+ return;
21117 }
21118+#endif
21119+
21120 if (unlikely(expand_stack(vma, address))) {
21121 bad_area(regs, error_code, address);
21122 return;
21123@@ -1146,3 +1416,199 @@ good_area:
21124
21125 up_read(&mm->mmap_sem);
21126 }
21127+
21128+#ifdef CONFIG_PAX_EMUTRAMP
21129+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21130+{
21131+ int err;
21132+
21133+ do { /* PaX: gcc trampoline emulation #1 */
21134+ unsigned char mov1, mov2;
21135+ unsigned short jmp;
21136+ unsigned int addr1, addr2;
21137+
21138+#ifdef CONFIG_X86_64
21139+ if ((regs->ip + 11) >> 32)
21140+ break;
21141+#endif
21142+
21143+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21144+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21145+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21146+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21147+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21148+
21149+ if (err)
21150+ break;
21151+
21152+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21153+ regs->cx = addr1;
21154+ regs->ax = addr2;
21155+ regs->ip = addr2;
21156+ return 2;
21157+ }
21158+ } while (0);
21159+
21160+ do { /* PaX: gcc trampoline emulation #2 */
21161+ unsigned char mov, jmp;
21162+ unsigned int addr1, addr2;
21163+
21164+#ifdef CONFIG_X86_64
21165+ if ((regs->ip + 9) >> 32)
21166+ break;
21167+#endif
21168+
21169+ err = get_user(mov, (unsigned char __user *)regs->ip);
21170+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21171+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21172+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21173+
21174+ if (err)
21175+ break;
21176+
21177+ if (mov == 0xB9 && jmp == 0xE9) {
21178+ regs->cx = addr1;
21179+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21180+ return 2;
21181+ }
21182+ } while (0);
21183+
21184+ return 1; /* PaX in action */
21185+}
21186+
21187+#ifdef CONFIG_X86_64
21188+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21189+{
21190+ int err;
21191+
21192+ do { /* PaX: gcc trampoline emulation #1 */
21193+ unsigned short mov1, mov2, jmp1;
21194+ unsigned char jmp2;
21195+ unsigned int addr1;
21196+ unsigned long addr2;
21197+
21198+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21199+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21200+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21201+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21202+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21203+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21204+
21205+ if (err)
21206+ break;
21207+
21208+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21209+ regs->r11 = addr1;
21210+ regs->r10 = addr2;
21211+ regs->ip = addr1;
21212+ return 2;
21213+ }
21214+ } while (0);
21215+
21216+ do { /* PaX: gcc trampoline emulation #2 */
21217+ unsigned short mov1, mov2, jmp1;
21218+ unsigned char jmp2;
21219+ unsigned long addr1, addr2;
21220+
21221+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21222+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21223+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21224+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21225+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21226+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21227+
21228+ if (err)
21229+ break;
21230+
21231+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21232+ regs->r11 = addr1;
21233+ regs->r10 = addr2;
21234+ regs->ip = addr1;
21235+ return 2;
21236+ }
21237+ } while (0);
21238+
21239+ return 1; /* PaX in action */
21240+}
21241+#endif
21242+
21243+/*
21244+ * PaX: decide what to do with offenders (regs->ip = fault address)
21245+ *
21246+ * returns 1 when task should be killed
21247+ * 2 when gcc trampoline was detected
21248+ */
21249+static int pax_handle_fetch_fault(struct pt_regs *regs)
21250+{
21251+ if (v8086_mode(regs))
21252+ return 1;
21253+
21254+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21255+ return 1;
21256+
21257+#ifdef CONFIG_X86_32
21258+ return pax_handle_fetch_fault_32(regs);
21259+#else
21260+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21261+ return pax_handle_fetch_fault_32(regs);
21262+ else
21263+ return pax_handle_fetch_fault_64(regs);
21264+#endif
21265+}
21266+#endif
21267+
21268+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21269+void pax_report_insns(void *pc, void *sp)
21270+{
21271+ long i;
21272+
21273+ printk(KERN_ERR "PAX: bytes at PC: ");
21274+ for (i = 0; i < 20; i++) {
21275+ unsigned char c;
21276+ if (get_user(c, (__force unsigned char __user *)pc+i))
21277+ printk(KERN_CONT "?? ");
21278+ else
21279+ printk(KERN_CONT "%02x ", c);
21280+ }
21281+ printk("\n");
21282+
21283+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21284+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
21285+ unsigned long c;
21286+ if (get_user(c, (__force unsigned long __user *)sp+i))
21287+#ifdef CONFIG_X86_32
21288+ printk(KERN_CONT "???????? ");
21289+#else
21290+ printk(KERN_CONT "???????????????? ");
21291+#endif
21292+ else
21293+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21294+ }
21295+ printk("\n");
21296+}
21297+#endif
21298+
21299+/**
21300+ * probe_kernel_write(): safely attempt to write to a location
21301+ * @dst: address to write to
21302+ * @src: pointer to the data that shall be written
21303+ * @size: size of the data chunk
21304+ *
21305+ * Safely write to address @dst from the buffer at @src. If a kernel fault
21306+ * happens, handle that and return -EFAULT.
21307+ */
21308+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21309+{
21310+ long ret;
21311+ mm_segment_t old_fs = get_fs();
21312+
21313+ set_fs(KERNEL_DS);
21314+ pagefault_disable();
21315+ pax_open_kernel();
21316+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21317+ pax_close_kernel();
21318+ pagefault_enable();
21319+ set_fs(old_fs);
21320+
21321+ return ret ? -EFAULT : 0;
21322+}
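
(Aside, not part of the patch: the two i386 byte sequences that pax_handle_fetch_fault_32() above matches are the trampolines gcc materialises for nested functions. Pattern #1 is mov $chain,%ecx; mov $func,%eax; jmp *%eax, i.e. the bytes B9 <imm32> B8 <imm32> FF E0, and on a match the handler performs the trampoline's effect itself -- load %ecx/%eax, point %eip at the target -- instead of executing from the non-executable page. A minimal user-space sketch of that decoding, with a made-up helper name and struct, assuming a little-endian x86 host:)

#include <stdint.h>
#include <string.h>

struct tramp { uint32_t chain; uint32_t func; };

/* Return 1 and fill *t if the 12 bytes at buf look like gcc trampoline #1;
 * same offsets as the get_user() reads in pax_handle_fetch_fault_32(). */
static int parse_i386_trampoline(const unsigned char *buf, struct tramp *t)
{
        if (buf[0] != 0xB9 || buf[5] != 0xB8 || buf[10] != 0xFF || buf[11] != 0xE0)
                return 0;
        memcpy(&t->chain, buf + 1, 4);  /* imm32 that ends up in %ecx */
        memcpy(&t->func,  buf + 6, 4);  /* imm32 loaded into %eax and jumped to */
        return 1;
}
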
21323diff -urNp linux-2.6.32.45/arch/x86/mm/gup.c linux-2.6.32.45/arch/x86/mm/gup.c
21324--- linux-2.6.32.45/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21325+++ linux-2.6.32.45/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21326@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21327 addr = start;
21328 len = (unsigned long) nr_pages << PAGE_SHIFT;
21329 end = start + len;
21330- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21331+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21332 (void __user *)start, len)))
21333 return 0;
21334
21335diff -urNp linux-2.6.32.45/arch/x86/mm/highmem_32.c linux-2.6.32.45/arch/x86/mm/highmem_32.c
21336--- linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21337+++ linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21338@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21339 idx = type + KM_TYPE_NR*smp_processor_id();
21340 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21341 BUG_ON(!pte_none(*(kmap_pte-idx)));
21342+
21343+ pax_open_kernel();
21344 set_pte(kmap_pte-idx, mk_pte(page, prot));
21345+ pax_close_kernel();
21346
21347 return (void *)vaddr;
21348 }
21349diff -urNp linux-2.6.32.45/arch/x86/mm/hugetlbpage.c linux-2.6.32.45/arch/x86/mm/hugetlbpage.c
21350--- linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21351+++ linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21352@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21353 struct hstate *h = hstate_file(file);
21354 struct mm_struct *mm = current->mm;
21355 struct vm_area_struct *vma;
21356- unsigned long start_addr;
21357+ unsigned long start_addr, pax_task_size = TASK_SIZE;
21358+
21359+#ifdef CONFIG_PAX_SEGMEXEC
21360+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21361+ pax_task_size = SEGMEXEC_TASK_SIZE;
21362+#endif
21363+
21364+ pax_task_size -= PAGE_SIZE;
21365
21366 if (len > mm->cached_hole_size) {
21367- start_addr = mm->free_area_cache;
21368+ start_addr = mm->free_area_cache;
21369 } else {
21370- start_addr = TASK_UNMAPPED_BASE;
21371- mm->cached_hole_size = 0;
21372+ start_addr = mm->mmap_base;
21373+ mm->cached_hole_size = 0;
21374 }
21375
21376 full_search:
21377@@ -281,26 +288,27 @@ full_search:
21378
21379 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21380 /* At this point: (!vma || addr < vma->vm_end). */
21381- if (TASK_SIZE - len < addr) {
21382+ if (pax_task_size - len < addr) {
21383 /*
21384 * Start a new search - just in case we missed
21385 * some holes.
21386 */
21387- if (start_addr != TASK_UNMAPPED_BASE) {
21388- start_addr = TASK_UNMAPPED_BASE;
21389+ if (start_addr != mm->mmap_base) {
21390+ start_addr = mm->mmap_base;
21391 mm->cached_hole_size = 0;
21392 goto full_search;
21393 }
21394 return -ENOMEM;
21395 }
21396- if (!vma || addr + len <= vma->vm_start) {
21397- mm->free_area_cache = addr + len;
21398- return addr;
21399- }
21400+ if (check_heap_stack_gap(vma, addr, len))
21401+ break;
21402 if (addr + mm->cached_hole_size < vma->vm_start)
21403 mm->cached_hole_size = vma->vm_start - addr;
21404 addr = ALIGN(vma->vm_end, huge_page_size(h));
21405 }
21406+
21407+ mm->free_area_cache = addr + len;
21408+ return addr;
21409 }
21410
21411 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21412@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21413 {
21414 struct hstate *h = hstate_file(file);
21415 struct mm_struct *mm = current->mm;
21416- struct vm_area_struct *vma, *prev_vma;
21417- unsigned long base = mm->mmap_base, addr = addr0;
21418+ struct vm_area_struct *vma;
21419+ unsigned long base = mm->mmap_base, addr;
21420 unsigned long largest_hole = mm->cached_hole_size;
21421- int first_time = 1;
21422
21423 /* don't allow allocations above current base */
21424 if (mm->free_area_cache > base)
21425@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21426 largest_hole = 0;
21427 mm->free_area_cache = base;
21428 }
21429-try_again:
21430+
21431 /* make sure it can fit in the remaining address space */
21432 if (mm->free_area_cache < len)
21433 goto fail;
21434
21435 /* either no address requested or cant fit in requested address hole */
21436- addr = (mm->free_area_cache - len) & huge_page_mask(h);
21437+ addr = (mm->free_area_cache - len);
21438 do {
21439+ addr &= huge_page_mask(h);
21440+ vma = find_vma(mm, addr);
21441 /*
21442 * Lookup failure means no vma is above this address,
21443 * i.e. return with success:
21444- */
21445- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21446- return addr;
21447-
21448- /*
21449 * new region fits between prev_vma->vm_end and
21450 * vma->vm_start, use it:
21451 */
21452- if (addr + len <= vma->vm_start &&
21453- (!prev_vma || (addr >= prev_vma->vm_end))) {
21454+ if (check_heap_stack_gap(vma, addr, len)) {
21455 /* remember the address as a hint for next time */
21456- mm->cached_hole_size = largest_hole;
21457- return (mm->free_area_cache = addr);
21458- } else {
21459- /* pull free_area_cache down to the first hole */
21460- if (mm->free_area_cache == vma->vm_end) {
21461- mm->free_area_cache = vma->vm_start;
21462- mm->cached_hole_size = largest_hole;
21463- }
21464+ mm->cached_hole_size = largest_hole;
21465+ return (mm->free_area_cache = addr);
21466+ }
21467+ /* pull free_area_cache down to the first hole */
21468+ if (mm->free_area_cache == vma->vm_end) {
21469+ mm->free_area_cache = vma->vm_start;
21470+ mm->cached_hole_size = largest_hole;
21471 }
21472
21473 /* remember the largest hole we saw so far */
21474 if (addr + largest_hole < vma->vm_start)
21475- largest_hole = vma->vm_start - addr;
21476+ largest_hole = vma->vm_start - addr;
21477
21478 /* try just below the current vma->vm_start */
21479- addr = (vma->vm_start - len) & huge_page_mask(h);
21480- } while (len <= vma->vm_start);
21481+ addr = skip_heap_stack_gap(vma, len);
21482+ } while (!IS_ERR_VALUE(addr));
21483
21484 fail:
21485 /*
21486- * if hint left us with no space for the requested
21487- * mapping then try again:
21488- */
21489- if (first_time) {
21490- mm->free_area_cache = base;
21491- largest_hole = 0;
21492- first_time = 0;
21493- goto try_again;
21494- }
21495- /*
21496 * A failed mmap() very likely causes application failure,
21497 * so fall back to the bottom-up function here. This scenario
21498 * can happen with large stack limits and large mmap()
21499 * allocations.
21500 */
21501- mm->free_area_cache = TASK_UNMAPPED_BASE;
21502+
21503+#ifdef CONFIG_PAX_SEGMEXEC
21504+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21505+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21506+ else
21507+#endif
21508+
21509+ mm->mmap_base = TASK_UNMAPPED_BASE;
21510+
21511+#ifdef CONFIG_PAX_RANDMMAP
21512+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21513+ mm->mmap_base += mm->delta_mmap;
21514+#endif
21515+
21516+ mm->free_area_cache = mm->mmap_base;
21517 mm->cached_hole_size = ~0UL;
21518 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21519 len, pgoff, flags);
21520@@ -387,6 +393,7 @@ fail:
21521 /*
21522 * Restore the topdown base:
21523 */
21524+ mm->mmap_base = base;
21525 mm->free_area_cache = base;
21526 mm->cached_hole_size = ~0UL;
21527
21528@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21529 struct hstate *h = hstate_file(file);
21530 struct mm_struct *mm = current->mm;
21531 struct vm_area_struct *vma;
21532+ unsigned long pax_task_size = TASK_SIZE;
21533
21534 if (len & ~huge_page_mask(h))
21535 return -EINVAL;
21536- if (len > TASK_SIZE)
21537+
21538+#ifdef CONFIG_PAX_SEGMEXEC
21539+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21540+ pax_task_size = SEGMEXEC_TASK_SIZE;
21541+#endif
21542+
21543+ pax_task_size -= PAGE_SIZE;
21544+
21545+ if (len > pax_task_size)
21546 return -ENOMEM;
21547
21548 if (flags & MAP_FIXED) {
21549@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21550 if (addr) {
21551 addr = ALIGN(addr, huge_page_size(h));
21552 vma = find_vma(mm, addr);
21553- if (TASK_SIZE - len >= addr &&
21554- (!vma || addr + len <= vma->vm_start))
21555+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21556 return addr;
21557 }
21558 if (mm->get_unmapped_area == arch_get_unmapped_area)
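
(Aside, not part of the patch: every get_unmapped_area variant touched here swaps the open-coded "!vma || addr + len <= vma->vm_start" test for check_heap_stack_gap(). The real helper is defined elsewhere in this patch; the following is only a conceptual sketch of what it has to decide, with an illustrative name:)

#include <linux/mm.h>

/* Sketch only -- not the patch's implementation. */
static int check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
                                       unsigned long addr, unsigned long len)
{
        if (!vma)
                return 1;               /* nothing mapped above: the hole is usable */
        if (addr + len > vma->vm_start)
                return 0;               /* candidate range would run into the next VMA */
        /* the real helper additionally rejects candidates ending right under a
         * downward-growing stack VMA, keeping a guard gap between heap and stack */
        return 1;
}
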
21559diff -urNp linux-2.6.32.45/arch/x86/mm/init_32.c linux-2.6.32.45/arch/x86/mm/init_32.c
21560--- linux-2.6.32.45/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21561+++ linux-2.6.32.45/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21562@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21563 }
21564
21565 /*
21566- * Creates a middle page table and puts a pointer to it in the
21567- * given global directory entry. This only returns the gd entry
21568- * in non-PAE compilation mode, since the middle layer is folded.
21569- */
21570-static pmd_t * __init one_md_table_init(pgd_t *pgd)
21571-{
21572- pud_t *pud;
21573- pmd_t *pmd_table;
21574-
21575-#ifdef CONFIG_X86_PAE
21576- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21577- if (after_bootmem)
21578- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21579- else
21580- pmd_table = (pmd_t *)alloc_low_page();
21581- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21582- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21583- pud = pud_offset(pgd, 0);
21584- BUG_ON(pmd_table != pmd_offset(pud, 0));
21585-
21586- return pmd_table;
21587- }
21588-#endif
21589- pud = pud_offset(pgd, 0);
21590- pmd_table = pmd_offset(pud, 0);
21591-
21592- return pmd_table;
21593-}
21594-
21595-/*
21596 * Create a page table and place a pointer to it in a middle page
21597 * directory entry:
21598 */
21599@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21600 page_table = (pte_t *)alloc_low_page();
21601
21602 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21603+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21604+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21605+#else
21606 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21607+#endif
21608 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21609 }
21610
21611 return pte_offset_kernel(pmd, 0);
21612 }
21613
21614+static pmd_t * __init one_md_table_init(pgd_t *pgd)
21615+{
21616+ pud_t *pud;
21617+ pmd_t *pmd_table;
21618+
21619+ pud = pud_offset(pgd, 0);
21620+ pmd_table = pmd_offset(pud, 0);
21621+
21622+ return pmd_table;
21623+}
21624+
21625 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21626 {
21627 int pgd_idx = pgd_index(vaddr);
21628@@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21629 int pgd_idx, pmd_idx;
21630 unsigned long vaddr;
21631 pgd_t *pgd;
21632+ pud_t *pud;
21633 pmd_t *pmd;
21634 pte_t *pte = NULL;
21635
21636@@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21637 pgd = pgd_base + pgd_idx;
21638
21639 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21640- pmd = one_md_table_init(pgd);
21641- pmd = pmd + pmd_index(vaddr);
21642+ pud = pud_offset(pgd, vaddr);
21643+ pmd = pmd_offset(pud, vaddr);
21644+
21645+#ifdef CONFIG_X86_PAE
21646+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21647+#endif
21648+
21649 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21650 pmd++, pmd_idx++) {
21651 pte = page_table_kmap_check(one_page_table_init(pmd),
21652@@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21653 }
21654 }
21655
21656-static inline int is_kernel_text(unsigned long addr)
21657+static inline int is_kernel_text(unsigned long start, unsigned long end)
21658 {
21659- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21660- return 1;
21661- return 0;
21662+ if ((start > ktla_ktva((unsigned long)_etext) ||
21663+ end <= ktla_ktva((unsigned long)_stext)) &&
21664+ (start > ktla_ktva((unsigned long)_einittext) ||
21665+ end <= ktla_ktva((unsigned long)_sinittext)) &&
21666+
21667+#ifdef CONFIG_ACPI_SLEEP
21668+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21669+#endif
21670+
21671+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21672+ return 0;
21673+ return 1;
21674 }
21675
21676 /*
21677@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21678 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21679 unsigned long start_pfn, end_pfn;
21680 pgd_t *pgd_base = swapper_pg_dir;
21681- int pgd_idx, pmd_idx, pte_ofs;
21682+ unsigned int pgd_idx, pmd_idx, pte_ofs;
21683 unsigned long pfn;
21684 pgd_t *pgd;
21685+ pud_t *pud;
21686 pmd_t *pmd;
21687 pte_t *pte;
21688 unsigned pages_2m, pages_4k;
21689@@ -278,8 +279,13 @@ repeat:
21690 pfn = start_pfn;
21691 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21692 pgd = pgd_base + pgd_idx;
21693- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21694- pmd = one_md_table_init(pgd);
21695+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21696+ pud = pud_offset(pgd, 0);
21697+ pmd = pmd_offset(pud, 0);
21698+
21699+#ifdef CONFIG_X86_PAE
21700+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21701+#endif
21702
21703 if (pfn >= end_pfn)
21704 continue;
21705@@ -291,14 +297,13 @@ repeat:
21706 #endif
21707 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21708 pmd++, pmd_idx++) {
21709- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21710+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21711
21712 /*
21713 * Map with big pages if possible, otherwise
21714 * create normal page tables:
21715 */
21716 if (use_pse) {
21717- unsigned int addr2;
21718 pgprot_t prot = PAGE_KERNEL_LARGE;
21719 /*
21720 * first pass will use the same initial
21721@@ -308,11 +313,7 @@ repeat:
21722 __pgprot(PTE_IDENT_ATTR |
21723 _PAGE_PSE);
21724
21725- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21726- PAGE_OFFSET + PAGE_SIZE-1;
21727-
21728- if (is_kernel_text(addr) ||
21729- is_kernel_text(addr2))
21730+ if (is_kernel_text(address, address + PMD_SIZE))
21731 prot = PAGE_KERNEL_LARGE_EXEC;
21732
21733 pages_2m++;
21734@@ -329,7 +330,7 @@ repeat:
21735 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21736 pte += pte_ofs;
21737 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21738- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21739+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21740 pgprot_t prot = PAGE_KERNEL;
21741 /*
21742 * first pass will use the same initial
21743@@ -337,7 +338,7 @@ repeat:
21744 */
21745 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21746
21747- if (is_kernel_text(addr))
21748+ if (is_kernel_text(address, address + PAGE_SIZE))
21749 prot = PAGE_KERNEL_EXEC;
21750
21751 pages_4k++;
21752@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21753
21754 pud = pud_offset(pgd, va);
21755 pmd = pmd_offset(pud, va);
21756- if (!pmd_present(*pmd))
21757+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
21758 break;
21759
21760 pte = pte_offset_kernel(pmd, va);
21761@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
21762
21763 static void __init pagetable_init(void)
21764 {
21765- pgd_t *pgd_base = swapper_pg_dir;
21766-
21767- permanent_kmaps_init(pgd_base);
21768+ permanent_kmaps_init(swapper_pg_dir);
21769 }
21770
21771 #ifdef CONFIG_ACPI_SLEEP
21772@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
21773 * ACPI suspend needs this for resume, because things like the intel-agp
21774 * driver might have split up a kernel 4MB mapping.
21775 */
21776-char swsusp_pg_dir[PAGE_SIZE]
21777+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
21778 __attribute__ ((aligned(PAGE_SIZE)));
21779
21780 static inline void save_pg_dir(void)
21781 {
21782- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
21783+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
21784 }
21785 #else /* !CONFIG_ACPI_SLEEP */
21786 static inline void save_pg_dir(void)
21787@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
21788 flush_tlb_all();
21789 }
21790
21791-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21792+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21793 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21794
21795 /* user-defined highmem size */
21796@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21797 * Initialize the boot-time allocator (with low memory only):
21798 */
21799 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21800- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21801+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21802 PAGE_SIZE);
21803 if (bootmap == -1L)
21804 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21805@@ -864,6 +863,12 @@ void __init mem_init(void)
21806
21807 pci_iommu_alloc();
21808
21809+#ifdef CONFIG_PAX_PER_CPU_PGD
21810+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21811+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21812+ KERNEL_PGD_PTRS);
21813+#endif
21814+
21815 #ifdef CONFIG_FLATMEM
21816 BUG_ON(!mem_map);
21817 #endif
21818@@ -881,7 +886,7 @@ void __init mem_init(void)
21819 set_highmem_pages_init();
21820
21821 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21822- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21823+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21824 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21825
21826 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21827@@ -923,10 +928,10 @@ void __init mem_init(void)
21828 ((unsigned long)&__init_end -
21829 (unsigned long)&__init_begin) >> 10,
21830
21831- (unsigned long)&_etext, (unsigned long)&_edata,
21832- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21833+ (unsigned long)&_sdata, (unsigned long)&_edata,
21834+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21835
21836- (unsigned long)&_text, (unsigned long)&_etext,
21837+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21838 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21839
21840 /*
21841@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
21842 if (!kernel_set_to_readonly)
21843 return;
21844
21845+ start = ktla_ktva(start);
21846 pr_debug("Set kernel text: %lx - %lx for read write\n",
21847 start, start+size);
21848
21849@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
21850 if (!kernel_set_to_readonly)
21851 return;
21852
21853+ start = ktla_ktva(start);
21854 pr_debug("Set kernel text: %lx - %lx for read only\n",
21855 start, start+size);
21856
21857@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
21858 unsigned long start = PFN_ALIGN(_text);
21859 unsigned long size = PFN_ALIGN(_etext) - start;
21860
21861+ start = ktla_ktva(start);
21862 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21863 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21864 size >> 10);
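
(Aside, not part of the patch: the rewritten is_kernel_text() above takes a [start, end) range and reports whether it overlaps the kernel text, the init text, the ACPI wakeup area or the low ROM/video window at 0xc0000-0xfffff, with the text endpoints run through ktla_ktva(). De Morgan'd back from the "no overlap" form used in the hunk, each term is the usual interval test; illustrative helper only:)

/* Overlap under the hunk's convention, where "no overlap" means
 * start > region_end || end <= region_start. */
static inline int overlaps(unsigned long start, unsigned long end,
                           unsigned long region_start, unsigned long region_end)
{
        return start <= region_end && end > region_start;
}
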
21865diff -urNp linux-2.6.32.45/arch/x86/mm/init_64.c linux-2.6.32.45/arch/x86/mm/init_64.c
21866--- linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
21867+++ linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
21868@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21869 pmd = fill_pmd(pud, vaddr);
21870 pte = fill_pte(pmd, vaddr);
21871
21872+ pax_open_kernel();
21873 set_pte(pte, new_pte);
21874+ pax_close_kernel();
21875
21876 /*
21877 * It's enough to flush this one mapping.
21878@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21879 pgd = pgd_offset_k((unsigned long)__va(phys));
21880 if (pgd_none(*pgd)) {
21881 pud = (pud_t *) spp_getpage();
21882- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21883- _PAGE_USER));
21884+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21885 }
21886 pud = pud_offset(pgd, (unsigned long)__va(phys));
21887 if (pud_none(*pud)) {
21888 pmd = (pmd_t *) spp_getpage();
21889- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21890- _PAGE_USER));
21891+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21892 }
21893 pmd = pmd_offset(pud, phys);
21894 BUG_ON(!pmd_none(*pmd));
21895@@ -675,6 +675,12 @@ void __init mem_init(void)
21896
21897 pci_iommu_alloc();
21898
21899+#ifdef CONFIG_PAX_PER_CPU_PGD
21900+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21901+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21902+ KERNEL_PGD_PTRS);
21903+#endif
21904+
21905 /* clear_bss() already clear the empty_zero_page */
21906
21907 reservedpages = 0;
21908@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21909 static struct vm_area_struct gate_vma = {
21910 .vm_start = VSYSCALL_START,
21911 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21912- .vm_page_prot = PAGE_READONLY_EXEC,
21913- .vm_flags = VM_READ | VM_EXEC
21914+ .vm_page_prot = PAGE_READONLY,
21915+ .vm_flags = VM_READ
21916 };
21917
21918 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21919@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21920
21921 const char *arch_vma_name(struct vm_area_struct *vma)
21922 {
21923- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21924+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21925 return "[vdso]";
21926 if (vma == &gate_vma)
21927 return "[vsyscall]";
21928diff -urNp linux-2.6.32.45/arch/x86/mm/init.c linux-2.6.32.45/arch/x86/mm/init.c
21929--- linux-2.6.32.45/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21930+++ linux-2.6.32.45/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21931@@ -69,11 +69,7 @@ static void __init find_early_table_spac
21932 * cause a hotspot and fill up ZONE_DMA. The page tables
21933 * need roughly 0.5KB per GB.
21934 */
21935-#ifdef CONFIG_X86_32
21936- start = 0x7000;
21937-#else
21938- start = 0x8000;
21939-#endif
21940+ start = 0x100000;
21941 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21942 tables, PAGE_SIZE);
21943 if (e820_table_start == -1UL)
21944@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21945 #endif
21946
21947 set_nx();
21948- if (nx_enabled)
21949+ if (nx_enabled && cpu_has_nx)
21950 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21951
21952 /* Enable PSE if available */
21953@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
21954 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
21955 * mmio resources as well as potential bios/acpi data regions.
21956 */
21957+
21958 int devmem_is_allowed(unsigned long pagenr)
21959 {
21960+#ifdef CONFIG_GRKERNSEC_KMEM
21961+ /* allow BDA */
21962+ if (!pagenr)
21963+ return 1;
21964+ /* allow EBDA */
21965+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21966+ return 1;
21967+ /* allow ISA/video mem */
21968+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21969+ return 1;
21970+ /* throw out everything else below 1MB */
21971+ if (pagenr <= 256)
21972+ return 0;
21973+#else
21974 if (pagenr <= 256)
21975 return 1;
21976+#endif
21977+
21978 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21979 return 0;
21980 if (!page_is_ram(pagenr))
21981@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
21982
21983 void free_initmem(void)
21984 {
21985+
21986+#ifdef CONFIG_PAX_KERNEXEC
21987+#ifdef CONFIG_X86_32
21988+ /* PaX: limit KERNEL_CS to actual size */
21989+ unsigned long addr, limit;
21990+ struct desc_struct d;
21991+ int cpu;
21992+
21993+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21994+ limit = (limit - 1UL) >> PAGE_SHIFT;
21995+
21996+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21997+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21998+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21999+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22000+ }
22001+
22002+ /* PaX: make KERNEL_CS read-only */
22003+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22004+ if (!paravirt_enabled())
22005+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22006+/*
22007+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22008+ pgd = pgd_offset_k(addr);
22009+ pud = pud_offset(pgd, addr);
22010+ pmd = pmd_offset(pud, addr);
22011+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22012+ }
22013+*/
22014+#ifdef CONFIG_X86_PAE
22015+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22016+/*
22017+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22018+ pgd = pgd_offset_k(addr);
22019+ pud = pud_offset(pgd, addr);
22020+ pmd = pmd_offset(pud, addr);
22021+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22022+ }
22023+*/
22024+#endif
22025+
22026+#ifdef CONFIG_MODULES
22027+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22028+#endif
22029+
22030+#else
22031+ pgd_t *pgd;
22032+ pud_t *pud;
22033+ pmd_t *pmd;
22034+ unsigned long addr, end;
22035+
22036+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22037+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22038+ pgd = pgd_offset_k(addr);
22039+ pud = pud_offset(pgd, addr);
22040+ pmd = pmd_offset(pud, addr);
22041+ if (!pmd_present(*pmd))
22042+ continue;
22043+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22044+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22045+ else
22046+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22047+ }
22048+
22049+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22050+ end = addr + KERNEL_IMAGE_SIZE;
22051+ for (; addr < end; addr += PMD_SIZE) {
22052+ pgd = pgd_offset_k(addr);
22053+ pud = pud_offset(pgd, addr);
22054+ pmd = pmd_offset(pud, addr);
22055+ if (!pmd_present(*pmd))
22056+ continue;
22057+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22058+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22059+ }
22060+#endif
22061+
22062+ flush_tlb_all();
22063+#endif
22064+
22065 free_init_pages("unused kernel memory",
22066 (unsigned long)(&__init_begin),
22067 (unsigned long)(&__init_end));
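
(Aside, not part of the patch: with CONFIG_GRKERNSEC_KMEM the devmem_is_allowed() hunk above stops treating everything under 1 MB as fair game -- only the BIOS data area at page 0, the EBDA page at 0x9f000 and the ISA/video window remain readable through /dev/mem. A user-space sketch of an access the policy is still meant to permit, mapping the legacy adapter ROM area at 0xc0000; whether it succeeds on a given kernel also depends on the rest of the /dev/mem policy, e.g. STRICT_DEVMEM:)

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/mem", O_RDONLY);
        if (fd < 0) { perror("open /dev/mem"); return 1; }

        /* 0xc0000 lies inside the ISA window the hunk keeps allowed */
        void *p = mmap(NULL, 0x1000, PROT_READ, MAP_SHARED, fd, 0xc0000);
        if (p == MAP_FAILED) { perror("mmap 0xc0000"); close(fd); return 1; }

        printf("first ROM bytes: %02x %02x\n",
               ((unsigned char *)p)[0], ((unsigned char *)p)[1]);
        munmap(p, 0x1000);
        close(fd);
        return 0;
}
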
22068diff -urNp linux-2.6.32.45/arch/x86/mm/iomap_32.c linux-2.6.32.45/arch/x86/mm/iomap_32.c
22069--- linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
22070+++ linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
22071@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
22072 debug_kmap_atomic(type);
22073 idx = type + KM_TYPE_NR * smp_processor_id();
22074 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22075+
22076+ pax_open_kernel();
22077 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22078+ pax_close_kernel();
22079+
22080 arch_flush_lazy_mmu_mode();
22081
22082 return (void *)vaddr;
22083diff -urNp linux-2.6.32.45/arch/x86/mm/ioremap.c linux-2.6.32.45/arch/x86/mm/ioremap.c
22084--- linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
22085+++ linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
22086@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
22087 * Second special case: Some BIOSen report the PC BIOS
22088 * area (640->1Mb) as ram even though it is not.
22089 */
22090- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
22091- pagenr < (BIOS_END >> PAGE_SHIFT))
22092+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
22093+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22094 return 0;
22095
22096 for (i = 0; i < e820.nr_map; i++) {
22097@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
22098 /*
22099 * Don't allow anybody to remap normal RAM that we're using..
22100 */
22101- for (pfn = phys_addr >> PAGE_SHIFT;
22102- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
22103- pfn++) {
22104-
22105+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
22106 int is_ram = page_is_ram(pfn);
22107
22108- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22109+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22110 return NULL;
22111 WARN_ON_ONCE(is_ram);
22112 }
22113@@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
22114 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22115
22116 static __initdata int after_paging_init;
22117-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22118+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22119
22120 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22121 {
22122@@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
22123 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22124
22125 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22126- memset(bm_pte, 0, sizeof(bm_pte));
22127- pmd_populate_kernel(&init_mm, pmd, bm_pte);
22128+ pmd_populate_user(&init_mm, pmd, bm_pte);
22129
22130 /*
22131 * The boot-ioremap range spans multiple pmds, for which
22132diff -urNp linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c
22133--- linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
22134+++ linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
22135@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
22136 * memory (e.g. tracked pages)? For now, we need this to avoid
22137 * invoking kmemcheck for PnP BIOS calls.
22138 */
22139- if (regs->flags & X86_VM_MASK)
22140+ if (v8086_mode(regs))
22141 return false;
22142- if (regs->cs != __KERNEL_CS)
22143+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22144 return false;
22145
22146 pte = kmemcheck_pte_lookup(address);
22147diff -urNp linux-2.6.32.45/arch/x86/mm/mmap.c linux-2.6.32.45/arch/x86/mm/mmap.c
22148--- linux-2.6.32.45/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
22149+++ linux-2.6.32.45/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
22150@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
22151 * Leave an at least ~128 MB hole with possible stack randomization.
22152 */
22153 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22154-#define MAX_GAP (TASK_SIZE/6*5)
22155+#define MAX_GAP (pax_task_size/6*5)
22156
22157 /*
22158 * True on X86_32 or when emulating IA32 on X86_64
22159@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22160 return rnd << PAGE_SHIFT;
22161 }
22162
22163-static unsigned long mmap_base(void)
22164+static unsigned long mmap_base(struct mm_struct *mm)
22165 {
22166 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
22167+ unsigned long pax_task_size = TASK_SIZE;
22168+
22169+#ifdef CONFIG_PAX_SEGMEXEC
22170+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22171+ pax_task_size = SEGMEXEC_TASK_SIZE;
22172+#endif
22173
22174 if (gap < MIN_GAP)
22175 gap = MIN_GAP;
22176 else if (gap > MAX_GAP)
22177 gap = MAX_GAP;
22178
22179- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22180+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22181 }
22182
22183 /*
22184 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22185 * does, but not when emulating X86_32
22186 */
22187-static unsigned long mmap_legacy_base(void)
22188+static unsigned long mmap_legacy_base(struct mm_struct *mm)
22189 {
22190- if (mmap_is_ia32())
22191+ if (mmap_is_ia32()) {
22192+
22193+#ifdef CONFIG_PAX_SEGMEXEC
22194+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22195+ return SEGMEXEC_TASK_UNMAPPED_BASE;
22196+ else
22197+#endif
22198+
22199 return TASK_UNMAPPED_BASE;
22200- else
22201+ } else
22202 return TASK_UNMAPPED_BASE + mmap_rnd();
22203 }
22204
22205@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22206 void arch_pick_mmap_layout(struct mm_struct *mm)
22207 {
22208 if (mmap_is_legacy()) {
22209- mm->mmap_base = mmap_legacy_base();
22210+ mm->mmap_base = mmap_legacy_base(mm);
22211+
22212+#ifdef CONFIG_PAX_RANDMMAP
22213+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22214+ mm->mmap_base += mm->delta_mmap;
22215+#endif
22216+
22217 mm->get_unmapped_area = arch_get_unmapped_area;
22218 mm->unmap_area = arch_unmap_area;
22219 } else {
22220- mm->mmap_base = mmap_base();
22221+ mm->mmap_base = mmap_base(mm);
22222+
22223+#ifdef CONFIG_PAX_RANDMMAP
22224+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22225+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22226+#endif
22227+
22228 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22229 mm->unmap_area = arch_unmap_area_topdown;
22230 }
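
(Aside, not part of the patch: for the top-down layout, mmap_base() now measures MAX_GAP against pax_task_size, so a SEGMEXEC task -- whose usable address space is roughly halved -- gets a proportionally smaller gap ceiling. The arithmetic itself is unchanged: clamp the stack rlimit between MIN_GAP and MAX_GAP, subtract it plus the mmap randomisation from the top of the task, and page-align the result. A stand-alone sketch with illustrative parameter names and PAGE_SIZE fixed at 4 KiB:)

/* Sketch only -- mirrors the shape of mmap_base() after the patch. */
static unsigned long mmap_base_sketch(unsigned long pax_task_size,
                                      unsigned long stack_rlimit,
                                      unsigned long mmap_rnd_bytes)
{
        const unsigned long page_size = 4096UL;
        unsigned long min_gap = 128UL * 1024 * 1024;    /* + stack_maxrandom_size() in the real code */
        unsigned long max_gap = pax_task_size / 6 * 5;
        unsigned long gap = stack_rlimit;

        if (gap < min_gap)
                gap = min_gap;
        else if (gap > max_gap)
                gap = max_gap;

        /* PAGE_ALIGN(): round up to the next page boundary */
        return (pax_task_size - gap - mmap_rnd_bytes + page_size - 1) & ~(page_size - 1);
}

(With a 3 GiB task size, an 8 MiB stack rlimit and zero randomisation this yields 0xB8000000, i.e. 128 MiB below the top of a 3 GiB user space.)
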
22231diff -urNp linux-2.6.32.45/arch/x86/mm/mmio-mod.c linux-2.6.32.45/arch/x86/mm/mmio-mod.c
22232--- linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22233+++ linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22234@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22235 break;
22236 default:
22237 {
22238- unsigned char *ip = (unsigned char *)instptr;
22239+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22240 my_trace->opcode = MMIO_UNKNOWN_OP;
22241 my_trace->width = 0;
22242 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22243@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22244 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22245 void __iomem *addr)
22246 {
22247- static atomic_t next_id;
22248+ static atomic_unchecked_t next_id;
22249 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22250 /* These are page-unaligned. */
22251 struct mmiotrace_map map = {
22252@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22253 .private = trace
22254 },
22255 .phys = offset,
22256- .id = atomic_inc_return(&next_id)
22257+ .id = atomic_inc_return_unchecked(&next_id)
22258 };
22259 map.map_id = trace->id;
22260
22261diff -urNp linux-2.6.32.45/arch/x86/mm/numa_32.c linux-2.6.32.45/arch/x86/mm/numa_32.c
22262--- linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22263+++ linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22264@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22265 }
22266 #endif
22267
22268-extern unsigned long find_max_low_pfn(void);
22269 extern unsigned long highend_pfn, highstart_pfn;
22270
22271 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22272diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr.c linux-2.6.32.45/arch/x86/mm/pageattr.c
22273--- linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22274+++ linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22275@@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22276 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22277 */
22278 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22279- pgprot_val(forbidden) |= _PAGE_NX;
22280+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22281
22282 /*
22283 * The kernel text needs to be executable for obvious reasons
22284 * Does not cover __inittext since that is gone later on. On
22285 * 64bit we do not enforce !NX on the low mapping
22286 */
22287- if (within(address, (unsigned long)_text, (unsigned long)_etext))
22288- pgprot_val(forbidden) |= _PAGE_NX;
22289+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22290+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22291
22292+#ifdef CONFIG_DEBUG_RODATA
22293 /*
22294 * The .rodata section needs to be read-only. Using the pfn
22295 * catches all aliases.
22296@@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22297 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22298 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22299 pgprot_val(forbidden) |= _PAGE_RW;
22300+#endif
22301+
22302+#ifdef CONFIG_PAX_KERNEXEC
22303+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22304+ pgprot_val(forbidden) |= _PAGE_RW;
22305+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22306+ }
22307+#endif
22308
22309 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22310
22311@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22312 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22313 {
22314 /* change init_mm */
22315+ pax_open_kernel();
22316 set_pte_atomic(kpte, pte);
22317+
22318 #ifdef CONFIG_X86_32
22319 if (!SHARED_KERNEL_PMD) {
22320+
22321+#ifdef CONFIG_PAX_PER_CPU_PGD
22322+ unsigned long cpu;
22323+#else
22324 struct page *page;
22325+#endif
22326
22327+#ifdef CONFIG_PAX_PER_CPU_PGD
22328+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22329+ pgd_t *pgd = get_cpu_pgd(cpu);
22330+#else
22331 list_for_each_entry(page, &pgd_list, lru) {
22332- pgd_t *pgd;
22333+ pgd_t *pgd = (pgd_t *)page_address(page);
22334+#endif
22335+
22336 pud_t *pud;
22337 pmd_t *pmd;
22338
22339- pgd = (pgd_t *)page_address(page) + pgd_index(address);
22340+ pgd += pgd_index(address);
22341 pud = pud_offset(pgd, address);
22342 pmd = pmd_offset(pud, address);
22343 set_pte_atomic((pte_t *)pmd, pte);
22344 }
22345 }
22346 #endif
22347+ pax_close_kernel();
22348 }
22349
22350 static int
22351diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr-test.c linux-2.6.32.45/arch/x86/mm/pageattr-test.c
22352--- linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22353+++ linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22354@@ -36,7 +36,7 @@ enum {
22355
22356 static int pte_testbit(pte_t pte)
22357 {
22358- return pte_flags(pte) & _PAGE_UNUSED1;
22359+ return pte_flags(pte) & _PAGE_CPA_TEST;
22360 }
22361
22362 struct split_state {
22363diff -urNp linux-2.6.32.45/arch/x86/mm/pat.c linux-2.6.32.45/arch/x86/mm/pat.c
22364--- linux-2.6.32.45/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22365+++ linux-2.6.32.45/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22366@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22367
22368 conflict:
22369 printk(KERN_INFO "%s:%d conflicting memory types "
22370- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22371+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22372 new->end, cattr_name(new->type), cattr_name(entry->type));
22373 return -EBUSY;
22374 }
22375@@ -559,7 +559,7 @@ unlock_ret:
22376
22377 if (err) {
22378 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22379- current->comm, current->pid, start, end);
22380+ current->comm, task_pid_nr(current), start, end);
22381 }
22382
22383 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22384@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22385 while (cursor < to) {
22386 if (!devmem_is_allowed(pfn)) {
22387 printk(KERN_INFO
22388- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22389- current->comm, from, to);
22390+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22391+ current->comm, from, to, cursor);
22392 return 0;
22393 }
22394 cursor += PAGE_SIZE;
22395@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22396 printk(KERN_INFO
22397 "%s:%d ioremap_change_attr failed %s "
22398 "for %Lx-%Lx\n",
22399- current->comm, current->pid,
22400+ current->comm, task_pid_nr(current),
22401 cattr_name(flags),
22402 base, (unsigned long long)(base + size));
22403 return -EINVAL;
22404@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22405 free_memtype(paddr, paddr + size);
22406 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22407 " for %Lx-%Lx, got %s\n",
22408- current->comm, current->pid,
22409+ current->comm, task_pid_nr(current),
22410 cattr_name(want_flags),
22411 (unsigned long long)paddr,
22412 (unsigned long long)(paddr + size),
22413diff -urNp linux-2.6.32.45/arch/x86/mm/pf_in.c linux-2.6.32.45/arch/x86/mm/pf_in.c
22414--- linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22415+++ linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22416@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22417 int i;
22418 enum reason_type rv = OTHERS;
22419
22420- p = (unsigned char *)ins_addr;
22421+ p = (unsigned char *)ktla_ktva(ins_addr);
22422 p += skip_prefix(p, &prf);
22423 p += get_opcode(p, &opcode);
22424
22425@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22426 struct prefix_bits prf;
22427 int i;
22428
22429- p = (unsigned char *)ins_addr;
22430+ p = (unsigned char *)ktla_ktva(ins_addr);
22431 p += skip_prefix(p, &prf);
22432 p += get_opcode(p, &opcode);
22433
22434@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22435 struct prefix_bits prf;
22436 int i;
22437
22438- p = (unsigned char *)ins_addr;
22439+ p = (unsigned char *)ktla_ktva(ins_addr);
22440 p += skip_prefix(p, &prf);
22441 p += get_opcode(p, &opcode);
22442
22443@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22444 int i;
22445 unsigned long rv;
22446
22447- p = (unsigned char *)ins_addr;
22448+ p = (unsigned char *)ktla_ktva(ins_addr);
22449 p += skip_prefix(p, &prf);
22450 p += get_opcode(p, &opcode);
22451 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22452@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22453 int i;
22454 unsigned long rv;
22455
22456- p = (unsigned char *)ins_addr;
22457+ p = (unsigned char *)ktla_ktva(ins_addr);
22458 p += skip_prefix(p, &prf);
22459 p += get_opcode(p, &opcode);
22460 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22461diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable_32.c linux-2.6.32.45/arch/x86/mm/pgtable_32.c
22462--- linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22463+++ linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22464@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22465 return;
22466 }
22467 pte = pte_offset_kernel(pmd, vaddr);
22468+
22469+ pax_open_kernel();
22470 if (pte_val(pteval))
22471 set_pte_at(&init_mm, vaddr, pte, pteval);
22472 else
22473 pte_clear(&init_mm, vaddr, pte);
22474+ pax_close_kernel();
22475
22476 /*
22477 * It's enough to flush this one mapping.
22478diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable.c linux-2.6.32.45/arch/x86/mm/pgtable.c
22479--- linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22480+++ linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22481@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22482 list_del(&page->lru);
22483 }
22484
22485-#define UNSHARED_PTRS_PER_PGD \
22486- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22487+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22488+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22489
22490+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22491+{
22492+ while (count--)
22493+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22494+}
22495+#endif
22496+
22497+#ifdef CONFIG_PAX_PER_CPU_PGD
22498+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22499+{
22500+ while (count--)
22501+
22502+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22503+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22504+#else
22505+ *dst++ = *src++;
22506+#endif
22507+
22508+}
22509+#endif
22510+
22511+#ifdef CONFIG_X86_64
22512+#define pxd_t pud_t
22513+#define pyd_t pgd_t
22514+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22515+#define pxd_free(mm, pud) pud_free((mm), (pud))
22516+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22517+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22518+#define PYD_SIZE PGDIR_SIZE
22519+#else
22520+#define pxd_t pmd_t
22521+#define pyd_t pud_t
22522+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22523+#define pxd_free(mm, pud) pmd_free((mm), (pud))
22524+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22525+#define pyd_offset(mm ,address) pud_offset((mm), (address))
22526+#define PYD_SIZE PUD_SIZE
22527+#endif
22528+
22529+#ifdef CONFIG_PAX_PER_CPU_PGD
22530+static inline void pgd_ctor(pgd_t *pgd) {}
22531+static inline void pgd_dtor(pgd_t *pgd) {}
22532+#else
22533 static void pgd_ctor(pgd_t *pgd)
22534 {
22535 /* If the pgd points to a shared pagetable level (either the
22536@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22537 pgd_list_del(pgd);
22538 spin_unlock_irqrestore(&pgd_lock, flags);
22539 }
22540+#endif
22541
22542 /*
22543 * List of all pgd's needed for non-PAE so it can invalidate entries
22544@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22545 * -- wli
22546 */
22547
22548-#ifdef CONFIG_X86_PAE
22549+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22550 /*
22551 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22552 * updating the top-level pagetable entries to guarantee the
22553@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22554 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22555 * and initialize the kernel pmds here.
22556 */
22557-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22558+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22559
22560 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22561 {
22562@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22563 */
22564 flush_tlb_mm(mm);
22565 }
22566+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22567+#define PREALLOCATED_PXDS USER_PGD_PTRS
22568 #else /* !CONFIG_X86_PAE */
22569
22570 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22571-#define PREALLOCATED_PMDS 0
22572+#define PREALLOCATED_PXDS 0
22573
22574 #endif /* CONFIG_X86_PAE */
22575
22576-static void free_pmds(pmd_t *pmds[])
22577+static void free_pxds(pxd_t *pxds[])
22578 {
22579 int i;
22580
22581- for(i = 0; i < PREALLOCATED_PMDS; i++)
22582- if (pmds[i])
22583- free_page((unsigned long)pmds[i]);
22584+ for(i = 0; i < PREALLOCATED_PXDS; i++)
22585+ if (pxds[i])
22586+ free_page((unsigned long)pxds[i]);
22587 }
22588
22589-static int preallocate_pmds(pmd_t *pmds[])
22590+static int preallocate_pxds(pxd_t *pxds[])
22591 {
22592 int i;
22593 bool failed = false;
22594
22595- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22596- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22597- if (pmd == NULL)
22598+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22599+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22600+ if (pxd == NULL)
22601 failed = true;
22602- pmds[i] = pmd;
22603+ pxds[i] = pxd;
22604 }
22605
22606 if (failed) {
22607- free_pmds(pmds);
22608+ free_pxds(pxds);
22609 return -ENOMEM;
22610 }
22611
22612@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22613 * preallocate which never got a corresponding vma will need to be
22614 * freed manually.
22615 */
22616-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22617+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22618 {
22619 int i;
22620
22621- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22622+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22623 pgd_t pgd = pgdp[i];
22624
22625 if (pgd_val(pgd) != 0) {
22626- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22627+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22628
22629- pgdp[i] = native_make_pgd(0);
22630+ set_pgd(pgdp + i, native_make_pgd(0));
22631
22632- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22633- pmd_free(mm, pmd);
22634+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22635+ pxd_free(mm, pxd);
22636 }
22637 }
22638 }
22639
22640-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22641+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22642 {
22643- pud_t *pud;
22644+ pyd_t *pyd;
22645 unsigned long addr;
22646 int i;
22647
22648- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22649+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22650 return;
22651
22652- pud = pud_offset(pgd, 0);
22653+#ifdef CONFIG_X86_64
22654+ pyd = pyd_offset(mm, 0L);
22655+#else
22656+ pyd = pyd_offset(pgd, 0L);
22657+#endif
22658
22659- for (addr = i = 0; i < PREALLOCATED_PMDS;
22660- i++, pud++, addr += PUD_SIZE) {
22661- pmd_t *pmd = pmds[i];
22662+ for (addr = i = 0; i < PREALLOCATED_PXDS;
22663+ i++, pyd++, addr += PYD_SIZE) {
22664+ pxd_t *pxd = pxds[i];
22665
22666 if (i >= KERNEL_PGD_BOUNDARY)
22667- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22668- sizeof(pmd_t) * PTRS_PER_PMD);
22669+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22670+ sizeof(pxd_t) * PTRS_PER_PMD);
22671
22672- pud_populate(mm, pud, pmd);
22673+ pyd_populate(mm, pyd, pxd);
22674 }
22675 }
22676
22677 pgd_t *pgd_alloc(struct mm_struct *mm)
22678 {
22679 pgd_t *pgd;
22680- pmd_t *pmds[PREALLOCATED_PMDS];
22681+ pxd_t *pxds[PREALLOCATED_PXDS];
22682+
22683 unsigned long flags;
22684
22685 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22686@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22687
22688 mm->pgd = pgd;
22689
22690- if (preallocate_pmds(pmds) != 0)
22691+ if (preallocate_pxds(pxds) != 0)
22692 goto out_free_pgd;
22693
22694 if (paravirt_pgd_alloc(mm) != 0)
22695- goto out_free_pmds;
22696+ goto out_free_pxds;
22697
22698 /*
22699 * Make sure that pre-populating the pmds is atomic with
22700@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22701 spin_lock_irqsave(&pgd_lock, flags);
22702
22703 pgd_ctor(pgd);
22704- pgd_prepopulate_pmd(mm, pgd, pmds);
22705+ pgd_prepopulate_pxd(mm, pgd, pxds);
22706
22707 spin_unlock_irqrestore(&pgd_lock, flags);
22708
22709 return pgd;
22710
22711-out_free_pmds:
22712- free_pmds(pmds);
22713+out_free_pxds:
22714+ free_pxds(pxds);
22715 out_free_pgd:
22716 free_page((unsigned long)pgd);
22717 out:
22718@@ -287,7 +338,7 @@ out:
22719
22720 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22721 {
22722- pgd_mop_up_pmds(mm, pgd);
22723+ pgd_mop_up_pxds(mm, pgd);
22724 pgd_dtor(pgd);
22725 paravirt_pgd_free(mm, pgd);
22726 free_page((unsigned long)pgd);
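
The pxd_t/pyd_t/PYD_SIZE defines above let a single pgd_prepopulate path preallocate PMDs under PUDs on 32-bit PAE and PUDs under the (per-CPU) PGD on 64-bit. Below is a minimal standalone sketch of the same aliasing idea; every type, macro and size in it is invented for illustration and is not the kernel's.

/*
 * Illustrative sketch (not kernel code) of the pxd/pyd aliasing used in
 * the pgtable.c hunk above: one generic prepopulate loop is compiled
 * against whichever page-table level pair the configuration selects.
 */
#include <stdio.h>

#ifdef SIXTY_FOUR_BIT_STYLE
typedef struct { unsigned long v; } pxd_t;	/* stands in for pud_t */
typedef struct { unsigned long v; } pyd_t;	/* stands in for pgd_t */
#define PYD_SIZE (1UL << 39)
#else
typedef struct { unsigned long v; } pxd_t;	/* stands in for pmd_t */
typedef struct { unsigned long v; } pyd_t;	/* stands in for pud_t */
#define PYD_SIZE (1UL << 30)
#endif

static void pyd_populate(pyd_t *pyd, pxd_t *pxd)
{
	pyd->v = (unsigned long)pxd;		/* record the lower-level table */
}

static void prepopulate(pyd_t pyds[], pxd_t *pxds[], int n)
{
	unsigned long addr = 0;
	int i;

	/* one loop body regardless of which level pair was selected */
	for (i = 0; i < n; i++, addr += PYD_SIZE)
		pyd_populate(&pyds[i], pxds[i]);
	printf("covered %#lx bytes with %d entries\n", addr, n);
}

int main(void)
{
	pxd_t a, b;
	pxd_t *pxds[] = { &a, &b };
	pyd_t pyds[2];

	prepopulate(pyds, pxds, 2);
	return 0;
}
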
22727diff -urNp linux-2.6.32.45/arch/x86/mm/setup_nx.c linux-2.6.32.45/arch/x86/mm/setup_nx.c
22728--- linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22729+++ linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22730@@ -4,11 +4,10 @@
22731
22732 #include <asm/pgtable.h>
22733
22734+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22735 int nx_enabled;
22736
22737-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22738-static int disable_nx __cpuinitdata;
22739-
22740+#ifndef CONFIG_PAX_PAGEEXEC
22741 /*
22742 * noexec = on|off
22743 *
22744@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22745 if (!str)
22746 return -EINVAL;
22747 if (!strncmp(str, "on", 2)) {
22748- __supported_pte_mask |= _PAGE_NX;
22749- disable_nx = 0;
22750+ nx_enabled = 1;
22751 } else if (!strncmp(str, "off", 3)) {
22752- disable_nx = 1;
22753- __supported_pte_mask &= ~_PAGE_NX;
22754+ nx_enabled = 0;
22755 }
22756 return 0;
22757 }
22758 early_param("noexec", noexec_setup);
22759 #endif
22760+#endif
22761
22762 #ifdef CONFIG_X86_PAE
22763 void __init set_nx(void)
22764 {
22765- unsigned int v[4], l, h;
22766+ if (!nx_enabled && cpu_has_nx) {
22767+ unsigned l, h;
22768
22769- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
22770- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
22771-
22772- if ((v[3] & (1 << 20)) && !disable_nx) {
22773- rdmsr(MSR_EFER, l, h);
22774- l |= EFER_NX;
22775- wrmsr(MSR_EFER, l, h);
22776- nx_enabled = 1;
22777- __supported_pte_mask |= _PAGE_NX;
22778- }
22779+ __supported_pte_mask &= ~_PAGE_NX;
22780+ rdmsr(MSR_EFER, l, h);
22781+ l &= ~EFER_NX;
22782+ wrmsr(MSR_EFER, l, h);
22783 }
22784 }
22785 #else
22786@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
22787 unsigned long efer;
22788
22789 rdmsrl(MSR_EFER, efer);
22790- if (!(efer & EFER_NX) || disable_nx)
22791+ if (!(efer & EFER_NX) || !nx_enabled)
22792 __supported_pte_mask &= ~_PAGE_NX;
22793 }
22794 #endif
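
With the change above, set_nx() only acts when NX is to be turned off: it drops _PAGE_NX from __supported_pte_mask and clears the NX-enable bit in EFER via rdmsr/wrmsr, and check_efer() keys off nx_enabled instead of the removed disable_nx flag. A rough userspace model of that bit manipulation follows; EFER_NX is restated here as bit 11 for the sketch, and the real MSR access of course requires ring 0.

/*
 * Sketch only: models the EFER.NX clear performed in set_nx() above on
 * an ordinary variable.  In the kernel the value is read and written
 * with rdmsr()/wrmsr() on MSR_EFER.
 */
#include <stdio.h>

#define EFER_NX (1UL << 11)

int main(void)
{
	unsigned long efer = 0xd01;	/* pretend rdmsr(MSR_EFER) result, NX set */
	int nx_enabled = 0;

	if (!nx_enabled)
		efer &= ~EFER_NX;	/* what the wrmsr() in the hunk commits */

	printf("EFER.NX is %s\n", (efer & EFER_NX) ? "set" : "clear");
	return 0;
}
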
22795diff -urNp linux-2.6.32.45/arch/x86/mm/tlb.c linux-2.6.32.45/arch/x86/mm/tlb.c
22796--- linux-2.6.32.45/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
22797+++ linux-2.6.32.45/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
22798@@ -61,7 +61,11 @@ void leave_mm(int cpu)
22799 BUG();
22800 cpumask_clear_cpu(cpu,
22801 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
22802+
22803+#ifndef CONFIG_PAX_PER_CPU_PGD
22804 load_cr3(swapper_pg_dir);
22805+#endif
22806+
22807 }
22808 EXPORT_SYMBOL_GPL(leave_mm);
22809
22810diff -urNp linux-2.6.32.45/arch/x86/oprofile/backtrace.c linux-2.6.32.45/arch/x86/oprofile/backtrace.c
22811--- linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
22812+++ linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
22813@@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
22814 struct frame_head bufhead[2];
22815
22816 /* Also check accessibility of one struct frame_head beyond */
22817- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
22818+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
22819 return NULL;
22820 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
22821 return NULL;
22822@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
22823 {
22824 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
22825
22826- if (!user_mode_vm(regs)) {
22827+ if (!user_mode(regs)) {
22828 unsigned long stack = kernel_stack_pointer(regs);
22829 if (depth)
22830 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22831diff -urNp linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c
22832--- linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
22833+++ linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
22834@@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
22835 #endif
22836 }
22837
22838-static int inline addr_increment(void)
22839+static inline int addr_increment(void)
22840 {
22841 #ifdef CONFIG_SMP
22842 return smp_num_siblings == 2 ? 2 : 1;
22843diff -urNp linux-2.6.32.45/arch/x86/pci/common.c linux-2.6.32.45/arch/x86/pci/common.c
22844--- linux-2.6.32.45/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
22845+++ linux-2.6.32.45/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
22846@@ -31,8 +31,8 @@ int noioapicreroute = 1;
22847 int pcibios_last_bus = -1;
22848 unsigned long pirq_table_addr;
22849 struct pci_bus *pci_root_bus;
22850-struct pci_raw_ops *raw_pci_ops;
22851-struct pci_raw_ops *raw_pci_ext_ops;
22852+const struct pci_raw_ops *raw_pci_ops;
22853+const struct pci_raw_ops *raw_pci_ext_ops;
22854
22855 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
22856 int reg, int len, u32 *val)
22857diff -urNp linux-2.6.32.45/arch/x86/pci/direct.c linux-2.6.32.45/arch/x86/pci/direct.c
22858--- linux-2.6.32.45/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
22859+++ linux-2.6.32.45/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
22860@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
22861
22862 #undef PCI_CONF1_ADDRESS
22863
22864-struct pci_raw_ops pci_direct_conf1 = {
22865+const struct pci_raw_ops pci_direct_conf1 = {
22866 .read = pci_conf1_read,
22867 .write = pci_conf1_write,
22868 };
22869@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
22870
22871 #undef PCI_CONF2_ADDRESS
22872
22873-struct pci_raw_ops pci_direct_conf2 = {
22874+const struct pci_raw_ops pci_direct_conf2 = {
22875 .read = pci_conf2_read,
22876 .write = pci_conf2_write,
22877 };
22878@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
22879 * This should be close to trivial, but it isn't, because there are buggy
22880 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
22881 */
22882-static int __init pci_sanity_check(struct pci_raw_ops *o)
22883+static int __init pci_sanity_check(const struct pci_raw_ops *o)
22884 {
22885 u32 x = 0;
22886 int year, devfn;
22887diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_32.c linux-2.6.32.45/arch/x86/pci/mmconfig_32.c
22888--- linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
22889+++ linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
22890@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
22891 return 0;
22892 }
22893
22894-static struct pci_raw_ops pci_mmcfg = {
22895+static const struct pci_raw_ops pci_mmcfg = {
22896 .read = pci_mmcfg_read,
22897 .write = pci_mmcfg_write,
22898 };
22899diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_64.c linux-2.6.32.45/arch/x86/pci/mmconfig_64.c
22900--- linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
22901+++ linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
22902@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
22903 return 0;
22904 }
22905
22906-static struct pci_raw_ops pci_mmcfg = {
22907+static const struct pci_raw_ops pci_mmcfg = {
22908 .read = pci_mmcfg_read,
22909 .write = pci_mmcfg_write,
22910 };
22911diff -urNp linux-2.6.32.45/arch/x86/pci/numaq_32.c linux-2.6.32.45/arch/x86/pci/numaq_32.c
22912--- linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
22913+++ linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
22914@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
22915
22916 #undef PCI_CONF1_MQ_ADDRESS
22917
22918-static struct pci_raw_ops pci_direct_conf1_mq = {
22919+static const struct pci_raw_ops pci_direct_conf1_mq = {
22920 .read = pci_conf1_mq_read,
22921 .write = pci_conf1_mq_write
22922 };
22923diff -urNp linux-2.6.32.45/arch/x86/pci/olpc.c linux-2.6.32.45/arch/x86/pci/olpc.c
22924--- linux-2.6.32.45/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
22925+++ linux-2.6.32.45/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
22926@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
22927 return 0;
22928 }
22929
22930-static struct pci_raw_ops pci_olpc_conf = {
22931+static const struct pci_raw_ops pci_olpc_conf = {
22932 .read = pci_olpc_read,
22933 .write = pci_olpc_write,
22934 };
22935diff -urNp linux-2.6.32.45/arch/x86/pci/pcbios.c linux-2.6.32.45/arch/x86/pci/pcbios.c
22936--- linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
22937+++ linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
22938@@ -56,50 +56,93 @@ union bios32 {
22939 static struct {
22940 unsigned long address;
22941 unsigned short segment;
22942-} bios32_indirect = { 0, __KERNEL_CS };
22943+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22944
22945 /*
22946 * Returns the entry point for the given service, NULL on error
22947 */
22948
22949-static unsigned long bios32_service(unsigned long service)
22950+static unsigned long __devinit bios32_service(unsigned long service)
22951 {
22952 unsigned char return_code; /* %al */
22953 unsigned long address; /* %ebx */
22954 unsigned long length; /* %ecx */
22955 unsigned long entry; /* %edx */
22956 unsigned long flags;
22957+ struct desc_struct d, *gdt;
22958
22959 local_irq_save(flags);
22960- __asm__("lcall *(%%edi); cld"
22961+
22962+ gdt = get_cpu_gdt_table(smp_processor_id());
22963+
22964+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22965+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22966+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22967+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22968+
22969+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22970 : "=a" (return_code),
22971 "=b" (address),
22972 "=c" (length),
22973 "=d" (entry)
22974 : "0" (service),
22975 "1" (0),
22976- "D" (&bios32_indirect));
22977+ "D" (&bios32_indirect),
22978+ "r"(__PCIBIOS_DS)
22979+ : "memory");
22980+
22981+ pax_open_kernel();
22982+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22983+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22984+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22985+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22986+ pax_close_kernel();
22987+
22988 local_irq_restore(flags);
22989
22990 switch (return_code) {
22991- case 0:
22992- return address + entry;
22993- case 0x80: /* Not present */
22994- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22995- return 0;
22996- default: /* Shouldn't happen */
22997- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22998- service, return_code);
22999+ case 0: {
23000+ int cpu;
23001+ unsigned char flags;
23002+
23003+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23004+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23005+ printk(KERN_WARNING "bios32_service: not valid\n");
23006 return 0;
23007+ }
23008+ address = address + PAGE_OFFSET;
23009+ length += 16UL; /* some BIOSs underreport this... */
23010+ flags = 4;
23011+ if (length >= 64*1024*1024) {
23012+ length >>= PAGE_SHIFT;
23013+ flags |= 8;
23014+ }
23015+
23016+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23017+ gdt = get_cpu_gdt_table(cpu);
23018+ pack_descriptor(&d, address, length, 0x9b, flags);
23019+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23020+ pack_descriptor(&d, address, length, 0x93, flags);
23021+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23022+ }
23023+ return entry;
23024+ }
23025+ case 0x80: /* Not present */
23026+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23027+ return 0;
23028+ default: /* Shouldn't happen */
23029+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23030+ service, return_code);
23031+ return 0;
23032 }
23033 }
23034
23035 static struct {
23036 unsigned long address;
23037 unsigned short segment;
23038-} pci_indirect = { 0, __KERNEL_CS };
23039+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23040
23041-static int pci_bios_present;
23042+static int pci_bios_present __read_only;
23043
23044 static int __devinit check_pcibios(void)
23045 {
23046@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
23047 unsigned long flags, pcibios_entry;
23048
23049 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23050- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23051+ pci_indirect.address = pcibios_entry;
23052
23053 local_irq_save(flags);
23054- __asm__(
23055- "lcall *(%%edi); cld\n\t"
23056+ __asm__("movw %w6, %%ds\n\t"
23057+ "lcall *%%ss:(%%edi); cld\n\t"
23058+ "push %%ss\n\t"
23059+ "pop %%ds\n\t"
23060 "jc 1f\n\t"
23061 "xor %%ah, %%ah\n"
23062 "1:"
23063@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
23064 "=b" (ebx),
23065 "=c" (ecx)
23066 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23067- "D" (&pci_indirect)
23068+ "D" (&pci_indirect),
23069+ "r" (__PCIBIOS_DS)
23070 : "memory");
23071 local_irq_restore(flags);
23072
23073@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
23074
23075 switch (len) {
23076 case 1:
23077- __asm__("lcall *(%%esi); cld\n\t"
23078+ __asm__("movw %w6, %%ds\n\t"
23079+ "lcall *%%ss:(%%esi); cld\n\t"
23080+ "push %%ss\n\t"
23081+ "pop %%ds\n\t"
23082 "jc 1f\n\t"
23083 "xor %%ah, %%ah\n"
23084 "1:"
23085@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
23086 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23087 "b" (bx),
23088 "D" ((long)reg),
23089- "S" (&pci_indirect));
23090+ "S" (&pci_indirect),
23091+ "r" (__PCIBIOS_DS));
23092 /*
23093 * Zero-extend the result beyond 8 bits, do not trust the
23094 * BIOS having done it:
23095@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
23096 *value &= 0xff;
23097 break;
23098 case 2:
23099- __asm__("lcall *(%%esi); cld\n\t"
23100+ __asm__("movw %w6, %%ds\n\t"
23101+ "lcall *%%ss:(%%esi); cld\n\t"
23102+ "push %%ss\n\t"
23103+ "pop %%ds\n\t"
23104 "jc 1f\n\t"
23105 "xor %%ah, %%ah\n"
23106 "1:"
23107@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
23108 : "1" (PCIBIOS_READ_CONFIG_WORD),
23109 "b" (bx),
23110 "D" ((long)reg),
23111- "S" (&pci_indirect));
23112+ "S" (&pci_indirect),
23113+ "r" (__PCIBIOS_DS));
23114 /*
23115 * Zero-extend the result beyond 16 bits, do not trust the
23116 * BIOS having done it:
23117@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
23118 *value &= 0xffff;
23119 break;
23120 case 4:
23121- __asm__("lcall *(%%esi); cld\n\t"
23122+ __asm__("movw %w6, %%ds\n\t"
23123+ "lcall *%%ss:(%%esi); cld\n\t"
23124+ "push %%ss\n\t"
23125+ "pop %%ds\n\t"
23126 "jc 1f\n\t"
23127 "xor %%ah, %%ah\n"
23128 "1:"
23129@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
23130 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23131 "b" (bx),
23132 "D" ((long)reg),
23133- "S" (&pci_indirect));
23134+ "S" (&pci_indirect),
23135+ "r" (__PCIBIOS_DS));
23136 break;
23137 }
23138
23139@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
23140
23141 switch (len) {
23142 case 1:
23143- __asm__("lcall *(%%esi); cld\n\t"
23144+ __asm__("movw %w6, %%ds\n\t"
23145+ "lcall *%%ss:(%%esi); cld\n\t"
23146+ "push %%ss\n\t"
23147+ "pop %%ds\n\t"
23148 "jc 1f\n\t"
23149 "xor %%ah, %%ah\n"
23150 "1:"
23151@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
23152 "c" (value),
23153 "b" (bx),
23154 "D" ((long)reg),
23155- "S" (&pci_indirect));
23156+ "S" (&pci_indirect),
23157+ "r" (__PCIBIOS_DS));
23158 break;
23159 case 2:
23160- __asm__("lcall *(%%esi); cld\n\t"
23161+ __asm__("movw %w6, %%ds\n\t"
23162+ "lcall *%%ss:(%%esi); cld\n\t"
23163+ "push %%ss\n\t"
23164+ "pop %%ds\n\t"
23165 "jc 1f\n\t"
23166 "xor %%ah, %%ah\n"
23167 "1:"
23168@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
23169 "c" (value),
23170 "b" (bx),
23171 "D" ((long)reg),
23172- "S" (&pci_indirect));
23173+ "S" (&pci_indirect),
23174+ "r" (__PCIBIOS_DS));
23175 break;
23176 case 4:
23177- __asm__("lcall *(%%esi); cld\n\t"
23178+ __asm__("movw %w6, %%ds\n\t"
23179+ "lcall *%%ss:(%%esi); cld\n\t"
23180+ "push %%ss\n\t"
23181+ "pop %%ds\n\t"
23182 "jc 1f\n\t"
23183 "xor %%ah, %%ah\n"
23184 "1:"
23185@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
23186 "c" (value),
23187 "b" (bx),
23188 "D" ((long)reg),
23189- "S" (&pci_indirect));
23190+ "S" (&pci_indirect),
23191+ "r" (__PCIBIOS_DS));
23192 break;
23193 }
23194
23195@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
23196 * Function table for BIOS32 access
23197 */
23198
23199-static struct pci_raw_ops pci_bios_access = {
23200+static const struct pci_raw_ops pci_bios_access = {
23201 .read = pci_bios_read,
23202 .write = pci_bios_write
23203 };
23204@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23205 * Try to find PCI BIOS.
23206 */
23207
23208-static struct pci_raw_ops * __devinit pci_find_bios(void)
23209+static const struct pci_raw_ops * __devinit pci_find_bios(void)
23210 {
23211 union bios32 *check;
23212 unsigned char sum;
23213@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23214
23215 DBG("PCI: Fetching IRQ routing table... ");
23216 __asm__("push %%es\n\t"
23217+ "movw %w8, %%ds\n\t"
23218 "push %%ds\n\t"
23219 "pop %%es\n\t"
23220- "lcall *(%%esi); cld\n\t"
23221+ "lcall *%%ss:(%%esi); cld\n\t"
23222 "pop %%es\n\t"
23223+ "push %%ss\n\t"
23224+ "pop %%ds\n"
23225 "jc 1f\n\t"
23226 "xor %%ah, %%ah\n"
23227 "1:"
23228@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23229 "1" (0),
23230 "D" ((long) &opt),
23231 "S" (&pci_indirect),
23232- "m" (opt)
23233+ "m" (opt),
23234+ "r" (__PCIBIOS_DS)
23235 : "memory");
23236 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23237 if (ret & 0xff00)
23238@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23239 {
23240 int ret;
23241
23242- __asm__("lcall *(%%esi); cld\n\t"
23243+ __asm__("movw %w5, %%ds\n\t"
23244+ "lcall *%%ss:(%%esi); cld\n\t"
23245+ "push %%ss\n\t"
23246+ "pop %%ds\n"
23247 "jc 1f\n\t"
23248 "xor %%ah, %%ah\n"
23249 "1:"
23250@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23251 : "0" (PCIBIOS_SET_PCI_HW_INT),
23252 "b" ((dev->bus->number << 8) | dev->devfn),
23253 "c" ((irq << 8) | (pin + 10)),
23254- "S" (&pci_indirect));
23255+ "S" (&pci_indirect),
23256+ "r" (__PCIBIOS_DS));
23257 return !(ret & 0xff00);
23258 }
23259 EXPORT_SYMBOL(pcibios_set_irq_routing);
23260diff -urNp linux-2.6.32.45/arch/x86/power/cpu.c linux-2.6.32.45/arch/x86/power/cpu.c
23261--- linux-2.6.32.45/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23262+++ linux-2.6.32.45/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23263@@ -129,7 +129,7 @@ static void do_fpu_end(void)
23264 static void fix_processor_context(void)
23265 {
23266 int cpu = smp_processor_id();
23267- struct tss_struct *t = &per_cpu(init_tss, cpu);
23268+ struct tss_struct *t = init_tss + cpu;
23269
23270 set_tss_desc(cpu, t); /*
23271 * This just modifies memory; should not be
23272@@ -139,7 +139,9 @@ static void fix_processor_context(void)
23273 */
23274
23275 #ifdef CONFIG_X86_64
23276+ pax_open_kernel();
23277 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23278+ pax_close_kernel();
23279
23280 syscall_init(); /* This sets MSR_*STAR and related */
23281 #endif
23282diff -urNp linux-2.6.32.45/arch/x86/vdso/Makefile linux-2.6.32.45/arch/x86/vdso/Makefile
23283--- linux-2.6.32.45/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23284+++ linux-2.6.32.45/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23285@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23286 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23287 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23288
23289-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23290+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23291 GCOV_PROFILE := n
23292
23293 #
23294diff -urNp linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c
23295--- linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23296+++ linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23297@@ -22,24 +22,48 @@
23298 #include <asm/hpet.h>
23299 #include <asm/unistd.h>
23300 #include <asm/io.h>
23301+#include <asm/fixmap.h>
23302 #include "vextern.h"
23303
23304 #define gtod vdso_vsyscall_gtod_data
23305
23306+notrace noinline long __vdso_fallback_time(long *t)
23307+{
23308+ long secs;
23309+ asm volatile("syscall"
23310+ : "=a" (secs)
23311+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23312+ return secs;
23313+}
23314+
23315 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23316 {
23317 long ret;
23318 asm("syscall" : "=a" (ret) :
23319- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23320+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23321 return ret;
23322 }
23323
23324+notrace static inline cycle_t __vdso_vread_hpet(void)
23325+{
23326+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23327+}
23328+
23329+notrace static inline cycle_t __vdso_vread_tsc(void)
23330+{
23331+ cycle_t ret = (cycle_t)vget_cycles();
23332+
23333+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23334+}
23335+
23336 notrace static inline long vgetns(void)
23337 {
23338 long v;
23339- cycles_t (*vread)(void);
23340- vread = gtod->clock.vread;
23341- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23342+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23343+ v = __vdso_vread_tsc();
23344+ else
23345+ v = __vdso_vread_hpet();
23346+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23347 return (v * gtod->clock.mult) >> gtod->clock.shift;
23348 }
23349
23350@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23351
23352 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23353 {
23354- if (likely(gtod->sysctl_enabled))
23355+ if (likely(gtod->sysctl_enabled &&
23356+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23357+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23358 switch (clock) {
23359 case CLOCK_REALTIME:
23360 if (likely(gtod->clock.vread))
23361@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23362 int clock_gettime(clockid_t, struct timespec *)
23363 __attribute__((weak, alias("__vdso_clock_gettime")));
23364
23365-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23366+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23367 {
23368 long ret;
23369- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23370+ asm("syscall" : "=a" (ret) :
23371+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23372+ return ret;
23373+}
23374+
23375+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23376+{
23377+ if (likely(gtod->sysctl_enabled &&
23378+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23379+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23380+ {
23381 if (likely(tv != NULL)) {
23382 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23383 offsetof(struct timespec, tv_nsec) ||
23384@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23385 }
23386 return 0;
23387 }
23388- asm("syscall" : "=a" (ret) :
23389- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23390- return ret;
23391+ return __vdso_fallback_gettimeofday(tv, tz);
23392 }
23393 int gettimeofday(struct timeval *, struct timezone *)
23394 __attribute__((weak, alias("__vdso_gettimeofday")));
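
The vDSO cannot call ordinary kernel helpers, so the reworked __vdso_clock_gettime()/__vdso_gettimeofday() above identify the active clocksource by comparing gtod->clock.name byte by byte against "tsc" and "hpet" rather than jumping through the kernel-address vread pointer. A minimal sketch of that open-coded comparison, using a stand-in struct rather than the real vsyscall_gtod_data layout:

/*
 * Sketch of the byte-wise clocksource-name test used above.  The struct
 * below is invented; only the idea of avoiding strcmp() (or any other
 * external call) inside vDSO code is illustrated.
 */
#include <stdio.h>

struct fake_clock { char name[8]; };

static int name_is_tsc(const struct fake_clock *c)
{
	return c->name[0] == 't' && c->name[1] == 's' &&
	       c->name[2] == 'c' && !c->name[3];
}

static int name_is_hpet(const struct fake_clock *c)
{
	return c->name[0] == 'h' && c->name[1] == 'p' &&
	       c->name[2] == 'e' && c->name[3] == 't' && !c->name[4];
}

int main(void)
{
	struct fake_clock c = { "tsc" };

	printf("tsc:%d hpet:%d\n", name_is_tsc(&c), name_is_hpet(&c));
	return 0;
}
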
23395diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c
23396--- linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23397+++ linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23398@@ -25,6 +25,7 @@
23399 #include <asm/tlbflush.h>
23400 #include <asm/vdso.h>
23401 #include <asm/proto.h>
23402+#include <asm/mman.h>
23403
23404 enum {
23405 VDSO_DISABLED = 0,
23406@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23407 void enable_sep_cpu(void)
23408 {
23409 int cpu = get_cpu();
23410- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23411+ struct tss_struct *tss = init_tss + cpu;
23412
23413 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23414 put_cpu();
23415@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23416 gate_vma.vm_start = FIXADDR_USER_START;
23417 gate_vma.vm_end = FIXADDR_USER_END;
23418 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23419- gate_vma.vm_page_prot = __P101;
23420+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23421 /*
23422 * Make sure the vDSO gets into every core dump.
23423 * Dumping its contents makes post-mortem fully interpretable later
23424@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23425 if (compat)
23426 addr = VDSO_HIGH_BASE;
23427 else {
23428- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23429+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23430 if (IS_ERR_VALUE(addr)) {
23431 ret = addr;
23432 goto up_fail;
23433 }
23434 }
23435
23436- current->mm->context.vdso = (void *)addr;
23437+ current->mm->context.vdso = addr;
23438
23439 if (compat_uses_vma || !compat) {
23440 /*
23441@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23442 }
23443
23444 current_thread_info()->sysenter_return =
23445- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23446+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23447
23448 up_fail:
23449 if (ret)
23450- current->mm->context.vdso = NULL;
23451+ current->mm->context.vdso = 0;
23452
23453 up_write(&mm->mmap_sem);
23454
23455@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23456
23457 const char *arch_vma_name(struct vm_area_struct *vma)
23458 {
23459- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23460+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23461 return "[vdso]";
23462+
23463+#ifdef CONFIG_PAX_SEGMEXEC
23464+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23465+ return "[vdso]";
23466+#endif
23467+
23468 return NULL;
23469 }
23470
23471@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23472 struct mm_struct *mm = tsk->mm;
23473
23474 /* Check to see if this task was created in compat vdso mode */
23475- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23476+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23477 return &gate_vma;
23478 return NULL;
23479 }
23480diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso.lds.S linux-2.6.32.45/arch/x86/vdso/vdso.lds.S
23481--- linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23482+++ linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23483@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23484 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23485 #include "vextern.h"
23486 #undef VEXTERN
23487+
23488+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23489+VEXTERN(fallback_gettimeofday)
23490+VEXTERN(fallback_time)
23491+VEXTERN(getcpu)
23492+#undef VEXTERN
23493diff -urNp linux-2.6.32.45/arch/x86/vdso/vextern.h linux-2.6.32.45/arch/x86/vdso/vextern.h
23494--- linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23495+++ linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23496@@ -11,6 +11,5 @@
23497 put into vextern.h and be referenced as a pointer with vdso prefix.
23498 The main kernel later fills in the values. */
23499
23500-VEXTERN(jiffies)
23501 VEXTERN(vgetcpu_mode)
23502 VEXTERN(vsyscall_gtod_data)
23503diff -urNp linux-2.6.32.45/arch/x86/vdso/vma.c linux-2.6.32.45/arch/x86/vdso/vma.c
23504--- linux-2.6.32.45/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23505+++ linux-2.6.32.45/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
23506@@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
23507 if (!vbase)
23508 goto oom;
23509
23510- if (memcmp(vbase, "\177ELF", 4)) {
23511+ if (memcmp(vbase, ELFMAG, SELFMAG)) {
23512 printk("VDSO: I'm broken; not ELF\n");
23513 vdso_enabled = 0;
23514 }
23515@@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
23516 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23517 #include "vextern.h"
23518 #undef VEXTERN
23519+ vunmap(vbase);
23520 return 0;
23521
23522 oom:
23523@@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
23524 goto up_fail;
23525 }
23526
23527- current->mm->context.vdso = (void *)addr;
23528+ current->mm->context.vdso = addr;
23529
23530 ret = install_special_mapping(mm, addr, vdso_size,
23531 VM_READ|VM_EXEC|
23532@@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
23533 VM_ALWAYSDUMP,
23534 vdso_pages);
23535 if (ret) {
23536- current->mm->context.vdso = NULL;
23537+ current->mm->context.vdso = 0;
23538 goto up_fail;
23539 }
23540
23541@@ -132,10 +133,3 @@ up_fail:
23542 up_write(&mm->mmap_sem);
23543 return ret;
23544 }
23545-
23546-static __init int vdso_setup(char *s)
23547-{
23548- vdso_enabled = simple_strtoul(s, NULL, 0);
23549- return 0;
23550-}
23551-__setup("vdso=", vdso_setup);
23552diff -urNp linux-2.6.32.45/arch/x86/xen/enlighten.c linux-2.6.32.45/arch/x86/xen/enlighten.c
23553--- linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23554+++ linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23555@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23556
23557 struct shared_info xen_dummy_shared_info;
23558
23559-void *xen_initial_gdt;
23560-
23561 /*
23562 * Point at some empty memory to start with. We map the real shared_info
23563 * page as soon as fixmap is up and running.
23564@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23565
23566 preempt_disable();
23567
23568- start = __get_cpu_var(idt_desc).address;
23569+ start = (unsigned long)__get_cpu_var(idt_desc).address;
23570 end = start + __get_cpu_var(idt_desc).size + 1;
23571
23572 xen_mc_flush();
23573@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23574 #endif
23575 };
23576
23577-static void xen_reboot(int reason)
23578+static __noreturn void xen_reboot(int reason)
23579 {
23580 struct sched_shutdown r = { .reason = reason };
23581
23582@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23583 BUG();
23584 }
23585
23586-static void xen_restart(char *msg)
23587+static __noreturn void xen_restart(char *msg)
23588 {
23589 xen_reboot(SHUTDOWN_reboot);
23590 }
23591
23592-static void xen_emergency_restart(void)
23593+static __noreturn void xen_emergency_restart(void)
23594 {
23595 xen_reboot(SHUTDOWN_reboot);
23596 }
23597
23598-static void xen_machine_halt(void)
23599+static __noreturn void xen_machine_halt(void)
23600 {
23601 xen_reboot(SHUTDOWN_poweroff);
23602 }
23603@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23604 */
23605 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23606
23607-#ifdef CONFIG_X86_64
23608 /* Work out if we support NX */
23609- check_efer();
23610+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23611+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23612+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23613+ unsigned l, h;
23614+
23615+#ifdef CONFIG_X86_PAE
23616+ nx_enabled = 1;
23617+#endif
23618+ __supported_pte_mask |= _PAGE_NX;
23619+ rdmsr(MSR_EFER, l, h);
23620+ l |= EFER_NX;
23621+ wrmsr(MSR_EFER, l, h);
23622+ }
23623 #endif
23624
23625 xen_setup_features();
23626@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23627
23628 machine_ops = xen_machine_ops;
23629
23630- /*
23631- * The only reliable way to retain the initial address of the
23632- * percpu gdt_page is to remember it here, so we can go and
23633- * mark it RW later, when the initial percpu area is freed.
23634- */
23635- xen_initial_gdt = &per_cpu(gdt_page, 0);
23636-
23637 xen_smp_init();
23638
23639 pgd = (pgd_t *)xen_start_info->pt_base;
23640diff -urNp linux-2.6.32.45/arch/x86/xen/mmu.c linux-2.6.32.45/arch/x86/xen/mmu.c
23641--- linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23642+++ linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:18.000000000 -0400
23643@@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23644 convert_pfn_mfn(init_level4_pgt);
23645 convert_pfn_mfn(level3_ident_pgt);
23646 convert_pfn_mfn(level3_kernel_pgt);
23647+ convert_pfn_mfn(level3_vmalloc_pgt);
23648+ convert_pfn_mfn(level3_vmemmap_pgt);
23649
23650 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23651 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23652@@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23653 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23654 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23655 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23656+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23657+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23658 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23659+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23660 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23661 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23662
23663diff -urNp linux-2.6.32.45/arch/x86/xen/smp.c linux-2.6.32.45/arch/x86/xen/smp.c
23664--- linux-2.6.32.45/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23665+++ linux-2.6.32.45/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23666@@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23667 {
23668 BUG_ON(smp_processor_id() != 0);
23669 native_smp_prepare_boot_cpu();
23670-
23671- /* We've switched to the "real" per-cpu gdt, so make sure the
23672- old memory can be recycled */
23673- make_lowmem_page_readwrite(xen_initial_gdt);
23674-
23675 xen_setup_vcpu_info_placement();
23676 }
23677
23678@@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23679 gdt = get_cpu_gdt_table(cpu);
23680
23681 ctxt->flags = VGCF_IN_KERNEL;
23682- ctxt->user_regs.ds = __USER_DS;
23683- ctxt->user_regs.es = __USER_DS;
23684+ ctxt->user_regs.ds = __KERNEL_DS;
23685+ ctxt->user_regs.es = __KERNEL_DS;
23686 ctxt->user_regs.ss = __KERNEL_DS;
23687 #ifdef CONFIG_X86_32
23688 ctxt->user_regs.fs = __KERNEL_PERCPU;
23689- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23690+ savesegment(gs, ctxt->user_regs.gs);
23691 #else
23692 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23693 #endif
23694@@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23695 int rc;
23696
23697 per_cpu(current_task, cpu) = idle;
23698+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
23699 #ifdef CONFIG_X86_32
23700 irq_ctx_init(cpu);
23701 #else
23702 clear_tsk_thread_flag(idle, TIF_FORK);
23703- per_cpu(kernel_stack, cpu) =
23704- (unsigned long)task_stack_page(idle) -
23705- KERNEL_STACK_OFFSET + THREAD_SIZE;
23706+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23707 #endif
23708 xen_setup_runstate_info(cpu);
23709 xen_setup_timer(cpu);
23710diff -urNp linux-2.6.32.45/arch/x86/xen/xen-asm_32.S linux-2.6.32.45/arch/x86/xen/xen-asm_32.S
23711--- linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
23712+++ linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
23713@@ -83,14 +83,14 @@ ENTRY(xen_iret)
23714 ESP_OFFSET=4 # bytes pushed onto stack
23715
23716 /*
23717- * Store vcpu_info pointer for easy access. Do it this way to
23718- * avoid having to reload %fs
23719+ * Store vcpu_info pointer for easy access.
23720 */
23721 #ifdef CONFIG_SMP
23722- GET_THREAD_INFO(%eax)
23723- movl TI_cpu(%eax), %eax
23724- movl __per_cpu_offset(,%eax,4), %eax
23725- mov per_cpu__xen_vcpu(%eax), %eax
23726+ push %fs
23727+ mov $(__KERNEL_PERCPU), %eax
23728+ mov %eax, %fs
23729+ mov PER_CPU_VAR(xen_vcpu), %eax
23730+ pop %fs
23731 #else
23732 movl per_cpu__xen_vcpu, %eax
23733 #endif
23734diff -urNp linux-2.6.32.45/arch/x86/xen/xen-head.S linux-2.6.32.45/arch/x86/xen/xen-head.S
23735--- linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
23736+++ linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
23737@@ -19,6 +19,17 @@ ENTRY(startup_xen)
23738 #ifdef CONFIG_X86_32
23739 mov %esi,xen_start_info
23740 mov $init_thread_union+THREAD_SIZE,%esp
23741+#ifdef CONFIG_SMP
23742+ movl $cpu_gdt_table,%edi
23743+ movl $__per_cpu_load,%eax
23744+ movw %ax,__KERNEL_PERCPU + 2(%edi)
23745+ rorl $16,%eax
23746+ movb %al,__KERNEL_PERCPU + 4(%edi)
23747+ movb %ah,__KERNEL_PERCPU + 7(%edi)
23748+ movl $__per_cpu_end - 1,%eax
23749+ subl $__per_cpu_start,%eax
23750+ movw %ax,__KERNEL_PERCPU + 0(%edi)
23751+#endif
23752 #else
23753 mov %rsi,xen_start_info
23754 mov $init_thread_union+THREAD_SIZE,%rsp
23755diff -urNp linux-2.6.32.45/arch/x86/xen/xen-ops.h linux-2.6.32.45/arch/x86/xen/xen-ops.h
23756--- linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
23757+++ linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
23758@@ -10,8 +10,6 @@
23759 extern const char xen_hypervisor_callback[];
23760 extern const char xen_failsafe_callback[];
23761
23762-extern void *xen_initial_gdt;
23763-
23764 struct trap_info;
23765 void xen_copy_trap_info(struct trap_info *traps);
23766
23767diff -urNp linux-2.6.32.45/block/blk-integrity.c linux-2.6.32.45/block/blk-integrity.c
23768--- linux-2.6.32.45/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
23769+++ linux-2.6.32.45/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
23770@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
23771 NULL,
23772 };
23773
23774-static struct sysfs_ops integrity_ops = {
23775+static const struct sysfs_ops integrity_ops = {
23776 .show = &integrity_attr_show,
23777 .store = &integrity_attr_store,
23778 };
23779diff -urNp linux-2.6.32.45/block/blk-iopoll.c linux-2.6.32.45/block/blk-iopoll.c
23780--- linux-2.6.32.45/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
23781+++ linux-2.6.32.45/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
23782@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23783 }
23784 EXPORT_SYMBOL(blk_iopoll_complete);
23785
23786-static void blk_iopoll_softirq(struct softirq_action *h)
23787+static void blk_iopoll_softirq(void)
23788 {
23789 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23790 int rearm = 0, budget = blk_iopoll_budget;
23791diff -urNp linux-2.6.32.45/block/blk-map.c linux-2.6.32.45/block/blk-map.c
23792--- linux-2.6.32.45/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
23793+++ linux-2.6.32.45/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
23794@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
23795 * direct dma. else, set up kernel bounce buffers
23796 */
23797 uaddr = (unsigned long) ubuf;
23798- if (blk_rq_aligned(q, ubuf, len) && !map_data)
23799+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
23800 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
23801 else
23802 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
23803@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
23804 for (i = 0; i < iov_count; i++) {
23805 unsigned long uaddr = (unsigned long)iov[i].iov_base;
23806
23807+ if (!iov[i].iov_len)
23808+ return -EINVAL;
23809+
23810 if (uaddr & queue_dma_alignment(q)) {
23811 unaligned = 1;
23812 break;
23813 }
23814- if (!iov[i].iov_len)
23815- return -EINVAL;
23816 }
23817
23818 if (unaligned || (q->dma_pad_mask & len) || map_data)
23819@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
23820 if (!len || !kbuf)
23821 return -EINVAL;
23822
23823- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
23824+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
23825 if (do_copy)
23826 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23827 else
23828diff -urNp linux-2.6.32.45/block/blk-softirq.c linux-2.6.32.45/block/blk-softirq.c
23829--- linux-2.6.32.45/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
23830+++ linux-2.6.32.45/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
23831@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23832 * Softirq action handler - move entries to local list and loop over them
23833 * while passing them to the queue registered handler.
23834 */
23835-static void blk_done_softirq(struct softirq_action *h)
23836+static void blk_done_softirq(void)
23837 {
23838 struct list_head *cpu_list, local_list;
23839
23840diff -urNp linux-2.6.32.45/block/blk-sysfs.c linux-2.6.32.45/block/blk-sysfs.c
23841--- linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
23842+++ linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
23843@@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
23844 kmem_cache_free(blk_requestq_cachep, q);
23845 }
23846
23847-static struct sysfs_ops queue_sysfs_ops = {
23848+static const struct sysfs_ops queue_sysfs_ops = {
23849 .show = queue_attr_show,
23850 .store = queue_attr_store,
23851 };
23852diff -urNp linux-2.6.32.45/block/bsg.c linux-2.6.32.45/block/bsg.c
23853--- linux-2.6.32.45/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
23854+++ linux-2.6.32.45/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
23855@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23856 struct sg_io_v4 *hdr, struct bsg_device *bd,
23857 fmode_t has_write_perm)
23858 {
23859+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23860+ unsigned char *cmdptr;
23861+
23862 if (hdr->request_len > BLK_MAX_CDB) {
23863 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23864 if (!rq->cmd)
23865 return -ENOMEM;
23866- }
23867+ cmdptr = rq->cmd;
23868+ } else
23869+ cmdptr = tmpcmd;
23870
23871- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23872+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
23873 hdr->request_len))
23874 return -EFAULT;
23875
23876+ if (cmdptr != rq->cmd)
23877+ memcpy(rq->cmd, cmdptr, hdr->request_len);
23878+
23879 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23880 if (blk_verify_command(rq->cmd, has_write_perm))
23881 return -EPERM;
23882diff -urNp linux-2.6.32.45/block/elevator.c linux-2.6.32.45/block/elevator.c
23883--- linux-2.6.32.45/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
23884+++ linux-2.6.32.45/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
23885@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
23886 return error;
23887 }
23888
23889-static struct sysfs_ops elv_sysfs_ops = {
23890+static const struct sysfs_ops elv_sysfs_ops = {
23891 .show = elv_attr_show,
23892 .store = elv_attr_store,
23893 };
23894diff -urNp linux-2.6.32.45/block/scsi_ioctl.c linux-2.6.32.45/block/scsi_ioctl.c
23895--- linux-2.6.32.45/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
23896+++ linux-2.6.32.45/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
23897@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
23898 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23899 struct sg_io_hdr *hdr, fmode_t mode)
23900 {
23901- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23902+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23903+ unsigned char *cmdptr;
23904+
23905+ if (rq->cmd != rq->__cmd)
23906+ cmdptr = rq->cmd;
23907+ else
23908+ cmdptr = tmpcmd;
23909+
23910+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23911 return -EFAULT;
23912+
23913+ if (cmdptr != rq->cmd)
23914+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23915+
23916 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23917 return -EPERM;
23918
23919@@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
23920 int err;
23921 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23922 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23923+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23924+ unsigned char *cmdptr;
23925
23926 if (!sic)
23927 return -EINVAL;
23928@@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
23929 */
23930 err = -EFAULT;
23931 rq->cmd_len = cmdlen;
23932- if (copy_from_user(rq->cmd, sic->data, cmdlen))
23933+
23934+ if (rq->cmd != rq->__cmd)
23935+ cmdptr = rq->cmd;
23936+ else
23937+ cmdptr = tmpcmd;
23938+
23939+ if (copy_from_user(cmdptr, sic->data, cmdlen))
23940 goto error;
23941
23942+ if (rq->cmd != cmdptr)
23943+ memcpy(rq->cmd, cmdptr, cmdlen);
23944+
23945 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23946 goto error;
23947
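
The bsg and scsi_ioctl hunks above share one idiom: copy_from_user() first fills a fixed-size stack buffer (tmpcmd), and only after the copy has fully succeeded are the bytes moved into rq->cmd, so a faulting user pointer cannot leave the command block half-written. A small sketch of that bounce-buffer pattern, with an invented fake_copy_from_user() standing in for copy_from_user():

/*
 * Sketch of the bounce-buffer copy pattern from the hunks above.
 * fake_copy_from_user() stands in for copy_from_user(); the point is
 * that 'cmd' is only updated after the whole copy has succeeded.
 */
#include <string.h>
#include <stdio.h>

static int fake_copy_from_user(void *dst, const void *src, size_t len)
{
	if (!src)
		return -1;		/* pretend the user pointer faulted */
	memcpy(dst, src, len);
	return 0;
}

static int fill_cmd(unsigned char *cmd, const void *user, size_t len)
{
	unsigned char tmpcmd[16];

	if (len > sizeof(tmpcmd))
		return -1;
	if (fake_copy_from_user(tmpcmd, user, len))
		return -1;		/* cmd left untouched on failure */
	memcpy(cmd, tmpcmd, len);	/* commit only the validated bytes */
	return 0;
}

int main(void)
{
	unsigned char cmd[16] = { 0 };
	unsigned char req[6] = { 0x12, 0, 0, 0, 36, 0 };
	int ok = fill_cmd(cmd, req, sizeof(req));

	printf("ok=%d first=0x%x\n", ok, (unsigned)cmd[0]);
	return 0;
}
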
23948diff -urNp linux-2.6.32.45/crypto/cryptd.c linux-2.6.32.45/crypto/cryptd.c
23949--- linux-2.6.32.45/crypto/cryptd.c 2011-03-27 14:31:47.000000000 -0400
23950+++ linux-2.6.32.45/crypto/cryptd.c 2011-08-05 20:33:55.000000000 -0400
23951@@ -214,7 +214,7 @@ static int cryptd_blkcipher_enqueue(stru
23952 struct cryptd_queue *queue;
23953
23954 queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
23955- rctx->complete = req->base.complete;
23956+ *(void **)&rctx->complete = req->base.complete;
23957 req->base.complete = complete;
23958
23959 return cryptd_enqueue_request(queue, &req->base);
23960diff -urNp linux-2.6.32.45/crypto/gf128mul.c linux-2.6.32.45/crypto/gf128mul.c
23961--- linux-2.6.32.45/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
23962+++ linux-2.6.32.45/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
23963@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
23964 for (i = 0; i < 7; ++i)
23965 gf128mul_x_lle(&p[i + 1], &p[i]);
23966
23967- memset(r, 0, sizeof(r));
23968+ memset(r, 0, sizeof(*r));
23969 for (i = 0;;) {
23970 u8 ch = ((u8 *)b)[15 - i];
23971
23972@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
23973 for (i = 0; i < 7; ++i)
23974 gf128mul_x_bbe(&p[i + 1], &p[i]);
23975
23976- memset(r, 0, sizeof(r));
23977+ memset(r, 0, sizeof(*r));
23978 for (i = 0;;) {
23979 u8 ch = ((u8 *)b)[i];
23980
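
The gf128mul change above fixes a classic sizeof-on-a-pointer bug: memset(r, 0, sizeof(r)) zeroes only the pointer-sized 4 or 8 bytes, whereas sizeof(*r) zeroes the full 16-byte be128. A quick illustration, with be128 modelled here as two 64-bit words just for the sketch:

/*
 * Demonstrates why sizeof(r) is wrong for a pointer argument: it yields
 * the pointer size, not the size of the pointed-to object.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct { uint64_t a, b; } be128;	/* 16 bytes, as in the hunk */

int main(void)
{
	be128 x, *r = &x;

	printf("sizeof(r)  = %zu\n", sizeof(r));	/* pointer size: 4 or 8 */
	printf("sizeof(*r) = %zu\n", sizeof(*r));	/* full struct: 16 */
	return 0;
}
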
23981diff -urNp linux-2.6.32.45/crypto/serpent.c linux-2.6.32.45/crypto/serpent.c
23982--- linux-2.6.32.45/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
23983+++ linux-2.6.32.45/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
23984@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23985 u32 r0,r1,r2,r3,r4;
23986 int i;
23987
23988+ pax_track_stack();
23989+
23990 /* Copy key, add padding */
23991
23992 for (i = 0; i < keylen; ++i)
23993diff -urNp linux-2.6.32.45/Documentation/dontdiff linux-2.6.32.45/Documentation/dontdiff
23994--- linux-2.6.32.45/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
23995+++ linux-2.6.32.45/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
23996@@ -1,13 +1,16 @@
23997 *.a
23998 *.aux
23999 *.bin
24000+*.cis
24001 *.cpio
24002 *.csp
24003+*.dbg
24004 *.dsp
24005 *.dvi
24006 *.elf
24007 *.eps
24008 *.fw
24009+*.gcno
24010 *.gen.S
24011 *.gif
24012 *.grep
24013@@ -38,8 +41,10 @@
24014 *.tab.h
24015 *.tex
24016 *.ver
24017+*.vim
24018 *.xml
24019 *_MODULES
24020+*_reg_safe.h
24021 *_vga16.c
24022 *~
24023 *.9
24024@@ -49,11 +54,16 @@
24025 53c700_d.h
24026 CVS
24027 ChangeSet
24028+GPATH
24029+GRTAGS
24030+GSYMS
24031+GTAGS
24032 Image
24033 Kerntypes
24034 Module.markers
24035 Module.symvers
24036 PENDING
24037+PERF*
24038 SCCS
24039 System.map*
24040 TAGS
24041@@ -76,7 +86,11 @@ btfixupprep
24042 build
24043 bvmlinux
24044 bzImage*
24045+capability_names.h
24046+capflags.c
24047 classlist.h*
24048+clut_vga16.c
24049+common-cmds.h
24050 comp*.log
24051 compile.h*
24052 conf
24053@@ -103,13 +117,14 @@ gen_crc32table
24054 gen_init_cpio
24055 genksyms
24056 *_gray256.c
24057+hash
24058 ihex2fw
24059 ikconfig.h*
24060 initramfs_data.cpio
24061+initramfs_data.cpio.bz2
24062 initramfs_data.cpio.gz
24063 initramfs_list
24064 kallsyms
24065-kconfig
24066 keywords.c
24067 ksym.c*
24068 ksym.h*
24069@@ -133,7 +148,9 @@ mkboot
24070 mkbugboot
24071 mkcpustr
24072 mkdep
24073+mkpiggy
24074 mkprep
24075+mkregtable
24076 mktables
24077 mktree
24078 modpost
24079@@ -149,6 +166,7 @@ patches*
24080 pca200e.bin
24081 pca200e_ecd.bin2
24082 piggy.gz
24083+piggy.S
24084 piggyback
24085 pnmtologo
24086 ppc_defs.h*
24087@@ -157,12 +175,15 @@ qconf
24088 raid6altivec*.c
24089 raid6int*.c
24090 raid6tables.c
24091+regdb.c
24092 relocs
24093+rlim_names.h
24094 series
24095 setup
24096 setup.bin
24097 setup.elf
24098 sImage
24099+slabinfo
24100 sm_tbl*
24101 split-include
24102 syscalltab.h
24103@@ -186,14 +207,20 @@ version.h*
24104 vmlinux
24105 vmlinux-*
24106 vmlinux.aout
24107+vmlinux.bin.all
24108+vmlinux.bin.bz2
24109 vmlinux.lds
24110+vmlinux.relocs
24111+voffset.h
24112 vsyscall.lds
24113 vsyscall_32.lds
24114 wanxlfw.inc
24115 uImage
24116 unifdef
24117+utsrelease.h
24118 wakeup.bin
24119 wakeup.elf
24120 wakeup.lds
24121 zImage*
24122 zconf.hash.c
24123+zoffset.h
24124diff -urNp linux-2.6.32.45/Documentation/kernel-parameters.txt linux-2.6.32.45/Documentation/kernel-parameters.txt
24125--- linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
24126+++ linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
24127@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
24128 the specified number of seconds. This is to be used if
24129 your oopses keep scrolling off the screen.
24130
24131+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
24132+ virtualization environments that don't cope well with the
24133+ expand down segment used by UDEREF on X86-32 or the frequent
24134+ page table updates on X86-64.
24135+
24136+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
24137+
24138 pcbit= [HW,ISDN]
24139
24140 pcd. [PARIDE]
24141diff -urNp linux-2.6.32.45/drivers/acpi/acpi_pad.c linux-2.6.32.45/drivers/acpi/acpi_pad.c
24142--- linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
24143+++ linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
24144@@ -30,7 +30,7 @@
24145 #include <acpi/acpi_bus.h>
24146 #include <acpi/acpi_drivers.h>
24147
24148-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
24149+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
24150 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
24151 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
24152 static DEFINE_MUTEX(isolated_cpus_lock);
24153diff -urNp linux-2.6.32.45/drivers/acpi/battery.c linux-2.6.32.45/drivers/acpi/battery.c
24154--- linux-2.6.32.45/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
24155+++ linux-2.6.32.45/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
24156@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
24157 }
24158
24159 static struct battery_file {
24160- struct file_operations ops;
24161+ const struct file_operations ops;
24162 mode_t mode;
24163 const char *name;
24164 } acpi_battery_file[] = {
24165diff -urNp linux-2.6.32.45/drivers/acpi/dock.c linux-2.6.32.45/drivers/acpi/dock.c
24166--- linux-2.6.32.45/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
24167+++ linux-2.6.32.45/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
24168@@ -77,7 +77,7 @@ struct dock_dependent_device {
24169 struct list_head list;
24170 struct list_head hotplug_list;
24171 acpi_handle handle;
24172- struct acpi_dock_ops *ops;
24173+ const struct acpi_dock_ops *ops;
24174 void *context;
24175 };
24176
24177@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
24178 * the dock driver after _DCK is executed.
24179 */
24180 int
24181-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
24182+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
24183 void *context)
24184 {
24185 struct dock_dependent_device *dd;
24186diff -urNp linux-2.6.32.45/drivers/acpi/osl.c linux-2.6.32.45/drivers/acpi/osl.c
24187--- linux-2.6.32.45/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
24188+++ linux-2.6.32.45/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
24189@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
24190 void __iomem *virt_addr;
24191
24192 virt_addr = ioremap(phys_addr, width);
24193+ if (!virt_addr)
24194+ return AE_NO_MEMORY;
24195 if (!value)
24196 value = &dummy;
24197
24198@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
24199 void __iomem *virt_addr;
24200
24201 virt_addr = ioremap(phys_addr, width);
24202+ if (!virt_addr)
24203+ return AE_NO_MEMORY;
24204
24205 switch (width) {
24206 case 8:
24207diff -urNp linux-2.6.32.45/drivers/acpi/power_meter.c linux-2.6.32.45/drivers/acpi/power_meter.c
24208--- linux-2.6.32.45/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
24209+++ linux-2.6.32.45/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
24210@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
24211 return res;
24212
24213 temp /= 1000;
24214- if (temp < 0)
24215- return -EINVAL;
24216
24217 mutex_lock(&resource->lock);
24218 resource->trip[attr->index - 7] = temp;
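The check deleted from power_meter.c above is dead code: temp appears to be an unsigned long in this function (filled in by strict_strtoul()), so temp < 0 can never be true and compilers warn about the always-false comparison. The same reasoning presumably motivates the BUG_ON change in processor_core.c below, where pr->id is an unsigned id. A two-line demonstration, assuming an unsigned type as in the driver:

#include <stdio.h>

int main(void)
{
	unsigned long temp = 0;

	temp /= 1000;
	if (temp < 0)		/* always false for an unsigned type */
		printf("never reached\n");
	printf("temp = %lu\n", temp);
	return 0;
}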
24219diff -urNp linux-2.6.32.45/drivers/acpi/proc.c linux-2.6.32.45/drivers/acpi/proc.c
24220--- linux-2.6.32.45/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24221+++ linux-2.6.32.45/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24222@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24223 size_t count, loff_t * ppos)
24224 {
24225 struct list_head *node, *next;
24226- char strbuf[5];
24227- char str[5] = "";
24228- unsigned int len = count;
24229+ char strbuf[5] = {0};
24230 struct acpi_device *found_dev = NULL;
24231
24232- if (len > 4)
24233- len = 4;
24234- if (len < 0)
24235- return -EFAULT;
24236+ if (count > 4)
24237+ count = 4;
24238
24239- if (copy_from_user(strbuf, buffer, len))
24240+ if (copy_from_user(strbuf, buffer, count))
24241 return -EFAULT;
24242- strbuf[len] = '\0';
24243- sscanf(strbuf, "%s", str);
24244+ strbuf[count] = '\0';
24245
24246 mutex_lock(&acpi_device_lock);
24247 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24248@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24249 if (!dev->wakeup.flags.valid)
24250 continue;
24251
24252- if (!strncmp(dev->pnp.bus_id, str, 4)) {
24253+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24254 dev->wakeup.state.enabled =
24255 dev->wakeup.state.enabled ? 0 : 1;
24256 found_dev = dev;
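The proc.c rewrite above fixes a classic write-handler bug: count is a size_t, so the old "if (len < 0)" test could never fire, and the intermediate sscanf() into a second 5-byte buffer added nothing. The replacement clamps the user-supplied length, copies at most 4 bytes, and NUL-terminates in place. A user-space model of the fixed pattern — copy_from_user() is kernel-only, so memcpy() stands in for it here:

#include <stdio.h>
#include <string.h>

/* Model of the fixed write handler: clamp, copy, terminate. */
static int handle_write(const char *user_buf, size_t count)
{
	char strbuf[5] = {0};

	if (count > 4)
		count = 4;			/* clamp to buffer size - 1 */

	memcpy(strbuf, user_buf, count);	/* copy_from_user() in the kernel */
	strbuf[count] = '\0';			/* always NUL-terminated */

	printf("device id: \"%s\"\n", strbuf);
	return 0;
}

int main(void)
{
	return handle_write("PCI0 and some trailing junk", 27);
}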
24257diff -urNp linux-2.6.32.45/drivers/acpi/processor_core.c linux-2.6.32.45/drivers/acpi/processor_core.c
24258--- linux-2.6.32.45/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24259+++ linux-2.6.32.45/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24260@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24261 return 0;
24262 }
24263
24264- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24265+ BUG_ON(pr->id >= nr_cpu_ids);
24266
24267 /*
24268 * Buggy BIOS check
24269diff -urNp linux-2.6.32.45/drivers/acpi/sbshc.c linux-2.6.32.45/drivers/acpi/sbshc.c
24270--- linux-2.6.32.45/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24271+++ linux-2.6.32.45/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24272@@ -17,7 +17,7 @@
24273
24274 #define PREFIX "ACPI: "
24275
24276-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24277+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24278 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24279
24280 struct acpi_smb_hc {
24281diff -urNp linux-2.6.32.45/drivers/acpi/sleep.c linux-2.6.32.45/drivers/acpi/sleep.c
24282--- linux-2.6.32.45/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24283+++ linux-2.6.32.45/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24284@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24285 }
24286 }
24287
24288-static struct platform_suspend_ops acpi_suspend_ops = {
24289+static const struct platform_suspend_ops acpi_suspend_ops = {
24290 .valid = acpi_suspend_state_valid,
24291 .begin = acpi_suspend_begin,
24292 .prepare_late = acpi_pm_prepare,
24293@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24294 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24295 * been requested.
24296 */
24297-static struct platform_suspend_ops acpi_suspend_ops_old = {
24298+static const struct platform_suspend_ops acpi_suspend_ops_old = {
24299 .valid = acpi_suspend_state_valid,
24300 .begin = acpi_suspend_begin_old,
24301 .prepare_late = acpi_pm_disable_gpes,
24302@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24303 acpi_enable_all_runtime_gpes();
24304 }
24305
24306-static struct platform_hibernation_ops acpi_hibernation_ops = {
24307+static const struct platform_hibernation_ops acpi_hibernation_ops = {
24308 .begin = acpi_hibernation_begin,
24309 .end = acpi_pm_end,
24310 .pre_snapshot = acpi_hibernation_pre_snapshot,
24311@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24312 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24313 * been requested.
24314 */
24315-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24316+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24317 .begin = acpi_hibernation_begin_old,
24318 .end = acpi_pm_end,
24319 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24320diff -urNp linux-2.6.32.45/drivers/acpi/video.c linux-2.6.32.45/drivers/acpi/video.c
24321--- linux-2.6.32.45/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24322+++ linux-2.6.32.45/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24323@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24324 vd->brightness->levels[request_level]);
24325 }
24326
24327-static struct backlight_ops acpi_backlight_ops = {
24328+static const struct backlight_ops acpi_backlight_ops = {
24329 .get_brightness = acpi_video_get_brightness,
24330 .update_status = acpi_video_set_brightness,
24331 };
24332diff -urNp linux-2.6.32.45/drivers/ata/ahci.c linux-2.6.32.45/drivers/ata/ahci.c
24333--- linux-2.6.32.45/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24334+++ linux-2.6.32.45/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24335@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24336 .sdev_attrs = ahci_sdev_attrs,
24337 };
24338
24339-static struct ata_port_operations ahci_ops = {
24340+static const struct ata_port_operations ahci_ops = {
24341 .inherits = &sata_pmp_port_ops,
24342
24343 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24344@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24345 .port_stop = ahci_port_stop,
24346 };
24347
24348-static struct ata_port_operations ahci_vt8251_ops = {
24349+static const struct ata_port_operations ahci_vt8251_ops = {
24350 .inherits = &ahci_ops,
24351 .hardreset = ahci_vt8251_hardreset,
24352 };
24353
24354-static struct ata_port_operations ahci_p5wdh_ops = {
24355+static const struct ata_port_operations ahci_p5wdh_ops = {
24356 .inherits = &ahci_ops,
24357 .hardreset = ahci_p5wdh_hardreset,
24358 };
24359
24360-static struct ata_port_operations ahci_sb600_ops = {
24361+static const struct ata_port_operations ahci_sb600_ops = {
24362 .inherits = &ahci_ops,
24363 .softreset = ahci_sb600_softreset,
24364 .pmp_softreset = ahci_sb600_softreset,
24365diff -urNp linux-2.6.32.45/drivers/ata/ata_generic.c linux-2.6.32.45/drivers/ata/ata_generic.c
24366--- linux-2.6.32.45/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24367+++ linux-2.6.32.45/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24368@@ -104,7 +104,7 @@ static struct scsi_host_template generic
24369 ATA_BMDMA_SHT(DRV_NAME),
24370 };
24371
24372-static struct ata_port_operations generic_port_ops = {
24373+static const struct ata_port_operations generic_port_ops = {
24374 .inherits = &ata_bmdma_port_ops,
24375 .cable_detect = ata_cable_unknown,
24376 .set_mode = generic_set_mode,
24377diff -urNp linux-2.6.32.45/drivers/ata/ata_piix.c linux-2.6.32.45/drivers/ata/ata_piix.c
24378--- linux-2.6.32.45/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24379+++ linux-2.6.32.45/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24380@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24381 ATA_BMDMA_SHT(DRV_NAME),
24382 };
24383
24384-static struct ata_port_operations piix_pata_ops = {
24385+static const struct ata_port_operations piix_pata_ops = {
24386 .inherits = &ata_bmdma32_port_ops,
24387 .cable_detect = ata_cable_40wire,
24388 .set_piomode = piix_set_piomode,
24389@@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24390 .prereset = piix_pata_prereset,
24391 };
24392
24393-static struct ata_port_operations piix_vmw_ops = {
24394+static const struct ata_port_operations piix_vmw_ops = {
24395 .inherits = &piix_pata_ops,
24396 .bmdma_status = piix_vmw_bmdma_status,
24397 };
24398
24399-static struct ata_port_operations ich_pata_ops = {
24400+static const struct ata_port_operations ich_pata_ops = {
24401 .inherits = &piix_pata_ops,
24402 .cable_detect = ich_pata_cable_detect,
24403 .set_dmamode = ich_set_dmamode,
24404 };
24405
24406-static struct ata_port_operations piix_sata_ops = {
24407+static const struct ata_port_operations piix_sata_ops = {
24408 .inherits = &ata_bmdma_port_ops,
24409 };
24410
24411-static struct ata_port_operations piix_sidpr_sata_ops = {
24412+static const struct ata_port_operations piix_sidpr_sata_ops = {
24413 .inherits = &piix_sata_ops,
24414 .hardreset = sata_std_hardreset,
24415 .scr_read = piix_sidpr_scr_read,
24416diff -urNp linux-2.6.32.45/drivers/ata/libata-acpi.c linux-2.6.32.45/drivers/ata/libata-acpi.c
24417--- linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24418+++ linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24419@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24420 ata_acpi_uevent(dev->link->ap, dev, event);
24421 }
24422
24423-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24424+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24425 .handler = ata_acpi_dev_notify_dock,
24426 .uevent = ata_acpi_dev_uevent,
24427 };
24428
24429-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24430+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24431 .handler = ata_acpi_ap_notify_dock,
24432 .uevent = ata_acpi_ap_uevent,
24433 };
24434diff -urNp linux-2.6.32.45/drivers/ata/libata-core.c linux-2.6.32.45/drivers/ata/libata-core.c
24435--- linux-2.6.32.45/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24436+++ linux-2.6.32.45/drivers/ata/libata-core.c 2011-08-05 20:33:55.000000000 -0400
24437@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24438 struct ata_port *ap;
24439 unsigned int tag;
24440
24441- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24442+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24443 ap = qc->ap;
24444
24445 qc->flags = 0;
24446@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24447 struct ata_port *ap;
24448 struct ata_link *link;
24449
24450- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24451+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24452 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24453 ap = qc->ap;
24454 link = qc->dev->link;
24455@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24456 * LOCKING:
24457 * None.
24458 */
24459-static void ata_finalize_port_ops(struct ata_port_operations *ops)
24460+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24461 {
24462 static DEFINE_SPINLOCK(lock);
24463 const struct ata_port_operations *cur;
24464@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24465 return;
24466
24467 spin_lock(&lock);
24468+ pax_open_kernel();
24469
24470 for (cur = ops->inherits; cur; cur = cur->inherits) {
24471 void **inherit = (void **)cur;
24472@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24473 if (IS_ERR(*pp))
24474 *pp = NULL;
24475
24476- ops->inherits = NULL;
24477+ *(struct ata_port_operations **)&ops->inherits = NULL;
24478
24479+ pax_close_kernel();
24480 spin_unlock(&lock);
24481 }
24482
24483@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24484 */
24485 /* KILLME - the only user left is ipr */
24486 void ata_host_init(struct ata_host *host, struct device *dev,
24487- unsigned long flags, struct ata_port_operations *ops)
24488+ unsigned long flags, const struct ata_port_operations *ops)
24489 {
24490 spin_lock_init(&host->lock);
24491 host->dev = dev;
24492@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24493 /* truly dummy */
24494 }
24495
24496-struct ata_port_operations ata_dummy_port_ops = {
24497+const struct ata_port_operations ata_dummy_port_ops = {
24498 .qc_prep = ata_noop_qc_prep,
24499 .qc_issue = ata_dummy_qc_issue,
24500 .error_handler = ata_dummy_error_handler,
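The libata-core.c changes above show the other half of the constification story: ata_port_operations tables become const everywhere, yet ata_finalize_port_ops() still has to flatten the ->inherits chain once at registration time. The patch therefore writes through an explicit cast and brackets the update with pax_open_kernel()/pax_close_kernel(), PaX helpers that, as far as I recall, temporarily lift the write protection on otherwise read-only kernel data (the helpers themselves are not shown in this hunk). A minimal user-space sketch of the "write once through a cast" idiom; here only the pointer is const, not the object, so a plain cast is legal and the PaX helpers are unnecessary:

#include <stdio.h>

struct port_ops {			/* hypothetical, not libata's real layout */
	const struct port_ops *inherits;
	void (*qc_issue)(void);
};

static void base_issue(void) { puts("qc_issue from the base table"); }

static struct port_ops base_ops    = { .qc_issue = base_issue };
static struct port_ops derived_raw = { .inherits = &base_ops };

/* The rest of the program only ever sees a const pointer. */
static const struct port_ops *derived_ops = &derived_raw;

/* One-time flattening of the inheritance chain, in the spirit of
 * ata_finalize_port_ops().  The write goes through a cast; in the kernel
 * the table sits in a read-only section, so PaX additionally wraps the
 * update in pax_open_kernel()/pax_close_kernel(). */
static void finalize_port_ops(const struct port_ops *ops)
{
	struct port_ops *w = (struct port_ops *)ops;	/* cast away const */
	const struct port_ops *cur;

	for (cur = ops->inherits; cur; cur = cur->inherits) {
		if (!w->qc_issue)
			w->qc_issue = cur->qc_issue;	/* inherit missing slot */
	}
	w->inherits = NULL;				/* chain consumed */
}

int main(void)
{
	finalize_port_ops(derived_ops);
	derived_ops->qc_issue();
	return 0;
}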
24501diff -urNp linux-2.6.32.45/drivers/ata/libata-eh.c linux-2.6.32.45/drivers/ata/libata-eh.c
24502--- linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:35:28.000000000 -0400
24503+++ linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:33:59.000000000 -0400
24504@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24505 {
24506 struct ata_link *link;
24507
24508+ pax_track_stack();
24509+
24510 ata_for_each_link(link, ap, HOST_FIRST)
24511 ata_eh_link_report(link);
24512 }
24513@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24514 */
24515 void ata_std_error_handler(struct ata_port *ap)
24516 {
24517- struct ata_port_operations *ops = ap->ops;
24518+ const struct ata_port_operations *ops = ap->ops;
24519 ata_reset_fn_t hardreset = ops->hardreset;
24520
24521 /* ignore built-in hardreset if SCR access is not available */
24522diff -urNp linux-2.6.32.45/drivers/ata/libata-pmp.c linux-2.6.32.45/drivers/ata/libata-pmp.c
24523--- linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24524+++ linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24525@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24526 */
24527 static int sata_pmp_eh_recover(struct ata_port *ap)
24528 {
24529- struct ata_port_operations *ops = ap->ops;
24530+ const struct ata_port_operations *ops = ap->ops;
24531 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24532 struct ata_link *pmp_link = &ap->link;
24533 struct ata_device *pmp_dev = pmp_link->device;
24534diff -urNp linux-2.6.32.45/drivers/ata/pata_acpi.c linux-2.6.32.45/drivers/ata/pata_acpi.c
24535--- linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24536+++ linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24537@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24538 ATA_BMDMA_SHT(DRV_NAME),
24539 };
24540
24541-static struct ata_port_operations pacpi_ops = {
24542+static const struct ata_port_operations pacpi_ops = {
24543 .inherits = &ata_bmdma_port_ops,
24544 .qc_issue = pacpi_qc_issue,
24545 .cable_detect = pacpi_cable_detect,
24546diff -urNp linux-2.6.32.45/drivers/ata/pata_ali.c linux-2.6.32.45/drivers/ata/pata_ali.c
24547--- linux-2.6.32.45/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24548+++ linux-2.6.32.45/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24549@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24550 * Port operations for PIO only ALi
24551 */
24552
24553-static struct ata_port_operations ali_early_port_ops = {
24554+static const struct ata_port_operations ali_early_port_ops = {
24555 .inherits = &ata_sff_port_ops,
24556 .cable_detect = ata_cable_40wire,
24557 .set_piomode = ali_set_piomode,
24558@@ -382,7 +382,7 @@ static const struct ata_port_operations
24559 * Port operations for DMA capable ALi without cable
24560 * detect
24561 */
24562-static struct ata_port_operations ali_20_port_ops = {
24563+static const struct ata_port_operations ali_20_port_ops = {
24564 .inherits = &ali_dma_base_ops,
24565 .cable_detect = ata_cable_40wire,
24566 .mode_filter = ali_20_filter,
24567@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24568 /*
24569 * Port operations for DMA capable ALi with cable detect
24570 */
24571-static struct ata_port_operations ali_c2_port_ops = {
24572+static const struct ata_port_operations ali_c2_port_ops = {
24573 .inherits = &ali_dma_base_ops,
24574 .check_atapi_dma = ali_check_atapi_dma,
24575 .cable_detect = ali_c2_cable_detect,
24576@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24577 /*
24578 * Port operations for DMA capable ALi with cable detect
24579 */
24580-static struct ata_port_operations ali_c4_port_ops = {
24581+static const struct ata_port_operations ali_c4_port_ops = {
24582 .inherits = &ali_dma_base_ops,
24583 .check_atapi_dma = ali_check_atapi_dma,
24584 .cable_detect = ali_c2_cable_detect,
24585@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24586 /*
24587 * Port operations for DMA capable ALi with cable detect and LBA48
24588 */
24589-static struct ata_port_operations ali_c5_port_ops = {
24590+static const struct ata_port_operations ali_c5_port_ops = {
24591 .inherits = &ali_dma_base_ops,
24592 .check_atapi_dma = ali_check_atapi_dma,
24593 .dev_config = ali_warn_atapi_dma,
24594diff -urNp linux-2.6.32.45/drivers/ata/pata_amd.c linux-2.6.32.45/drivers/ata/pata_amd.c
24595--- linux-2.6.32.45/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24596+++ linux-2.6.32.45/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24597@@ -397,28 +397,28 @@ static const struct ata_port_operations
24598 .prereset = amd_pre_reset,
24599 };
24600
24601-static struct ata_port_operations amd33_port_ops = {
24602+static const struct ata_port_operations amd33_port_ops = {
24603 .inherits = &amd_base_port_ops,
24604 .cable_detect = ata_cable_40wire,
24605 .set_piomode = amd33_set_piomode,
24606 .set_dmamode = amd33_set_dmamode,
24607 };
24608
24609-static struct ata_port_operations amd66_port_ops = {
24610+static const struct ata_port_operations amd66_port_ops = {
24611 .inherits = &amd_base_port_ops,
24612 .cable_detect = ata_cable_unknown,
24613 .set_piomode = amd66_set_piomode,
24614 .set_dmamode = amd66_set_dmamode,
24615 };
24616
24617-static struct ata_port_operations amd100_port_ops = {
24618+static const struct ata_port_operations amd100_port_ops = {
24619 .inherits = &amd_base_port_ops,
24620 .cable_detect = ata_cable_unknown,
24621 .set_piomode = amd100_set_piomode,
24622 .set_dmamode = amd100_set_dmamode,
24623 };
24624
24625-static struct ata_port_operations amd133_port_ops = {
24626+static const struct ata_port_operations amd133_port_ops = {
24627 .inherits = &amd_base_port_ops,
24628 .cable_detect = amd_cable_detect,
24629 .set_piomode = amd133_set_piomode,
24630@@ -433,13 +433,13 @@ static const struct ata_port_operations
24631 .host_stop = nv_host_stop,
24632 };
24633
24634-static struct ata_port_operations nv100_port_ops = {
24635+static const struct ata_port_operations nv100_port_ops = {
24636 .inherits = &nv_base_port_ops,
24637 .set_piomode = nv100_set_piomode,
24638 .set_dmamode = nv100_set_dmamode,
24639 };
24640
24641-static struct ata_port_operations nv133_port_ops = {
24642+static const struct ata_port_operations nv133_port_ops = {
24643 .inherits = &nv_base_port_ops,
24644 .set_piomode = nv133_set_piomode,
24645 .set_dmamode = nv133_set_dmamode,
24646diff -urNp linux-2.6.32.45/drivers/ata/pata_artop.c linux-2.6.32.45/drivers/ata/pata_artop.c
24647--- linux-2.6.32.45/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24648+++ linux-2.6.32.45/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24649@@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24650 ATA_BMDMA_SHT(DRV_NAME),
24651 };
24652
24653-static struct ata_port_operations artop6210_ops = {
24654+static const struct ata_port_operations artop6210_ops = {
24655 .inherits = &ata_bmdma_port_ops,
24656 .cable_detect = ata_cable_40wire,
24657 .set_piomode = artop6210_set_piomode,
24658@@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24659 .qc_defer = artop6210_qc_defer,
24660 };
24661
24662-static struct ata_port_operations artop6260_ops = {
24663+static const struct ata_port_operations artop6260_ops = {
24664 .inherits = &ata_bmdma_port_ops,
24665 .cable_detect = artop6260_cable_detect,
24666 .set_piomode = artop6260_set_piomode,
24667diff -urNp linux-2.6.32.45/drivers/ata/pata_at32.c linux-2.6.32.45/drivers/ata/pata_at32.c
24668--- linux-2.6.32.45/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24669+++ linux-2.6.32.45/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24670@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24671 ATA_PIO_SHT(DRV_NAME),
24672 };
24673
24674-static struct ata_port_operations at32_port_ops = {
24675+static const struct ata_port_operations at32_port_ops = {
24676 .inherits = &ata_sff_port_ops,
24677 .cable_detect = ata_cable_40wire,
24678 .set_piomode = pata_at32_set_piomode,
24679diff -urNp linux-2.6.32.45/drivers/ata/pata_at91.c linux-2.6.32.45/drivers/ata/pata_at91.c
24680--- linux-2.6.32.45/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24681+++ linux-2.6.32.45/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24682@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
24683 ATA_PIO_SHT(DRV_NAME),
24684 };
24685
24686-static struct ata_port_operations pata_at91_port_ops = {
24687+static const struct ata_port_operations pata_at91_port_ops = {
24688 .inherits = &ata_sff_port_ops,
24689
24690 .sff_data_xfer = pata_at91_data_xfer_noirq,
24691diff -urNp linux-2.6.32.45/drivers/ata/pata_atiixp.c linux-2.6.32.45/drivers/ata/pata_atiixp.c
24692--- linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
24693+++ linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
24694@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
24695 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24696 };
24697
24698-static struct ata_port_operations atiixp_port_ops = {
24699+static const struct ata_port_operations atiixp_port_ops = {
24700 .inherits = &ata_bmdma_port_ops,
24701
24702 .qc_prep = ata_sff_dumb_qc_prep,
24703diff -urNp linux-2.6.32.45/drivers/ata/pata_atp867x.c linux-2.6.32.45/drivers/ata/pata_atp867x.c
24704--- linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
24705+++ linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
24706@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
24707 ATA_BMDMA_SHT(DRV_NAME),
24708 };
24709
24710-static struct ata_port_operations atp867x_ops = {
24711+static const struct ata_port_operations atp867x_ops = {
24712 .inherits = &ata_bmdma_port_ops,
24713 .cable_detect = atp867x_cable_detect,
24714 .set_piomode = atp867x_set_piomode,
24715diff -urNp linux-2.6.32.45/drivers/ata/pata_bf54x.c linux-2.6.32.45/drivers/ata/pata_bf54x.c
24716--- linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
24717+++ linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
24718@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
24719 .dma_boundary = ATA_DMA_BOUNDARY,
24720 };
24721
24722-static struct ata_port_operations bfin_pata_ops = {
24723+static const struct ata_port_operations bfin_pata_ops = {
24724 .inherits = &ata_sff_port_ops,
24725
24726 .set_piomode = bfin_set_piomode,
24727diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd640.c linux-2.6.32.45/drivers/ata/pata_cmd640.c
24728--- linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
24729+++ linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
24730@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
24731 ATA_BMDMA_SHT(DRV_NAME),
24732 };
24733
24734-static struct ata_port_operations cmd640_port_ops = {
24735+static const struct ata_port_operations cmd640_port_ops = {
24736 .inherits = &ata_bmdma_port_ops,
24737 /* In theory xfer_noirq is not needed once we kill the prefetcher */
24738 .sff_data_xfer = ata_sff_data_xfer_noirq,
24739diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd64x.c linux-2.6.32.45/drivers/ata/pata_cmd64x.c
24740--- linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
24741+++ linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
24742@@ -271,18 +271,18 @@ static const struct ata_port_operations
24743 .set_dmamode = cmd64x_set_dmamode,
24744 };
24745
24746-static struct ata_port_operations cmd64x_port_ops = {
24747+static const struct ata_port_operations cmd64x_port_ops = {
24748 .inherits = &cmd64x_base_ops,
24749 .cable_detect = ata_cable_40wire,
24750 };
24751
24752-static struct ata_port_operations cmd646r1_port_ops = {
24753+static const struct ata_port_operations cmd646r1_port_ops = {
24754 .inherits = &cmd64x_base_ops,
24755 .bmdma_stop = cmd646r1_bmdma_stop,
24756 .cable_detect = ata_cable_40wire,
24757 };
24758
24759-static struct ata_port_operations cmd648_port_ops = {
24760+static const struct ata_port_operations cmd648_port_ops = {
24761 .inherits = &cmd64x_base_ops,
24762 .bmdma_stop = cmd648_bmdma_stop,
24763 .cable_detect = cmd648_cable_detect,
24764diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5520.c linux-2.6.32.45/drivers/ata/pata_cs5520.c
24765--- linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
24766+++ linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
24767@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
24768 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24769 };
24770
24771-static struct ata_port_operations cs5520_port_ops = {
24772+static const struct ata_port_operations cs5520_port_ops = {
24773 .inherits = &ata_bmdma_port_ops,
24774 .qc_prep = ata_sff_dumb_qc_prep,
24775 .cable_detect = ata_cable_40wire,
24776diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5530.c linux-2.6.32.45/drivers/ata/pata_cs5530.c
24777--- linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
24778+++ linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
24779@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
24780 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24781 };
24782
24783-static struct ata_port_operations cs5530_port_ops = {
24784+static const struct ata_port_operations cs5530_port_ops = {
24785 .inherits = &ata_bmdma_port_ops,
24786
24787 .qc_prep = ata_sff_dumb_qc_prep,
24788diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5535.c linux-2.6.32.45/drivers/ata/pata_cs5535.c
24789--- linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
24790+++ linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
24791@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
24792 ATA_BMDMA_SHT(DRV_NAME),
24793 };
24794
24795-static struct ata_port_operations cs5535_port_ops = {
24796+static const struct ata_port_operations cs5535_port_ops = {
24797 .inherits = &ata_bmdma_port_ops,
24798 .cable_detect = cs5535_cable_detect,
24799 .set_piomode = cs5535_set_piomode,
24800diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5536.c linux-2.6.32.45/drivers/ata/pata_cs5536.c
24801--- linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
24802+++ linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
24803@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
24804 ATA_BMDMA_SHT(DRV_NAME),
24805 };
24806
24807-static struct ata_port_operations cs5536_port_ops = {
24808+static const struct ata_port_operations cs5536_port_ops = {
24809 .inherits = &ata_bmdma_port_ops,
24810 .cable_detect = cs5536_cable_detect,
24811 .set_piomode = cs5536_set_piomode,
24812diff -urNp linux-2.6.32.45/drivers/ata/pata_cypress.c linux-2.6.32.45/drivers/ata/pata_cypress.c
24813--- linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
24814+++ linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
24815@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
24816 ATA_BMDMA_SHT(DRV_NAME),
24817 };
24818
24819-static struct ata_port_operations cy82c693_port_ops = {
24820+static const struct ata_port_operations cy82c693_port_ops = {
24821 .inherits = &ata_bmdma_port_ops,
24822 .cable_detect = ata_cable_40wire,
24823 .set_piomode = cy82c693_set_piomode,
24824diff -urNp linux-2.6.32.45/drivers/ata/pata_efar.c linux-2.6.32.45/drivers/ata/pata_efar.c
24825--- linux-2.6.32.45/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
24826+++ linux-2.6.32.45/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
24827@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
24828 ATA_BMDMA_SHT(DRV_NAME),
24829 };
24830
24831-static struct ata_port_operations efar_ops = {
24832+static const struct ata_port_operations efar_ops = {
24833 .inherits = &ata_bmdma_port_ops,
24834 .cable_detect = efar_cable_detect,
24835 .set_piomode = efar_set_piomode,
24836diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt366.c linux-2.6.32.45/drivers/ata/pata_hpt366.c
24837--- linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
24838+++ linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
24839@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
24840 * Configuration for HPT366/68
24841 */
24842
24843-static struct ata_port_operations hpt366_port_ops = {
24844+static const struct ata_port_operations hpt366_port_ops = {
24845 .inherits = &ata_bmdma_port_ops,
24846 .cable_detect = hpt36x_cable_detect,
24847 .mode_filter = hpt366_filter,
24848diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt37x.c linux-2.6.32.45/drivers/ata/pata_hpt37x.c
24849--- linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
24850+++ linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
24851@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
24852 * Configuration for HPT370
24853 */
24854
24855-static struct ata_port_operations hpt370_port_ops = {
24856+static const struct ata_port_operations hpt370_port_ops = {
24857 .inherits = &ata_bmdma_port_ops,
24858
24859 .bmdma_stop = hpt370_bmdma_stop,
24860@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
24861 * Configuration for HPT370A. Close to 370 but less filters
24862 */
24863
24864-static struct ata_port_operations hpt370a_port_ops = {
24865+static const struct ata_port_operations hpt370a_port_ops = {
24866 .inherits = &hpt370_port_ops,
24867 .mode_filter = hpt370a_filter,
24868 };
24869@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
24870 * and DMA mode setting functionality.
24871 */
24872
24873-static struct ata_port_operations hpt372_port_ops = {
24874+static const struct ata_port_operations hpt372_port_ops = {
24875 .inherits = &ata_bmdma_port_ops,
24876
24877 .bmdma_stop = hpt37x_bmdma_stop,
24878@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
24879 * but we have a different cable detection procedure for function 1.
24880 */
24881
24882-static struct ata_port_operations hpt374_fn1_port_ops = {
24883+static const struct ata_port_operations hpt374_fn1_port_ops = {
24884 .inherits = &hpt372_port_ops,
24885 .prereset = hpt374_fn1_pre_reset,
24886 };
24887diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c
24888--- linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
24889+++ linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
24890@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
24891 * Configuration for HPT3x2n.
24892 */
24893
24894-static struct ata_port_operations hpt3x2n_port_ops = {
24895+static const struct ata_port_operations hpt3x2n_port_ops = {
24896 .inherits = &ata_bmdma_port_ops,
24897
24898 .bmdma_stop = hpt3x2n_bmdma_stop,
24899diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x3.c linux-2.6.32.45/drivers/ata/pata_hpt3x3.c
24900--- linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
24901+++ linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
24902@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
24903 ATA_BMDMA_SHT(DRV_NAME),
24904 };
24905
24906-static struct ata_port_operations hpt3x3_port_ops = {
24907+static const struct ata_port_operations hpt3x3_port_ops = {
24908 .inherits = &ata_bmdma_port_ops,
24909 .cable_detect = ata_cable_40wire,
24910 .set_piomode = hpt3x3_set_piomode,
24911diff -urNp linux-2.6.32.45/drivers/ata/pata_icside.c linux-2.6.32.45/drivers/ata/pata_icside.c
24912--- linux-2.6.32.45/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
24913+++ linux-2.6.32.45/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
24914@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
24915 }
24916 }
24917
24918-static struct ata_port_operations pata_icside_port_ops = {
24919+static const struct ata_port_operations pata_icside_port_ops = {
24920 .inherits = &ata_sff_port_ops,
24921 /* no need to build any PRD tables for DMA */
24922 .qc_prep = ata_noop_qc_prep,
24923diff -urNp linux-2.6.32.45/drivers/ata/pata_isapnp.c linux-2.6.32.45/drivers/ata/pata_isapnp.c
24924--- linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
24925+++ linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
24926@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
24927 ATA_PIO_SHT(DRV_NAME),
24928 };
24929
24930-static struct ata_port_operations isapnp_port_ops = {
24931+static const struct ata_port_operations isapnp_port_ops = {
24932 .inherits = &ata_sff_port_ops,
24933 .cable_detect = ata_cable_40wire,
24934 };
24935
24936-static struct ata_port_operations isapnp_noalt_port_ops = {
24937+static const struct ata_port_operations isapnp_noalt_port_ops = {
24938 .inherits = &ata_sff_port_ops,
24939 .cable_detect = ata_cable_40wire,
24940 /* No altstatus so we don't want to use the lost interrupt poll */
24941diff -urNp linux-2.6.32.45/drivers/ata/pata_it8213.c linux-2.6.32.45/drivers/ata/pata_it8213.c
24942--- linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
24943+++ linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
24944@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
24945 };
24946
24947
24948-static struct ata_port_operations it8213_ops = {
24949+static const struct ata_port_operations it8213_ops = {
24950 .inherits = &ata_bmdma_port_ops,
24951 .cable_detect = it8213_cable_detect,
24952 .set_piomode = it8213_set_piomode,
24953diff -urNp linux-2.6.32.45/drivers/ata/pata_it821x.c linux-2.6.32.45/drivers/ata/pata_it821x.c
24954--- linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
24955+++ linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
24956@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
24957 ATA_BMDMA_SHT(DRV_NAME),
24958 };
24959
24960-static struct ata_port_operations it821x_smart_port_ops = {
24961+static const struct ata_port_operations it821x_smart_port_ops = {
24962 .inherits = &ata_bmdma_port_ops,
24963
24964 .check_atapi_dma= it821x_check_atapi_dma,
24965@@ -814,7 +814,7 @@ static struct ata_port_operations it821x
24966 .port_start = it821x_port_start,
24967 };
24968
24969-static struct ata_port_operations it821x_passthru_port_ops = {
24970+static const struct ata_port_operations it821x_passthru_port_ops = {
24971 .inherits = &ata_bmdma_port_ops,
24972
24973 .check_atapi_dma= it821x_check_atapi_dma,
24974@@ -830,7 +830,7 @@ static struct ata_port_operations it821x
24975 .port_start = it821x_port_start,
24976 };
24977
24978-static struct ata_port_operations it821x_rdc_port_ops = {
24979+static const struct ata_port_operations it821x_rdc_port_ops = {
24980 .inherits = &ata_bmdma_port_ops,
24981
24982 .check_atapi_dma= it821x_check_atapi_dma,
24983diff -urNp linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c
24984--- linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
24985+++ linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
24986@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
24987 ATA_PIO_SHT(DRV_NAME),
24988 };
24989
24990-static struct ata_port_operations ixp4xx_port_ops = {
24991+static const struct ata_port_operations ixp4xx_port_ops = {
24992 .inherits = &ata_sff_port_ops,
24993 .sff_data_xfer = ixp4xx_mmio_data_xfer,
24994 .cable_detect = ata_cable_40wire,
24995diff -urNp linux-2.6.32.45/drivers/ata/pata_jmicron.c linux-2.6.32.45/drivers/ata/pata_jmicron.c
24996--- linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
24997+++ linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
24998@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
24999 ATA_BMDMA_SHT(DRV_NAME),
25000 };
25001
25002-static struct ata_port_operations jmicron_ops = {
25003+static const struct ata_port_operations jmicron_ops = {
25004 .inherits = &ata_bmdma_port_ops,
25005 .prereset = jmicron_pre_reset,
25006 };
25007diff -urNp linux-2.6.32.45/drivers/ata/pata_legacy.c linux-2.6.32.45/drivers/ata/pata_legacy.c
25008--- linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
25009+++ linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
25010@@ -106,7 +106,7 @@ struct legacy_probe {
25011
25012 struct legacy_controller {
25013 const char *name;
25014- struct ata_port_operations *ops;
25015+ const struct ata_port_operations *ops;
25016 unsigned int pio_mask;
25017 unsigned int flags;
25018 unsigned int pflags;
25019@@ -223,12 +223,12 @@ static const struct ata_port_operations
25020 * pio_mask as well.
25021 */
25022
25023-static struct ata_port_operations simple_port_ops = {
25024+static const struct ata_port_operations simple_port_ops = {
25025 .inherits = &legacy_base_port_ops,
25026 .sff_data_xfer = ata_sff_data_xfer_noirq,
25027 };
25028
25029-static struct ata_port_operations legacy_port_ops = {
25030+static const struct ata_port_operations legacy_port_ops = {
25031 .inherits = &legacy_base_port_ops,
25032 .sff_data_xfer = ata_sff_data_xfer_noirq,
25033 .set_mode = legacy_set_mode,
25034@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
25035 return buflen;
25036 }
25037
25038-static struct ata_port_operations pdc20230_port_ops = {
25039+static const struct ata_port_operations pdc20230_port_ops = {
25040 .inherits = &legacy_base_port_ops,
25041 .set_piomode = pdc20230_set_piomode,
25042 .sff_data_xfer = pdc_data_xfer_vlb,
25043@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
25044 ioread8(ap->ioaddr.status_addr);
25045 }
25046
25047-static struct ata_port_operations ht6560a_port_ops = {
25048+static const struct ata_port_operations ht6560a_port_ops = {
25049 .inherits = &legacy_base_port_ops,
25050 .set_piomode = ht6560a_set_piomode,
25051 };
25052@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
25053 ioread8(ap->ioaddr.status_addr);
25054 }
25055
25056-static struct ata_port_operations ht6560b_port_ops = {
25057+static const struct ata_port_operations ht6560b_port_ops = {
25058 .inherits = &legacy_base_port_ops,
25059 .set_piomode = ht6560b_set_piomode,
25060 };
25061@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
25062 }
25063
25064
25065-static struct ata_port_operations opti82c611a_port_ops = {
25066+static const struct ata_port_operations opti82c611a_port_ops = {
25067 .inherits = &legacy_base_port_ops,
25068 .set_piomode = opti82c611a_set_piomode,
25069 };
25070@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
25071 return ata_sff_qc_issue(qc);
25072 }
25073
25074-static struct ata_port_operations opti82c46x_port_ops = {
25075+static const struct ata_port_operations opti82c46x_port_ops = {
25076 .inherits = &legacy_base_port_ops,
25077 .set_piomode = opti82c46x_set_piomode,
25078 .qc_issue = opti82c46x_qc_issue,
25079@@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
25080 return 0;
25081 }
25082
25083-static struct ata_port_operations qdi6500_port_ops = {
25084+static const struct ata_port_operations qdi6500_port_ops = {
25085 .inherits = &legacy_base_port_ops,
25086 .set_piomode = qdi6500_set_piomode,
25087 .qc_issue = qdi_qc_issue,
25088 .sff_data_xfer = vlb32_data_xfer,
25089 };
25090
25091-static struct ata_port_operations qdi6580_port_ops = {
25092+static const struct ata_port_operations qdi6580_port_ops = {
25093 .inherits = &legacy_base_port_ops,
25094 .set_piomode = qdi6580_set_piomode,
25095 .sff_data_xfer = vlb32_data_xfer,
25096 };
25097
25098-static struct ata_port_operations qdi6580dp_port_ops = {
25099+static const struct ata_port_operations qdi6580dp_port_ops = {
25100 .inherits = &legacy_base_port_ops,
25101 .set_piomode = qdi6580dp_set_piomode,
25102 .sff_data_xfer = vlb32_data_xfer,
25103@@ -855,7 +855,7 @@ static int winbond_port(struct platform_
25104 return 0;
25105 }
25106
25107-static struct ata_port_operations winbond_port_ops = {
25108+static const struct ata_port_operations winbond_port_ops = {
25109 .inherits = &legacy_base_port_ops,
25110 .set_piomode = winbond_set_piomode,
25111 .sff_data_xfer = vlb32_data_xfer,
25112@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
25113 int pio_modes = controller->pio_mask;
25114 unsigned long io = probe->port;
25115 u32 mask = (1 << probe->slot);
25116- struct ata_port_operations *ops = controller->ops;
25117+ const struct ata_port_operations *ops = controller->ops;
25118 struct legacy_data *ld = &legacy_data[probe->slot];
25119 struct ata_host *host = NULL;
25120 struct ata_port *ap;
25121diff -urNp linux-2.6.32.45/drivers/ata/pata_marvell.c linux-2.6.32.45/drivers/ata/pata_marvell.c
25122--- linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
25123+++ linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
25124@@ -100,7 +100,7 @@ static struct scsi_host_template marvell
25125 ATA_BMDMA_SHT(DRV_NAME),
25126 };
25127
25128-static struct ata_port_operations marvell_ops = {
25129+static const struct ata_port_operations marvell_ops = {
25130 .inherits = &ata_bmdma_port_ops,
25131 .cable_detect = marvell_cable_detect,
25132 .prereset = marvell_pre_reset,
25133diff -urNp linux-2.6.32.45/drivers/ata/pata_mpc52xx.c linux-2.6.32.45/drivers/ata/pata_mpc52xx.c
25134--- linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
25135+++ linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
25136@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
25137 ATA_PIO_SHT(DRV_NAME),
25138 };
25139
25140-static struct ata_port_operations mpc52xx_ata_port_ops = {
25141+static const struct ata_port_operations mpc52xx_ata_port_ops = {
25142 .inherits = &ata_bmdma_port_ops,
25143 .sff_dev_select = mpc52xx_ata_dev_select,
25144 .set_piomode = mpc52xx_ata_set_piomode,
25145diff -urNp linux-2.6.32.45/drivers/ata/pata_mpiix.c linux-2.6.32.45/drivers/ata/pata_mpiix.c
25146--- linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
25147+++ linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
25148@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
25149 ATA_PIO_SHT(DRV_NAME),
25150 };
25151
25152-static struct ata_port_operations mpiix_port_ops = {
25153+static const struct ata_port_operations mpiix_port_ops = {
25154 .inherits = &ata_sff_port_ops,
25155 .qc_issue = mpiix_qc_issue,
25156 .cable_detect = ata_cable_40wire,
25157diff -urNp linux-2.6.32.45/drivers/ata/pata_netcell.c linux-2.6.32.45/drivers/ata/pata_netcell.c
25158--- linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
25159+++ linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
25160@@ -34,7 +34,7 @@ static struct scsi_host_template netcell
25161 ATA_BMDMA_SHT(DRV_NAME),
25162 };
25163
25164-static struct ata_port_operations netcell_ops = {
25165+static const struct ata_port_operations netcell_ops = {
25166 .inherits = &ata_bmdma_port_ops,
25167 .cable_detect = ata_cable_80wire,
25168 .read_id = netcell_read_id,
25169diff -urNp linux-2.6.32.45/drivers/ata/pata_ninja32.c linux-2.6.32.45/drivers/ata/pata_ninja32.c
25170--- linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
25171+++ linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
25172@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
25173 ATA_BMDMA_SHT(DRV_NAME),
25174 };
25175
25176-static struct ata_port_operations ninja32_port_ops = {
25177+static const struct ata_port_operations ninja32_port_ops = {
25178 .inherits = &ata_bmdma_port_ops,
25179 .sff_dev_select = ninja32_dev_select,
25180 .cable_detect = ata_cable_40wire,
25181diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87410.c linux-2.6.32.45/drivers/ata/pata_ns87410.c
25182--- linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
25183+++ linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
25184@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
25185 ATA_PIO_SHT(DRV_NAME),
25186 };
25187
25188-static struct ata_port_operations ns87410_port_ops = {
25189+static const struct ata_port_operations ns87410_port_ops = {
25190 .inherits = &ata_sff_port_ops,
25191 .qc_issue = ns87410_qc_issue,
25192 .cable_detect = ata_cable_40wire,
25193diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87415.c linux-2.6.32.45/drivers/ata/pata_ns87415.c
25194--- linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
25195+++ linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
25196@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
25197 }
25198 #endif /* 87560 SuperIO Support */
25199
25200-static struct ata_port_operations ns87415_pata_ops = {
25201+static const struct ata_port_operations ns87415_pata_ops = {
25202 .inherits = &ata_bmdma_port_ops,
25203
25204 .check_atapi_dma = ns87415_check_atapi_dma,
25205@@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
25206 };
25207
25208 #if defined(CONFIG_SUPERIO)
25209-static struct ata_port_operations ns87560_pata_ops = {
25210+static const struct ata_port_operations ns87560_pata_ops = {
25211 .inherits = &ns87415_pata_ops,
25212 .sff_tf_read = ns87560_tf_read,
25213 .sff_check_status = ns87560_check_status,
25214diff -urNp linux-2.6.32.45/drivers/ata/pata_octeon_cf.c linux-2.6.32.45/drivers/ata/pata_octeon_cf.c
25215--- linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25216+++ linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25217@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25218 return 0;
25219 }
25220
25221+/* cannot be const */
25222 static struct ata_port_operations octeon_cf_ops = {
25223 .inherits = &ata_sff_port_ops,
25224 .check_atapi_dma = octeon_cf_check_atapi_dma,
25225diff -urNp linux-2.6.32.45/drivers/ata/pata_oldpiix.c linux-2.6.32.45/drivers/ata/pata_oldpiix.c
25226--- linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25227+++ linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25228@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25229 ATA_BMDMA_SHT(DRV_NAME),
25230 };
25231
25232-static struct ata_port_operations oldpiix_pata_ops = {
25233+static const struct ata_port_operations oldpiix_pata_ops = {
25234 .inherits = &ata_bmdma_port_ops,
25235 .qc_issue = oldpiix_qc_issue,
25236 .cable_detect = ata_cable_40wire,
25237diff -urNp linux-2.6.32.45/drivers/ata/pata_opti.c linux-2.6.32.45/drivers/ata/pata_opti.c
25238--- linux-2.6.32.45/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25239+++ linux-2.6.32.45/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25240@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25241 ATA_PIO_SHT(DRV_NAME),
25242 };
25243
25244-static struct ata_port_operations opti_port_ops = {
25245+static const struct ata_port_operations opti_port_ops = {
25246 .inherits = &ata_sff_port_ops,
25247 .cable_detect = ata_cable_40wire,
25248 .set_piomode = opti_set_piomode,
25249diff -urNp linux-2.6.32.45/drivers/ata/pata_optidma.c linux-2.6.32.45/drivers/ata/pata_optidma.c
25250--- linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25251+++ linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25252@@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25253 ATA_BMDMA_SHT(DRV_NAME),
25254 };
25255
25256-static struct ata_port_operations optidma_port_ops = {
25257+static const struct ata_port_operations optidma_port_ops = {
25258 .inherits = &ata_bmdma_port_ops,
25259 .cable_detect = ata_cable_40wire,
25260 .set_piomode = optidma_set_pio_mode,
25261@@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25262 .prereset = optidma_pre_reset,
25263 };
25264
25265-static struct ata_port_operations optiplus_port_ops = {
25266+static const struct ata_port_operations optiplus_port_ops = {
25267 .inherits = &optidma_port_ops,
25268 .set_piomode = optiplus_set_pio_mode,
25269 .set_dmamode = optiplus_set_dma_mode,
25270diff -urNp linux-2.6.32.45/drivers/ata/pata_palmld.c linux-2.6.32.45/drivers/ata/pata_palmld.c
25271--- linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25272+++ linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25273@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25274 ATA_PIO_SHT(DRV_NAME),
25275 };
25276
25277-static struct ata_port_operations palmld_port_ops = {
25278+static const struct ata_port_operations palmld_port_ops = {
25279 .inherits = &ata_sff_port_ops,
25280 .sff_data_xfer = ata_sff_data_xfer_noirq,
25281 .cable_detect = ata_cable_40wire,
25282diff -urNp linux-2.6.32.45/drivers/ata/pata_pcmcia.c linux-2.6.32.45/drivers/ata/pata_pcmcia.c
25283--- linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25284+++ linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25285@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25286 ATA_PIO_SHT(DRV_NAME),
25287 };
25288
25289-static struct ata_port_operations pcmcia_port_ops = {
25290+static const struct ata_port_operations pcmcia_port_ops = {
25291 .inherits = &ata_sff_port_ops,
25292 .sff_data_xfer = ata_sff_data_xfer_noirq,
25293 .cable_detect = ata_cable_40wire,
25294 .set_mode = pcmcia_set_mode,
25295 };
25296
25297-static struct ata_port_operations pcmcia_8bit_port_ops = {
25298+static const struct ata_port_operations pcmcia_8bit_port_ops = {
25299 .inherits = &ata_sff_port_ops,
25300 .sff_data_xfer = ata_data_xfer_8bit,
25301 .cable_detect = ata_cable_40wire,
25302@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25303 unsigned long io_base, ctl_base;
25304 void __iomem *io_addr, *ctl_addr;
25305 int n_ports = 1;
25306- struct ata_port_operations *ops = &pcmcia_port_ops;
25307+ const struct ata_port_operations *ops = &pcmcia_port_ops;
25308
25309 info = kzalloc(sizeof(*info), GFP_KERNEL);
25310 if (info == NULL)
25311diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc2027x.c linux-2.6.32.45/drivers/ata/pata_pdc2027x.c
25312--- linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25313+++ linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25314@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25315 ATA_BMDMA_SHT(DRV_NAME),
25316 };
25317
25318-static struct ata_port_operations pdc2027x_pata100_ops = {
25319+static const struct ata_port_operations pdc2027x_pata100_ops = {
25320 .inherits = &ata_bmdma_port_ops,
25321 .check_atapi_dma = pdc2027x_check_atapi_dma,
25322 .cable_detect = pdc2027x_cable_detect,
25323 .prereset = pdc2027x_prereset,
25324 };
25325
25326-static struct ata_port_operations pdc2027x_pata133_ops = {
25327+static const struct ata_port_operations pdc2027x_pata133_ops = {
25328 .inherits = &pdc2027x_pata100_ops,
25329 .mode_filter = pdc2027x_mode_filter,
25330 .set_piomode = pdc2027x_set_piomode,
25331diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c
25332--- linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25333+++ linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25334@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25335 ATA_BMDMA_SHT(DRV_NAME),
25336 };
25337
25338-static struct ata_port_operations pdc2024x_port_ops = {
25339+static const struct ata_port_operations pdc2024x_port_ops = {
25340 .inherits = &ata_bmdma_port_ops,
25341
25342 .cable_detect = ata_cable_40wire,
25343@@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25344 .sff_exec_command = pdc202xx_exec_command,
25345 };
25346
25347-static struct ata_port_operations pdc2026x_port_ops = {
25348+static const struct ata_port_operations pdc2026x_port_ops = {
25349 .inherits = &pdc2024x_port_ops,
25350
25351 .check_atapi_dma = pdc2026x_check_atapi_dma,
25352diff -urNp linux-2.6.32.45/drivers/ata/pata_platform.c linux-2.6.32.45/drivers/ata/pata_platform.c
25353--- linux-2.6.32.45/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25354+++ linux-2.6.32.45/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25355@@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25356 ATA_PIO_SHT(DRV_NAME),
25357 };
25358
25359-static struct ata_port_operations pata_platform_port_ops = {
25360+static const struct ata_port_operations pata_platform_port_ops = {
25361 .inherits = &ata_sff_port_ops,
25362 .sff_data_xfer = ata_sff_data_xfer_noirq,
25363 .cable_detect = ata_cable_unknown,
25364diff -urNp linux-2.6.32.45/drivers/ata/pata_qdi.c linux-2.6.32.45/drivers/ata/pata_qdi.c
25365--- linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25366+++ linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25367@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25368 ATA_PIO_SHT(DRV_NAME),
25369 };
25370
25371-static struct ata_port_operations qdi6500_port_ops = {
25372+static const struct ata_port_operations qdi6500_port_ops = {
25373 .inherits = &ata_sff_port_ops,
25374 .qc_issue = qdi_qc_issue,
25375 .sff_data_xfer = qdi_data_xfer,
25376@@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25377 .set_piomode = qdi6500_set_piomode,
25378 };
25379
25380-static struct ata_port_operations qdi6580_port_ops = {
25381+static const struct ata_port_operations qdi6580_port_ops = {
25382 .inherits = &qdi6500_port_ops,
25383 .set_piomode = qdi6580_set_piomode,
25384 };
25385diff -urNp linux-2.6.32.45/drivers/ata/pata_radisys.c linux-2.6.32.45/drivers/ata/pata_radisys.c
25386--- linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25387+++ linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25388@@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25389 ATA_BMDMA_SHT(DRV_NAME),
25390 };
25391
25392-static struct ata_port_operations radisys_pata_ops = {
25393+static const struct ata_port_operations radisys_pata_ops = {
25394 .inherits = &ata_bmdma_port_ops,
25395 .qc_issue = radisys_qc_issue,
25396 .cable_detect = ata_cable_unknown,
25397diff -urNp linux-2.6.32.45/drivers/ata/pata_rb532_cf.c linux-2.6.32.45/drivers/ata/pata_rb532_cf.c
25398--- linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25399+++ linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25400@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25401 return IRQ_HANDLED;
25402 }
25403
25404-static struct ata_port_operations rb532_pata_port_ops = {
25405+static const struct ata_port_operations rb532_pata_port_ops = {
25406 .inherits = &ata_sff_port_ops,
25407 .sff_data_xfer = ata_sff_data_xfer32,
25408 };
25409diff -urNp linux-2.6.32.45/drivers/ata/pata_rdc.c linux-2.6.32.45/drivers/ata/pata_rdc.c
25410--- linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25411+++ linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25412@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25413 pci_write_config_byte(dev, 0x48, udma_enable);
25414 }
25415
25416-static struct ata_port_operations rdc_pata_ops = {
25417+static const struct ata_port_operations rdc_pata_ops = {
25418 .inherits = &ata_bmdma32_port_ops,
25419 .cable_detect = rdc_pata_cable_detect,
25420 .set_piomode = rdc_set_piomode,
25421diff -urNp linux-2.6.32.45/drivers/ata/pata_rz1000.c linux-2.6.32.45/drivers/ata/pata_rz1000.c
25422--- linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25423+++ linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25424@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25425 ATA_PIO_SHT(DRV_NAME),
25426 };
25427
25428-static struct ata_port_operations rz1000_port_ops = {
25429+static const struct ata_port_operations rz1000_port_ops = {
25430 .inherits = &ata_sff_port_ops,
25431 .cable_detect = ata_cable_40wire,
25432 .set_mode = rz1000_set_mode,
25433diff -urNp linux-2.6.32.45/drivers/ata/pata_sc1200.c linux-2.6.32.45/drivers/ata/pata_sc1200.c
25434--- linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25435+++ linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25436@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25437 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25438 };
25439
25440-static struct ata_port_operations sc1200_port_ops = {
25441+static const struct ata_port_operations sc1200_port_ops = {
25442 .inherits = &ata_bmdma_port_ops,
25443 .qc_prep = ata_sff_dumb_qc_prep,
25444 .qc_issue = sc1200_qc_issue,
25445diff -urNp linux-2.6.32.45/drivers/ata/pata_scc.c linux-2.6.32.45/drivers/ata/pata_scc.c
25446--- linux-2.6.32.45/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25447+++ linux-2.6.32.45/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25448@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25449 ATA_BMDMA_SHT(DRV_NAME),
25450 };
25451
25452-static struct ata_port_operations scc_pata_ops = {
25453+static const struct ata_port_operations scc_pata_ops = {
25454 .inherits = &ata_bmdma_port_ops,
25455
25456 .set_piomode = scc_set_piomode,
25457diff -urNp linux-2.6.32.45/drivers/ata/pata_sch.c linux-2.6.32.45/drivers/ata/pata_sch.c
25458--- linux-2.6.32.45/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25459+++ linux-2.6.32.45/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25460@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25461 ATA_BMDMA_SHT(DRV_NAME),
25462 };
25463
25464-static struct ata_port_operations sch_pata_ops = {
25465+static const struct ata_port_operations sch_pata_ops = {
25466 .inherits = &ata_bmdma_port_ops,
25467 .cable_detect = ata_cable_unknown,
25468 .set_piomode = sch_set_piomode,
25469diff -urNp linux-2.6.32.45/drivers/ata/pata_serverworks.c linux-2.6.32.45/drivers/ata/pata_serverworks.c
25470--- linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25471+++ linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25472@@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25473 ATA_BMDMA_SHT(DRV_NAME),
25474 };
25475
25476-static struct ata_port_operations serverworks_osb4_port_ops = {
25477+static const struct ata_port_operations serverworks_osb4_port_ops = {
25478 .inherits = &ata_bmdma_port_ops,
25479 .cable_detect = serverworks_cable_detect,
25480 .mode_filter = serverworks_osb4_filter,
25481@@ -307,7 +307,7 @@ static struct ata_port_operations server
25482 .set_dmamode = serverworks_set_dmamode,
25483 };
25484
25485-static struct ata_port_operations serverworks_csb_port_ops = {
25486+static const struct ata_port_operations serverworks_csb_port_ops = {
25487 .inherits = &serverworks_osb4_port_ops,
25488 .mode_filter = serverworks_csb_filter,
25489 };
25490diff -urNp linux-2.6.32.45/drivers/ata/pata_sil680.c linux-2.6.32.45/drivers/ata/pata_sil680.c
25491--- linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25492+++ linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25493@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25494 ATA_BMDMA_SHT(DRV_NAME),
25495 };
25496
25497-static struct ata_port_operations sil680_port_ops = {
25498+static const struct ata_port_operations sil680_port_ops = {
25499 .inherits = &ata_bmdma32_port_ops,
25500 .cable_detect = sil680_cable_detect,
25501 .set_piomode = sil680_set_piomode,
25502diff -urNp linux-2.6.32.45/drivers/ata/pata_sis.c linux-2.6.32.45/drivers/ata/pata_sis.c
25503--- linux-2.6.32.45/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25504+++ linux-2.6.32.45/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25505@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25506 ATA_BMDMA_SHT(DRV_NAME),
25507 };
25508
25509-static struct ata_port_operations sis_133_for_sata_ops = {
25510+static const struct ata_port_operations sis_133_for_sata_ops = {
25511 .inherits = &ata_bmdma_port_ops,
25512 .set_piomode = sis_133_set_piomode,
25513 .set_dmamode = sis_133_set_dmamode,
25514 .cable_detect = sis_133_cable_detect,
25515 };
25516
25517-static struct ata_port_operations sis_base_ops = {
25518+static const struct ata_port_operations sis_base_ops = {
25519 .inherits = &ata_bmdma_port_ops,
25520 .prereset = sis_pre_reset,
25521 };
25522
25523-static struct ata_port_operations sis_133_ops = {
25524+static const struct ata_port_operations sis_133_ops = {
25525 .inherits = &sis_base_ops,
25526 .set_piomode = sis_133_set_piomode,
25527 .set_dmamode = sis_133_set_dmamode,
25528 .cable_detect = sis_133_cable_detect,
25529 };
25530
25531-static struct ata_port_operations sis_133_early_ops = {
25532+static const struct ata_port_operations sis_133_early_ops = {
25533 .inherits = &sis_base_ops,
25534 .set_piomode = sis_100_set_piomode,
25535 .set_dmamode = sis_133_early_set_dmamode,
25536 .cable_detect = sis_66_cable_detect,
25537 };
25538
25539-static struct ata_port_operations sis_100_ops = {
25540+static const struct ata_port_operations sis_100_ops = {
25541 .inherits = &sis_base_ops,
25542 .set_piomode = sis_100_set_piomode,
25543 .set_dmamode = sis_100_set_dmamode,
25544 .cable_detect = sis_66_cable_detect,
25545 };
25546
25547-static struct ata_port_operations sis_66_ops = {
25548+static const struct ata_port_operations sis_66_ops = {
25549 .inherits = &sis_base_ops,
25550 .set_piomode = sis_old_set_piomode,
25551 .set_dmamode = sis_66_set_dmamode,
25552 .cable_detect = sis_66_cable_detect,
25553 };
25554
25555-static struct ata_port_operations sis_old_ops = {
25556+static const struct ata_port_operations sis_old_ops = {
25557 .inherits = &sis_base_ops,
25558 .set_piomode = sis_old_set_piomode,
25559 .set_dmamode = sis_old_set_dmamode,
25560diff -urNp linux-2.6.32.45/drivers/ata/pata_sl82c105.c linux-2.6.32.45/drivers/ata/pata_sl82c105.c
25561--- linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25562+++ linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25563@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25564 ATA_BMDMA_SHT(DRV_NAME),
25565 };
25566
25567-static struct ata_port_operations sl82c105_port_ops = {
25568+static const struct ata_port_operations sl82c105_port_ops = {
25569 .inherits = &ata_bmdma_port_ops,
25570 .qc_defer = sl82c105_qc_defer,
25571 .bmdma_start = sl82c105_bmdma_start,
25572diff -urNp linux-2.6.32.45/drivers/ata/pata_triflex.c linux-2.6.32.45/drivers/ata/pata_triflex.c
25573--- linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25574+++ linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25575@@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25576 ATA_BMDMA_SHT(DRV_NAME),
25577 };
25578
25579-static struct ata_port_operations triflex_port_ops = {
25580+static const struct ata_port_operations triflex_port_ops = {
25581 .inherits = &ata_bmdma_port_ops,
25582 .bmdma_start = triflex_bmdma_start,
25583 .bmdma_stop = triflex_bmdma_stop,
25584diff -urNp linux-2.6.32.45/drivers/ata/pata_via.c linux-2.6.32.45/drivers/ata/pata_via.c
25585--- linux-2.6.32.45/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25586+++ linux-2.6.32.45/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25587@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25588 ATA_BMDMA_SHT(DRV_NAME),
25589 };
25590
25591-static struct ata_port_operations via_port_ops = {
25592+static const struct ata_port_operations via_port_ops = {
25593 .inherits = &ata_bmdma_port_ops,
25594 .cable_detect = via_cable_detect,
25595 .set_piomode = via_set_piomode,
25596@@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25597 .port_start = via_port_start,
25598 };
25599
25600-static struct ata_port_operations via_port_ops_noirq = {
25601+static const struct ata_port_operations via_port_ops_noirq = {
25602 .inherits = &via_port_ops,
25603 .sff_data_xfer = ata_sff_data_xfer_noirq,
25604 };
25605diff -urNp linux-2.6.32.45/drivers/ata/pata_winbond.c linux-2.6.32.45/drivers/ata/pata_winbond.c
25606--- linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25607+++ linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25608@@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25609 ATA_PIO_SHT(DRV_NAME),
25610 };
25611
25612-static struct ata_port_operations winbond_port_ops = {
25613+static const struct ata_port_operations winbond_port_ops = {
25614 .inherits = &ata_sff_port_ops,
25615 .sff_data_xfer = winbond_data_xfer,
25616 .cable_detect = ata_cable_40wire,
25617diff -urNp linux-2.6.32.45/drivers/ata/pdc_adma.c linux-2.6.32.45/drivers/ata/pdc_adma.c
25618--- linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25619+++ linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25620@@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25621 .dma_boundary = ADMA_DMA_BOUNDARY,
25622 };
25623
25624-static struct ata_port_operations adma_ata_ops = {
25625+static const struct ata_port_operations adma_ata_ops = {
25626 .inherits = &ata_sff_port_ops,
25627
25628 .lost_interrupt = ATA_OP_NULL,
25629diff -urNp linux-2.6.32.45/drivers/ata/sata_fsl.c linux-2.6.32.45/drivers/ata/sata_fsl.c
25630--- linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25631+++ linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25632@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25633 .dma_boundary = ATA_DMA_BOUNDARY,
25634 };
25635
25636-static struct ata_port_operations sata_fsl_ops = {
25637+static const struct ata_port_operations sata_fsl_ops = {
25638 .inherits = &sata_pmp_port_ops,
25639
25640 .qc_defer = ata_std_qc_defer,
25641diff -urNp linux-2.6.32.45/drivers/ata/sata_inic162x.c linux-2.6.32.45/drivers/ata/sata_inic162x.c
25642--- linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25643+++ linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25644@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25645 return 0;
25646 }
25647
25648-static struct ata_port_operations inic_port_ops = {
25649+static const struct ata_port_operations inic_port_ops = {
25650 .inherits = &sata_port_ops,
25651
25652 .check_atapi_dma = inic_check_atapi_dma,
25653diff -urNp linux-2.6.32.45/drivers/ata/sata_mv.c linux-2.6.32.45/drivers/ata/sata_mv.c
25654--- linux-2.6.32.45/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25655+++ linux-2.6.32.45/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25656@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25657 .dma_boundary = MV_DMA_BOUNDARY,
25658 };
25659
25660-static struct ata_port_operations mv5_ops = {
25661+static const struct ata_port_operations mv5_ops = {
25662 .inherits = &ata_sff_port_ops,
25663
25664 .lost_interrupt = ATA_OP_NULL,
25665@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25666 .port_stop = mv_port_stop,
25667 };
25668
25669-static struct ata_port_operations mv6_ops = {
25670+static const struct ata_port_operations mv6_ops = {
25671 .inherits = &mv5_ops,
25672 .dev_config = mv6_dev_config,
25673 .scr_read = mv_scr_read,
25674@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25675 .bmdma_status = mv_bmdma_status,
25676 };
25677
25678-static struct ata_port_operations mv_iie_ops = {
25679+static const struct ata_port_operations mv_iie_ops = {
25680 .inherits = &mv6_ops,
25681 .dev_config = ATA_OP_NULL,
25682 .qc_prep = mv_qc_prep_iie,
25683diff -urNp linux-2.6.32.45/drivers/ata/sata_nv.c linux-2.6.32.45/drivers/ata/sata_nv.c
25684--- linux-2.6.32.45/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
25685+++ linux-2.6.32.45/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
25686@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
25687 * cases. Define nv_hardreset() which only kicks in for post-boot
25688 * probing and use it for all variants.
25689 */
25690-static struct ata_port_operations nv_generic_ops = {
25691+static const struct ata_port_operations nv_generic_ops = {
25692 .inherits = &ata_bmdma_port_ops,
25693 .lost_interrupt = ATA_OP_NULL,
25694 .scr_read = nv_scr_read,
25695@@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
25696 .hardreset = nv_hardreset,
25697 };
25698
25699-static struct ata_port_operations nv_nf2_ops = {
25700+static const struct ata_port_operations nv_nf2_ops = {
25701 .inherits = &nv_generic_ops,
25702 .freeze = nv_nf2_freeze,
25703 .thaw = nv_nf2_thaw,
25704 };
25705
25706-static struct ata_port_operations nv_ck804_ops = {
25707+static const struct ata_port_operations nv_ck804_ops = {
25708 .inherits = &nv_generic_ops,
25709 .freeze = nv_ck804_freeze,
25710 .thaw = nv_ck804_thaw,
25711 .host_stop = nv_ck804_host_stop,
25712 };
25713
25714-static struct ata_port_operations nv_adma_ops = {
25715+static const struct ata_port_operations nv_adma_ops = {
25716 .inherits = &nv_ck804_ops,
25717
25718 .check_atapi_dma = nv_adma_check_atapi_dma,
25719@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
25720 .host_stop = nv_adma_host_stop,
25721 };
25722
25723-static struct ata_port_operations nv_swncq_ops = {
25724+static const struct ata_port_operations nv_swncq_ops = {
25725 .inherits = &nv_generic_ops,
25726
25727 .qc_defer = ata_std_qc_defer,
25728diff -urNp linux-2.6.32.45/drivers/ata/sata_promise.c linux-2.6.32.45/drivers/ata/sata_promise.c
25729--- linux-2.6.32.45/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
25730+++ linux-2.6.32.45/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
25731@@ -195,7 +195,7 @@ static const struct ata_port_operations
25732 .error_handler = pdc_error_handler,
25733 };
25734
25735-static struct ata_port_operations pdc_sata_ops = {
25736+static const struct ata_port_operations pdc_sata_ops = {
25737 .inherits = &pdc_common_ops,
25738 .cable_detect = pdc_sata_cable_detect,
25739 .freeze = pdc_sata_freeze,
25740@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
25741
25742 /* First-generation chips need a more restrictive ->check_atapi_dma op,
25743 and ->freeze/thaw that ignore the hotplug controls. */
25744-static struct ata_port_operations pdc_old_sata_ops = {
25745+static const struct ata_port_operations pdc_old_sata_ops = {
25746 .inherits = &pdc_sata_ops,
25747 .freeze = pdc_freeze,
25748 .thaw = pdc_thaw,
25749 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
25750 };
25751
25752-static struct ata_port_operations pdc_pata_ops = {
25753+static const struct ata_port_operations pdc_pata_ops = {
25754 .inherits = &pdc_common_ops,
25755 .cable_detect = pdc_pata_cable_detect,
25756 .freeze = pdc_freeze,
25757diff -urNp linux-2.6.32.45/drivers/ata/sata_qstor.c linux-2.6.32.45/drivers/ata/sata_qstor.c
25758--- linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
25759+++ linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
25760@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
25761 .dma_boundary = QS_DMA_BOUNDARY,
25762 };
25763
25764-static struct ata_port_operations qs_ata_ops = {
25765+static const struct ata_port_operations qs_ata_ops = {
25766 .inherits = &ata_sff_port_ops,
25767
25768 .check_atapi_dma = qs_check_atapi_dma,
25769diff -urNp linux-2.6.32.45/drivers/ata/sata_sil24.c linux-2.6.32.45/drivers/ata/sata_sil24.c
25770--- linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
25771+++ linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
25772@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
25773 .dma_boundary = ATA_DMA_BOUNDARY,
25774 };
25775
25776-static struct ata_port_operations sil24_ops = {
25777+static const struct ata_port_operations sil24_ops = {
25778 .inherits = &sata_pmp_port_ops,
25779
25780 .qc_defer = sil24_qc_defer,
25781diff -urNp linux-2.6.32.45/drivers/ata/sata_sil.c linux-2.6.32.45/drivers/ata/sata_sil.c
25782--- linux-2.6.32.45/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
25783+++ linux-2.6.32.45/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
25784@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
25785 .sg_tablesize = ATA_MAX_PRD
25786 };
25787
25788-static struct ata_port_operations sil_ops = {
25789+static const struct ata_port_operations sil_ops = {
25790 .inherits = &ata_bmdma32_port_ops,
25791 .dev_config = sil_dev_config,
25792 .set_mode = sil_set_mode,
25793diff -urNp linux-2.6.32.45/drivers/ata/sata_sis.c linux-2.6.32.45/drivers/ata/sata_sis.c
25794--- linux-2.6.32.45/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
25795+++ linux-2.6.32.45/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
25796@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
25797 ATA_BMDMA_SHT(DRV_NAME),
25798 };
25799
25800-static struct ata_port_operations sis_ops = {
25801+static const struct ata_port_operations sis_ops = {
25802 .inherits = &ata_bmdma_port_ops,
25803 .scr_read = sis_scr_read,
25804 .scr_write = sis_scr_write,
25805diff -urNp linux-2.6.32.45/drivers/ata/sata_svw.c linux-2.6.32.45/drivers/ata/sata_svw.c
25806--- linux-2.6.32.45/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
25807+++ linux-2.6.32.45/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
25808@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
25809 };
25810
25811
25812-static struct ata_port_operations k2_sata_ops = {
25813+static const struct ata_port_operations k2_sata_ops = {
25814 .inherits = &ata_bmdma_port_ops,
25815 .sff_tf_load = k2_sata_tf_load,
25816 .sff_tf_read = k2_sata_tf_read,
25817diff -urNp linux-2.6.32.45/drivers/ata/sata_sx4.c linux-2.6.32.45/drivers/ata/sata_sx4.c
25818--- linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
25819+++ linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
25820@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
25821 };
25822
25823 /* TODO: inherit from base port_ops after converting to new EH */
25824-static struct ata_port_operations pdc_20621_ops = {
25825+static const struct ata_port_operations pdc_20621_ops = {
25826 .inherits = &ata_sff_port_ops,
25827
25828 .check_atapi_dma = pdc_check_atapi_dma,
25829diff -urNp linux-2.6.32.45/drivers/ata/sata_uli.c linux-2.6.32.45/drivers/ata/sata_uli.c
25830--- linux-2.6.32.45/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
25831+++ linux-2.6.32.45/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
25832@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
25833 ATA_BMDMA_SHT(DRV_NAME),
25834 };
25835
25836-static struct ata_port_operations uli_ops = {
25837+static const struct ata_port_operations uli_ops = {
25838 .inherits = &ata_bmdma_port_ops,
25839 .scr_read = uli_scr_read,
25840 .scr_write = uli_scr_write,
25841diff -urNp linux-2.6.32.45/drivers/ata/sata_via.c linux-2.6.32.45/drivers/ata/sata_via.c
25842--- linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
25843+++ linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
25844@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
25845 ATA_BMDMA_SHT(DRV_NAME),
25846 };
25847
25848-static struct ata_port_operations svia_base_ops = {
25849+static const struct ata_port_operations svia_base_ops = {
25850 .inherits = &ata_bmdma_port_ops,
25851 .sff_tf_load = svia_tf_load,
25852 };
25853
25854-static struct ata_port_operations vt6420_sata_ops = {
25855+static const struct ata_port_operations vt6420_sata_ops = {
25856 .inherits = &svia_base_ops,
25857 .freeze = svia_noop_freeze,
25858 .prereset = vt6420_prereset,
25859 .bmdma_start = vt6420_bmdma_start,
25860 };
25861
25862-static struct ata_port_operations vt6421_pata_ops = {
25863+static const struct ata_port_operations vt6421_pata_ops = {
25864 .inherits = &svia_base_ops,
25865 .cable_detect = vt6421_pata_cable_detect,
25866 .set_piomode = vt6421_set_pio_mode,
25867 .set_dmamode = vt6421_set_dma_mode,
25868 };
25869
25870-static struct ata_port_operations vt6421_sata_ops = {
25871+static const struct ata_port_operations vt6421_sata_ops = {
25872 .inherits = &svia_base_ops,
25873 .scr_read = svia_scr_read,
25874 .scr_write = svia_scr_write,
25875 };
25876
25877-static struct ata_port_operations vt8251_ops = {
25878+static const struct ata_port_operations vt8251_ops = {
25879 .inherits = &svia_base_ops,
25880 .hardreset = sata_std_hardreset,
25881 .scr_read = vt8251_scr_read,
25882diff -urNp linux-2.6.32.45/drivers/ata/sata_vsc.c linux-2.6.32.45/drivers/ata/sata_vsc.c
25883--- linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
25884+++ linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
25885@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
25886 };
25887
25888
25889-static struct ata_port_operations vsc_sata_ops = {
25890+static const struct ata_port_operations vsc_sata_ops = {
25891 .inherits = &ata_bmdma_port_ops,
25892 /* The IRQ handling is not quite standard SFF behaviour so we
25893 cannot use the default lost interrupt handler */
25894diff -urNp linux-2.6.32.45/drivers/atm/adummy.c linux-2.6.32.45/drivers/atm/adummy.c
25895--- linux-2.6.32.45/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
25896+++ linux-2.6.32.45/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
25897@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
25898 vcc->pop(vcc, skb);
25899 else
25900 dev_kfree_skb_any(skb);
25901- atomic_inc(&vcc->stats->tx);
25902+ atomic_inc_unchecked(&vcc->stats->tx);
25903
25904 return 0;
25905 }
25906diff -urNp linux-2.6.32.45/drivers/atm/ambassador.c linux-2.6.32.45/drivers/atm/ambassador.c
25907--- linux-2.6.32.45/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
25908+++ linux-2.6.32.45/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
25909@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
25910 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25911
25912 // VC layer stats
25913- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25914+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25915
25916 // free the descriptor
25917 kfree (tx_descr);
25918@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
25919 dump_skb ("<<<", vc, skb);
25920
25921 // VC layer stats
25922- atomic_inc(&atm_vcc->stats->rx);
25923+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25924 __net_timestamp(skb);
25925 // end of our responsability
25926 atm_vcc->push (atm_vcc, skb);
25927@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
25928 } else {
25929 PRINTK (KERN_INFO, "dropped over-size frame");
25930 // should we count this?
25931- atomic_inc(&atm_vcc->stats->rx_drop);
25932+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25933 }
25934
25935 } else {
25936@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
25937 }
25938
25939 if (check_area (skb->data, skb->len)) {
25940- atomic_inc(&atm_vcc->stats->tx_err);
25941+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25942 return -ENOMEM; // ?
25943 }
25944
25945diff -urNp linux-2.6.32.45/drivers/atm/atmtcp.c linux-2.6.32.45/drivers/atm/atmtcp.c
25946--- linux-2.6.32.45/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
25947+++ linux-2.6.32.45/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
25948@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
25949 if (vcc->pop) vcc->pop(vcc,skb);
25950 else dev_kfree_skb(skb);
25951 if (dev_data) return 0;
25952- atomic_inc(&vcc->stats->tx_err);
25953+ atomic_inc_unchecked(&vcc->stats->tx_err);
25954 return -ENOLINK;
25955 }
25956 size = skb->len+sizeof(struct atmtcp_hdr);
25957@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
25958 if (!new_skb) {
25959 if (vcc->pop) vcc->pop(vcc,skb);
25960 else dev_kfree_skb(skb);
25961- atomic_inc(&vcc->stats->tx_err);
25962+ atomic_inc_unchecked(&vcc->stats->tx_err);
25963 return -ENOBUFS;
25964 }
25965 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25966@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
25967 if (vcc->pop) vcc->pop(vcc,skb);
25968 else dev_kfree_skb(skb);
25969 out_vcc->push(out_vcc,new_skb);
25970- atomic_inc(&vcc->stats->tx);
25971- atomic_inc(&out_vcc->stats->rx);
25972+ atomic_inc_unchecked(&vcc->stats->tx);
25973+ atomic_inc_unchecked(&out_vcc->stats->rx);
25974 return 0;
25975 }
25976
25977@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
25978 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25979 read_unlock(&vcc_sklist_lock);
25980 if (!out_vcc) {
25981- atomic_inc(&vcc->stats->tx_err);
25982+ atomic_inc_unchecked(&vcc->stats->tx_err);
25983 goto done;
25984 }
25985 skb_pull(skb,sizeof(struct atmtcp_hdr));
25986@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
25987 __net_timestamp(new_skb);
25988 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25989 out_vcc->push(out_vcc,new_skb);
25990- atomic_inc(&vcc->stats->tx);
25991- atomic_inc(&out_vcc->stats->rx);
25992+ atomic_inc_unchecked(&vcc->stats->tx);
25993+ atomic_inc_unchecked(&out_vcc->stats->rx);
25994 done:
25995 if (vcc->pop) vcc->pop(vcc,skb);
25996 else dev_kfree_skb(skb);
25997diff -urNp linux-2.6.32.45/drivers/atm/eni.c linux-2.6.32.45/drivers/atm/eni.c
25998--- linux-2.6.32.45/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
25999+++ linux-2.6.32.45/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
26000@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26001 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26002 vcc->dev->number);
26003 length = 0;
26004- atomic_inc(&vcc->stats->rx_err);
26005+ atomic_inc_unchecked(&vcc->stats->rx_err);
26006 }
26007 else {
26008 length = ATM_CELL_SIZE-1; /* no HEC */
26009@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26010 size);
26011 }
26012 eff = length = 0;
26013- atomic_inc(&vcc->stats->rx_err);
26014+ atomic_inc_unchecked(&vcc->stats->rx_err);
26015 }
26016 else {
26017 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26018@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26019 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26020 vcc->dev->number,vcc->vci,length,size << 2,descr);
26021 length = eff = 0;
26022- atomic_inc(&vcc->stats->rx_err);
26023+ atomic_inc_unchecked(&vcc->stats->rx_err);
26024 }
26025 }
26026 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26027@@ -770,7 +770,7 @@ rx_dequeued++;
26028 vcc->push(vcc,skb);
26029 pushed++;
26030 }
26031- atomic_inc(&vcc->stats->rx);
26032+ atomic_inc_unchecked(&vcc->stats->rx);
26033 }
26034 wake_up(&eni_dev->rx_wait);
26035 }
26036@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
26037 PCI_DMA_TODEVICE);
26038 if (vcc->pop) vcc->pop(vcc,skb);
26039 else dev_kfree_skb_irq(skb);
26040- atomic_inc(&vcc->stats->tx);
26041+ atomic_inc_unchecked(&vcc->stats->tx);
26042 wake_up(&eni_dev->tx_wait);
26043 dma_complete++;
26044 }
26045diff -urNp linux-2.6.32.45/drivers/atm/firestream.c linux-2.6.32.45/drivers/atm/firestream.c
26046--- linux-2.6.32.45/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
26047+++ linux-2.6.32.45/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
26048@@ -748,7 +748,7 @@ static void process_txdone_queue (struct
26049 }
26050 }
26051
26052- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26053+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26054
26055 fs_dprintk (FS_DEBUG_TXMEM, "i");
26056 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26057@@ -815,7 +815,7 @@ static void process_incoming (struct fs_
26058 #endif
26059 skb_put (skb, qe->p1 & 0xffff);
26060 ATM_SKB(skb)->vcc = atm_vcc;
26061- atomic_inc(&atm_vcc->stats->rx);
26062+ atomic_inc_unchecked(&atm_vcc->stats->rx);
26063 __net_timestamp(skb);
26064 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26065 atm_vcc->push (atm_vcc, skb);
26066@@ -836,12 +836,12 @@ static void process_incoming (struct fs_
26067 kfree (pe);
26068 }
26069 if (atm_vcc)
26070- atomic_inc(&atm_vcc->stats->rx_drop);
26071+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26072 break;
26073 case 0x1f: /* Reassembly abort: no buffers. */
26074 /* Silently increment error counter. */
26075 if (atm_vcc)
26076- atomic_inc(&atm_vcc->stats->rx_drop);
26077+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26078 break;
26079 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26080 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26081diff -urNp linux-2.6.32.45/drivers/atm/fore200e.c linux-2.6.32.45/drivers/atm/fore200e.c
26082--- linux-2.6.32.45/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
26083+++ linux-2.6.32.45/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
26084@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
26085 #endif
26086 /* check error condition */
26087 if (*entry->status & STATUS_ERROR)
26088- atomic_inc(&vcc->stats->tx_err);
26089+ atomic_inc_unchecked(&vcc->stats->tx_err);
26090 else
26091- atomic_inc(&vcc->stats->tx);
26092+ atomic_inc_unchecked(&vcc->stats->tx);
26093 }
26094 }
26095
26096@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
26097 if (skb == NULL) {
26098 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26099
26100- atomic_inc(&vcc->stats->rx_drop);
26101+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26102 return -ENOMEM;
26103 }
26104
26105@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
26106
26107 dev_kfree_skb_any(skb);
26108
26109- atomic_inc(&vcc->stats->rx_drop);
26110+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26111 return -ENOMEM;
26112 }
26113
26114 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26115
26116 vcc->push(vcc, skb);
26117- atomic_inc(&vcc->stats->rx);
26118+ atomic_inc_unchecked(&vcc->stats->rx);
26119
26120 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26121
26122@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
26123 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
26124 fore200e->atm_dev->number,
26125 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
26126- atomic_inc(&vcc->stats->rx_err);
26127+ atomic_inc_unchecked(&vcc->stats->rx_err);
26128 }
26129 }
26130
26131@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
26132 goto retry_here;
26133 }
26134
26135- atomic_inc(&vcc->stats->tx_err);
26136+ atomic_inc_unchecked(&vcc->stats->tx_err);
26137
26138 fore200e->tx_sat++;
26139 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
26140diff -urNp linux-2.6.32.45/drivers/atm/he.c linux-2.6.32.45/drivers/atm/he.c
26141--- linux-2.6.32.45/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
26142+++ linux-2.6.32.45/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
26143@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26144
26145 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
26146 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
26147- atomic_inc(&vcc->stats->rx_drop);
26148+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26149 goto return_host_buffers;
26150 }
26151
26152@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26153 RBRQ_LEN_ERR(he_dev->rbrq_head)
26154 ? "LEN_ERR" : "",
26155 vcc->vpi, vcc->vci);
26156- atomic_inc(&vcc->stats->rx_err);
26157+ atomic_inc_unchecked(&vcc->stats->rx_err);
26158 goto return_host_buffers;
26159 }
26160
26161@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26162 vcc->push(vcc, skb);
26163 spin_lock(&he_dev->global_lock);
26164
26165- atomic_inc(&vcc->stats->rx);
26166+ atomic_inc_unchecked(&vcc->stats->rx);
26167
26168 return_host_buffers:
26169 ++pdus_assembled;
26170@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
26171 tpd->vcc->pop(tpd->vcc, tpd->skb);
26172 else
26173 dev_kfree_skb_any(tpd->skb);
26174- atomic_inc(&tpd->vcc->stats->tx_err);
26175+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
26176 }
26177 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
26178 return;
26179@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26180 vcc->pop(vcc, skb);
26181 else
26182 dev_kfree_skb_any(skb);
26183- atomic_inc(&vcc->stats->tx_err);
26184+ atomic_inc_unchecked(&vcc->stats->tx_err);
26185 return -EINVAL;
26186 }
26187
26188@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26189 vcc->pop(vcc, skb);
26190 else
26191 dev_kfree_skb_any(skb);
26192- atomic_inc(&vcc->stats->tx_err);
26193+ atomic_inc_unchecked(&vcc->stats->tx_err);
26194 return -EINVAL;
26195 }
26196 #endif
26197@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26198 vcc->pop(vcc, skb);
26199 else
26200 dev_kfree_skb_any(skb);
26201- atomic_inc(&vcc->stats->tx_err);
26202+ atomic_inc_unchecked(&vcc->stats->tx_err);
26203 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26204 return -ENOMEM;
26205 }
26206@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26207 vcc->pop(vcc, skb);
26208 else
26209 dev_kfree_skb_any(skb);
26210- atomic_inc(&vcc->stats->tx_err);
26211+ atomic_inc_unchecked(&vcc->stats->tx_err);
26212 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26213 return -ENOMEM;
26214 }
26215@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26216 __enqueue_tpd(he_dev, tpd, cid);
26217 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26218
26219- atomic_inc(&vcc->stats->tx);
26220+ atomic_inc_unchecked(&vcc->stats->tx);
26221
26222 return 0;
26223 }
26224diff -urNp linux-2.6.32.45/drivers/atm/horizon.c linux-2.6.32.45/drivers/atm/horizon.c
26225--- linux-2.6.32.45/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26226+++ linux-2.6.32.45/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26227@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26228 {
26229 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26230 // VC layer stats
26231- atomic_inc(&vcc->stats->rx);
26232+ atomic_inc_unchecked(&vcc->stats->rx);
26233 __net_timestamp(skb);
26234 // end of our responsability
26235 vcc->push (vcc, skb);
26236@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26237 dev->tx_iovec = NULL;
26238
26239 // VC layer stats
26240- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26241+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26242
26243 // free the skb
26244 hrz_kfree_skb (skb);
26245diff -urNp linux-2.6.32.45/drivers/atm/idt77252.c linux-2.6.32.45/drivers/atm/idt77252.c
26246--- linux-2.6.32.45/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26247+++ linux-2.6.32.45/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26248@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26249 else
26250 dev_kfree_skb(skb);
26251
26252- atomic_inc(&vcc->stats->tx);
26253+ atomic_inc_unchecked(&vcc->stats->tx);
26254 }
26255
26256 atomic_dec(&scq->used);
26257@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26258 if ((sb = dev_alloc_skb(64)) == NULL) {
26259 printk("%s: Can't allocate buffers for aal0.\n",
26260 card->name);
26261- atomic_add(i, &vcc->stats->rx_drop);
26262+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
26263 break;
26264 }
26265 if (!atm_charge(vcc, sb->truesize)) {
26266 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26267 card->name);
26268- atomic_add(i - 1, &vcc->stats->rx_drop);
26269+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26270 dev_kfree_skb(sb);
26271 break;
26272 }
26273@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26274 ATM_SKB(sb)->vcc = vcc;
26275 __net_timestamp(sb);
26276 vcc->push(vcc, sb);
26277- atomic_inc(&vcc->stats->rx);
26278+ atomic_inc_unchecked(&vcc->stats->rx);
26279
26280 cell += ATM_CELL_PAYLOAD;
26281 }
26282@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26283 "(CDC: %08x)\n",
26284 card->name, len, rpp->len, readl(SAR_REG_CDC));
26285 recycle_rx_pool_skb(card, rpp);
26286- atomic_inc(&vcc->stats->rx_err);
26287+ atomic_inc_unchecked(&vcc->stats->rx_err);
26288 return;
26289 }
26290 if (stat & SAR_RSQE_CRC) {
26291 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26292 recycle_rx_pool_skb(card, rpp);
26293- atomic_inc(&vcc->stats->rx_err);
26294+ atomic_inc_unchecked(&vcc->stats->rx_err);
26295 return;
26296 }
26297 if (skb_queue_len(&rpp->queue) > 1) {
26298@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26299 RXPRINTK("%s: Can't alloc RX skb.\n",
26300 card->name);
26301 recycle_rx_pool_skb(card, rpp);
26302- atomic_inc(&vcc->stats->rx_err);
26303+ atomic_inc_unchecked(&vcc->stats->rx_err);
26304 return;
26305 }
26306 if (!atm_charge(vcc, skb->truesize)) {
26307@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26308 __net_timestamp(skb);
26309
26310 vcc->push(vcc, skb);
26311- atomic_inc(&vcc->stats->rx);
26312+ atomic_inc_unchecked(&vcc->stats->rx);
26313
26314 return;
26315 }
26316@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26317 __net_timestamp(skb);
26318
26319 vcc->push(vcc, skb);
26320- atomic_inc(&vcc->stats->rx);
26321+ atomic_inc_unchecked(&vcc->stats->rx);
26322
26323 if (skb->truesize > SAR_FB_SIZE_3)
26324 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26325@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26326 if (vcc->qos.aal != ATM_AAL0) {
26327 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26328 card->name, vpi, vci);
26329- atomic_inc(&vcc->stats->rx_drop);
26330+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26331 goto drop;
26332 }
26333
26334 if ((sb = dev_alloc_skb(64)) == NULL) {
26335 printk("%s: Can't allocate buffers for AAL0.\n",
26336 card->name);
26337- atomic_inc(&vcc->stats->rx_err);
26338+ atomic_inc_unchecked(&vcc->stats->rx_err);
26339 goto drop;
26340 }
26341
26342@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26343 ATM_SKB(sb)->vcc = vcc;
26344 __net_timestamp(sb);
26345 vcc->push(vcc, sb);
26346- atomic_inc(&vcc->stats->rx);
26347+ atomic_inc_unchecked(&vcc->stats->rx);
26348
26349 drop:
26350 skb_pull(queue, 64);
26351@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26352
26353 if (vc == NULL) {
26354 printk("%s: NULL connection in send().\n", card->name);
26355- atomic_inc(&vcc->stats->tx_err);
26356+ atomic_inc_unchecked(&vcc->stats->tx_err);
26357 dev_kfree_skb(skb);
26358 return -EINVAL;
26359 }
26360 if (!test_bit(VCF_TX, &vc->flags)) {
26361 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26362- atomic_inc(&vcc->stats->tx_err);
26363+ atomic_inc_unchecked(&vcc->stats->tx_err);
26364 dev_kfree_skb(skb);
26365 return -EINVAL;
26366 }
26367@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26368 break;
26369 default:
26370 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26371- atomic_inc(&vcc->stats->tx_err);
26372+ atomic_inc_unchecked(&vcc->stats->tx_err);
26373 dev_kfree_skb(skb);
26374 return -EINVAL;
26375 }
26376
26377 if (skb_shinfo(skb)->nr_frags != 0) {
26378 printk("%s: No scatter-gather yet.\n", card->name);
26379- atomic_inc(&vcc->stats->tx_err);
26380+ atomic_inc_unchecked(&vcc->stats->tx_err);
26381 dev_kfree_skb(skb);
26382 return -EINVAL;
26383 }
26384@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26385
26386 err = queue_skb(card, vc, skb, oam);
26387 if (err) {
26388- atomic_inc(&vcc->stats->tx_err);
26389+ atomic_inc_unchecked(&vcc->stats->tx_err);
26390 dev_kfree_skb(skb);
26391 return err;
26392 }
26393@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26394 skb = dev_alloc_skb(64);
26395 if (!skb) {
26396 printk("%s: Out of memory in send_oam().\n", card->name);
26397- atomic_inc(&vcc->stats->tx_err);
26398+ atomic_inc_unchecked(&vcc->stats->tx_err);
26399 return -ENOMEM;
26400 }
26401 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26402diff -urNp linux-2.6.32.45/drivers/atm/iphase.c linux-2.6.32.45/drivers/atm/iphase.c
26403--- linux-2.6.32.45/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26404+++ linux-2.6.32.45/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26405@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26406 status = (u_short) (buf_desc_ptr->desc_mode);
26407 if (status & (RX_CER | RX_PTE | RX_OFL))
26408 {
26409- atomic_inc(&vcc->stats->rx_err);
26410+ atomic_inc_unchecked(&vcc->stats->rx_err);
26411 IF_ERR(printk("IA: bad packet, dropping it");)
26412 if (status & RX_CER) {
26413 IF_ERR(printk(" cause: packet CRC error\n");)
26414@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26415 len = dma_addr - buf_addr;
26416 if (len > iadev->rx_buf_sz) {
26417 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26418- atomic_inc(&vcc->stats->rx_err);
26419+ atomic_inc_unchecked(&vcc->stats->rx_err);
26420 goto out_free_desc;
26421 }
26422
26423@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26424 ia_vcc = INPH_IA_VCC(vcc);
26425 if (ia_vcc == NULL)
26426 {
26427- atomic_inc(&vcc->stats->rx_err);
26428+ atomic_inc_unchecked(&vcc->stats->rx_err);
26429 dev_kfree_skb_any(skb);
26430 atm_return(vcc, atm_guess_pdu2truesize(len));
26431 goto INCR_DLE;
26432@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26433 if ((length > iadev->rx_buf_sz) || (length >
26434 (skb->len - sizeof(struct cpcs_trailer))))
26435 {
26436- atomic_inc(&vcc->stats->rx_err);
26437+ atomic_inc_unchecked(&vcc->stats->rx_err);
26438 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26439 length, skb->len);)
26440 dev_kfree_skb_any(skb);
26441@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26442
26443 IF_RX(printk("rx_dle_intr: skb push");)
26444 vcc->push(vcc,skb);
26445- atomic_inc(&vcc->stats->rx);
26446+ atomic_inc_unchecked(&vcc->stats->rx);
26447 iadev->rx_pkt_cnt++;
26448 }
26449 INCR_DLE:
26450@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26451 {
26452 struct k_sonet_stats *stats;
26453 stats = &PRIV(_ia_dev[board])->sonet_stats;
26454- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26455- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26456- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26457- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26458- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26459- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26460- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26461- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26462- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26463+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26464+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26465+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26466+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26467+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26468+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26469+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26470+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26471+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26472 }
26473 ia_cmds.status = 0;
26474 break;
26475@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26476 if ((desc == 0) || (desc > iadev->num_tx_desc))
26477 {
26478 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26479- atomic_inc(&vcc->stats->tx);
26480+ atomic_inc_unchecked(&vcc->stats->tx);
26481 if (vcc->pop)
26482 vcc->pop(vcc, skb);
26483 else
26484@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26485 ATM_DESC(skb) = vcc->vci;
26486 skb_queue_tail(&iadev->tx_dma_q, skb);
26487
26488- atomic_inc(&vcc->stats->tx);
26489+ atomic_inc_unchecked(&vcc->stats->tx);
26490 iadev->tx_pkt_cnt++;
26491 /* Increment transaction counter */
26492 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26493
26494 #if 0
26495 /* add flow control logic */
26496- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26497+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26498 if (iavcc->vc_desc_cnt > 10) {
26499 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26500 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26501diff -urNp linux-2.6.32.45/drivers/atm/lanai.c linux-2.6.32.45/drivers/atm/lanai.c
26502--- linux-2.6.32.45/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26503+++ linux-2.6.32.45/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26504@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26505 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26506 lanai_endtx(lanai, lvcc);
26507 lanai_free_skb(lvcc->tx.atmvcc, skb);
26508- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26509+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26510 }
26511
26512 /* Try to fill the buffer - don't call unless there is backlog */
26513@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26514 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26515 __net_timestamp(skb);
26516 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26517- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26518+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26519 out:
26520 lvcc->rx.buf.ptr = end;
26521 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26522@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26523 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26524 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26525 lanai->stats.service_rxnotaal5++;
26526- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26527+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26528 return 0;
26529 }
26530 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26531@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26532 int bytes;
26533 read_unlock(&vcc_sklist_lock);
26534 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26535- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26536+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26537 lvcc->stats.x.aal5.service_trash++;
26538 bytes = (SERVICE_GET_END(s) * 16) -
26539 (((unsigned long) lvcc->rx.buf.ptr) -
26540@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26541 }
26542 if (s & SERVICE_STREAM) {
26543 read_unlock(&vcc_sklist_lock);
26544- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26545+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26546 lvcc->stats.x.aal5.service_stream++;
26547 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26548 "PDU on VCI %d!\n", lanai->number, vci);
26549@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26550 return 0;
26551 }
26552 DPRINTK("got rx crc error on vci %d\n", vci);
26553- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26554+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26555 lvcc->stats.x.aal5.service_rxcrc++;
26556 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26557 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26558diff -urNp linux-2.6.32.45/drivers/atm/nicstar.c linux-2.6.32.45/drivers/atm/nicstar.c
26559--- linux-2.6.32.45/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26560+++ linux-2.6.32.45/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26561@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26562 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26563 {
26564 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26565- atomic_inc(&vcc->stats->tx_err);
26566+ atomic_inc_unchecked(&vcc->stats->tx_err);
26567 dev_kfree_skb_any(skb);
26568 return -EINVAL;
26569 }
26570@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26571 if (!vc->tx)
26572 {
26573 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26574- atomic_inc(&vcc->stats->tx_err);
26575+ atomic_inc_unchecked(&vcc->stats->tx_err);
26576 dev_kfree_skb_any(skb);
26577 return -EINVAL;
26578 }
26579@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26580 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26581 {
26582 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26583- atomic_inc(&vcc->stats->tx_err);
26584+ atomic_inc_unchecked(&vcc->stats->tx_err);
26585 dev_kfree_skb_any(skb);
26586 return -EINVAL;
26587 }
26588@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26589 if (skb_shinfo(skb)->nr_frags != 0)
26590 {
26591 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26592- atomic_inc(&vcc->stats->tx_err);
26593+ atomic_inc_unchecked(&vcc->stats->tx_err);
26594 dev_kfree_skb_any(skb);
26595 return -EINVAL;
26596 }
26597@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26598
26599 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26600 {
26601- atomic_inc(&vcc->stats->tx_err);
26602+ atomic_inc_unchecked(&vcc->stats->tx_err);
26603 dev_kfree_skb_any(skb);
26604 return -EIO;
26605 }
26606- atomic_inc(&vcc->stats->tx);
26607+ atomic_inc_unchecked(&vcc->stats->tx);
26608
26609 return 0;
26610 }
26611@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26612 {
26613 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26614 card->index);
26615- atomic_add(i,&vcc->stats->rx_drop);
26616+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
26617 break;
26618 }
26619 if (!atm_charge(vcc, sb->truesize))
26620 {
26621 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26622 card->index);
26623- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26624+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26625 dev_kfree_skb_any(sb);
26626 break;
26627 }
26628@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26629 ATM_SKB(sb)->vcc = vcc;
26630 __net_timestamp(sb);
26631 vcc->push(vcc, sb);
26632- atomic_inc(&vcc->stats->rx);
26633+ atomic_inc_unchecked(&vcc->stats->rx);
26634 cell += ATM_CELL_PAYLOAD;
26635 }
26636
26637@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26638 if (iovb == NULL)
26639 {
26640 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26641- atomic_inc(&vcc->stats->rx_drop);
26642+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26643 recycle_rx_buf(card, skb);
26644 return;
26645 }
26646@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26647 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26648 {
26649 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26650- atomic_inc(&vcc->stats->rx_err);
26651+ atomic_inc_unchecked(&vcc->stats->rx_err);
26652 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26653 NS_SKB(iovb)->iovcnt = 0;
26654 iovb->len = 0;
26655@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26656 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26657 card->index);
26658 which_list(card, skb);
26659- atomic_inc(&vcc->stats->rx_err);
26660+ atomic_inc_unchecked(&vcc->stats->rx_err);
26661 recycle_rx_buf(card, skb);
26662 vc->rx_iov = NULL;
26663 recycle_iov_buf(card, iovb);
26664@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26665 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26666 card->index);
26667 which_list(card, skb);
26668- atomic_inc(&vcc->stats->rx_err);
26669+ atomic_inc_unchecked(&vcc->stats->rx_err);
26670 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26671 NS_SKB(iovb)->iovcnt);
26672 vc->rx_iov = NULL;
26673@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26674 printk(" - PDU size mismatch.\n");
26675 else
26676 printk(".\n");
26677- atomic_inc(&vcc->stats->rx_err);
26678+ atomic_inc_unchecked(&vcc->stats->rx_err);
26679 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26680 NS_SKB(iovb)->iovcnt);
26681 vc->rx_iov = NULL;
26682@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
26683 if (!atm_charge(vcc, skb->truesize))
26684 {
26685 push_rxbufs(card, skb);
26686- atomic_inc(&vcc->stats->rx_drop);
26687+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26688 }
26689 else
26690 {
26691@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
26692 ATM_SKB(skb)->vcc = vcc;
26693 __net_timestamp(skb);
26694 vcc->push(vcc, skb);
26695- atomic_inc(&vcc->stats->rx);
26696+ atomic_inc_unchecked(&vcc->stats->rx);
26697 }
26698 }
26699 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
26700@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
26701 if (!atm_charge(vcc, sb->truesize))
26702 {
26703 push_rxbufs(card, sb);
26704- atomic_inc(&vcc->stats->rx_drop);
26705+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26706 }
26707 else
26708 {
26709@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
26710 ATM_SKB(sb)->vcc = vcc;
26711 __net_timestamp(sb);
26712 vcc->push(vcc, sb);
26713- atomic_inc(&vcc->stats->rx);
26714+ atomic_inc_unchecked(&vcc->stats->rx);
26715 }
26716
26717 push_rxbufs(card, skb);
26718@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
26719 if (!atm_charge(vcc, skb->truesize))
26720 {
26721 push_rxbufs(card, skb);
26722- atomic_inc(&vcc->stats->rx_drop);
26723+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26724 }
26725 else
26726 {
26727@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
26728 ATM_SKB(skb)->vcc = vcc;
26729 __net_timestamp(skb);
26730 vcc->push(vcc, skb);
26731- atomic_inc(&vcc->stats->rx);
26732+ atomic_inc_unchecked(&vcc->stats->rx);
26733 }
26734
26735 push_rxbufs(card, sb);
26736@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
26737 if (hb == NULL)
26738 {
26739 printk("nicstar%d: Out of huge buffers.\n", card->index);
26740- atomic_inc(&vcc->stats->rx_drop);
26741+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26742 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26743 NS_SKB(iovb)->iovcnt);
26744 vc->rx_iov = NULL;
26745@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
26746 }
26747 else
26748 dev_kfree_skb_any(hb);
26749- atomic_inc(&vcc->stats->rx_drop);
26750+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26751 }
26752 else
26753 {
26754@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
26755 #endif /* NS_USE_DESTRUCTORS */
26756 __net_timestamp(hb);
26757 vcc->push(vcc, hb);
26758- atomic_inc(&vcc->stats->rx);
26759+ atomic_inc_unchecked(&vcc->stats->rx);
26760 }
26761 }
26762
26763diff -urNp linux-2.6.32.45/drivers/atm/solos-pci.c linux-2.6.32.45/drivers/atm/solos-pci.c
26764--- linux-2.6.32.45/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
26765+++ linux-2.6.32.45/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
26766@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
26767 }
26768 atm_charge(vcc, skb->truesize);
26769 vcc->push(vcc, skb);
26770- atomic_inc(&vcc->stats->rx);
26771+ atomic_inc_unchecked(&vcc->stats->rx);
26772 break;
26773
26774 case PKT_STATUS:
26775@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
26776 char msg[500];
26777 char item[10];
26778
26779+ pax_track_stack();
26780+
26781 len = buf->len;
26782 for (i = 0; i < len; i++){
26783 if(i % 8 == 0)
26784@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
26785 vcc = SKB_CB(oldskb)->vcc;
26786
26787 if (vcc) {
26788- atomic_inc(&vcc->stats->tx);
26789+ atomic_inc_unchecked(&vcc->stats->tx);
26790 solos_pop(vcc, oldskb);
26791 } else
26792 dev_kfree_skb_irq(oldskb);
26793diff -urNp linux-2.6.32.45/drivers/atm/suni.c linux-2.6.32.45/drivers/atm/suni.c
26794--- linux-2.6.32.45/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
26795+++ linux-2.6.32.45/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
26796@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26797
26798
26799 #define ADD_LIMITED(s,v) \
26800- atomic_add((v),&stats->s); \
26801- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26802+ atomic_add_unchecked((v),&stats->s); \
26803+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26804
26805
26806 static void suni_hz(unsigned long from_timer)
26807diff -urNp linux-2.6.32.45/drivers/atm/uPD98402.c linux-2.6.32.45/drivers/atm/uPD98402.c
26808--- linux-2.6.32.45/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
26809+++ linux-2.6.32.45/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
26810@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
26811 struct sonet_stats tmp;
26812 int error = 0;
26813
26814- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26815+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26816 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26817 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26818 if (zero && !error) {
26819@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
26820
26821
26822 #define ADD_LIMITED(s,v) \
26823- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26824- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26825- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26826+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26827+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26828+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26829
26830
26831 static void stat_event(struct atm_dev *dev)
26832@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
26833 if (reason & uPD98402_INT_PFM) stat_event(dev);
26834 if (reason & uPD98402_INT_PCO) {
26835 (void) GET(PCOCR); /* clear interrupt cause */
26836- atomic_add(GET(HECCT),
26837+ atomic_add_unchecked(GET(HECCT),
26838 &PRIV(dev)->sonet_stats.uncorr_hcs);
26839 }
26840 if ((reason & uPD98402_INT_RFO) &&
26841@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
26842 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26843 uPD98402_INT_LOS),PIMR); /* enable them */
26844 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26845- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26846- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26847- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26848+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26849+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26850+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26851 return 0;
26852 }
26853
26854diff -urNp linux-2.6.32.45/drivers/atm/zatm.c linux-2.6.32.45/drivers/atm/zatm.c
26855--- linux-2.6.32.45/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
26856+++ linux-2.6.32.45/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
26857@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26858 }
26859 if (!size) {
26860 dev_kfree_skb_irq(skb);
26861- if (vcc) atomic_inc(&vcc->stats->rx_err);
26862+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26863 continue;
26864 }
26865 if (!atm_charge(vcc,skb->truesize)) {
26866@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26867 skb->len = size;
26868 ATM_SKB(skb)->vcc = vcc;
26869 vcc->push(vcc,skb);
26870- atomic_inc(&vcc->stats->rx);
26871+ atomic_inc_unchecked(&vcc->stats->rx);
26872 }
26873 zout(pos & 0xffff,MTA(mbx));
26874 #if 0 /* probably a stupid idea */
26875@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
26876 skb_queue_head(&zatm_vcc->backlog,skb);
26877 break;
26878 }
26879- atomic_inc(&vcc->stats->tx);
26880+ atomic_inc_unchecked(&vcc->stats->tx);
26881 wake_up(&zatm_vcc->tx_wait);
26882 }
26883
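
Editor's note on the ATM driver hunks above: the patch converts device statistics from atomic_t to atomic_unchecked_t so that PaX's overflow-checked atomics do not fire on counters that are allowed to wrap, while the ADD_LIMITED macros keep their saturating behaviour. A minimal user-space sketch of that saturating idea (names are illustrative, not the drivers' real ones; this is not kernel code):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int rx_cells;                     /* stands in for stats->rx_cells */

/* Add v and clamp at INT_MAX, mirroring the ADD_LIMITED idea. */
static void add_limited(atomic_int *ctr, int v)
{
        atomic_fetch_add(ctr, v);               /* wraps silently, like atomic_add_unchecked */
        if (atomic_load(ctr) < 0)               /* wrapped past INT_MAX? */
                atomic_store(ctr, INT_MAX);
}

int main(void)
{
        atomic_store(&rx_cells, INT_MAX - 1);
        add_limited(&rx_cells, 5);
        printf("rx_cells = %d\n", atomic_load(&rx_cells));  /* clamped to INT_MAX */
        return 0;
}
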
26884diff -urNp linux-2.6.32.45/drivers/base/bus.c linux-2.6.32.45/drivers/base/bus.c
26885--- linux-2.6.32.45/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
26886+++ linux-2.6.32.45/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
26887@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
26888 return ret;
26889 }
26890
26891-static struct sysfs_ops driver_sysfs_ops = {
26892+static const struct sysfs_ops driver_sysfs_ops = {
26893 .show = drv_attr_show,
26894 .store = drv_attr_store,
26895 };
26896@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
26897 return ret;
26898 }
26899
26900-static struct sysfs_ops bus_sysfs_ops = {
26901+static const struct sysfs_ops bus_sysfs_ops = {
26902 .show = bus_attr_show,
26903 .store = bus_attr_store,
26904 };
26905@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
26906 return 0;
26907 }
26908
26909-static struct kset_uevent_ops bus_uevent_ops = {
26910+static const struct kset_uevent_ops bus_uevent_ops = {
26911 .filter = bus_uevent_filter,
26912 };
26913
26914diff -urNp linux-2.6.32.45/drivers/base/class.c linux-2.6.32.45/drivers/base/class.c
26915--- linux-2.6.32.45/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
26916+++ linux-2.6.32.45/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
26917@@ -63,7 +63,7 @@ static void class_release(struct kobject
26918 kfree(cp);
26919 }
26920
26921-static struct sysfs_ops class_sysfs_ops = {
26922+static const struct sysfs_ops class_sysfs_ops = {
26923 .show = class_attr_show,
26924 .store = class_attr_store,
26925 };
26926diff -urNp linux-2.6.32.45/drivers/base/core.c linux-2.6.32.45/drivers/base/core.c
26927--- linux-2.6.32.45/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
26928+++ linux-2.6.32.45/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
26929@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
26930 return ret;
26931 }
26932
26933-static struct sysfs_ops dev_sysfs_ops = {
26934+static const struct sysfs_ops dev_sysfs_ops = {
26935 .show = dev_attr_show,
26936 .store = dev_attr_store,
26937 };
26938@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
26939 return retval;
26940 }
26941
26942-static struct kset_uevent_ops device_uevent_ops = {
26943+static const struct kset_uevent_ops device_uevent_ops = {
26944 .filter = dev_uevent_filter,
26945 .name = dev_uevent_name,
26946 .uevent = dev_uevent,
26947diff -urNp linux-2.6.32.45/drivers/base/memory.c linux-2.6.32.45/drivers/base/memory.c
26948--- linux-2.6.32.45/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
26949+++ linux-2.6.32.45/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
26950@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
26951 return retval;
26952 }
26953
26954-static struct kset_uevent_ops memory_uevent_ops = {
26955+static const struct kset_uevent_ops memory_uevent_ops = {
26956 .name = memory_uevent_name,
26957 .uevent = memory_uevent,
26958 };
26959diff -urNp linux-2.6.32.45/drivers/base/sys.c linux-2.6.32.45/drivers/base/sys.c
26960--- linux-2.6.32.45/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
26961+++ linux-2.6.32.45/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
26962@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
26963 return -EIO;
26964 }
26965
26966-static struct sysfs_ops sysfs_ops = {
26967+static const struct sysfs_ops sysfs_ops = {
26968 .show = sysdev_show,
26969 .store = sysdev_store,
26970 };
26971@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
26972 return -EIO;
26973 }
26974
26975-static struct sysfs_ops sysfs_class_ops = {
26976+static const struct sysfs_ops sysfs_class_ops = {
26977 .show = sysdev_class_show,
26978 .store = sysdev_class_store,
26979 };
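
Editor's note on the drivers/base hunks above: ops tables that hold only function pointers (sysfs_ops, kset_uevent_ops) are declared const so they can be placed in read-only memory. A small user-space analogy, assuming hypothetical names (not the kernel's sysfs implementation):

#include <stdio.h>

struct demo_ops {
        void (*show)(const char *name);
        void (*store)(const char *name, const char *val);
};

static void demo_show(const char *name)                    { printf("show %s\n", name); }
static void demo_store(const char *name, const char *val)  { printf("store %s=%s\n", name, val); }

static const struct demo_ops demo_sysfs_ops = {            /* 'const' is the whole point */
        .show  = demo_show,
        .store = demo_store,
};

int main(void)
{
        demo_sysfs_ops.show("example");
        /* demo_sysfs_ops.show = NULL;  -- would now fail to compile */
        return 0;
}
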
26980diff -urNp linux-2.6.32.45/drivers/block/cciss.c linux-2.6.32.45/drivers/block/cciss.c
26981--- linux-2.6.32.45/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
26982+++ linux-2.6.32.45/drivers/block/cciss.c 2011-08-05 20:33:55.000000000 -0400
26983@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
26984 int err;
26985 u32 cp;
26986
26987+ memset(&arg64, 0, sizeof(arg64));
26988+
26989 err = 0;
26990 err |=
26991 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26992@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ct
26993 /* Wait (up to 20 seconds) for a command to complete */
26994
26995 for (i = 20 * HZ; i > 0; i--) {
26996- done = hba[ctlr]->access.command_completed(hba[ctlr]);
26997+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
26998 if (done == FIFO_EMPTY)
26999 schedule_timeout_uninterruptible(1);
27000 else
27001@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h,
27002 resend_cmd1:
27003
27004 /* Disable interrupt on the board. */
27005- h->access.set_intr_mask(h, CCISS_INTR_OFF);
27006+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
27007
27008 /* Make sure there is room in the command FIFO */
27009 /* Actually it should be completely empty at this time */
27010@@ -2884,13 +2886,13 @@ resend_cmd1:
27011 /* tape side of the driver. */
27012 for (i = 200000; i > 0; i--) {
27013 /* if fifo isn't full go */
27014- if (!(h->access.fifo_full(h)))
27015+ if (!(h->access->fifo_full(h)))
27016 break;
27017 udelay(10);
27018 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
27019 " waiting!\n", h->ctlr);
27020 }
27021- h->access.submit_command(h, c); /* Send the cmd */
27022+ h->access->submit_command(h, c); /* Send the cmd */
27023 do {
27024 complete = pollcomplete(h->ctlr);
27025
27026@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
27027 while (!hlist_empty(&h->reqQ)) {
27028 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
27029 /* can't do anything if fifo is full */
27030- if ((h->access.fifo_full(h))) {
27031+ if ((h->access->fifo_full(h))) {
27032 printk(KERN_WARNING "cciss: fifo full\n");
27033 break;
27034 }
27035@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
27036 h->Qdepth--;
27037
27038 /* Tell the controller execute command */
27039- h->access.submit_command(h, c);
27040+ h->access->submit_command(h, c);
27041
27042 /* Put job onto the completed Q */
27043 addQ(&h->cmpQ, c);
27044@@ -3393,17 +3395,17 @@ startio:
27045
27046 static inline unsigned long get_next_completion(ctlr_info_t *h)
27047 {
27048- return h->access.command_completed(h);
27049+ return h->access->command_completed(h);
27050 }
27051
27052 static inline int interrupt_pending(ctlr_info_t *h)
27053 {
27054- return h->access.intr_pending(h);
27055+ return h->access->intr_pending(h);
27056 }
27057
27058 static inline long interrupt_not_for_us(ctlr_info_t *h)
27059 {
27060- return (((h->access.intr_pending(h) == 0) ||
27061+ return (((h->access->intr_pending(h) == 0) ||
27062 (h->interrupts_enabled == 0)));
27063 }
27064
27065@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr
27066 */
27067 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
27068 c->product_name = products[prod_index].product_name;
27069- c->access = *(products[prod_index].access);
27070+ c->access = products[prod_index].access;
27071 c->nr_cmds = c->max_commands - 4;
27072 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
27073 (readb(&c->cfgtable->Signature[1]) != 'I') ||
27074@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(stru
27075 }
27076
27077 /* make sure the board interrupts are off */
27078- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
27079+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
27080 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
27081 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
27082 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
27083@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(stru
27084 cciss_scsi_setup(i);
27085
27086 /* Turn the interrupts on so we can service requests */
27087- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
27088+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
27089
27090 /* Get the firmware version */
27091 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27092diff -urNp linux-2.6.32.45/drivers/block/cciss.h linux-2.6.32.45/drivers/block/cciss.h
27093--- linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:35:28.000000000 -0400
27094+++ linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:33:59.000000000 -0400
27095@@ -90,7 +90,7 @@ struct ctlr_info
27096 // information about each logical volume
27097 drive_info_struct *drv[CISS_MAX_LUN];
27098
27099- struct access_method access;
27100+ struct access_method *access;
27101
27102 /* queue and queue Info */
27103 struct hlist_head reqQ;
27104diff -urNp linux-2.6.32.45/drivers/block/cpqarray.c linux-2.6.32.45/drivers/block/cpqarray.c
27105--- linux-2.6.32.45/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
27106+++ linux-2.6.32.45/drivers/block/cpqarray.c 2011-08-05 20:33:55.000000000 -0400
27107@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr
27108 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27109 goto Enomem4;
27110 }
27111- hba[i]->access.set_intr_mask(hba[i], 0);
27112+ hba[i]->access->set_intr_mask(hba[i], 0);
27113 if (request_irq(hba[i]->intr, do_ida_intr,
27114 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27115 {
27116@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr
27117 add_timer(&hba[i]->timer);
27118
27119 /* Enable IRQ now that spinlock and rate limit timer are set up */
27120- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27121+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27122
27123 for(j=0; j<NWD; j++) {
27124 struct gendisk *disk = ida_gendisk[i][j];
27125@@ -695,7 +695,7 @@ DBGINFO(
27126 for(i=0; i<NR_PRODUCTS; i++) {
27127 if (board_id == products[i].board_id) {
27128 c->product_name = products[i].product_name;
27129- c->access = *(products[i].access);
27130+ c->access = products[i].access;
27131 break;
27132 }
27133 }
27134@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(v
27135 hba[ctlr]->intr = intr;
27136 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27137 hba[ctlr]->product_name = products[j].product_name;
27138- hba[ctlr]->access = *(products[j].access);
27139+ hba[ctlr]->access = products[j].access;
27140 hba[ctlr]->ctlr = ctlr;
27141 hba[ctlr]->board_id = board_id;
27142 hba[ctlr]->pci_dev = NULL; /* not PCI */
27143@@ -896,6 +896,8 @@ static void do_ida_request(struct reques
27144 struct scatterlist tmp_sg[SG_MAX];
27145 int i, dir, seg;
27146
27147+ pax_track_stack();
27148+
27149 if (blk_queue_plugged(q))
27150 goto startio;
27151
27152@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
27153
27154 while((c = h->reqQ) != NULL) {
27155 /* Can't do anything if we're busy */
27156- if (h->access.fifo_full(h) == 0)
27157+ if (h->access->fifo_full(h) == 0)
27158 return;
27159
27160 /* Get the first entry from the request Q */
27161@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
27162 h->Qdepth--;
27163
27164 /* Tell the controller to do our bidding */
27165- h->access.submit_command(h, c);
27166+ h->access->submit_command(h, c);
27167
27168 /* Get onto the completion Q */
27169 addQ(&h->cmpQ, c);
27170@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq,
27171 unsigned long flags;
27172 __u32 a,a1;
27173
27174- istat = h->access.intr_pending(h);
27175+ istat = h->access->intr_pending(h);
27176 /* Is this interrupt for us? */
27177 if (istat == 0)
27178 return IRQ_NONE;
27179@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq,
27180 */
27181 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
27182 if (istat & FIFO_NOT_EMPTY) {
27183- while((a = h->access.command_completed(h))) {
27184+ while((a = h->access->command_completed(h))) {
27185 a1 = a; a &= ~3;
27186 if ((c = h->cmpQ) == NULL)
27187 {
27188@@ -1434,11 +1436,11 @@ static int sendcmd(
27189 /*
27190 * Disable interrupt
27191 */
27192- info_p->access.set_intr_mask(info_p, 0);
27193+ info_p->access->set_intr_mask(info_p, 0);
27194 /* Make sure there is room in the command FIFO */
27195 /* Actually it should be completely empty at this time. */
27196 for (i = 200000; i > 0; i--) {
27197- temp = info_p->access.fifo_full(info_p);
27198+ temp = info_p->access->fifo_full(info_p);
27199 if (temp != 0) {
27200 break;
27201 }
27202@@ -1451,7 +1453,7 @@ DBG(
27203 /*
27204 * Send the cmd
27205 */
27206- info_p->access.submit_command(info_p, c);
27207+ info_p->access->submit_command(info_p, c);
27208 complete = pollcomplete(ctlr);
27209
27210 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
27211@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t
27212 * we check the new geometry. Then turn interrupts back on when
27213 * we're done.
27214 */
27215- host->access.set_intr_mask(host, 0);
27216+ host->access->set_intr_mask(host, 0);
27217 getgeometry(ctlr);
27218- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
27219+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
27220
27221 for(i=0; i<NWD; i++) {
27222 struct gendisk *disk = ida_gendisk[ctlr][i];
27223@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
27224 /* Wait (up to 2 seconds) for a command to complete */
27225
27226 for (i = 200000; i > 0; i--) {
27227- done = hba[ctlr]->access.command_completed(hba[ctlr]);
27228+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
27229 if (done == 0) {
27230 udelay(10); /* a short fixed delay */
27231 } else
27232diff -urNp linux-2.6.32.45/drivers/block/cpqarray.h linux-2.6.32.45/drivers/block/cpqarray.h
27233--- linux-2.6.32.45/drivers/block/cpqarray.h 2011-03-27 14:31:47.000000000 -0400
27234+++ linux-2.6.32.45/drivers/block/cpqarray.h 2011-08-05 20:33:55.000000000 -0400
27235@@ -99,7 +99,7 @@ struct ctlr_info {
27236 drv_info_t drv[NWD];
27237 struct proc_dir_entry *proc;
27238
27239- struct access_method access;
27240+ struct access_method *access;
27241
27242 cmdlist_t *reqQ;
27243 cmdlist_t *cmpQ;
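
Editor's note on the cciss/cpqarray hunks above: the per-controller 'access' field becomes a pointer to a shared, read-only method table instead of a by-value copy, so call sites switch from h->access.fn() to h->access->fn(). A minimal sketch of that shape, with illustrative names only:

#include <stdio.h>

struct ctlr;                                    /* forward declaration */

struct access_method {
        void (*submit_command)(struct ctlr *c);
        int  (*fifo_full)(struct ctlr *c);
};

struct ctlr {
        const char *name;
        const struct access_method *access;     /* pointer, not an embedded copy */
};

static void demo_submit(struct ctlr *c) { printf("%s: submit\n", c->name); }
static int  demo_full(struct ctlr *c)   { (void)c; return 0; }

static const struct access_method demo_access = {
        .submit_command = demo_submit,
        .fifo_full      = demo_full,
};

int main(void)
{
        struct ctlr h = { .name = "ctlr0", .access = &demo_access };
        if (!h.access->fifo_full(&h))           /* was: h.access.fifo_full(&h) */
                h.access->submit_command(&h);
        return 0;
}
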
27244diff -urNp linux-2.6.32.45/drivers/block/DAC960.c linux-2.6.32.45/drivers/block/DAC960.c
27245--- linux-2.6.32.45/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
27246+++ linux-2.6.32.45/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
27247@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
27248 unsigned long flags;
27249 int Channel, TargetID;
27250
27251+ pax_track_stack();
27252+
27253 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
27254 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
27255 sizeof(DAC960_SCSI_Inquiry_T) +
27256diff -urNp linux-2.6.32.45/drivers/block/nbd.c linux-2.6.32.45/drivers/block/nbd.c
27257--- linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
27258+++ linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
27259@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
27260 struct kvec iov;
27261 sigset_t blocked, oldset;
27262
27263+ pax_track_stack();
27264+
27265 if (unlikely(!sock)) {
27266 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
27267 lo->disk->disk_name, (send ? "send" : "recv"));
27268@@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
27269 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
27270 unsigned int cmd, unsigned long arg)
27271 {
27272+ pax_track_stack();
27273+
27274 switch (cmd) {
27275 case NBD_DISCONNECT: {
27276 struct request sreq;
27277diff -urNp linux-2.6.32.45/drivers/block/pktcdvd.c linux-2.6.32.45/drivers/block/pktcdvd.c
27278--- linux-2.6.32.45/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
27279+++ linux-2.6.32.45/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
27280@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
27281 return len;
27282 }
27283
27284-static struct sysfs_ops kobj_pkt_ops = {
27285+static const struct sysfs_ops kobj_pkt_ops = {
27286 .show = kobj_pkt_show,
27287 .store = kobj_pkt_store
27288 };
27289diff -urNp linux-2.6.32.45/drivers/char/agp/frontend.c linux-2.6.32.45/drivers/char/agp/frontend.c
27290--- linux-2.6.32.45/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
27291+++ linux-2.6.32.45/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
27292@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
27293 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27294 return -EFAULT;
27295
27296- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27297+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27298 return -EFAULT;
27299
27300 client = agp_find_client_by_pid(reserve.pid);
27301diff -urNp linux-2.6.32.45/drivers/char/briq_panel.c linux-2.6.32.45/drivers/char/briq_panel.c
27302--- linux-2.6.32.45/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
27303+++ linux-2.6.32.45/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
27304@@ -10,6 +10,7 @@
27305 #include <linux/types.h>
27306 #include <linux/errno.h>
27307 #include <linux/tty.h>
27308+#include <linux/mutex.h>
27309 #include <linux/timer.h>
27310 #include <linux/kernel.h>
27311 #include <linux/wait.h>
27312@@ -36,6 +37,7 @@ static int vfd_is_open;
27313 static unsigned char vfd[40];
27314 static int vfd_cursor;
27315 static unsigned char ledpb, led;
27316+static DEFINE_MUTEX(vfd_mutex);
27317
27318 static void update_vfd(void)
27319 {
27320@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
27321 if (!vfd_is_open)
27322 return -EBUSY;
27323
27324+ mutex_lock(&vfd_mutex);
27325 for (;;) {
27326 char c;
27327 if (!indx)
27328 break;
27329- if (get_user(c, buf))
27330+ if (get_user(c, buf)) {
27331+ mutex_unlock(&vfd_mutex);
27332 return -EFAULT;
27333+ }
27334 if (esc) {
27335 set_led(c);
27336 esc = 0;
27337@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
27338 buf++;
27339 }
27340 update_vfd();
27341+ mutex_unlock(&vfd_mutex);
27342
27343 return len;
27344 }
27345diff -urNp linux-2.6.32.45/drivers/char/genrtc.c linux-2.6.32.45/drivers/char/genrtc.c
27346--- linux-2.6.32.45/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
27347+++ linux-2.6.32.45/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
27348@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
27349 switch (cmd) {
27350
27351 case RTC_PLL_GET:
27352+ memset(&pll, 0, sizeof(pll));
27353 if (get_rtc_pll(&pll))
27354 return -EINVAL;
27355 else
27356diff -urNp linux-2.6.32.45/drivers/char/hpet.c linux-2.6.32.45/drivers/char/hpet.c
27357--- linux-2.6.32.45/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
27358+++ linux-2.6.32.45/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
27359@@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
27360 return 0;
27361 }
27362
27363-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
27364+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
27365
27366 static int
27367 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
27368@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
27369 }
27370
27371 static int
27372-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
27373+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
27374 {
27375 struct hpet_timer __iomem *timer;
27376 struct hpet __iomem *hpet;
27377@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
27378 {
27379 struct hpet_info info;
27380
27381+ memset(&info, 0, sizeof(info));
27382+
27383 if (devp->hd_ireqfreq)
27384 info.hi_ireqfreq =
27385 hpet_time_div(hpetp, devp->hd_ireqfreq);
27386- else
27387- info.hi_ireqfreq = 0;
27388 info.hi_flags =
27389 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
27390 info.hi_hpet = hpetp->hp_which;
27391diff -urNp linux-2.6.32.45/drivers/char/hvc_beat.c linux-2.6.32.45/drivers/char/hvc_beat.c
27392--- linux-2.6.32.45/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
27393+++ linux-2.6.32.45/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
27394@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
27395 return cnt;
27396 }
27397
27398-static struct hv_ops hvc_beat_get_put_ops = {
27399+static const struct hv_ops hvc_beat_get_put_ops = {
27400 .get_chars = hvc_beat_get_chars,
27401 .put_chars = hvc_beat_put_chars,
27402 };
27403diff -urNp linux-2.6.32.45/drivers/char/hvc_console.c linux-2.6.32.45/drivers/char/hvc_console.c
27404--- linux-2.6.32.45/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
27405+++ linux-2.6.32.45/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
27406@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
27407 * console interfaces but can still be used as a tty device. This has to be
27408 * static because kmalloc will not work during early console init.
27409 */
27410-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27411+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27412 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
27413 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
27414
27415@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
27416 * vty adapters do NOT get an hvc_instantiate() callback since they
27417 * appear after early console init.
27418 */
27419-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
27420+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
27421 {
27422 struct hvc_struct *hp;
27423
27424@@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
27425 };
27426
27427 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
27428- struct hv_ops *ops, int outbuf_size)
27429+ const struct hv_ops *ops, int outbuf_size)
27430 {
27431 struct hvc_struct *hp;
27432 int i;
27433diff -urNp linux-2.6.32.45/drivers/char/hvc_console.h linux-2.6.32.45/drivers/char/hvc_console.h
27434--- linux-2.6.32.45/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
27435+++ linux-2.6.32.45/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
27436@@ -55,7 +55,7 @@ struct hvc_struct {
27437 int outbuf_size;
27438 int n_outbuf;
27439 uint32_t vtermno;
27440- struct hv_ops *ops;
27441+ const struct hv_ops *ops;
27442 int irq_requested;
27443 int data;
27444 struct winsize ws;
27445@@ -76,11 +76,11 @@ struct hv_ops {
27446 };
27447
27448 /* Register a vterm and a slot index for use as a console (console_init) */
27449-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
27450+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
27451
27452 /* register a vterm for hvc tty operation (module_init or hotplug add) */
27453 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
27454- struct hv_ops *ops, int outbuf_size);
27455+ const struct hv_ops *ops, int outbuf_size);
27456 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27457 extern int hvc_remove(struct hvc_struct *hp);
27458
27459diff -urNp linux-2.6.32.45/drivers/char/hvc_iseries.c linux-2.6.32.45/drivers/char/hvc_iseries.c
27460--- linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27461+++ linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27462@@ -197,7 +197,7 @@ done:
27463 return sent;
27464 }
27465
27466-static struct hv_ops hvc_get_put_ops = {
27467+static const struct hv_ops hvc_get_put_ops = {
27468 .get_chars = get_chars,
27469 .put_chars = put_chars,
27470 .notifier_add = notifier_add_irq,
27471diff -urNp linux-2.6.32.45/drivers/char/hvc_iucv.c linux-2.6.32.45/drivers/char/hvc_iucv.c
27472--- linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27473+++ linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27474@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27475
27476
27477 /* HVC operations */
27478-static struct hv_ops hvc_iucv_ops = {
27479+static const struct hv_ops hvc_iucv_ops = {
27480 .get_chars = hvc_iucv_get_chars,
27481 .put_chars = hvc_iucv_put_chars,
27482 .notifier_add = hvc_iucv_notifier_add,
27483diff -urNp linux-2.6.32.45/drivers/char/hvc_rtas.c linux-2.6.32.45/drivers/char/hvc_rtas.c
27484--- linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27485+++ linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27486@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27487 return i;
27488 }
27489
27490-static struct hv_ops hvc_rtas_get_put_ops = {
27491+static const struct hv_ops hvc_rtas_get_put_ops = {
27492 .get_chars = hvc_rtas_read_console,
27493 .put_chars = hvc_rtas_write_console,
27494 };
27495diff -urNp linux-2.6.32.45/drivers/char/hvcs.c linux-2.6.32.45/drivers/char/hvcs.c
27496--- linux-2.6.32.45/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27497+++ linux-2.6.32.45/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27498@@ -82,6 +82,7 @@
27499 #include <asm/hvcserver.h>
27500 #include <asm/uaccess.h>
27501 #include <asm/vio.h>
27502+#include <asm/local.h>
27503
27504 /*
27505 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27506@@ -269,7 +270,7 @@ struct hvcs_struct {
27507 unsigned int index;
27508
27509 struct tty_struct *tty;
27510- int open_count;
27511+ local_t open_count;
27512
27513 /*
27514 * Used to tell the driver kernel_thread what operations need to take
27515@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27516
27517 spin_lock_irqsave(&hvcsd->lock, flags);
27518
27519- if (hvcsd->open_count > 0) {
27520+ if (local_read(&hvcsd->open_count) > 0) {
27521 spin_unlock_irqrestore(&hvcsd->lock, flags);
27522 printk(KERN_INFO "HVCS: vterm state unchanged. "
27523 "The hvcs device node is still in use.\n");
27524@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27525 if ((retval = hvcs_partner_connect(hvcsd)))
27526 goto error_release;
27527
27528- hvcsd->open_count = 1;
27529+ local_set(&hvcsd->open_count, 1);
27530 hvcsd->tty = tty;
27531 tty->driver_data = hvcsd;
27532
27533@@ -1169,7 +1170,7 @@ fast_open:
27534
27535 spin_lock_irqsave(&hvcsd->lock, flags);
27536 kref_get(&hvcsd->kref);
27537- hvcsd->open_count++;
27538+ local_inc(&hvcsd->open_count);
27539 hvcsd->todo_mask |= HVCS_SCHED_READ;
27540 spin_unlock_irqrestore(&hvcsd->lock, flags);
27541
27542@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27543 hvcsd = tty->driver_data;
27544
27545 spin_lock_irqsave(&hvcsd->lock, flags);
27546- if (--hvcsd->open_count == 0) {
27547+ if (local_dec_and_test(&hvcsd->open_count)) {
27548
27549 vio_disable_interrupts(hvcsd->vdev);
27550
27551@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27552 free_irq(irq, hvcsd);
27553 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27554 return;
27555- } else if (hvcsd->open_count < 0) {
27556+ } else if (local_read(&hvcsd->open_count) < 0) {
27557 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27558 " is missmanaged.\n",
27559- hvcsd->vdev->unit_address, hvcsd->open_count);
27560+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27561 }
27562
27563 spin_unlock_irqrestore(&hvcsd->lock, flags);
27564@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27565
27566 spin_lock_irqsave(&hvcsd->lock, flags);
27567 /* Preserve this so that we know how many kref refs to put */
27568- temp_open_count = hvcsd->open_count;
27569+ temp_open_count = local_read(&hvcsd->open_count);
27570
27571 /*
27572 * Don't kref put inside the spinlock because the destruction
27573@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27574 hvcsd->tty->driver_data = NULL;
27575 hvcsd->tty = NULL;
27576
27577- hvcsd->open_count = 0;
27578+ local_set(&hvcsd->open_count, 0);
27579
27580 /* This will drop any buffered data on the floor which is OK in a hangup
27581 * scenario. */
27582@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27583 * the middle of a write operation? This is a crummy place to do this
27584 * but we want to keep it all in the spinlock.
27585 */
27586- if (hvcsd->open_count <= 0) {
27587+ if (local_read(&hvcsd->open_count) <= 0) {
27588 spin_unlock_irqrestore(&hvcsd->lock, flags);
27589 return -ENODEV;
27590 }
27591@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27592 {
27593 struct hvcs_struct *hvcsd = tty->driver_data;
27594
27595- if (!hvcsd || hvcsd->open_count <= 0)
27596+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27597 return 0;
27598
27599 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
27600diff -urNp linux-2.6.32.45/drivers/char/hvc_udbg.c linux-2.6.32.45/drivers/char/hvc_udbg.c
27601--- linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27602+++ linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27603@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27604 return i;
27605 }
27606
27607-static struct hv_ops hvc_udbg_ops = {
27608+static const struct hv_ops hvc_udbg_ops = {
27609 .get_chars = hvc_udbg_get,
27610 .put_chars = hvc_udbg_put,
27611 };
27612diff -urNp linux-2.6.32.45/drivers/char/hvc_vio.c linux-2.6.32.45/drivers/char/hvc_vio.c
27613--- linux-2.6.32.45/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27614+++ linux-2.6.32.45/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27615@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27616 return got;
27617 }
27618
27619-static struct hv_ops hvc_get_put_ops = {
27620+static const struct hv_ops hvc_get_put_ops = {
27621 .get_chars = filtered_get_chars,
27622 .put_chars = hvc_put_chars,
27623 .notifier_add = notifier_add_irq,
27624diff -urNp linux-2.6.32.45/drivers/char/hvc_xen.c linux-2.6.32.45/drivers/char/hvc_xen.c
27625--- linux-2.6.32.45/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27626+++ linux-2.6.32.45/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27627@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27628 return recv;
27629 }
27630
27631-static struct hv_ops hvc_ops = {
27632+static const struct hv_ops hvc_ops = {
27633 .get_chars = read_console,
27634 .put_chars = write_console,
27635 .notifier_add = notifier_add_irq,
27636diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c
27637--- linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27638+++ linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27639@@ -414,7 +414,7 @@ struct ipmi_smi {
27640 struct proc_dir_entry *proc_dir;
27641 char proc_dir_name[10];
27642
27643- atomic_t stats[IPMI_NUM_STATS];
27644+ atomic_unchecked_t stats[IPMI_NUM_STATS];
27645
27646 /*
27647 * run_to_completion duplicate of smb_info, smi_info
27648@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27649
27650
27651 #define ipmi_inc_stat(intf, stat) \
27652- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27653+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27654 #define ipmi_get_stat(intf, stat) \
27655- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27656+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27657
27658 static int is_lan_addr(struct ipmi_addr *addr)
27659 {
27660@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27661 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27662 init_waitqueue_head(&intf->waitq);
27663 for (i = 0; i < IPMI_NUM_STATS; i++)
27664- atomic_set(&intf->stats[i], 0);
27665+ atomic_set_unchecked(&intf->stats[i], 0);
27666
27667 intf->proc_dir = NULL;
27668
27669@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27670 struct ipmi_smi_msg smi_msg;
27671 struct ipmi_recv_msg recv_msg;
27672
27673+ pax_track_stack();
27674+
27675 si = (struct ipmi_system_interface_addr *) &addr;
27676 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27677 si->channel = IPMI_BMC_CHANNEL;
27678diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c
27679--- linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27680+++ linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27681@@ -277,7 +277,7 @@ struct smi_info {
27682 unsigned char slave_addr;
27683
27684 /* Counters and things for the proc filesystem. */
27685- atomic_t stats[SI_NUM_STATS];
27686+ atomic_unchecked_t stats[SI_NUM_STATS];
27687
27688 struct task_struct *thread;
27689
27690@@ -285,9 +285,9 @@ struct smi_info {
27691 };
27692
27693 #define smi_inc_stat(smi, stat) \
27694- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27695+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27696 #define smi_get_stat(smi, stat) \
27697- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27698+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27699
27700 #define SI_MAX_PARMS 4
27701
27702@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
27703 atomic_set(&new_smi->req_events, 0);
27704 new_smi->run_to_completion = 0;
27705 for (i = 0; i < SI_NUM_STATS; i++)
27706- atomic_set(&new_smi->stats[i], 0);
27707+ atomic_set_unchecked(&new_smi->stats[i], 0);
27708
27709 new_smi->interrupt_disabled = 0;
27710 atomic_set(&new_smi->stop_operation, 0);
27711diff -urNp linux-2.6.32.45/drivers/char/istallion.c linux-2.6.32.45/drivers/char/istallion.c
27712--- linux-2.6.32.45/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
27713+++ linux-2.6.32.45/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
27714@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
27715 * re-used for each stats call.
27716 */
27717 static comstats_t stli_comstats;
27718-static combrd_t stli_brdstats;
27719 static struct asystats stli_cdkstats;
27720
27721 /*****************************************************************************/
27722@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
27723 {
27724 struct stlibrd *brdp;
27725 unsigned int i;
27726+ combrd_t stli_brdstats;
27727
27728 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
27729 return -EFAULT;
27730@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
27731 struct stliport stli_dummyport;
27732 struct stliport *portp;
27733
27734+ pax_track_stack();
27735+
27736 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
27737 return -EFAULT;
27738 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
27739@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
27740 struct stlibrd stli_dummybrd;
27741 struct stlibrd *brdp;
27742
27743+ pax_track_stack();
27744+
27745 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
27746 return -EFAULT;
27747 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
27748diff -urNp linux-2.6.32.45/drivers/char/Kconfig linux-2.6.32.45/drivers/char/Kconfig
27749--- linux-2.6.32.45/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
27750+++ linux-2.6.32.45/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
27751@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
27752
27753 config DEVKMEM
27754 bool "/dev/kmem virtual device support"
27755- default y
27756+ default n
27757+ depends on !GRKERNSEC_KMEM
27758 help
27759 Say Y here if you want to support the /dev/kmem device. The
27760 /dev/kmem device is rarely used, but can be used for certain
27761@@ -1114,6 +1115,7 @@ config DEVPORT
27762 bool
27763 depends on !M68K
27764 depends on ISA || PCI
27765+ depends on !GRKERNSEC_KMEM
27766 default y
27767
27768 source "drivers/s390/char/Kconfig"
27769diff -urNp linux-2.6.32.45/drivers/char/keyboard.c linux-2.6.32.45/drivers/char/keyboard.c
27770--- linux-2.6.32.45/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
27771+++ linux-2.6.32.45/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
27772@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
27773 kbd->kbdmode == VC_MEDIUMRAW) &&
27774 value != KVAL(K_SAK))
27775 return; /* SAK is allowed even in raw mode */
27776+
27777+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
27778+ {
27779+ void *func = fn_handler[value];
27780+ if (func == fn_show_state || func == fn_show_ptregs ||
27781+ func == fn_show_mem)
27782+ return;
27783+ }
27784+#endif
27785+
27786 fn_handler[value](vc);
27787 }
27788
27789@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
27790 .evbit = { BIT_MASK(EV_SND) },
27791 },
27792
27793- { }, /* Terminating entry */
27794+ { 0 }, /* Terminating entry */
27795 };
27796
27797 MODULE_DEVICE_TABLE(input, kbd_ids);
27798diff -urNp linux-2.6.32.45/drivers/char/mem.c linux-2.6.32.45/drivers/char/mem.c
27799--- linux-2.6.32.45/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
27800+++ linux-2.6.32.45/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
27801@@ -18,6 +18,7 @@
27802 #include <linux/raw.h>
27803 #include <linux/tty.h>
27804 #include <linux/capability.h>
27805+#include <linux/security.h>
27806 #include <linux/ptrace.h>
27807 #include <linux/device.h>
27808 #include <linux/highmem.h>
27809@@ -35,6 +36,10 @@
27810 # include <linux/efi.h>
27811 #endif
27812
27813+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27814+extern struct file_operations grsec_fops;
27815+#endif
27816+
27817 static inline unsigned long size_inside_page(unsigned long start,
27818 unsigned long size)
27819 {
27820@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
27821
27822 while (cursor < to) {
27823 if (!devmem_is_allowed(pfn)) {
27824+#ifdef CONFIG_GRKERNSEC_KMEM
27825+ gr_handle_mem_readwrite(from, to);
27826+#else
27827 printk(KERN_INFO
27828 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27829 current->comm, from, to);
27830+#endif
27831 return 0;
27832 }
27833 cursor += PAGE_SIZE;
27834@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
27835 }
27836 return 1;
27837 }
27838+#elif defined(CONFIG_GRKERNSEC_KMEM)
27839+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27840+{
27841+ return 0;
27842+}
27843 #else
27844 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27845 {
27846@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
27847 #endif
27848
27849 while (count > 0) {
27850+ char *temp;
27851+
27852 /*
27853 * Handle first page in case it's not aligned
27854 */
27855@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
27856 if (!ptr)
27857 return -EFAULT;
27858
27859- if (copy_to_user(buf, ptr, sz)) {
27860+#ifdef CONFIG_PAX_USERCOPY
27861+ temp = kmalloc(sz, GFP_KERNEL);
27862+ if (!temp) {
27863+ unxlate_dev_mem_ptr(p, ptr);
27864+ return -ENOMEM;
27865+ }
27866+ memcpy(temp, ptr, sz);
27867+#else
27868+ temp = ptr;
27869+#endif
27870+
27871+ if (copy_to_user(buf, temp, sz)) {
27872+
27873+#ifdef CONFIG_PAX_USERCOPY
27874+ kfree(temp);
27875+#endif
27876+
27877 unxlate_dev_mem_ptr(p, ptr);
27878 return -EFAULT;
27879 }
27880
27881+#ifdef CONFIG_PAX_USERCOPY
27882+ kfree(temp);
27883+#endif
27884+
27885 unxlate_dev_mem_ptr(p, ptr);
27886
27887 buf += sz;
27888@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
27889 size_t count, loff_t *ppos)
27890 {
27891 unsigned long p = *ppos;
27892- ssize_t low_count, read, sz;
27893+ ssize_t low_count, read, sz, err = 0;
27894 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27895- int err = 0;
27896
27897 read = 0;
27898 if (p < (unsigned long) high_memory) {
27899@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
27900 }
27901 #endif
27902 while (low_count > 0) {
27903+ char *temp;
27904+
27905 sz = size_inside_page(p, low_count);
27906
27907 /*
27908@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
27909 */
27910 kbuf = xlate_dev_kmem_ptr((char *)p);
27911
27912- if (copy_to_user(buf, kbuf, sz))
27913+#ifdef CONFIG_PAX_USERCOPY
27914+ temp = kmalloc(sz, GFP_KERNEL);
27915+ if (!temp)
27916+ return -ENOMEM;
27917+ memcpy(temp, kbuf, sz);
27918+#else
27919+ temp = kbuf;
27920+#endif
27921+
27922+ err = copy_to_user(buf, temp, sz);
27923+
27924+#ifdef CONFIG_PAX_USERCOPY
27925+ kfree(temp);
27926+#endif
27927+
27928+ if (err)
27929 return -EFAULT;
27930 buf += sz;
27931 p += sz;
27932@@ -889,6 +941,9 @@ static const struct memdev {
27933 #ifdef CONFIG_CRASH_DUMP
27934 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27935 #endif
27936+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27937+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27938+#endif
27939 };
27940
27941 static int memory_open(struct inode *inode, struct file *filp)
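
Editor's note on the drivers/char/mem.c hunks above: under PAX_USERCOPY, read_mem()/read_kmem() no longer copy straight from an arbitrary kernel mapping to user space; the data is first staged in a freshly allocated buffer of exactly 'sz' bytes so the usercopy checker can validate the object's bounds. A rough user-space sketch of the bounce-buffer pattern (the helper names are stand-ins, not kernel APIs):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* stand-in for copy_to_user(): returns 0 on success */
static int fake_copy_to_user(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return 0;
}

static int read_chunk(void *user_buf, const void *kernel_ptr, size_t sz)
{
        void *temp = malloc(sz);                /* kmalloc(sz, GFP_KERNEL) in the patch */
        int err;

        if (!temp)
                return -1;                      /* -ENOMEM */
        memcpy(temp, kernel_ptr, sz);           /* stage the data in a sized heap object */
        err = fake_copy_to_user(user_buf, temp, sz);
        free(temp);                             /* kfree(temp) */
        return err ? -2 : 0;                    /* -EFAULT on failure */
}

int main(void)
{
        char src[8] = "mem0123", dst[8] = { 0 };
        if (read_chunk(dst, src, sizeof(src)) == 0)
                printf("copied: %s\n", dst);
        return 0;
}
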
27942diff -urNp linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c
27943--- linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
27944+++ linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
27945@@ -29,6 +29,7 @@
27946 #include <linux/tty_driver.h>
27947 #include <linux/tty_flip.h>
27948 #include <linux/uaccess.h>
27949+#include <asm/local.h>
27950
27951 #include "tty.h"
27952 #include "network.h"
27953@@ -51,7 +52,7 @@ struct ipw_tty {
27954 int tty_type;
27955 struct ipw_network *network;
27956 struct tty_struct *linux_tty;
27957- int open_count;
27958+ local_t open_count;
27959 unsigned int control_lines;
27960 struct mutex ipw_tty_mutex;
27961 int tx_bytes_queued;
27962@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
27963 mutex_unlock(&tty->ipw_tty_mutex);
27964 return -ENODEV;
27965 }
27966- if (tty->open_count == 0)
27967+ if (local_read(&tty->open_count) == 0)
27968 tty->tx_bytes_queued = 0;
27969
27970- tty->open_count++;
27971+ local_inc(&tty->open_count);
27972
27973 tty->linux_tty = linux_tty;
27974 linux_tty->driver_data = tty;
27975@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
27976
27977 static void do_ipw_close(struct ipw_tty *tty)
27978 {
27979- tty->open_count--;
27980-
27981- if (tty->open_count == 0) {
27982+ if (local_dec_return(&tty->open_count) == 0) {
27983 struct tty_struct *linux_tty = tty->linux_tty;
27984
27985 if (linux_tty != NULL) {
27986@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
27987 return;
27988
27989 mutex_lock(&tty->ipw_tty_mutex);
27990- if (tty->open_count == 0) {
27991+ if (local_read(&tty->open_count) == 0) {
27992 mutex_unlock(&tty->ipw_tty_mutex);
27993 return;
27994 }
27995@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
27996 return;
27997 }
27998
27999- if (!tty->open_count) {
28000+ if (!local_read(&tty->open_count)) {
28001 mutex_unlock(&tty->ipw_tty_mutex);
28002 return;
28003 }
28004@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
28005 return -ENODEV;
28006
28007 mutex_lock(&tty->ipw_tty_mutex);
28008- if (!tty->open_count) {
28009+ if (!local_read(&tty->open_count)) {
28010 mutex_unlock(&tty->ipw_tty_mutex);
28011 return -EINVAL;
28012 }
28013@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
28014 if (!tty)
28015 return -ENODEV;
28016
28017- if (!tty->open_count)
28018+ if (!local_read(&tty->open_count))
28019 return -EINVAL;
28020
28021 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
28022@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
28023 if (!tty)
28024 return 0;
28025
28026- if (!tty->open_count)
28027+ if (!local_read(&tty->open_count))
28028 return 0;
28029
28030 return tty->tx_bytes_queued;
28031@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
28032 if (!tty)
28033 return -ENODEV;
28034
28035- if (!tty->open_count)
28036+ if (!local_read(&tty->open_count))
28037 return -EINVAL;
28038
28039 return get_control_lines(tty);
28040@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
28041 if (!tty)
28042 return -ENODEV;
28043
28044- if (!tty->open_count)
28045+ if (!local_read(&tty->open_count))
28046 return -EINVAL;
28047
28048 return set_control_lines(tty, set, clear);
28049@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
28050 if (!tty)
28051 return -ENODEV;
28052
28053- if (!tty->open_count)
28054+ if (!local_read(&tty->open_count))
28055 return -EINVAL;
28056
28057 /* FIXME: Exactly how is the tty object locked here .. */
28058@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
28059 against a parallel ioctl etc */
28060 mutex_lock(&ttyj->ipw_tty_mutex);
28061 }
28062- while (ttyj->open_count)
28063+ while (local_read(&ttyj->open_count))
28064 do_ipw_close(ttyj);
28065 ipwireless_disassociate_network_ttys(network,
28066 ttyj->channel_idx);
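
Editor's note on the hvcs.c and ipwireless tty hunks above: a plain 'int open_count' is replaced by the kernel's local_t and its helpers (local_inc, local_dec_and_test, local_read). A loose user-space analogue using C11 atomics, with hypothetical names, purely to show the helper shape:

#include <stdatomic.h>
#include <stdio.h>

struct demo_tty {
        atomic_int open_count;                  /* was: int open_count */
};

static void demo_open(struct demo_tty *t)
{
        atomic_fetch_add(&t->open_count, 1);    /* local_inc(&tty->open_count) */
}

static int demo_close(struct demo_tty *t)
{
        /* local_dec_and_test(): true when the count reaches zero */
        return atomic_fetch_sub(&t->open_count, 1) - 1 == 0;
}

int main(void)
{
        struct demo_tty t = { .open_count = 0 };
        demo_open(&t);
        demo_open(&t);
        printf("last close? %d\n", demo_close(&t));  /* 0: still open */
        printf("last close? %d\n", demo_close(&t));  /* 1: fully closed */
        return 0;
}
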
28067diff -urNp linux-2.6.32.45/drivers/char/pty.c linux-2.6.32.45/drivers/char/pty.c
28068--- linux-2.6.32.45/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
28069+++ linux-2.6.32.45/drivers/char/pty.c 2011-08-05 20:33:55.000000000 -0400
28070@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
28071 register_sysctl_table(pty_root_table);
28072
28073 /* Now create the /dev/ptmx special device */
28074+ pax_open_kernel();
28075 tty_default_fops(&ptmx_fops);
28076- ptmx_fops.open = ptmx_open;
28077+ *(void **)&ptmx_fops.open = ptmx_open;
28078+ pax_close_kernel();
28079
28080 cdev_init(&ptmx_cdev, &ptmx_fops);
28081 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
28082diff -urNp linux-2.6.32.45/drivers/char/random.c linux-2.6.32.45/drivers/char/random.c
28083--- linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:37:25.000000000 -0400
28084+++ linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:43:23.000000000 -0400
28085@@ -254,8 +254,13 @@
28086 /*
28087 * Configuration information
28088 */
28089+#ifdef CONFIG_GRKERNSEC_RANDNET
28090+#define INPUT_POOL_WORDS 512
28091+#define OUTPUT_POOL_WORDS 128
28092+#else
28093 #define INPUT_POOL_WORDS 128
28094 #define OUTPUT_POOL_WORDS 32
28095+#endif
28096 #define SEC_XFER_SIZE 512
28097
28098 /*
28099@@ -292,10 +297,17 @@ static struct poolinfo {
28100 int poolwords;
28101 int tap1, tap2, tap3, tap4, tap5;
28102 } poolinfo_table[] = {
28103+#ifdef CONFIG_GRKERNSEC_RANDNET
28104+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28105+ { 512, 411, 308, 208, 104, 1 },
28106+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28107+ { 128, 103, 76, 51, 25, 1 },
28108+#else
28109 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28110 { 128, 103, 76, 51, 25, 1 },
28111 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28112 { 32, 26, 20, 14, 7, 1 },
28113+#endif
28114 #if 0
28115 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28116 { 2048, 1638, 1231, 819, 411, 1 },
28117@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28118 #include <linux/sysctl.h>
28119
28120 static int min_read_thresh = 8, min_write_thresh;
28121-static int max_read_thresh = INPUT_POOL_WORDS * 32;
28122+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28123 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28124 static char sysctl_bootid[16];
28125
28126diff -urNp linux-2.6.32.45/drivers/char/rocket.c linux-2.6.32.45/drivers/char/rocket.c
28127--- linux-2.6.32.45/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
28128+++ linux-2.6.32.45/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
28129@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
28130 struct rocket_ports tmp;
28131 int board;
28132
28133+ pax_track_stack();
28134+
28135 if (!retports)
28136 return -EFAULT;
28137 memset(&tmp, 0, sizeof (tmp));
28138diff -urNp linux-2.6.32.45/drivers/char/sonypi.c linux-2.6.32.45/drivers/char/sonypi.c
28139--- linux-2.6.32.45/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
28140+++ linux-2.6.32.45/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
28141@@ -55,6 +55,7 @@
28142 #include <asm/uaccess.h>
28143 #include <asm/io.h>
28144 #include <asm/system.h>
28145+#include <asm/local.h>
28146
28147 #include <linux/sonypi.h>
28148
28149@@ -491,7 +492,7 @@ static struct sonypi_device {
28150 spinlock_t fifo_lock;
28151 wait_queue_head_t fifo_proc_list;
28152 struct fasync_struct *fifo_async;
28153- int open_count;
28154+ local_t open_count;
28155 int model;
28156 struct input_dev *input_jog_dev;
28157 struct input_dev *input_key_dev;
28158@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
28159 static int sonypi_misc_release(struct inode *inode, struct file *file)
28160 {
28161 mutex_lock(&sonypi_device.lock);
28162- sonypi_device.open_count--;
28163+ local_dec(&sonypi_device.open_count);
28164 mutex_unlock(&sonypi_device.lock);
28165 return 0;
28166 }
28167@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
28168 lock_kernel();
28169 mutex_lock(&sonypi_device.lock);
28170 /* Flush input queue on first open */
28171- if (!sonypi_device.open_count)
28172+ if (!local_read(&sonypi_device.open_count))
28173 kfifo_reset(sonypi_device.fifo);
28174- sonypi_device.open_count++;
28175+ local_inc(&sonypi_device.open_count);
28176 mutex_unlock(&sonypi_device.lock);
28177 unlock_kernel();
28178 return 0;
28179diff -urNp linux-2.6.32.45/drivers/char/stallion.c linux-2.6.32.45/drivers/char/stallion.c
28180--- linux-2.6.32.45/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
28181+++ linux-2.6.32.45/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
28182@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
28183 struct stlport stl_dummyport;
28184 struct stlport *portp;
28185
28186+ pax_track_stack();
28187+
28188 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
28189 return -EFAULT;
28190 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
28191diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm_bios.c linux-2.6.32.45/drivers/char/tpm/tpm_bios.c
28192--- linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
28193+++ linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
28194@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
28195 event = addr;
28196
28197 if ((event->event_type == 0 && event->event_size == 0) ||
28198- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28199+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28200 return NULL;
28201
28202 return addr;
28203@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
28204 return NULL;
28205
28206 if ((event->event_type == 0 && event->event_size == 0) ||
28207- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28208+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28209 return NULL;
28210
28211 (*pos)++;
28212@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
28213 int i;
28214
28215 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28216- seq_putc(m, data[i]);
28217+ if (!seq_putc(m, data[i]))
28218+ return -EFAULT;
28219
28220 return 0;
28221 }
28222@@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
28223 log->bios_event_log_end = log->bios_event_log + len;
28224
28225 virt = acpi_os_map_memory(start, len);
28226+ if (!virt) {
28227+ kfree(log->bios_event_log);
28228+ log->bios_event_log = NULL;
28229+ return -EFAULT;
28230+ }
28231
28232 memcpy(log->bios_event_log, virt, len);
28233
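
Editor's note on the tpm_bios.c hunks above: the bounds check is rewritten so that a huge, attacker-supplied event_size cannot overflow the address arithmetic and slip past the limit test; the remaining room is computed first and the size compared against it. A small sketch of that overflow-safe form (plain size_t arithmetic, illustrative constant for the header size):

#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>

#define HDR_SIZE 16u    /* stand-in for sizeof(struct tcpa_event) */

/* offset: where the event starts inside the log; log_len: total log size */
static bool event_fits(size_t offset, size_t event_size, size_t log_len)
{
        if (offset > log_len || log_len - offset < HDR_SIZE)
                return false;                   /* no room even for the header */
        /* was: offset + HDR_SIZE + event_size >= log_len  (can wrap around) */
        return event_size < log_len - offset - HDR_SIZE;
}

int main(void)
{
        printf("%d\n", event_fits(0, 32, 4096));          /* 1: fits */
        printf("%d\n", event_fits(0, (size_t)-1, 4096));  /* 0: huge size rejected */
        return 0;
}
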
28234diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm.c linux-2.6.32.45/drivers/char/tpm/tpm.c
28235--- linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
28236+++ linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
28237@@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
28238 chip->vendor.req_complete_val)
28239 goto out_recv;
28240
28241- if ((status == chip->vendor.req_canceled)) {
28242+ if (status == chip->vendor.req_canceled) {
28243 dev_err(chip->dev, "Operation Canceled\n");
28244 rc = -ECANCELED;
28245 goto out;
28246@@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
28247
28248 struct tpm_chip *chip = dev_get_drvdata(dev);
28249
28250+ pax_track_stack();
28251+
28252 tpm_cmd.header.in = tpm_readpubek_header;
28253 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
28254 "attempting to read the PUBEK");
28255diff -urNp linux-2.6.32.45/drivers/char/tty_io.c linux-2.6.32.45/drivers/char/tty_io.c
28256--- linux-2.6.32.45/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
28257+++ linux-2.6.32.45/drivers/char/tty_io.c 2011-08-05 20:33:55.000000000 -0400
28258@@ -2582,8 +2582,10 @@ long tty_ioctl(struct file *file, unsign
28259 return retval;
28260 }
28261
28262+EXPORT_SYMBOL(tty_ioctl);
28263+
28264 #ifdef CONFIG_COMPAT
28265-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
28266+long tty_compat_ioctl(struct file *file, unsigned int cmd,
28267 unsigned long arg)
28268 {
28269 struct inode *inode = file->f_dentry->d_inode;
28270@@ -2607,6 +2609,8 @@ static long tty_compat_ioctl(struct file
28271
28272 return retval;
28273 }
28274+
28275+EXPORT_SYMBOL(tty_compat_ioctl);
28276 #endif
28277
28278 /*
28279@@ -3052,7 +3056,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
28280
28281 void tty_default_fops(struct file_operations *fops)
28282 {
28283- *fops = tty_fops;
28284+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
28285 }
28286
28287 /*
28288diff -urNp linux-2.6.32.45/drivers/char/tty_ldisc.c linux-2.6.32.45/drivers/char/tty_ldisc.c
28289--- linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
28290+++ linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
28291@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
28292 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
28293 struct tty_ldisc_ops *ldo = ld->ops;
28294
28295- ldo->refcount--;
28296+ atomic_dec(&ldo->refcount);
28297 module_put(ldo->owner);
28298 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28299
28300@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
28301 spin_lock_irqsave(&tty_ldisc_lock, flags);
28302 tty_ldiscs[disc] = new_ldisc;
28303 new_ldisc->num = disc;
28304- new_ldisc->refcount = 0;
28305+ atomic_set(&new_ldisc->refcount, 0);
28306 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28307
28308 return ret;
28309@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
28310 return -EINVAL;
28311
28312 spin_lock_irqsave(&tty_ldisc_lock, flags);
28313- if (tty_ldiscs[disc]->refcount)
28314+ if (atomic_read(&tty_ldiscs[disc]->refcount))
28315 ret = -EBUSY;
28316 else
28317 tty_ldiscs[disc] = NULL;
28318@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
28319 if (ldops) {
28320 ret = ERR_PTR(-EAGAIN);
28321 if (try_module_get(ldops->owner)) {
28322- ldops->refcount++;
28323+ atomic_inc(&ldops->refcount);
28324 ret = ldops;
28325 }
28326 }
28327@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
28328 unsigned long flags;
28329
28330 spin_lock_irqsave(&tty_ldisc_lock, flags);
28331- ldops->refcount--;
28332+ atomic_dec(&ldops->refcount);
28333 module_put(ldops->owner);
28334 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28335 }
28336diff -urNp linux-2.6.32.45/drivers/char/virtio_console.c linux-2.6.32.45/drivers/char/virtio_console.c
28337--- linux-2.6.32.45/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28338+++ linux-2.6.32.45/drivers/char/virtio_console.c 2011-08-05 20:33:55.000000000 -0400
28339@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *
28340 * virtqueue, so we let the drivers do some boutique early-output thing. */
28341 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
28342 {
28343- virtio_cons.put_chars = put_chars;
28344+ pax_open_kernel();
28345+ *(void **)&virtio_cons.put_chars = put_chars;
28346+ pax_close_kernel();
28347 return hvc_instantiate(0, 0, &virtio_cons);
28348 }
28349
28350@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(stru
28351 out_vq = vqs[1];
28352
28353 /* Start using the new console output. */
28354- virtio_cons.get_chars = get_chars;
28355- virtio_cons.put_chars = put_chars;
28356- virtio_cons.notifier_add = notifier_add_vio;
28357- virtio_cons.notifier_del = notifier_del_vio;
28358- virtio_cons.notifier_hangup = notifier_del_vio;
28359+ pax_open_kernel();
28360+ *(void **)&virtio_cons.get_chars = get_chars;
28361+ *(void **)&virtio_cons.put_chars = put_chars;
28362+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
28363+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
28364+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
28365+ pax_close_kernel();
28366
28367 /* The first argument of hvc_alloc() is the virtual console number, so
28368 * we use zero. The second argument is the parameter for the
28369diff -urNp linux-2.6.32.45/drivers/char/vt.c linux-2.6.32.45/drivers/char/vt.c
28370--- linux-2.6.32.45/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28371+++ linux-2.6.32.45/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28372@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28373
28374 static void notify_write(struct vc_data *vc, unsigned int unicode)
28375 {
28376- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28377+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
28378 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28379 }
28380
28381diff -urNp linux-2.6.32.45/drivers/char/vt_ioctl.c linux-2.6.32.45/drivers/char/vt_ioctl.c
28382--- linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28383+++ linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28384@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28385 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28386 return -EFAULT;
28387
28388- if (!capable(CAP_SYS_TTY_CONFIG))
28389- perm = 0;
28390-
28391 switch (cmd) {
28392 case KDGKBENT:
28393 key_map = key_maps[s];
28394@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28395 val = (i ? K_HOLE : K_NOSUCHMAP);
28396 return put_user(val, &user_kbe->kb_value);
28397 case KDSKBENT:
28398+ if (!capable(CAP_SYS_TTY_CONFIG))
28399+ perm = 0;
28400+
28401 if (!perm)
28402 return -EPERM;
28403+
28404 if (!i && v == K_NOSUCHMAP) {
28405 /* deallocate map */
28406 key_map = key_maps[s];
28407@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28408 int i, j, k;
28409 int ret;
28410
28411- if (!capable(CAP_SYS_TTY_CONFIG))
28412- perm = 0;
28413-
28414 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28415 if (!kbs) {
28416 ret = -ENOMEM;
28417@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28418 kfree(kbs);
28419 return ((p && *p) ? -EOVERFLOW : 0);
28420 case KDSKBSENT:
28421+ if (!capable(CAP_SYS_TTY_CONFIG))
28422+ perm = 0;
28423+
28424 if (!perm) {
28425 ret = -EPERM;
28426 goto reterr;
28427diff -urNp linux-2.6.32.45/drivers/cpufreq/cpufreq.c linux-2.6.32.45/drivers/cpufreq/cpufreq.c
28428--- linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28429+++ linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28430@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28431 complete(&policy->kobj_unregister);
28432 }
28433
28434-static struct sysfs_ops sysfs_ops = {
28435+static const struct sysfs_ops sysfs_ops = {
28436 .show = show,
28437 .store = store,
28438 };
28439diff -urNp linux-2.6.32.45/drivers/cpuidle/sysfs.c linux-2.6.32.45/drivers/cpuidle/sysfs.c
28440--- linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28441+++ linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28442@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28443 return ret;
28444 }
28445
28446-static struct sysfs_ops cpuidle_sysfs_ops = {
28447+static const struct sysfs_ops cpuidle_sysfs_ops = {
28448 .show = cpuidle_show,
28449 .store = cpuidle_store,
28450 };
28451@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28452 return ret;
28453 }
28454
28455-static struct sysfs_ops cpuidle_state_sysfs_ops = {
28456+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28457 .show = cpuidle_state_show,
28458 };
28459
28460@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28461 .release = cpuidle_state_sysfs_release,
28462 };
28463
28464-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28465+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28466 {
28467 kobject_put(&device->kobjs[i]->kobj);
28468 wait_for_completion(&device->kobjs[i]->kobj_unregister);
28469diff -urNp linux-2.6.32.45/drivers/crypto/hifn_795x.c linux-2.6.32.45/drivers/crypto/hifn_795x.c
28470--- linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28471+++ linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28472@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28473 0xCA, 0x34, 0x2B, 0x2E};
28474 struct scatterlist sg;
28475
28476+ pax_track_stack();
28477+
28478 memset(src, 0, sizeof(src));
28479 memset(ctx.key, 0, sizeof(ctx.key));
28480
28481diff -urNp linux-2.6.32.45/drivers/crypto/padlock-aes.c linux-2.6.32.45/drivers/crypto/padlock-aes.c
28482--- linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28483+++ linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28484@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28485 struct crypto_aes_ctx gen_aes;
28486 int cpu;
28487
28488+ pax_track_stack();
28489+
28490 if (key_len % 8) {
28491 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28492 return -EINVAL;
28493diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.c linux-2.6.32.45/drivers/dma/ioat/dma.c
28494--- linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28495+++ linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28496@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28497 return entry->show(&chan->common, page);
28498 }
28499
28500-struct sysfs_ops ioat_sysfs_ops = {
28501+const struct sysfs_ops ioat_sysfs_ops = {
28502 .show = ioat_attr_show,
28503 };
28504
28505diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.h linux-2.6.32.45/drivers/dma/ioat/dma.h
28506--- linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28507+++ linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28508@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28509 unsigned long *phys_complete);
28510 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28511 void ioat_kobject_del(struct ioatdma_device *device);
28512-extern struct sysfs_ops ioat_sysfs_ops;
28513+extern const struct sysfs_ops ioat_sysfs_ops;
28514 extern struct ioat_sysfs_entry ioat_version_attr;
28515 extern struct ioat_sysfs_entry ioat_cap_attr;
28516 #endif /* IOATDMA_H */
28517diff -urNp linux-2.6.32.45/drivers/edac/edac_device_sysfs.c linux-2.6.32.45/drivers/edac/edac_device_sysfs.c
28518--- linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28519+++ linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28520@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28521 }
28522
28523 /* edac_dev file operations for an 'ctl_info' */
28524-static struct sysfs_ops device_ctl_info_ops = {
28525+static const struct sysfs_ops device_ctl_info_ops = {
28526 .show = edac_dev_ctl_info_show,
28527 .store = edac_dev_ctl_info_store
28528 };
28529@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28530 }
28531
28532 /* edac_dev file operations for an 'instance' */
28533-static struct sysfs_ops device_instance_ops = {
28534+static const struct sysfs_ops device_instance_ops = {
28535 .show = edac_dev_instance_show,
28536 .store = edac_dev_instance_store
28537 };
28538@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28539 }
28540
28541 /* edac_dev file operations for a 'block' */
28542-static struct sysfs_ops device_block_ops = {
28543+static const struct sysfs_ops device_block_ops = {
28544 .show = edac_dev_block_show,
28545 .store = edac_dev_block_store
28546 };
28547diff -urNp linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c
28548--- linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28549+++ linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28550@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28551 return -EIO;
28552 }
28553
28554-static struct sysfs_ops csrowfs_ops = {
28555+static const struct sysfs_ops csrowfs_ops = {
28556 .show = csrowdev_show,
28557 .store = csrowdev_store
28558 };
28559@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28560 }
28561
28562 /* Intermediate show/store table */
28563-static struct sysfs_ops mci_ops = {
28564+static const struct sysfs_ops mci_ops = {
28565 .show = mcidev_show,
28566 .store = mcidev_store
28567 };
28568diff -urNp linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c
28569--- linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28570+++ linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28571@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28572 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28573 static int edac_pci_poll_msec = 1000; /* one second workq period */
28574
28575-static atomic_t pci_parity_count = ATOMIC_INIT(0);
28576-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28577+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28578+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28579
28580 static struct kobject *edac_pci_top_main_kobj;
28581 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28582@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28583 }
28584
28585 /* fs_ops table */
28586-static struct sysfs_ops pci_instance_ops = {
28587+static const struct sysfs_ops pci_instance_ops = {
28588 .show = edac_pci_instance_show,
28589 .store = edac_pci_instance_store
28590 };
28591@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28592 return -EIO;
28593 }
28594
28595-static struct sysfs_ops edac_pci_sysfs_ops = {
28596+static const struct sysfs_ops edac_pci_sysfs_ops = {
28597 .show = edac_pci_dev_show,
28598 .store = edac_pci_dev_store
28599 };
28600@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28601 edac_printk(KERN_CRIT, EDAC_PCI,
28602 "Signaled System Error on %s\n",
28603 pci_name(dev));
28604- atomic_inc(&pci_nonparity_count);
28605+ atomic_inc_unchecked(&pci_nonparity_count);
28606 }
28607
28608 if (status & (PCI_STATUS_PARITY)) {
28609@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28610 "Master Data Parity Error on %s\n",
28611 pci_name(dev));
28612
28613- atomic_inc(&pci_parity_count);
28614+ atomic_inc_unchecked(&pci_parity_count);
28615 }
28616
28617 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28618@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28619 "Detected Parity Error on %s\n",
28620 pci_name(dev));
28621
28622- atomic_inc(&pci_parity_count);
28623+ atomic_inc_unchecked(&pci_parity_count);
28624 }
28625 }
28626
28627@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28628 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28629 "Signaled System Error on %s\n",
28630 pci_name(dev));
28631- atomic_inc(&pci_nonparity_count);
28632+ atomic_inc_unchecked(&pci_nonparity_count);
28633 }
28634
28635 if (status & (PCI_STATUS_PARITY)) {
28636@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28637 "Master Data Parity Error on "
28638 "%s\n", pci_name(dev));
28639
28640- atomic_inc(&pci_parity_count);
28641+ atomic_inc_unchecked(&pci_parity_count);
28642 }
28643
28644 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28645@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28646 "Detected Parity Error on %s\n",
28647 pci_name(dev));
28648
28649- atomic_inc(&pci_parity_count);
28650+ atomic_inc_unchecked(&pci_parity_count);
28651 }
28652 }
28653 }
28654@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28655 if (!check_pci_errors)
28656 return;
28657
28658- before_count = atomic_read(&pci_parity_count);
28659+ before_count = atomic_read_unchecked(&pci_parity_count);
28660
28661 /* scan all PCI devices looking for a Parity Error on devices and
28662 * bridges.
28663@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28664 /* Only if operator has selected panic on PCI Error */
28665 if (edac_pci_get_panic_on_pe()) {
28666 /* If the count is different 'after' from 'before' */
28667- if (before_count != atomic_read(&pci_parity_count))
28668+ if (before_count != atomic_read_unchecked(&pci_parity_count))
28669 panic("EDAC: PCI Parity Error");
28670 }
28671 }
28672diff -urNp linux-2.6.32.45/drivers/firewire/core-card.c linux-2.6.32.45/drivers/firewire/core-card.c
28673--- linux-2.6.32.45/drivers/firewire/core-card.c 2011-03-27 14:31:47.000000000 -0400
28674+++ linux-2.6.32.45/drivers/firewire/core-card.c 2011-08-05 20:33:55.000000000 -0400
28675@@ -569,8 +569,10 @@ void fw_core_remove_card(struct fw_card
28676 mutex_unlock(&card_mutex);
28677
28678 /* Switch off most of the card driver interface. */
28679- dummy_driver.free_iso_context = card->driver->free_iso_context;
28680- dummy_driver.stop_iso = card->driver->stop_iso;
28681+ pax_open_kernel();
28682+ *(void **)&dummy_driver.free_iso_context = card->driver->free_iso_context;
28683+ *(void **)&dummy_driver.stop_iso = card->driver->stop_iso;
28684+ pax_close_kernel();
28685 card->driver = &dummy_driver;
28686
28687 fw_destroy_nodes(card);
28688diff -urNp linux-2.6.32.45/drivers/firewire/core-cdev.c linux-2.6.32.45/drivers/firewire/core-cdev.c
28689--- linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
28690+++ linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
28691@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
28692 int ret;
28693
28694 if ((request->channels == 0 && request->bandwidth == 0) ||
28695- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
28696- request->bandwidth < 0)
28697+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
28698 return -EINVAL;
28699
28700 r = kmalloc(sizeof(*r), GFP_KERNEL);
28701diff -urNp linux-2.6.32.45/drivers/firewire/core-transaction.c linux-2.6.32.45/drivers/firewire/core-transaction.c
28702--- linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
28703+++ linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
28704@@ -36,6 +36,7 @@
28705 #include <linux/string.h>
28706 #include <linux/timer.h>
28707 #include <linux/types.h>
28708+#include <linux/sched.h>
28709
28710 #include <asm/byteorder.h>
28711
28712@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
28713 struct transaction_callback_data d;
28714 struct fw_transaction t;
28715
28716+ pax_track_stack();
28717+
28718 init_completion(&d.done);
28719 d.payload = payload;
28720 fw_send_request(card, &t, tcode, destination_id, generation, speed,
28721diff -urNp linux-2.6.32.45/drivers/firmware/dmi_scan.c linux-2.6.32.45/drivers/firmware/dmi_scan.c
28722--- linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
28723+++ linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
28724@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
28725 }
28726 }
28727 else {
28728- /*
28729- * no iounmap() for that ioremap(); it would be a no-op, but
28730- * it's so early in setup that sucker gets confused into doing
28731- * what it shouldn't if we actually call it.
28732- */
28733 p = dmi_ioremap(0xF0000, 0x10000);
28734 if (p == NULL)
28735 goto error;
28736diff -urNp linux-2.6.32.45/drivers/firmware/edd.c linux-2.6.32.45/drivers/firmware/edd.c
28737--- linux-2.6.32.45/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
28738+++ linux-2.6.32.45/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
28739@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
28740 return ret;
28741 }
28742
28743-static struct sysfs_ops edd_attr_ops = {
28744+static const struct sysfs_ops edd_attr_ops = {
28745 .show = edd_attr_show,
28746 };
28747
28748diff -urNp linux-2.6.32.45/drivers/firmware/efivars.c linux-2.6.32.45/drivers/firmware/efivars.c
28749--- linux-2.6.32.45/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
28750+++ linux-2.6.32.45/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
28751@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
28752 return ret;
28753 }
28754
28755-static struct sysfs_ops efivar_attr_ops = {
28756+static const struct sysfs_ops efivar_attr_ops = {
28757 .show = efivar_attr_show,
28758 .store = efivar_attr_store,
28759 };
28760diff -urNp linux-2.6.32.45/drivers/firmware/iscsi_ibft.c linux-2.6.32.45/drivers/firmware/iscsi_ibft.c
28761--- linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
28762+++ linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
28763@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
28764 return ret;
28765 }
28766
28767-static struct sysfs_ops ibft_attr_ops = {
28768+static const struct sysfs_ops ibft_attr_ops = {
28769 .show = ibft_show_attribute,
28770 };
28771
28772diff -urNp linux-2.6.32.45/drivers/firmware/memmap.c linux-2.6.32.45/drivers/firmware/memmap.c
28773--- linux-2.6.32.45/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
28774+++ linux-2.6.32.45/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
28775@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
28776 NULL
28777 };
28778
28779-static struct sysfs_ops memmap_attr_ops = {
28780+static const struct sysfs_ops memmap_attr_ops = {
28781 .show = memmap_attr_show,
28782 };
28783
28784diff -urNp linux-2.6.32.45/drivers/gpio/vr41xx_giu.c linux-2.6.32.45/drivers/gpio/vr41xx_giu.c
28785--- linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
28786+++ linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
28787@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
28788 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
28789 maskl, pendl, maskh, pendh);
28790
28791- atomic_inc(&irq_err_count);
28792+ atomic_inc_unchecked(&irq_err_count);
28793
28794 return -EINVAL;
28795 }
28796diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c
28797--- linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
28798+++ linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
28799@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
28800 struct drm_crtc *tmp;
28801 int crtc_mask = 1;
28802
28803- WARN(!crtc, "checking null crtc?");
28804+ BUG_ON(!crtc);
28805
28806 dev = crtc->dev;
28807
28808@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
28809
28810 adjusted_mode = drm_mode_duplicate(dev, mode);
28811
28812+ pax_track_stack();
28813+
28814 crtc->enabled = drm_helper_crtc_in_use(crtc);
28815
28816 if (!crtc->enabled)
28817diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_drv.c linux-2.6.32.45/drivers/gpu/drm/drm_drv.c
28818--- linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
28819+++ linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
28820@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
28821 char *kdata = NULL;
28822
28823 atomic_inc(&dev->ioctl_count);
28824- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28825+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28826 ++file_priv->ioctl_count;
28827
28828 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28829diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_fops.c linux-2.6.32.45/drivers/gpu/drm/drm_fops.c
28830--- linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
28831+++ linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
28832@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
28833 }
28834
28835 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28836- atomic_set(&dev->counts[i], 0);
28837+ atomic_set_unchecked(&dev->counts[i], 0);
28838
28839 dev->sigdata.lock = NULL;
28840
28841@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
28842
28843 retcode = drm_open_helper(inode, filp, dev);
28844 if (!retcode) {
28845- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28846+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28847 spin_lock(&dev->count_lock);
28848- if (!dev->open_count++) {
28849+ if (local_inc_return(&dev->open_count) == 1) {
28850 spin_unlock(&dev->count_lock);
28851 retcode = drm_setup(dev);
28852 goto out;
28853@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
28854
28855 lock_kernel();
28856
28857- DRM_DEBUG("open_count = %d\n", dev->open_count);
28858+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28859
28860 if (dev->driver->preclose)
28861 dev->driver->preclose(dev, file_priv);
28862@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
28863 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28864 task_pid_nr(current),
28865 (long)old_encode_dev(file_priv->minor->device),
28866- dev->open_count);
28867+ local_read(&dev->open_count));
28868
28869 /* if the master has gone away we can't do anything with the lock */
28870 if (file_priv->minor->master)
28871@@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
28872 * End inline drm_release
28873 */
28874
28875- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28876+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28877 spin_lock(&dev->count_lock);
28878- if (!--dev->open_count) {
28879+ if (local_dec_and_test(&dev->open_count)) {
28880 if (atomic_read(&dev->ioctl_count)) {
28881 DRM_ERROR("Device busy: %d\n",
28882 atomic_read(&dev->ioctl_count));
28883diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_gem.c linux-2.6.32.45/drivers/gpu/drm/drm_gem.c
28884--- linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
28885+++ linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
28886@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
28887 spin_lock_init(&dev->object_name_lock);
28888 idr_init(&dev->object_name_idr);
28889 atomic_set(&dev->object_count, 0);
28890- atomic_set(&dev->object_memory, 0);
28891+ atomic_set_unchecked(&dev->object_memory, 0);
28892 atomic_set(&dev->pin_count, 0);
28893- atomic_set(&dev->pin_memory, 0);
28894+ atomic_set_unchecked(&dev->pin_memory, 0);
28895 atomic_set(&dev->gtt_count, 0);
28896- atomic_set(&dev->gtt_memory, 0);
28897+ atomic_set_unchecked(&dev->gtt_memory, 0);
28898
28899 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
28900 if (!mm) {
28901@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
28902 goto fput;
28903 }
28904 atomic_inc(&dev->object_count);
28905- atomic_add(obj->size, &dev->object_memory);
28906+ atomic_add_unchecked(obj->size, &dev->object_memory);
28907 return obj;
28908 fput:
28909 fput(obj->filp);
28910@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
28911
28912 fput(obj->filp);
28913 atomic_dec(&dev->object_count);
28914- atomic_sub(obj->size, &dev->object_memory);
28915+ atomic_sub_unchecked(obj->size, &dev->object_memory);
28916 kfree(obj);
28917 }
28918 EXPORT_SYMBOL(drm_gem_object_free);
28919diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_info.c linux-2.6.32.45/drivers/gpu/drm/drm_info.c
28920--- linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
28921+++ linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
28922@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
28923 struct drm_local_map *map;
28924 struct drm_map_list *r_list;
28925
28926- /* Hardcoded from _DRM_FRAME_BUFFER,
28927- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28928- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28929- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28930+ static const char * const types[] = {
28931+ [_DRM_FRAME_BUFFER] = "FB",
28932+ [_DRM_REGISTERS] = "REG",
28933+ [_DRM_SHM] = "SHM",
28934+ [_DRM_AGP] = "AGP",
28935+ [_DRM_SCATTER_GATHER] = "SG",
28936+ [_DRM_CONSISTENT] = "PCI",
28937+ [_DRM_GEM] = "GEM" };
28938 const char *type;
28939 int i;
28940
28941@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
28942 map = r_list->map;
28943 if (!map)
28944 continue;
28945- if (map->type < 0 || map->type > 5)
28946+ if (map->type >= ARRAY_SIZE(types))
28947 type = "??";
28948 else
28949 type = types[map->type];
28950@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
28951 struct drm_device *dev = node->minor->dev;
28952
28953 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
28954- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
28955+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
28956 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
28957- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
28958- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
28959+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
28960+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
28961 seq_printf(m, "%d gtt total\n", dev->gtt_total);
28962 return 0;
28963 }
28964@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
28965 mutex_lock(&dev->struct_mutex);
28966 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
28967 atomic_read(&dev->vma_count),
28968+#ifdef CONFIG_GRKERNSEC_HIDESYM
28969+ NULL, 0);
28970+#else
28971 high_memory, (u64)virt_to_phys(high_memory));
28972+#endif
28973
28974 list_for_each_entry(pt, &dev->vmalist, head) {
28975 vma = pt->vma;
28976@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
28977 continue;
28978 seq_printf(m,
28979 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
28980- pt->pid, vma->vm_start, vma->vm_end,
28981+ pt->pid,
28982+#ifdef CONFIG_GRKERNSEC_HIDESYM
28983+ 0, 0,
28984+#else
28985+ vma->vm_start, vma->vm_end,
28986+#endif
28987 vma->vm_flags & VM_READ ? 'r' : '-',
28988 vma->vm_flags & VM_WRITE ? 'w' : '-',
28989 vma->vm_flags & VM_EXEC ? 'x' : '-',
28990 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28991 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28992 vma->vm_flags & VM_IO ? 'i' : '-',
28993+#ifdef CONFIG_GRKERNSEC_HIDESYM
28994+ 0);
28995+#else
28996 vma->vm_pgoff);
28997+#endif
28998
28999 #if defined(__i386__)
29000 pgprot = pgprot_val(vma->vm_page_prot);
29001diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c
29002--- linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
29003+++ linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
29004@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
29005 stats->data[i].value =
29006 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29007 else
29008- stats->data[i].value = atomic_read(&dev->counts[i]);
29009+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29010 stats->data[i].type = dev->types[i];
29011 }
29012
29013diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_lock.c linux-2.6.32.45/drivers/gpu/drm/drm_lock.c
29014--- linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
29015+++ linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
29016@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
29017 if (drm_lock_take(&master->lock, lock->context)) {
29018 master->lock.file_priv = file_priv;
29019 master->lock.lock_time = jiffies;
29020- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29021+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29022 break; /* Got lock */
29023 }
29024
29025@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
29026 return -EINVAL;
29027 }
29028
29029- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29030+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29031
29032 /* kernel_context_switch isn't used by any of the x86 drm
29033 * modules but is required by the Sparc driver.
29034diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c
29035--- linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
29036+++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
29037@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
29038 dma->buflist[vertex->idx],
29039 vertex->discard, vertex->used);
29040
29041- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29042- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29043+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29044+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29045 sarea_priv->last_enqueue = dev_priv->counter - 1;
29046 sarea_priv->last_dispatch = (int)hw_status[5];
29047
29048@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
29049 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29050 mc->last_render);
29051
29052- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29053- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29054+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29055+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29056 sarea_priv->last_enqueue = dev_priv->counter - 1;
29057 sarea_priv->last_dispatch = (int)hw_status[5];
29058
29059diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h
29060--- linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
29061+++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
29062@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29063 int page_flipping;
29064
29065 wait_queue_head_t irq_queue;
29066- atomic_t irq_received;
29067- atomic_t irq_emitted;
29068+ atomic_unchecked_t irq_received;
29069+ atomic_unchecked_t irq_emitted;
29070
29071 int front_offset;
29072 } drm_i810_private_t;
29073diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h
29074--- linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
29075+++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
29076@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
29077 int page_flipping;
29078
29079 wait_queue_head_t irq_queue;
29080- atomic_t irq_received;
29081- atomic_t irq_emitted;
29082+ atomic_unchecked_t irq_received;
29083+ atomic_unchecked_t irq_emitted;
29084
29085 int use_mi_batchbuffer_start;
29086
29087diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c
29088--- linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
29089+++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
29090@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
29091
29092 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
29093
29094- atomic_inc(&dev_priv->irq_received);
29095+ atomic_inc_unchecked(&dev_priv->irq_received);
29096 wake_up_interruptible(&dev_priv->irq_queue);
29097
29098 return IRQ_HANDLED;
29099@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
29100
29101 DRM_DEBUG("%s\n", __func__);
29102
29103- atomic_inc(&dev_priv->irq_emitted);
29104+ atomic_inc_unchecked(&dev_priv->irq_emitted);
29105
29106 BEGIN_LP_RING(2);
29107 OUT_RING(0);
29108 OUT_RING(GFX_OP_USER_INTERRUPT);
29109 ADVANCE_LP_RING();
29110
29111- return atomic_read(&dev_priv->irq_emitted);
29112+ return atomic_read_unchecked(&dev_priv->irq_emitted);
29113 }
29114
29115 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
29116@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
29117
29118 DRM_DEBUG("%s\n", __func__);
29119
29120- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29121+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29122 return 0;
29123
29124 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
29125@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
29126
29127 for (;;) {
29128 __set_current_state(TASK_INTERRUPTIBLE);
29129- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29130+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29131 break;
29132 if ((signed)(end - jiffies) <= 0) {
29133 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
29134@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
29135 I830_WRITE16(I830REG_HWSTAM, 0xffff);
29136 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
29137 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
29138- atomic_set(&dev_priv->irq_received, 0);
29139- atomic_set(&dev_priv->irq_emitted, 0);
29140+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29141+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
29142 init_waitqueue_head(&dev_priv->irq_queue);
29143 }
29144
29145diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c
29146--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
29147+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
29148@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
29149 }
29150 }
29151
29152-struct intel_dvo_dev_ops ch7017_ops = {
29153+const struct intel_dvo_dev_ops ch7017_ops = {
29154 .init = ch7017_init,
29155 .detect = ch7017_detect,
29156 .mode_valid = ch7017_mode_valid,
29157diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c
29158--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
29159+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
29160@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
29161 }
29162 }
29163
29164-struct intel_dvo_dev_ops ch7xxx_ops = {
29165+const struct intel_dvo_dev_ops ch7xxx_ops = {
29166 .init = ch7xxx_init,
29167 .detect = ch7xxx_detect,
29168 .mode_valid = ch7xxx_mode_valid,
29169diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h
29170--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
29171+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
29172@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
29173 *
29174 * \return singly-linked list of modes or NULL if no modes found.
29175 */
29176- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
29177+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
29178
29179 /**
29180 * Clean up driver-specific bits of the output
29181 */
29182- void (*destroy) (struct intel_dvo_device *dvo);
29183+ void (* const destroy) (struct intel_dvo_device *dvo);
29184
29185 /**
29186 * Debugging hook to dump device registers to log file
29187 */
29188- void (*dump_regs)(struct intel_dvo_device *dvo);
29189+ void (* const dump_regs)(struct intel_dvo_device *dvo);
29190 };
29191
29192-extern struct intel_dvo_dev_ops sil164_ops;
29193-extern struct intel_dvo_dev_ops ch7xxx_ops;
29194-extern struct intel_dvo_dev_ops ivch_ops;
29195-extern struct intel_dvo_dev_ops tfp410_ops;
29196-extern struct intel_dvo_dev_ops ch7017_ops;
29197+extern const struct intel_dvo_dev_ops sil164_ops;
29198+extern const struct intel_dvo_dev_ops ch7xxx_ops;
29199+extern const struct intel_dvo_dev_ops ivch_ops;
29200+extern const struct intel_dvo_dev_ops tfp410_ops;
29201+extern const struct intel_dvo_dev_ops ch7017_ops;
29202
29203 #endif /* _INTEL_DVO_H */
29204diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c
29205--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
29206+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
29207@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
29208 }
29209 }
29210
29211-struct intel_dvo_dev_ops ivch_ops= {
29212+const struct intel_dvo_dev_ops ivch_ops= {
29213 .init = ivch_init,
29214 .dpms = ivch_dpms,
29215 .save = ivch_save,
29216diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c
29217--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
29218+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
29219@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
29220 }
29221 }
29222
29223-struct intel_dvo_dev_ops sil164_ops = {
29224+const struct intel_dvo_dev_ops sil164_ops = {
29225 .init = sil164_init,
29226 .detect = sil164_detect,
29227 .mode_valid = sil164_mode_valid,
29228diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c
29229--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
29230+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
29231@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
29232 }
29233 }
29234
29235-struct intel_dvo_dev_ops tfp410_ops = {
29236+const struct intel_dvo_dev_ops tfp410_ops = {
29237 .init = tfp410_init,
29238 .detect = tfp410_detect,
29239 .mode_valid = tfp410_mode_valid,
29240diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c
29241--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
29242+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
29243@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
29244 I915_READ(GTIMR));
29245 }
29246 seq_printf(m, "Interrupts received: %d\n",
29247- atomic_read(&dev_priv->irq_received));
29248+ atomic_read_unchecked(&dev_priv->irq_received));
29249 if (dev_priv->hw_status_page != NULL) {
29250 seq_printf(m, "Current sequence: %d\n",
29251 i915_get_gem_seqno(dev));
29252diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c
29253--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
29254+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
29255@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
29256 return i915_resume(dev);
29257 }
29258
29259-static struct vm_operations_struct i915_gem_vm_ops = {
29260+static const struct vm_operations_struct i915_gem_vm_ops = {
29261 .fault = i915_gem_fault,
29262 .open = drm_gem_vm_open,
29263 .close = drm_gem_vm_close,
29264diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h
29265--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
29266+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:33:55.000000000 -0400
29267@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
29268 /* display clock increase/decrease */
29269 /* pll clock increase/decrease */
29270 /* clock gating init */
29271-};
29272+} __no_const;
29273
29274 typedef struct drm_i915_private {
29275 struct drm_device *dev;
29276@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
29277 int page_flipping;
29278
29279 wait_queue_head_t irq_queue;
29280- atomic_t irq_received;
29281+ atomic_unchecked_t irq_received;
29282 /** Protects user_irq_refcount and irq_mask_reg */
29283 spinlock_t user_irq_lock;
29284 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
29285diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c
29286--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
29287+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
29288@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
29289
29290 args->aper_size = dev->gtt_total;
29291 args->aper_available_size = (args->aper_size -
29292- atomic_read(&dev->pin_memory));
29293+ atomic_read_unchecked(&dev->pin_memory));
29294
29295 return 0;
29296 }
29297@@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
29298 return -EINVAL;
29299 }
29300
29301+ if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29302+ drm_gem_object_unreference(obj);
29303+ return -EFAULT;
29304+ }
29305+
29306 if (i915_gem_object_needs_bit17_swizzle(obj)) {
29307 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
29308 } else {
29309@@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
29310 return -EINVAL;
29311 }
29312
29313+ if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29314+ drm_gem_object_unreference(obj);
29315+ return -EFAULT;
29316+ }
29317+
29318 /* We can only do the GTT pwrite on untiled buffers, as otherwise
29319 * it would end up going through the fenced access, and we'll get
29320 * different detiling behavior between reading and writing.
29321@@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
29322
29323 if (obj_priv->gtt_space) {
29324 atomic_dec(&dev->gtt_count);
29325- atomic_sub(obj->size, &dev->gtt_memory);
29326+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
29327
29328 drm_mm_put_block(obj_priv->gtt_space);
29329 obj_priv->gtt_space = NULL;
29330@@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
29331 goto search_free;
29332 }
29333 atomic_inc(&dev->gtt_count);
29334- atomic_add(obj->size, &dev->gtt_memory);
29335+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
29336
29337 /* Assert that the object is not currently in any GPU domain. As it
29338 * wasn't in the GTT, there shouldn't be any way it could have been in
29339@@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
29340 "%d/%d gtt bytes\n",
29341 atomic_read(&dev->object_count),
29342 atomic_read(&dev->pin_count),
29343- atomic_read(&dev->object_memory),
29344- atomic_read(&dev->pin_memory),
29345- atomic_read(&dev->gtt_memory),
29346+ atomic_read_unchecked(&dev->object_memory),
29347+ atomic_read_unchecked(&dev->pin_memory),
29348+ atomic_read_unchecked(&dev->gtt_memory),
29349 dev->gtt_total);
29350 }
29351 goto err;
29352@@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
29353 */
29354 if (obj_priv->pin_count == 1) {
29355 atomic_inc(&dev->pin_count);
29356- atomic_add(obj->size, &dev->pin_memory);
29357+ atomic_add_unchecked(obj->size, &dev->pin_memory);
29358 if (!obj_priv->active &&
29359 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
29360 !list_empty(&obj_priv->list))
29361@@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
29362 list_move_tail(&obj_priv->list,
29363 &dev_priv->mm.inactive_list);
29364 atomic_dec(&dev->pin_count);
29365- atomic_sub(obj->size, &dev->pin_memory);
29366+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
29367 }
29368 i915_verify_inactive(dev, __FILE__, __LINE__);
29369 }
29370diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c
29371--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
29372+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
29373@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
29374 int irq_received;
29375 int ret = IRQ_NONE;
29376
29377- atomic_inc(&dev_priv->irq_received);
29378+ atomic_inc_unchecked(&dev_priv->irq_received);
29379
29380 if (IS_IGDNG(dev))
29381 return igdng_irq_handler(dev);
29382@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29383 {
29384 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29385
29386- atomic_set(&dev_priv->irq_received, 0);
29387+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29388
29389 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29390 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29391diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c
29392--- linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-03-27 14:31:47.000000000 -0400
29393+++ linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-08-05 20:33:55.000000000 -0400
29394@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *
29395 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
29396
29397 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
29398- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29399+ pax_open_kernel();
29400+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29401+ pax_close_kernel();
29402
29403 /* Read the regs to test if we can talk to the device */
29404 for (i = 0; i < 0x40; i++) {
29405diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h
29406--- linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29407+++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29408@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29409 u32 clear_cmd;
29410 u32 maccess;
29411
29412- atomic_t vbl_received; /**< Number of vblanks received. */
29413+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29414 wait_queue_head_t fence_queue;
29415- atomic_t last_fence_retired;
29416+ atomic_unchecked_t last_fence_retired;
29417 u32 next_fence_to_post;
29418
29419 unsigned int fb_cpp;
29420diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c
29421--- linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29422+++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29423@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29424 if (crtc != 0)
29425 return 0;
29426
29427- return atomic_read(&dev_priv->vbl_received);
29428+ return atomic_read_unchecked(&dev_priv->vbl_received);
29429 }
29430
29431
29432@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29433 /* VBLANK interrupt */
29434 if (status & MGA_VLINEPEN) {
29435 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29436- atomic_inc(&dev_priv->vbl_received);
29437+ atomic_inc_unchecked(&dev_priv->vbl_received);
29438 drm_handle_vblank(dev, 0);
29439 handled = 1;
29440 }
29441@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29442 MGA_WRITE(MGA_PRIMEND, prim_end);
29443 }
29444
29445- atomic_inc(&dev_priv->last_fence_retired);
29446+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
29447 DRM_WAKEUP(&dev_priv->fence_queue);
29448 handled = 1;
29449 }
29450@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29451 * using fences.
29452 */
29453 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29454- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29455+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29456 - *sequence) <= (1 << 23)));
29457
29458 *sequence = cur_fence;
29459diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c
29460--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29461+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29462@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29463
29464 /* GH: Simple idle check.
29465 */
29466- atomic_set(&dev_priv->idle_count, 0);
29467+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29468
29469 /* We don't support anything other than bus-mastering ring mode,
29470 * but the ring can be in either AGP or PCI space for the ring
29471diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h
29472--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29473+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29474@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29475 int is_pci;
29476 unsigned long cce_buffers_offset;
29477
29478- atomic_t idle_count;
29479+ atomic_unchecked_t idle_count;
29480
29481 int page_flipping;
29482 int current_page;
29483 u32 crtc_offset;
29484 u32 crtc_offset_cntl;
29485
29486- atomic_t vbl_received;
29487+ atomic_unchecked_t vbl_received;
29488
29489 u32 color_fmt;
29490 unsigned int front_offset;
29491diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c
29492--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29493+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29494@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29495 if (crtc != 0)
29496 return 0;
29497
29498- return atomic_read(&dev_priv->vbl_received);
29499+ return atomic_read_unchecked(&dev_priv->vbl_received);
29500 }
29501
29502 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29503@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29504 /* VBLANK interrupt */
29505 if (status & R128_CRTC_VBLANK_INT) {
29506 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29507- atomic_inc(&dev_priv->vbl_received);
29508+ atomic_inc_unchecked(&dev_priv->vbl_received);
29509 drm_handle_vblank(dev, 0);
29510 return IRQ_HANDLED;
29511 }
29512diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c
29513--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29514+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29515@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29516
29517 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29518 {
29519- if (atomic_read(&dev_priv->idle_count) == 0) {
29520+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29521 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29522 } else {
29523- atomic_set(&dev_priv->idle_count, 0);
29524+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29525 }
29526 }
29527
29528diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c
29529--- linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
29530+++ linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
29531@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
29532 char name[512];
29533 int i;
29534
29535+ pax_track_stack();
29536+
29537 ctx->card = card;
29538 ctx->bios = bios;
29539
29540diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c
29541--- linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29542+++ linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29543@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29544 regex_t mask_rex;
29545 regmatch_t match[4];
29546 char buf[1024];
29547- size_t end;
29548+ long end;
29549 int len;
29550 int done = 0;
29551 int r;
29552 unsigned o;
29553 struct offset *offset;
29554 char last_reg_s[10];
29555- int last_reg;
29556+ unsigned long last_reg;
29557
29558 if (regcomp
29559 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29560diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c
29561--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29562+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29563@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29564 bool linkb;
29565 struct radeon_i2c_bus_rec ddc_bus;
29566
29567+ pax_track_stack();
29568+
29569 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29570
29571 if (data_offset == 0)
29572@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29573 }
29574 }
29575
29576-struct bios_connector {
29577+static struct bios_connector {
29578 bool valid;
29579 uint16_t line_mux;
29580 uint16_t devices;
29581 int connector_type;
29582 struct radeon_i2c_bus_rec ddc_bus;
29583-};
29584+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29585
29586 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29587 drm_device
29588@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29589 uint8_t dac;
29590 union atom_supported_devices *supported_devices;
29591 int i, j;
29592- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29593
29594 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29595
29596diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c
29597--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29598+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29599@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29600
29601 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29602 error = freq - current_freq;
29603- error = error < 0 ? 0xffffffff : error;
29604+ error = (int32_t)error < 0 ? 0xffffffff : error;
29605 } else
29606 error = abs(current_freq - freq);
29607 vco_diff = abs(vco - best_vco);
29608diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h
29609--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29610+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29611@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29612
29613 /* SW interrupt */
29614 wait_queue_head_t swi_queue;
29615- atomic_t swi_emitted;
29616+ atomic_unchecked_t swi_emitted;
29617 int vblank_crtc;
29618 uint32_t irq_enable_reg;
29619 uint32_t r500_disp_irq_reg;
29620diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c
29621--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29622+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29623@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29624 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29625 return 0;
29626 }
29627- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29628+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29629 if (!rdev->cp.ready) {
29630 /* FIXME: cp is not running assume everythings is done right
29631 * away
29632@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29633 return r;
29634 }
29635 WREG32(rdev->fence_drv.scratch_reg, 0);
29636- atomic_set(&rdev->fence_drv.seq, 0);
29637+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29638 INIT_LIST_HEAD(&rdev->fence_drv.created);
29639 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29640 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29641diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h
29642--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29643+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:33:55.000000000 -0400
29644@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29645 */
29646 struct radeon_fence_driver {
29647 uint32_t scratch_reg;
29648- atomic_t seq;
29649+ atomic_unchecked_t seq;
29650 uint32_t last_seq;
29651 unsigned long count_timeout;
29652 wait_queue_head_t queue;
29653@@ -640,7 +640,7 @@ struct radeon_asic {
29654 uint32_t offset, uint32_t obj_size);
29655 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
29656 void (*bandwidth_update)(struct radeon_device *rdev);
29657-};
29658+} __no_const;
29659
29660 /*
29661 * Asic structures
29662diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c
29663--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
29664+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
29665@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
29666 request = compat_alloc_user_space(sizeof(*request));
29667 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
29668 || __put_user(req32.param, &request->param)
29669- || __put_user((void __user *)(unsigned long)req32.value,
29670+ || __put_user((unsigned long)req32.value,
29671 &request->value))
29672 return -EFAULT;
29673
29674diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c
29675--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
29676+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
29677@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
29678 unsigned int ret;
29679 RING_LOCALS;
29680
29681- atomic_inc(&dev_priv->swi_emitted);
29682- ret = atomic_read(&dev_priv->swi_emitted);
29683+ atomic_inc_unchecked(&dev_priv->swi_emitted);
29684+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
29685
29686 BEGIN_RING(4);
29687 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
29688@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
29689 drm_radeon_private_t *dev_priv =
29690 (drm_radeon_private_t *) dev->dev_private;
29691
29692- atomic_set(&dev_priv->swi_emitted, 0);
29693+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
29694 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
29695
29696 dev->max_vblank_count = 0x001fffff;
29697diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c
29698--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
29699+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
29700@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
29701 {
29702 drm_radeon_private_t *dev_priv = dev->dev_private;
29703 drm_radeon_getparam_t *param = data;
29704- int value;
29705+ int value = 0;
29706
29707 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
29708
29709diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c
29710--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
29711+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
29712@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
29713 DRM_INFO("radeon: ttm finalized\n");
29714 }
29715
29716-static struct vm_operations_struct radeon_ttm_vm_ops;
29717-static const struct vm_operations_struct *ttm_vm_ops = NULL;
29718-
29719-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
29720-{
29721- struct ttm_buffer_object *bo;
29722- int r;
29723-
29724- bo = (struct ttm_buffer_object *)vma->vm_private_data;
29725- if (bo == NULL) {
29726- return VM_FAULT_NOPAGE;
29727- }
29728- r = ttm_vm_ops->fault(vma, vmf);
29729- return r;
29730-}
29731-
29732 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29733 {
29734 struct drm_file *file_priv;
29735 struct radeon_device *rdev;
29736- int r;
29737
29738 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
29739 return drm_mmap(filp, vma);
29740@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
29741
29742 file_priv = (struct drm_file *)filp->private_data;
29743 rdev = file_priv->minor->dev->dev_private;
29744- if (rdev == NULL) {
29745+ if (!rdev)
29746 return -EINVAL;
29747- }
29748- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29749- if (unlikely(r != 0)) {
29750- return r;
29751- }
29752- if (unlikely(ttm_vm_ops == NULL)) {
29753- ttm_vm_ops = vma->vm_ops;
29754- radeon_ttm_vm_ops = *ttm_vm_ops;
29755- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29756- }
29757- vma->vm_ops = &radeon_ttm_vm_ops;
29758- return 0;
29759+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29760 }
29761
29762
29763diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c
29764--- linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
29765+++ linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
29766@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
29767 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29768 rdev->pm.sideport_bandwidth.full)
29769 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29770- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
29771+ read_delay_latency.full = rfixed_const(800 * 1000);
29772 read_delay_latency.full = rfixed_div(read_delay_latency,
29773 rdev->pm.igp_sideport_mclk);
29774+ a.full = rfixed_const(370);
29775+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
29776 } else {
29777 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29778 rdev->pm.k8_bandwidth.full)
29779diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c
29780--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
29781+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
29782@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
29783 NULL
29784 };
29785
29786-static struct sysfs_ops ttm_bo_global_ops = {
29787+static const struct sysfs_ops ttm_bo_global_ops = {
29788 .show = &ttm_bo_global_show
29789 };
29790
29791diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c
29792--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
29793+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
29794@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
29795 {
29796 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
29797 vma->vm_private_data;
29798- struct ttm_bo_device *bdev = bo->bdev;
29799+ struct ttm_bo_device *bdev;
29800 unsigned long bus_base;
29801 unsigned long bus_offset;
29802 unsigned long bus_size;
29803@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
29804 unsigned long address = (unsigned long)vmf->virtual_address;
29805 int retval = VM_FAULT_NOPAGE;
29806
29807+ if (!bo)
29808+ return VM_FAULT_NOPAGE;
29809+ bdev = bo->bdev;
29810+
29811 /*
29812 * Work around locking order reversal in fault / nopfn
29813 * between mmap_sem and bo_reserve: Perform a trylock operation
29814diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c
29815--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
29816+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
29817@@ -36,7 +36,7 @@
29818 struct ttm_global_item {
29819 struct mutex mutex;
29820 void *object;
29821- int refcount;
29822+ atomic_t refcount;
29823 };
29824
29825 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
29826@@ -49,7 +49,7 @@ void ttm_global_init(void)
29827 struct ttm_global_item *item = &glob[i];
29828 mutex_init(&item->mutex);
29829 item->object = NULL;
29830- item->refcount = 0;
29831+ atomic_set(&item->refcount, 0);
29832 }
29833 }
29834
29835@@ -59,7 +59,7 @@ void ttm_global_release(void)
29836 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
29837 struct ttm_global_item *item = &glob[i];
29838 BUG_ON(item->object != NULL);
29839- BUG_ON(item->refcount != 0);
29840+ BUG_ON(atomic_read(&item->refcount) != 0);
29841 }
29842 }
29843
29844@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
29845 void *object;
29846
29847 mutex_lock(&item->mutex);
29848- if (item->refcount == 0) {
29849+ if (atomic_read(&item->refcount) == 0) {
29850 item->object = kzalloc(ref->size, GFP_KERNEL);
29851 if (unlikely(item->object == NULL)) {
29852 ret = -ENOMEM;
29853@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
29854 goto out_err;
29855
29856 }
29857- ++item->refcount;
29858+ atomic_inc(&item->refcount);
29859 ref->object = item->object;
29860 object = item->object;
29861 mutex_unlock(&item->mutex);
29862@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
29863 struct ttm_global_item *item = &glob[ref->global_type];
29864
29865 mutex_lock(&item->mutex);
29866- BUG_ON(item->refcount == 0);
29867+ BUG_ON(atomic_read(&item->refcount) == 0);
29868 BUG_ON(ref->object != item->object);
29869- if (--item->refcount == 0) {
29870+ if (atomic_dec_and_test(&item->refcount)) {
29871 ref->release(ref);
29872 item->object = NULL;
29873 }
29874diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c
29875--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
29876+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
29877@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
29878 NULL
29879 };
29880
29881-static struct sysfs_ops ttm_mem_zone_ops = {
29882+static const struct sysfs_ops ttm_mem_zone_ops = {
29883 .show = &ttm_mem_zone_show,
29884 .store = &ttm_mem_zone_store
29885 };
29886diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h
29887--- linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
29888+++ linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
29889@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29890 typedef uint32_t maskarray_t[5];
29891
29892 typedef struct drm_via_irq {
29893- atomic_t irq_received;
29894+ atomic_unchecked_t irq_received;
29895 uint32_t pending_mask;
29896 uint32_t enable_mask;
29897 wait_queue_head_t irq_queue;
29898@@ -75,7 +75,7 @@ typedef struct drm_via_private {
29899 struct timeval last_vblank;
29900 int last_vblank_valid;
29901 unsigned usec_per_vblank;
29902- atomic_t vbl_received;
29903+ atomic_unchecked_t vbl_received;
29904 drm_via_state_t hc_state;
29905 char pci_buf[VIA_PCI_BUF_SIZE];
29906 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29907diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c
29908--- linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
29909+++ linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
29910@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
29911 if (crtc != 0)
29912 return 0;
29913
29914- return atomic_read(&dev_priv->vbl_received);
29915+ return atomic_read_unchecked(&dev_priv->vbl_received);
29916 }
29917
29918 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29919@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
29920
29921 status = VIA_READ(VIA_REG_INTERRUPT);
29922 if (status & VIA_IRQ_VBLANK_PENDING) {
29923- atomic_inc(&dev_priv->vbl_received);
29924- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29925+ atomic_inc_unchecked(&dev_priv->vbl_received);
29926+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29927 do_gettimeofday(&cur_vblank);
29928 if (dev_priv->last_vblank_valid) {
29929 dev_priv->usec_per_vblank =
29930@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29931 dev_priv->last_vblank = cur_vblank;
29932 dev_priv->last_vblank_valid = 1;
29933 }
29934- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29935+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29936 DRM_DEBUG("US per vblank is: %u\n",
29937 dev_priv->usec_per_vblank);
29938 }
29939@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29940
29941 for (i = 0; i < dev_priv->num_irqs; ++i) {
29942 if (status & cur_irq->pending_mask) {
29943- atomic_inc(&cur_irq->irq_received);
29944+ atomic_inc_unchecked(&cur_irq->irq_received);
29945 DRM_WAKEUP(&cur_irq->irq_queue);
29946 handled = 1;
29947 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
29948@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
29949 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29950 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
29951 masks[irq][4]));
29952- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
29953+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
29954 } else {
29955 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29956 (((cur_irq_sequence =
29957- atomic_read(&cur_irq->irq_received)) -
29958+ atomic_read_unchecked(&cur_irq->irq_received)) -
29959 *sequence) <= (1 << 23)));
29960 }
29961 *sequence = cur_irq_sequence;
29962@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
29963 }
29964
29965 for (i = 0; i < dev_priv->num_irqs; ++i) {
29966- atomic_set(&cur_irq->irq_received, 0);
29967+ atomic_set_unchecked(&cur_irq->irq_received, 0);
29968 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
29969 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
29970 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
29971@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
29972 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
29973 case VIA_IRQ_RELATIVE:
29974 irqwait->request.sequence +=
29975- atomic_read(&cur_irq->irq_received);
29976+ atomic_read_unchecked(&cur_irq->irq_received);
29977 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
29978 case VIA_IRQ_ABSOLUTE:
29979 break;
29980diff -urNp linux-2.6.32.45/drivers/hid/hid-core.c linux-2.6.32.45/drivers/hid/hid-core.c
29981--- linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
29982+++ linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
29983@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
29984
29985 int hid_add_device(struct hid_device *hdev)
29986 {
29987- static atomic_t id = ATOMIC_INIT(0);
29988+ static atomic_unchecked_t id = ATOMIC_INIT(0);
29989 int ret;
29990
29991 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29992@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
29993 /* XXX hack, any other cleaner solution after the driver core
29994 * is converted to allow more than 20 bytes as the device name? */
29995 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29996- hdev->vendor, hdev->product, atomic_inc_return(&id));
29997+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29998
29999 ret = device_add(&hdev->dev);
30000 if (!ret)
30001diff -urNp linux-2.6.32.45/drivers/hid/usbhid/hiddev.c linux-2.6.32.45/drivers/hid/usbhid/hiddev.c
30002--- linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
30003+++ linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
30004@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
30005 return put_user(HID_VERSION, (int __user *)arg);
30006
30007 case HIDIOCAPPLICATION:
30008- if (arg < 0 || arg >= hid->maxapplication)
30009+ if (arg >= hid->maxapplication)
30010 return -EINVAL;
30011
30012 for (i = 0; i < hid->maxcollection; i++)
30013diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.c linux-2.6.32.45/drivers/hwmon/lis3lv02d.c
30014--- linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
30015+++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
30016@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
30017 * the lid is closed. This leads to interrupts as soon as a little move
30018 * is done.
30019 */
30020- atomic_inc(&lis3_dev.count);
30021+ atomic_inc_unchecked(&lis3_dev.count);
30022
30023 wake_up_interruptible(&lis3_dev.misc_wait);
30024 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30025@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
30026 if (test_and_set_bit(0, &lis3_dev.misc_opened))
30027 return -EBUSY; /* already open */
30028
30029- atomic_set(&lis3_dev.count, 0);
30030+ atomic_set_unchecked(&lis3_dev.count, 0);
30031
30032 /*
30033 * The sensor can generate interrupts for free-fall and direction
30034@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
30035 add_wait_queue(&lis3_dev.misc_wait, &wait);
30036 while (true) {
30037 set_current_state(TASK_INTERRUPTIBLE);
30038- data = atomic_xchg(&lis3_dev.count, 0);
30039+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30040 if (data)
30041 break;
30042
30043@@ -244,7 +244,7 @@ out:
30044 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30045 {
30046 poll_wait(file, &lis3_dev.misc_wait, wait);
30047- if (atomic_read(&lis3_dev.count))
30048+ if (atomic_read_unchecked(&lis3_dev.count))
30049 return POLLIN | POLLRDNORM;
30050 return 0;
30051 }
30052diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.h linux-2.6.32.45/drivers/hwmon/lis3lv02d.h
30053--- linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
30054+++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
30055@@ -201,7 +201,7 @@ struct lis3lv02d {
30056
30057 struct input_polled_dev *idev; /* input device */
30058 struct platform_device *pdev; /* platform device */
30059- atomic_t count; /* interrupt count after last read */
30060+ atomic_unchecked_t count; /* interrupt count after last read */
30061 int xcalib; /* calibrated null value for x */
30062 int ycalib; /* calibrated null value for y */
30063 int zcalib; /* calibrated null value for z */
30064diff -urNp linux-2.6.32.45/drivers/hwmon/sht15.c linux-2.6.32.45/drivers/hwmon/sht15.c
30065--- linux-2.6.32.45/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
30066+++ linux-2.6.32.45/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
30067@@ -112,7 +112,7 @@ struct sht15_data {
30068 int supply_uV;
30069 int supply_uV_valid;
30070 struct work_struct update_supply_work;
30071- atomic_t interrupt_handled;
30072+ atomic_unchecked_t interrupt_handled;
30073 };
30074
30075 /**
30076@@ -245,13 +245,13 @@ static inline int sht15_update_single_va
30077 return ret;
30078
30079 gpio_direction_input(data->pdata->gpio_data);
30080- atomic_set(&data->interrupt_handled, 0);
30081+ atomic_set_unchecked(&data->interrupt_handled, 0);
30082
30083 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30084 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30085 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30086 /* Only relevant if the interrupt hasn't occured. */
30087- if (!atomic_read(&data->interrupt_handled))
30088+ if (!atomic_read_unchecked(&data->interrupt_handled))
30089 schedule_work(&data->read_work);
30090 }
30091 ret = wait_event_timeout(data->wait_queue,
30092@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
30093 struct sht15_data *data = d;
30094 /* First disable the interrupt */
30095 disable_irq_nosync(irq);
30096- atomic_inc(&data->interrupt_handled);
30097+ atomic_inc_unchecked(&data->interrupt_handled);
30098 /* Then schedule a reading work struct */
30099 if (data->flag != SHT15_READING_NOTHING)
30100 schedule_work(&data->read_work);
30101@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
30102 here as could have gone low in meantime so verify
30103 it hasn't!
30104 */
30105- atomic_set(&data->interrupt_handled, 0);
30106+ atomic_set_unchecked(&data->interrupt_handled, 0);
30107 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30108 /* If still not occured or another handler has been scheduled */
30109 if (gpio_get_value(data->pdata->gpio_data)
30110- || atomic_read(&data->interrupt_handled))
30111+ || atomic_read_unchecked(&data->interrupt_handled))
30112 return;
30113 }
30114 /* Read the data back from the device */
30115diff -urNp linux-2.6.32.45/drivers/hwmon/w83791d.c linux-2.6.32.45/drivers/hwmon/w83791d.c
30116--- linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
30117+++ linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
30118@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
30119 struct i2c_board_info *info);
30120 static int w83791d_remove(struct i2c_client *client);
30121
30122-static int w83791d_read(struct i2c_client *client, u8 register);
30123-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
30124+static int w83791d_read(struct i2c_client *client, u8 reg);
30125+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
30126 static struct w83791d_data *w83791d_update_device(struct device *dev);
30127
30128 #ifdef DEBUG
30129diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c
30130--- linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-03-27 14:31:47.000000000 -0400
30131+++ linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:33:55.000000000 -0400
30132@@ -189,23 +189,23 @@ static int __init amd756_s4882_init(void
30133 }
30134
30135 /* Fill in the new structures */
30136- s4882_algo[0] = *(amd756_smbus.algo);
30137- s4882_algo[0].smbus_xfer = amd756_access_virt0;
30138+ memcpy((void *)&s4882_algo[0], amd756_smbus.algo, sizeof(s4882_algo[0]));
30139+ *(void **)&s4882_algo[0].smbus_xfer = amd756_access_virt0;
30140 s4882_adapter[0] = amd756_smbus;
30141 s4882_adapter[0].algo = s4882_algo;
30142- s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30143+ *(void **)&s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30144 for (i = 1; i < 5; i++) {
30145- s4882_algo[i] = *(amd756_smbus.algo);
30146+ memcpy((void *)&s4882_algo[i], amd756_smbus.algo, sizeof(s4882_algo[i]));
30147 s4882_adapter[i] = amd756_smbus;
30148 snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name),
30149 "SMBus 8111 adapter (CPU%d)", i-1);
30150 s4882_adapter[i].algo = s4882_algo+i;
30151 s4882_adapter[i].dev.parent = amd756_smbus.dev.parent;
30152 }
30153- s4882_algo[1].smbus_xfer = amd756_access_virt1;
30154- s4882_algo[2].smbus_xfer = amd756_access_virt2;
30155- s4882_algo[3].smbus_xfer = amd756_access_virt3;
30156- s4882_algo[4].smbus_xfer = amd756_access_virt4;
30157+ *(void **)&s4882_algo[1].smbus_xfer = amd756_access_virt1;
30158+ *(void **)&s4882_algo[2].smbus_xfer = amd756_access_virt2;
30159+ *(void **)&s4882_algo[3].smbus_xfer = amd756_access_virt3;
30160+ *(void **)&s4882_algo[4].smbus_xfer = amd756_access_virt4;
30161
30162 /* Register virtual adapters */
30163 for (i = 0; i < 5; i++) {
30164diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c
30165--- linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-03-27 14:31:47.000000000 -0400
30166+++ linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:33:55.000000000 -0400
30167@@ -184,23 +184,23 @@ static int __init nforce2_s4985_init(voi
30168 }
30169
30170 /* Fill in the new structures */
30171- s4985_algo[0] = *(nforce2_smbus->algo);
30172- s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30173+ memcpy((void *)&s4985_algo[0], nforce2_smbus->algo, sizeof(s4985_algo[0]));
30174+ *(void **)&s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30175 s4985_adapter[0] = *nforce2_smbus;
30176 s4985_adapter[0].algo = s4985_algo;
30177 s4985_adapter[0].dev.parent = nforce2_smbus->dev.parent;
30178 for (i = 1; i < 5; i++) {
30179- s4985_algo[i] = *(nforce2_smbus->algo);
30180+ memcpy((void *)&s4985_algo[i], nforce2_smbus->algo, sizeof(s4985_algo[i]));
30181 s4985_adapter[i] = *nforce2_smbus;
30182 snprintf(s4985_adapter[i].name, sizeof(s4985_adapter[i].name),
30183 "SMBus nForce2 adapter (CPU%d)", i - 1);
30184 s4985_adapter[i].algo = s4985_algo + i;
30185 s4985_adapter[i].dev.parent = nforce2_smbus->dev.parent;
30186 }
30187- s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30188- s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30189- s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30190- s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30191+ *(void **)&s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30192+ *(void **)&s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30193+ *(void **)&s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30194+ *(void **)&s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30195
30196 /* Register virtual adapters */
30197 for (i = 0; i < 5; i++) {
30198diff -urNp linux-2.6.32.45/drivers/ide/ide-cd.c linux-2.6.32.45/drivers/ide/ide-cd.c
30199--- linux-2.6.32.45/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
30200+++ linux-2.6.32.45/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
30201@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
30202 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30203 if ((unsigned long)buf & alignment
30204 || blk_rq_bytes(rq) & q->dma_pad_mask
30205- || object_is_on_stack(buf))
30206+ || object_starts_on_stack(buf))
30207 drive->dma = 0;
30208 }
30209 }
30210diff -urNp linux-2.6.32.45/drivers/ide/ide-floppy.c linux-2.6.32.45/drivers/ide/ide-floppy.c
30211--- linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
30212+++ linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
30213@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
30214 u8 pc_buf[256], header_len, desc_cnt;
30215 int i, rc = 1, blocks, length;
30216
30217+ pax_track_stack();
30218+
30219 ide_debug_log(IDE_DBG_FUNC, "enter");
30220
30221 drive->bios_cyl = 0;
30222diff -urNp linux-2.6.32.45/drivers/ide/setup-pci.c linux-2.6.32.45/drivers/ide/setup-pci.c
30223--- linux-2.6.32.45/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
30224+++ linux-2.6.32.45/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
30225@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
30226 int ret, i, n_ports = dev2 ? 4 : 2;
30227 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
30228
30229+ pax_track_stack();
30230+
30231 for (i = 0; i < n_ports / 2; i++) {
30232 ret = ide_setup_pci_controller(pdev[i], d, !i);
30233 if (ret < 0)
30234diff -urNp linux-2.6.32.45/drivers/ieee1394/dv1394.c linux-2.6.32.45/drivers/ieee1394/dv1394.c
30235--- linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
30236+++ linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
30237@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
30238 based upon DIF section and sequence
30239 */
30240
30241-static void inline
30242+static inline void
30243 frame_put_packet (struct frame *f, struct packet *p)
30244 {
30245 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
30246diff -urNp linux-2.6.32.45/drivers/ieee1394/hosts.c linux-2.6.32.45/drivers/ieee1394/hosts.c
30247--- linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
30248+++ linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
30249@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
30250 }
30251
30252 static struct hpsb_host_driver dummy_driver = {
30253+ .name = "dummy",
30254 .transmit_packet = dummy_transmit_packet,
30255 .devctl = dummy_devctl,
30256 .isoctl = dummy_isoctl
30257diff -urNp linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c
30258--- linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
30259+++ linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
30260@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
30261 for (func = 0; func < 8; func++) {
30262 u32 class = read_pci_config(num,slot,func,
30263 PCI_CLASS_REVISION);
30264- if ((class == 0xffffffff))
30265+ if (class == 0xffffffff)
30266 continue; /* No device at this func */
30267
30268 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
30269diff -urNp linux-2.6.32.45/drivers/ieee1394/ohci1394.c linux-2.6.32.45/drivers/ieee1394/ohci1394.c
30270--- linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
30271+++ linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
30272@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
30273 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
30274
30275 /* Module Parameters */
30276-static int phys_dma = 1;
30277+static int phys_dma;
30278 module_param(phys_dma, int, 0444);
30279-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
30280+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
30281
30282 static void dma_trm_tasklet(unsigned long data);
30283 static void dma_trm_reset(struct dma_trm_ctx *d);
30284diff -urNp linux-2.6.32.45/drivers/ieee1394/sbp2.c linux-2.6.32.45/drivers/ieee1394/sbp2.c
30285--- linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
30286+++ linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
30287@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
30288 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
30289 MODULE_LICENSE("GPL");
30290
30291-static int sbp2_module_init(void)
30292+static int __init sbp2_module_init(void)
30293 {
30294 int ret;
30295
30296diff -urNp linux-2.6.32.45/drivers/infiniband/core/cm.c linux-2.6.32.45/drivers/infiniband/core/cm.c
30297--- linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
30298+++ linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
30299@@ -112,7 +112,7 @@ static char const counter_group_names[CM
30300
30301 struct cm_counter_group {
30302 struct kobject obj;
30303- atomic_long_t counter[CM_ATTR_COUNT];
30304+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30305 };
30306
30307 struct cm_counter_attribute {
30308@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
30309 struct ib_mad_send_buf *msg = NULL;
30310 int ret;
30311
30312- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30313+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30314 counter[CM_REQ_COUNTER]);
30315
30316 /* Quick state check to discard duplicate REQs. */
30317@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
30318 if (!cm_id_priv)
30319 return;
30320
30321- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30322+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30323 counter[CM_REP_COUNTER]);
30324 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30325 if (ret)
30326@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
30327 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30328 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30329 spin_unlock_irq(&cm_id_priv->lock);
30330- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30331+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30332 counter[CM_RTU_COUNTER]);
30333 goto out;
30334 }
30335@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
30336 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30337 dreq_msg->local_comm_id);
30338 if (!cm_id_priv) {
30339- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30340+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30341 counter[CM_DREQ_COUNTER]);
30342 cm_issue_drep(work->port, work->mad_recv_wc);
30343 return -EINVAL;
30344@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
30345 case IB_CM_MRA_REP_RCVD:
30346 break;
30347 case IB_CM_TIMEWAIT:
30348- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30349+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30350 counter[CM_DREQ_COUNTER]);
30351 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30352 goto unlock;
30353@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
30354 cm_free_msg(msg);
30355 goto deref;
30356 case IB_CM_DREQ_RCVD:
30357- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30358+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30359 counter[CM_DREQ_COUNTER]);
30360 goto unlock;
30361 default:
30362@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
30363 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30364 cm_id_priv->msg, timeout)) {
30365 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30366- atomic_long_inc(&work->port->
30367+ atomic_long_inc_unchecked(&work->port->
30368 counter_group[CM_RECV_DUPLICATES].
30369 counter[CM_MRA_COUNTER]);
30370 goto out;
30371@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
30372 break;
30373 case IB_CM_MRA_REQ_RCVD:
30374 case IB_CM_MRA_REP_RCVD:
30375- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30376+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30377 counter[CM_MRA_COUNTER]);
30378 /* fall through */
30379 default:
30380@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
30381 case IB_CM_LAP_IDLE:
30382 break;
30383 case IB_CM_MRA_LAP_SENT:
30384- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30385+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30386 counter[CM_LAP_COUNTER]);
30387 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30388 goto unlock;
30389@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
30390 cm_free_msg(msg);
30391 goto deref;
30392 case IB_CM_LAP_RCVD:
30393- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30394+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30395 counter[CM_LAP_COUNTER]);
30396 goto unlock;
30397 default:
30398@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
30399 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30400 if (cur_cm_id_priv) {
30401 spin_unlock_irq(&cm.lock);
30402- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30403+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30404 counter[CM_SIDR_REQ_COUNTER]);
30405 goto out; /* Duplicate message. */
30406 }
30407@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
30408 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30409 msg->retries = 1;
30410
30411- atomic_long_add(1 + msg->retries,
30412+ atomic_long_add_unchecked(1 + msg->retries,
30413 &port->counter_group[CM_XMIT].counter[attr_index]);
30414 if (msg->retries)
30415- atomic_long_add(msg->retries,
30416+ atomic_long_add_unchecked(msg->retries,
30417 &port->counter_group[CM_XMIT_RETRIES].
30418 counter[attr_index]);
30419
30420@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
30421 }
30422
30423 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30424- atomic_long_inc(&port->counter_group[CM_RECV].
30425+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30426 counter[attr_id - CM_ATTR_ID_OFFSET]);
30427
30428 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30429@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
30430 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30431
30432 return sprintf(buf, "%ld\n",
30433- atomic_long_read(&group->counter[cm_attr->index]));
30434+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30435 }
30436
30437-static struct sysfs_ops cm_counter_ops = {
30438+static const struct sysfs_ops cm_counter_ops = {
30439 .show = cm_show_counter
30440 };
30441
30442diff -urNp linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c
30443--- linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
30444+++ linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
30445@@ -97,8 +97,8 @@ struct ib_fmr_pool {
30446
30447 struct task_struct *thread;
30448
30449- atomic_t req_ser;
30450- atomic_t flush_ser;
30451+ atomic_unchecked_t req_ser;
30452+ atomic_unchecked_t flush_ser;
30453
30454 wait_queue_head_t force_wait;
30455 };
30456@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
30457 struct ib_fmr_pool *pool = pool_ptr;
30458
30459 do {
30460- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30461+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30462 ib_fmr_batch_release(pool);
30463
30464- atomic_inc(&pool->flush_ser);
30465+ atomic_inc_unchecked(&pool->flush_ser);
30466 wake_up_interruptible(&pool->force_wait);
30467
30468 if (pool->flush_function)
30469@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30470 }
30471
30472 set_current_state(TASK_INTERRUPTIBLE);
30473- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30474+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30475 !kthread_should_stop())
30476 schedule();
30477 __set_current_state(TASK_RUNNING);
30478@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
30479 pool->dirty_watermark = params->dirty_watermark;
30480 pool->dirty_len = 0;
30481 spin_lock_init(&pool->pool_lock);
30482- atomic_set(&pool->req_ser, 0);
30483- atomic_set(&pool->flush_ser, 0);
30484+ atomic_set_unchecked(&pool->req_ser, 0);
30485+ atomic_set_unchecked(&pool->flush_ser, 0);
30486 init_waitqueue_head(&pool->force_wait);
30487
30488 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30489@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
30490 }
30491 spin_unlock_irq(&pool->pool_lock);
30492
30493- serial = atomic_inc_return(&pool->req_ser);
30494+ serial = atomic_inc_return_unchecked(&pool->req_ser);
30495 wake_up_process(pool->thread);
30496
30497 if (wait_event_interruptible(pool->force_wait,
30498- atomic_read(&pool->flush_ser) - serial >= 0))
30499+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30500 return -EINTR;
30501
30502 return 0;
30503@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30504 } else {
30505 list_add_tail(&fmr->list, &pool->dirty_list);
30506 if (++pool->dirty_len >= pool->dirty_watermark) {
30507- atomic_inc(&pool->req_ser);
30508+ atomic_inc_unchecked(&pool->req_ser);
30509 wake_up_process(pool->thread);
30510 }
30511 }
30512diff -urNp linux-2.6.32.45/drivers/infiniband/core/sysfs.c linux-2.6.32.45/drivers/infiniband/core/sysfs.c
30513--- linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30514+++ linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30515@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30516 return port_attr->show(p, port_attr, buf);
30517 }
30518
30519-static struct sysfs_ops port_sysfs_ops = {
30520+static const struct sysfs_ops port_sysfs_ops = {
30521 .show = port_attr_show
30522 };
30523
30524diff -urNp linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c
30525--- linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30526+++ linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30527@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
30528 dst->grh.sgid_index = src->grh.sgid_index;
30529 dst->grh.hop_limit = src->grh.hop_limit;
30530 dst->grh.traffic_class = src->grh.traffic_class;
30531+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
30532 dst->dlid = src->dlid;
30533 dst->sl = src->sl;
30534 dst->src_path_bits = src->src_path_bits;
30535 dst->static_rate = src->static_rate;
30536 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
30537 dst->port_num = src->port_num;
30538+ dst->reserved = 0;
30539 }
30540 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
30541
30542 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
30543 struct ib_qp_attr *src)
30544 {
30545+ dst->qp_state = src->qp_state;
30546 dst->cur_qp_state = src->cur_qp_state;
30547 dst->path_mtu = src->path_mtu;
30548 dst->path_mig_state = src->path_mig_state;
30549@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
30550 dst->rnr_retry = src->rnr_retry;
30551 dst->alt_port_num = src->alt_port_num;
30552 dst->alt_timeout = src->alt_timeout;
30553+ memset(dst->reserved, 0, sizeof(dst->reserved));
30554 }
30555 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
30556
30557diff -urNp linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c
30558--- linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
30559+++ linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
30560@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
30561 struct infinipath_counters counters;
30562 struct ipath_devdata *dd;
30563
30564+ pax_track_stack();
30565+
30566 dd = file->f_path.dentry->d_inode->i_private;
30567 dd->ipath_f_read_counters(dd, &counters);
30568
30569diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c
30570--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
30571+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
30572@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
30573 LIST_HEAD(nes_adapter_list);
30574 static LIST_HEAD(nes_dev_list);
30575
30576-atomic_t qps_destroyed;
30577+atomic_unchecked_t qps_destroyed;
30578
30579 static unsigned int ee_flsh_adapter;
30580 static unsigned int sysfs_nonidx_addr;
30581@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
30582 struct nes_adapter *nesadapter = nesdev->nesadapter;
30583 u32 qp_id;
30584
30585- atomic_inc(&qps_destroyed);
30586+ atomic_inc_unchecked(&qps_destroyed);
30587
30588 /* Free the control structures */
30589
30590diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c
30591--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
30592+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
30593@@ -69,11 +69,11 @@ u32 cm_packets_received;
30594 u32 cm_listens_created;
30595 u32 cm_listens_destroyed;
30596 u32 cm_backlog_drops;
30597-atomic_t cm_loopbacks;
30598-atomic_t cm_nodes_created;
30599-atomic_t cm_nodes_destroyed;
30600-atomic_t cm_accel_dropped_pkts;
30601-atomic_t cm_resets_recvd;
30602+atomic_unchecked_t cm_loopbacks;
30603+atomic_unchecked_t cm_nodes_created;
30604+atomic_unchecked_t cm_nodes_destroyed;
30605+atomic_unchecked_t cm_accel_dropped_pkts;
30606+atomic_unchecked_t cm_resets_recvd;
30607
30608 static inline int mini_cm_accelerated(struct nes_cm_core *,
30609 struct nes_cm_node *);
30610@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
30611
30612 static struct nes_cm_core *g_cm_core;
30613
30614-atomic_t cm_connects;
30615-atomic_t cm_accepts;
30616-atomic_t cm_disconnects;
30617-atomic_t cm_closes;
30618-atomic_t cm_connecteds;
30619-atomic_t cm_connect_reqs;
30620-atomic_t cm_rejects;
30621+atomic_unchecked_t cm_connects;
30622+atomic_unchecked_t cm_accepts;
30623+atomic_unchecked_t cm_disconnects;
30624+atomic_unchecked_t cm_closes;
30625+atomic_unchecked_t cm_connecteds;
30626+atomic_unchecked_t cm_connect_reqs;
30627+atomic_unchecked_t cm_rejects;
30628
30629
30630 /**
30631@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30632 cm_node->rem_mac);
30633
30634 add_hte_node(cm_core, cm_node);
30635- atomic_inc(&cm_nodes_created);
30636+ atomic_inc_unchecked(&cm_nodes_created);
30637
30638 return cm_node;
30639 }
30640@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30641 }
30642
30643 atomic_dec(&cm_core->node_cnt);
30644- atomic_inc(&cm_nodes_destroyed);
30645+ atomic_inc_unchecked(&cm_nodes_destroyed);
30646 nesqp = cm_node->nesqp;
30647 if (nesqp) {
30648 nesqp->cm_node = NULL;
30649@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30650
30651 static void drop_packet(struct sk_buff *skb)
30652 {
30653- atomic_inc(&cm_accel_dropped_pkts);
30654+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30655 dev_kfree_skb_any(skb);
30656 }
30657
30658@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30659
30660 int reset = 0; /* whether to send reset in case of err.. */
30661 int passive_state;
30662- atomic_inc(&cm_resets_recvd);
30663+ atomic_inc_unchecked(&cm_resets_recvd);
30664 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30665 " refcnt=%d\n", cm_node, cm_node->state,
30666 atomic_read(&cm_node->ref_count));
30667@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30668 rem_ref_cm_node(cm_node->cm_core, cm_node);
30669 return NULL;
30670 }
30671- atomic_inc(&cm_loopbacks);
30672+ atomic_inc_unchecked(&cm_loopbacks);
30673 loopbackremotenode->loopbackpartner = cm_node;
30674 loopbackremotenode->tcp_cntxt.rcv_wscale =
30675 NES_CM_DEFAULT_RCV_WND_SCALE;
30676@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30677 add_ref_cm_node(cm_node);
30678 } else if (cm_node->state == NES_CM_STATE_TSA) {
30679 rem_ref_cm_node(cm_core, cm_node);
30680- atomic_inc(&cm_accel_dropped_pkts);
30681+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30682 dev_kfree_skb_any(skb);
30683 break;
30684 }
30685@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30686
30687 if ((cm_id) && (cm_id->event_handler)) {
30688 if (issue_disconn) {
30689- atomic_inc(&cm_disconnects);
30690+ atomic_inc_unchecked(&cm_disconnects);
30691 cm_event.event = IW_CM_EVENT_DISCONNECT;
30692 cm_event.status = disconn_status;
30693 cm_event.local_addr = cm_id->local_addr;
30694@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30695 }
30696
30697 if (issue_close) {
30698- atomic_inc(&cm_closes);
30699+ atomic_inc_unchecked(&cm_closes);
30700 nes_disconnect(nesqp, 1);
30701
30702 cm_id->provider_data = nesqp;
30703@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30704
30705 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30706 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30707- atomic_inc(&cm_accepts);
30708+ atomic_inc_unchecked(&cm_accepts);
30709
30710 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30711 atomic_read(&nesvnic->netdev->refcnt));
30712@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
30713
30714 struct nes_cm_core *cm_core;
30715
30716- atomic_inc(&cm_rejects);
30717+ atomic_inc_unchecked(&cm_rejects);
30718 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30719 loopback = cm_node->loopbackpartner;
30720 cm_core = cm_node->cm_core;
30721@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
30722 ntohl(cm_id->local_addr.sin_addr.s_addr),
30723 ntohs(cm_id->local_addr.sin_port));
30724
30725- atomic_inc(&cm_connects);
30726+ atomic_inc_unchecked(&cm_connects);
30727 nesqp->active_conn = 1;
30728
30729 /* cache the cm_id in the qp */
30730@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
30731 if (nesqp->destroyed) {
30732 return;
30733 }
30734- atomic_inc(&cm_connecteds);
30735+ atomic_inc_unchecked(&cm_connecteds);
30736 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30737 " local port 0x%04X. jiffies = %lu.\n",
30738 nesqp->hwqp.qp_id,
30739@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
30740
30741 ret = cm_id->event_handler(cm_id, &cm_event);
30742 cm_id->add_ref(cm_id);
30743- atomic_inc(&cm_closes);
30744+ atomic_inc_unchecked(&cm_closes);
30745 cm_event.event = IW_CM_EVENT_CLOSE;
30746 cm_event.status = IW_CM_EVENT_STATUS_OK;
30747 cm_event.provider_data = cm_id->provider_data;
30748@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
30749 return;
30750 cm_id = cm_node->cm_id;
30751
30752- atomic_inc(&cm_connect_reqs);
30753+ atomic_inc_unchecked(&cm_connect_reqs);
30754 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30755 cm_node, cm_id, jiffies);
30756
30757@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
30758 return;
30759 cm_id = cm_node->cm_id;
30760
30761- atomic_inc(&cm_connect_reqs);
30762+ atomic_inc_unchecked(&cm_connect_reqs);
30763 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30764 cm_node, cm_id, jiffies);
30765
30766diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h
30767--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
30768+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
30769@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
30770 extern unsigned int wqm_quanta;
30771 extern struct list_head nes_adapter_list;
30772
30773-extern atomic_t cm_connects;
30774-extern atomic_t cm_accepts;
30775-extern atomic_t cm_disconnects;
30776-extern atomic_t cm_closes;
30777-extern atomic_t cm_connecteds;
30778-extern atomic_t cm_connect_reqs;
30779-extern atomic_t cm_rejects;
30780-extern atomic_t mod_qp_timouts;
30781-extern atomic_t qps_created;
30782-extern atomic_t qps_destroyed;
30783-extern atomic_t sw_qps_destroyed;
30784+extern atomic_unchecked_t cm_connects;
30785+extern atomic_unchecked_t cm_accepts;
30786+extern atomic_unchecked_t cm_disconnects;
30787+extern atomic_unchecked_t cm_closes;
30788+extern atomic_unchecked_t cm_connecteds;
30789+extern atomic_unchecked_t cm_connect_reqs;
30790+extern atomic_unchecked_t cm_rejects;
30791+extern atomic_unchecked_t mod_qp_timouts;
30792+extern atomic_unchecked_t qps_created;
30793+extern atomic_unchecked_t qps_destroyed;
30794+extern atomic_unchecked_t sw_qps_destroyed;
30795 extern u32 mh_detected;
30796 extern u32 mh_pauses_sent;
30797 extern u32 cm_packets_sent;
30798@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
30799 extern u32 cm_listens_created;
30800 extern u32 cm_listens_destroyed;
30801 extern u32 cm_backlog_drops;
30802-extern atomic_t cm_loopbacks;
30803-extern atomic_t cm_nodes_created;
30804-extern atomic_t cm_nodes_destroyed;
30805-extern atomic_t cm_accel_dropped_pkts;
30806-extern atomic_t cm_resets_recvd;
30807+extern atomic_unchecked_t cm_loopbacks;
30808+extern atomic_unchecked_t cm_nodes_created;
30809+extern atomic_unchecked_t cm_nodes_destroyed;
30810+extern atomic_unchecked_t cm_accel_dropped_pkts;
30811+extern atomic_unchecked_t cm_resets_recvd;
30812
30813 extern u32 int_mod_timer_init;
30814 extern u32 int_mod_cq_depth_256;
30815diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c
30816--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
30817+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
30818@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
30819 target_stat_values[++index] = mh_detected;
30820 target_stat_values[++index] = mh_pauses_sent;
30821 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30822- target_stat_values[++index] = atomic_read(&cm_connects);
30823- target_stat_values[++index] = atomic_read(&cm_accepts);
30824- target_stat_values[++index] = atomic_read(&cm_disconnects);
30825- target_stat_values[++index] = atomic_read(&cm_connecteds);
30826- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30827- target_stat_values[++index] = atomic_read(&cm_rejects);
30828- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30829- target_stat_values[++index] = atomic_read(&qps_created);
30830- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30831- target_stat_values[++index] = atomic_read(&qps_destroyed);
30832- target_stat_values[++index] = atomic_read(&cm_closes);
30833+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30834+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30835+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30836+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30837+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30838+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30839+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30840+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30841+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30842+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30843+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30844 target_stat_values[++index] = cm_packets_sent;
30845 target_stat_values[++index] = cm_packets_bounced;
30846 target_stat_values[++index] = cm_packets_created;
30847@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
30848 target_stat_values[++index] = cm_listens_created;
30849 target_stat_values[++index] = cm_listens_destroyed;
30850 target_stat_values[++index] = cm_backlog_drops;
30851- target_stat_values[++index] = atomic_read(&cm_loopbacks);
30852- target_stat_values[++index] = atomic_read(&cm_nodes_created);
30853- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30854- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30855- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30856+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30857+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30858+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30859+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30860+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30861 target_stat_values[++index] = int_mod_timer_init;
30862 target_stat_values[++index] = int_mod_cq_depth_1;
30863 target_stat_values[++index] = int_mod_cq_depth_4;
30864diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c
30865--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
30866+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
30867@@ -45,9 +45,9 @@
30868
30869 #include <rdma/ib_umem.h>
30870
30871-atomic_t mod_qp_timouts;
30872-atomic_t qps_created;
30873-atomic_t sw_qps_destroyed;
30874+atomic_unchecked_t mod_qp_timouts;
30875+atomic_unchecked_t qps_created;
30876+atomic_unchecked_t sw_qps_destroyed;
30877
30878 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30879
30880@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
30881 if (init_attr->create_flags)
30882 return ERR_PTR(-EINVAL);
30883
30884- atomic_inc(&qps_created);
30885+ atomic_inc_unchecked(&qps_created);
30886 switch (init_attr->qp_type) {
30887 case IB_QPT_RC:
30888 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30889@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
30890 struct iw_cm_event cm_event;
30891 int ret;
30892
30893- atomic_inc(&sw_qps_destroyed);
30894+ atomic_inc_unchecked(&sw_qps_destroyed);
30895 nesqp->destroyed = 1;
30896
30897 /* Blow away the connection if it exists. */
30898diff -urNp linux-2.6.32.45/drivers/input/gameport/gameport.c linux-2.6.32.45/drivers/input/gameport/gameport.c
30899--- linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
30900+++ linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
30901@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
30902 */
30903 static void gameport_init_port(struct gameport *gameport)
30904 {
30905- static atomic_t gameport_no = ATOMIC_INIT(0);
30906+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30907
30908 __module_get(THIS_MODULE);
30909
30910 mutex_init(&gameport->drv_mutex);
30911 device_initialize(&gameport->dev);
30912- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
30913+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30914 gameport->dev.bus = &gameport_bus;
30915 gameport->dev.release = gameport_release_port;
30916 if (gameport->parent)
30917diff -urNp linux-2.6.32.45/drivers/input/input.c linux-2.6.32.45/drivers/input/input.c
30918--- linux-2.6.32.45/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
30919+++ linux-2.6.32.45/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
30920@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
30921 */
30922 int input_register_device(struct input_dev *dev)
30923 {
30924- static atomic_t input_no = ATOMIC_INIT(0);
30925+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30926 struct input_handler *handler;
30927 const char *path;
30928 int error;
30929@@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
30930 dev->setkeycode = input_default_setkeycode;
30931
30932 dev_set_name(&dev->dev, "input%ld",
30933- (unsigned long) atomic_inc_return(&input_no) - 1);
30934+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30935
30936 error = device_add(&dev->dev);
30937 if (error)
30938diff -urNp linux-2.6.32.45/drivers/input/joystick/sidewinder.c linux-2.6.32.45/drivers/input/joystick/sidewinder.c
30939--- linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
30940+++ linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
30941@@ -30,6 +30,7 @@
30942 #include <linux/kernel.h>
30943 #include <linux/module.h>
30944 #include <linux/slab.h>
30945+#include <linux/sched.h>
30946 #include <linux/init.h>
30947 #include <linux/input.h>
30948 #include <linux/gameport.h>
30949@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
30950 unsigned char buf[SW_LENGTH];
30951 int i;
30952
30953+ pax_track_stack();
30954+
30955 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
30956
30957 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
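
sw_read() above keeps a SW_LENGTH-byte packet buffer on the stack, and the patch drops a pax_track_stack() call in before the buffer is used (the same call shows up in the eicon, isdn_common, raid5 and media hunks further down, always in functions with large local frames). The call belongs to PaX's kernel stack sanitizing: deep stack users report their usage so the dirtied extent can be cleared before returning to user space, limiting what a later stack-based leak can recover. When the feature is not configured the call is expected to compile away; a hedged sketch of that fallback (not the patch's actual header):

/* Sketch only: the real definition lives elsewhere in this patch. */
#ifndef CONFIG_PAX_MEMORY_STACKLEAK
#define pax_track_stack() do { } while (0)
#else
void pax_track_stack(void);
#endif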
30958diff -urNp linux-2.6.32.45/drivers/input/joystick/xpad.c linux-2.6.32.45/drivers/input/joystick/xpad.c
30959--- linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
30960+++ linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
30961@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
30962
30963 static int xpad_led_probe(struct usb_xpad *xpad)
30964 {
30965- static atomic_t led_seq = ATOMIC_INIT(0);
30966+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30967 long led_no;
30968 struct xpad_led *led;
30969 struct led_classdev *led_cdev;
30970@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
30971 if (!led)
30972 return -ENOMEM;
30973
30974- led_no = (long)atomic_inc_return(&led_seq) - 1;
30975+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30976
30977 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30978 led->xpad = xpad;
30979diff -urNp linux-2.6.32.45/drivers/input/serio/serio.c linux-2.6.32.45/drivers/input/serio/serio.c
30980--- linux-2.6.32.45/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
30981+++ linux-2.6.32.45/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
30982@@ -527,7 +527,7 @@ static void serio_release_port(struct de
30983 */
30984 static void serio_init_port(struct serio *serio)
30985 {
30986- static atomic_t serio_no = ATOMIC_INIT(0);
30987+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30988
30989 __module_get(THIS_MODULE);
30990
30991@@ -536,7 +536,7 @@ static void serio_init_port(struct serio
30992 mutex_init(&serio->drv_mutex);
30993 device_initialize(&serio->dev);
30994 dev_set_name(&serio->dev, "serio%ld",
30995- (long)atomic_inc_return(&serio_no) - 1);
30996+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
30997 serio->dev.bus = &serio_bus;
30998 serio->dev.release = serio_release_port;
30999 if (serio->parent) {
31000diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/common.c linux-2.6.32.45/drivers/isdn/gigaset/common.c
31001--- linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
31002+++ linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
31003@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
31004 cs->commands_pending = 0;
31005 cs->cur_at_seq = 0;
31006 cs->gotfwver = -1;
31007- cs->open_count = 0;
31008+ local_set(&cs->open_count, 0);
31009 cs->dev = NULL;
31010 cs->tty = NULL;
31011 cs->tty_dev = NULL;
31012diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h
31013--- linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
31014+++ linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
31015@@ -34,6 +34,7 @@
31016 #include <linux/tty_driver.h>
31017 #include <linux/list.h>
31018 #include <asm/atomic.h>
31019+#include <asm/local.h>
31020
31021 #define GIG_VERSION {0,5,0,0}
31022 #define GIG_COMPAT {0,4,0,0}
31023@@ -446,7 +447,7 @@ struct cardstate {
31024 spinlock_t cmdlock;
31025 unsigned curlen, cmdbytes;
31026
31027- unsigned open_count;
31028+ local_t open_count;
31029 struct tty_struct *tty;
31030 struct tasklet_struct if_wake_tasklet;
31031 unsigned control_state;
31032diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/interface.c linux-2.6.32.45/drivers/isdn/gigaset/interface.c
31033--- linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
31034+++ linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
31035@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
31036 return -ERESTARTSYS; // FIXME -EINTR?
31037 tty->driver_data = cs;
31038
31039- ++cs->open_count;
31040-
31041- if (cs->open_count == 1) {
31042+ if (local_inc_return(&cs->open_count) == 1) {
31043 spin_lock_irqsave(&cs->lock, flags);
31044 cs->tty = tty;
31045 spin_unlock_irqrestore(&cs->lock, flags);
31046@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
31047
31048 if (!cs->connected)
31049 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31050- else if (!cs->open_count)
31051+ else if (!local_read(&cs->open_count))
31052 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31053 else {
31054- if (!--cs->open_count) {
31055+ if (!local_dec_return(&cs->open_count)) {
31056 spin_lock_irqsave(&cs->lock, flags);
31057 cs->tty = NULL;
31058 spin_unlock_irqrestore(&cs->lock, flags);
31059@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
31060 if (!cs->connected) {
31061 gig_dbg(DEBUG_IF, "not connected");
31062 retval = -ENODEV;
31063- } else if (!cs->open_count)
31064+ } else if (!local_read(&cs->open_count))
31065 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31066 else {
31067 retval = 0;
31068@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
31069 if (!cs->connected) {
31070 gig_dbg(DEBUG_IF, "not connected");
31071 retval = -ENODEV;
31072- } else if (!cs->open_count)
31073+ } else if (!local_read(&cs->open_count))
31074 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31075 else if (cs->mstate != MS_LOCKED) {
31076 dev_warn(cs->dev, "can't write to unlocked device\n");
31077@@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
31078 if (!cs->connected) {
31079 gig_dbg(DEBUG_IF, "not connected");
31080 retval = -ENODEV;
31081- } else if (!cs->open_count)
31082+ } else if (!local_read(&cs->open_count))
31083 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31084 else if (cs->mstate != MS_LOCKED) {
31085 dev_warn(cs->dev, "can't write to unlocked device\n");
31086@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
31087
31088 if (!cs->connected)
31089 gig_dbg(DEBUG_IF, "not connected");
31090- else if (!cs->open_count)
31091+ else if (!local_read(&cs->open_count))
31092 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31093 else if (cs->mstate != MS_LOCKED)
31094 dev_warn(cs->dev, "can't write to unlocked device\n");
31095@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
31096
31097 if (!cs->connected)
31098 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31099- else if (!cs->open_count)
31100+ else if (!local_read(&cs->open_count))
31101 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31102 else {
31103 //FIXME
31104@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
31105
31106 if (!cs->connected)
31107 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31108- else if (!cs->open_count)
31109+ else if (!local_read(&cs->open_count))
31110 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31111 else {
31112 //FIXME
31113@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
31114 goto out;
31115 }
31116
31117- if (!cs->open_count) {
31118+ if (!local_read(&cs->open_count)) {
31119 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31120 goto out;
31121 }
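
The gigaset changes above replace the driver's plain `unsigned open_count` with a local_t and funnel every read, increment and decrement through local_read()/local_inc_return()/local_dec_return(). The original tty callbacks bumped the counter with plain ++/-- and no common lock; local_t gives cheap atomic read-modify-write primitives (at least with respect to the local CPU), so the open/close bookkeeping is no longer torn-prone C arithmetic. The same pattern in isolation, assuming only <asm/local.h> (names illustrative):

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static int my_open_is_first(void)
{
	/* first opener performs the one-time setup, as in if_open() above */
	return local_inc_return(&open_count) == 1;
}

static int my_close_is_last(void)
{
	/* last closer tears everything down, as in if_close() above */
	return local_dec_return(&open_count) == 0;
}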
31122diff -urNp linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c
31123--- linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
31124+++ linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
31125@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
31126 }
31127 if (left) {
31128 if (t4file->user) {
31129- if (copy_from_user(buf, dp, left))
31130+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31131 return -EFAULT;
31132 } else {
31133 memcpy(buf, dp, left);
31134@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
31135 }
31136 if (left) {
31137 if (config->user) {
31138- if (copy_from_user(buf, dp, left))
31139+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31140 return -EFAULT;
31141 } else {
31142 memcpy(buf, dp, left);
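
Both b1_load_t4file() and b1_load_config() above copy a user-supplied tail of `left` bytes into a fixed on-stack `buf`. The original code trusted `left`; the patch adds a `left > sizeof buf` guard so an oversized request fails with -EFAULT instead of overflowing the kernel stack (icn.c and radio-cadet.c below get the same treatment). The idiom on its own, with illustrative names:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

static int load_chunk(const void __user *src, unsigned long left)
{
	u8 buf[256];	/* stand-in for the driver's fixed scratch buffer */

	/* refuse anything larger than the destination before copying */
	if (left > sizeof(buf) || copy_from_user(buf, src, left))
		return -EFAULT;
	/* ... consume buf[0..left-1] ... */
	return 0;
}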
31143diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c
31144--- linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
31145+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
31146@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
31147 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
31148 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
31149
31150+ pax_track_stack();
31151
31152 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
31153 {
31154diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c
31155--- linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
31156+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
31157@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
31158 IDI_SYNC_REQ req;
31159 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31160
31161+ pax_track_stack();
31162+
31163 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31164
31165 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31166diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c
31167--- linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
31168+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
31169@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
31170 IDI_SYNC_REQ req;
31171 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31172
31173+ pax_track_stack();
31174+
31175 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31176
31177 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31178diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c
31179--- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
31180+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
31181@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
31182 IDI_SYNC_REQ req;
31183 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31184
31185+ pax_track_stack();
31186+
31187 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31188
31189 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31190diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h
31191--- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-03-27 14:31:47.000000000 -0400
31192+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:33:55.000000000 -0400
31193@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31194 } diva_didd_add_adapter_t;
31195 typedef struct _diva_didd_remove_adapter {
31196 IDI_CALL p_request;
31197-} diva_didd_remove_adapter_t;
31198+} __no_const diva_didd_remove_adapter_t;
31199 typedef struct _diva_didd_read_adapter_array {
31200 void * buffer;
31201 dword length;
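
divasync.h above (and xdi_adapter.h, dvb_demux.h, dib3000.h and the dib0700 adapter state below) tag structures that consist of function pointers with __no_const. grsecurity's constification GCC plugin normally forces such ops-style structures to be const so the pointers land in read-only memory; __no_const opts a type out when the driver genuinely fills the pointers in at run time. Roughly what the annotation amounts to, assuming the plugin provides the attribute (a sketch, not the patch's exact wording):

/* With the constify plugin active, a function-pointer-only struct is
 * implicitly treated as const; __no_const keeps a type writable. */
#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

typedef struct _example_iface {
	int (*probe)(void *ctx);
	void (*remove)(void *ctx);
} __no_const example_iface_t;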
31202diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c
31203--- linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
31204+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
31205@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
31206 IDI_SYNC_REQ req;
31207 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31208
31209+ pax_track_stack();
31210+
31211 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31212
31213 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31214diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c
31215--- linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
31216+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
31217@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
31218 dword d;
31219 word w;
31220
31221+ pax_track_stack();
31222+
31223 a = plci->adapter;
31224 Id = ((word)plci->Id<<8)|a->Id;
31225 PUT_WORD(&SS_Ind[4],0x0000);
31226@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
31227 word j, n, w;
31228 dword d;
31229
31230+ pax_track_stack();
31231+
31232
31233 for(i=0;i<8;i++) bp_parms[i].length = 0;
31234 for(i=0;i<2;i++) global_config[i].length = 0;
31235@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
31236 const byte llc3[] = {4,3,2,2,6,6,0};
31237 const byte header[] = {0,2,3,3,0,0,0};
31238
31239+ pax_track_stack();
31240+
31241 for(i=0;i<8;i++) bp_parms[i].length = 0;
31242 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
31243 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
31244@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
31245 word appl_number_group_type[MAX_APPL];
31246 PLCI *auxplci;
31247
31248+ pax_track_stack();
31249+
31250 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
31251
31252 if(!a->group_optimization_enabled)
31253diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c
31254--- linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
31255+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
31256@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
31257 IDI_SYNC_REQ req;
31258 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31259
31260+ pax_track_stack();
31261+
31262 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31263
31264 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31265diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h
31266--- linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-03-27 14:31:47.000000000 -0400
31267+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:33:55.000000000 -0400
31268@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31269 typedef struct _diva_os_idi_adapter_interface {
31270 diva_init_card_proc_t cleanup_adapter_proc;
31271 diva_cmd_card_proc_t cmd_proc;
31272-} diva_os_idi_adapter_interface_t;
31273+} __no_const diva_os_idi_adapter_interface_t;
31274
31275 typedef struct _diva_os_xdi_adapter {
31276 struct list_head link;
31277diff -urNp linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c
31278--- linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
31279+++ linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
31280@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
31281 } iocpar;
31282 void __user *argp = (void __user *)arg;
31283
31284+ pax_track_stack();
31285+
31286 #define name iocpar.name
31287 #define bname iocpar.bname
31288 #define iocts iocpar.iocts
31289diff -urNp linux-2.6.32.45/drivers/isdn/icn/icn.c linux-2.6.32.45/drivers/isdn/icn/icn.c
31290--- linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
31291+++ linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
31292@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
31293 if (count > len)
31294 count = len;
31295 if (user) {
31296- if (copy_from_user(msg, buf, count))
31297+ if (count > sizeof msg || copy_from_user(msg, buf, count))
31298 return -EFAULT;
31299 } else
31300 memcpy(msg, buf, count);
31301diff -urNp linux-2.6.32.45/drivers/isdn/mISDN/socket.c linux-2.6.32.45/drivers/isdn/mISDN/socket.c
31302--- linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
31303+++ linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
31304@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
31305 if (dev) {
31306 struct mISDN_devinfo di;
31307
31308+ memset(&di, 0, sizeof(di));
31309 di.id = dev->id;
31310 di.Dprotocols = dev->Dprotocols;
31311 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31312@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
31313 if (dev) {
31314 struct mISDN_devinfo di;
31315
31316+ memset(&di, 0, sizeof(di));
31317 di.id = dev->id;
31318 di.Dprotocols = dev->Dprotocols;
31319 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
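
The two mISDN ioctl hunks above zero the on-stack struct mISDN_devinfo before filling it in and copying it out. Without the memset, structure padding and any fields the kernel does not explicitly set would carry stale stack contents to user space, a classic kernel-to-user information leak. The general pattern, as a short sketch (struct and names illustrative, not struct mISDN_devinfo):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct devinfo {
	u32 id;
	u32 flags;
	char name[16];			/* string copy leaves trailing bytes */
};

static long fill_devinfo(void __user *arg)
{
	struct devinfo di;

	memset(&di, 0, sizeof(di));	/* clear padding and unwritten bytes */
	di.id = 1;
	strlcpy(di.name, "card0", sizeof(di.name));

	return copy_to_user(arg, &di, sizeof(di)) ? -EFAULT : 0;
}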
31320diff -urNp linux-2.6.32.45/drivers/isdn/sc/interrupt.c linux-2.6.32.45/drivers/isdn/sc/interrupt.c
31321--- linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
31322+++ linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
31323@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
31324 }
31325 else if(callid>=0x0000 && callid<=0x7FFF)
31326 {
31327+ int len;
31328+
31329 pr_debug("%s: Got Incoming Call\n",
31330 sc_adapter[card]->devicename);
31331- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
31332- strcpy(setup.eazmsn,
31333- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
31334+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
31335+ sizeof(setup.phone));
31336+ if (len >= sizeof(setup.phone))
31337+ continue;
31338+ len = strlcpy(setup.eazmsn,
31339+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31340+ sizeof(setup.eazmsn));
31341+ if (len >= sizeof(setup.eazmsn))
31342+ continue;
31343 setup.si1 = 7;
31344 setup.si2 = 0;
31345 setup.plan = 0;
31346@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
31347 * Handle a GetMyNumber Rsp
31348 */
31349 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
31350- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
31351+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31352+ rcvmsg.msg_data.byte_array,
31353+ sizeof(rcvmsg.msg_data.byte_array));
31354 continue;
31355 }
31356
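
The sc/interrupt.c hunk above replaces unbounded strcpy() calls into setup.phone, setup.eazmsn and the per-channel dn buffer with strlcpy(), and skips the message when the source would not fit. strlcpy() always NUL-terminates and returns the length of the source string, so comparing the return value against the destination size is the standard truncation test:

#include <linux/string.h>
#include <linux/errno.h>

static int copy_number(char *dst, size_t dst_len, const char *src)
{
	size_t len = strlcpy(dst, src, dst_len);

	/* strlcpy() returns strlen(src); >= dst_len means it was truncated */
	if (len >= dst_len)
		return -EINVAL;
	return 0;
}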
31357diff -urNp linux-2.6.32.45/drivers/lguest/core.c linux-2.6.32.45/drivers/lguest/core.c
31358--- linux-2.6.32.45/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
31359+++ linux-2.6.32.45/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
31360@@ -91,9 +91,17 @@ static __init int map_switcher(void)
31361 * it's worked so far. The end address needs +1 because __get_vm_area
31362 * allocates an extra guard page, so we need space for that.
31363 */
31364+
31365+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31366+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31367+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31368+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31369+#else
31370 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31371 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31372 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31373+#endif
31374+
31375 if (!switcher_vma) {
31376 err = -ENOMEM;
31377 printk("lguest: could not map switcher pages high\n");
31378@@ -118,7 +126,7 @@ static __init int map_switcher(void)
31379 * Now the Switcher is mapped at the right address, we can't fail!
31380 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
31381 */
31382- memcpy(switcher_vma->addr, start_switcher_text,
31383+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31384 end_switcher_text - start_switcher_text);
31385
31386 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31387diff -urNp linux-2.6.32.45/drivers/lguest/x86/core.c linux-2.6.32.45/drivers/lguest/x86/core.c
31388--- linux-2.6.32.45/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
31389+++ linux-2.6.32.45/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
31390@@ -59,7 +59,7 @@ static struct {
31391 /* Offset from where switcher.S was compiled to where we've copied it */
31392 static unsigned long switcher_offset(void)
31393 {
31394- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31395+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31396 }
31397
31398 /* This cpu's struct lguest_pages. */
31399@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
31400 * These copies are pretty cheap, so we do them unconditionally: */
31401 /* Save the current Host top-level page directory.
31402 */
31403+
31404+#ifdef CONFIG_PAX_PER_CPU_PGD
31405+ pages->state.host_cr3 = read_cr3();
31406+#else
31407 pages->state.host_cr3 = __pa(current->mm->pgd);
31408+#endif
31409+
31410 /*
31411 * Set up the Guest's page tables to see this CPU's pages (and no
31412 * other CPU's pages).
31413@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
31414 * compiled-in switcher code and the high-mapped copy we just made.
31415 */
31416 for (i = 0; i < IDT_ENTRIES; i++)
31417- default_idt_entries[i] += switcher_offset();
31418+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31419
31420 /*
31421 * Set up the Switcher's per-cpu areas.
31422@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
31423 * it will be undisturbed when we switch. To change %cs and jump we
31424 * need this structure to feed to Intel's "lcall" instruction.
31425 */
31426- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31427+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31428 lguest_entry.segment = LGUEST_CS;
31429
31430 /*
31431diff -urNp linux-2.6.32.45/drivers/lguest/x86/switcher_32.S linux-2.6.32.45/drivers/lguest/x86/switcher_32.S
31432--- linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
31433+++ linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
31434@@ -87,6 +87,7 @@
31435 #include <asm/page.h>
31436 #include <asm/segment.h>
31437 #include <asm/lguest.h>
31438+#include <asm/processor-flags.h>
31439
31440 // We mark the start of the code to copy
31441 // It's placed in .text tho it's never run here
31442@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31443 // Changes type when we load it: damn Intel!
31444 // For after we switch over our page tables
31445 // That entry will be read-only: we'd crash.
31446+
31447+#ifdef CONFIG_PAX_KERNEXEC
31448+ mov %cr0, %edx
31449+ xor $X86_CR0_WP, %edx
31450+ mov %edx, %cr0
31451+#endif
31452+
31453 movl $(GDT_ENTRY_TSS*8), %edx
31454 ltr %dx
31455
31456@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31457 // Let's clear it again for our return.
31458 // The GDT descriptor of the Host
31459 // Points to the table after two "size" bytes
31460- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31461+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31462 // Clear "used" from type field (byte 5, bit 2)
31463- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31464+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31465+
31466+#ifdef CONFIG_PAX_KERNEXEC
31467+ mov %cr0, %eax
31468+ xor $X86_CR0_WP, %eax
31469+ mov %eax, %cr0
31470+#endif
31471
31472 // Once our page table's switched, the Guest is live!
31473 // The Host fades as we run this final step.
31474@@ -295,13 +309,12 @@ deliver_to_host:
31475 // I consulted gcc, and it gave
31476 // These instructions, which I gladly credit:
31477 leal (%edx,%ebx,8), %eax
31478- movzwl (%eax),%edx
31479- movl 4(%eax), %eax
31480- xorw %ax, %ax
31481- orl %eax, %edx
31482+ movl 4(%eax), %edx
31483+ movw (%eax), %dx
31484 // Now the address of the handler's in %edx
31485 // We call it now: its "iret" drops us home.
31486- jmp *%edx
31487+ ljmp $__KERNEL_CS, $1f
31488+1: jmp *%edx
31489
31490 // Every interrupt can come to us here
31491 // But we must truly tell each apart.
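
The lguest changes above adapt the host-side switcher to the rest of this patch: the switcher copy and the IDT/entry fixups go through ktla_ktva(), which translates between the two views of kernel text once KERNEXEC has rearranged its mapping, the switcher VMA is allocated with VM_KERNEXEC, the host CR3 comes from read_cr3() under per-CPU page directories, and the 32-bit switcher briefly clears CR0.WP so it can update the otherwise read-only TSS/GDT entries. The CR0.WP dance, written out in C for clarity (a sketch of what the added assembly does, not code from the patch; callers are assumed to run with interrupts disabled so the window cannot be interrupted):

#include <asm/system.h>
#include <asm/processor-flags.h>

static void with_wp_disabled(void (*do_write)(void))
{
	unsigned long cr0 = read_cr0();

	write_cr0(cr0 & ~X86_CR0_WP);	/* CPU now ignores page-level W bits */
	do_write();			/* touch the otherwise read-only entry */
	write_cr0(cr0);			/* restore write protection */
}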
31492diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c
31493--- linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
31494+++ linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
31495@@ -15,7 +15,7 @@
31496
31497 #define MAX_PMU_LEVEL 0xFF
31498
31499-static struct backlight_ops pmu_backlight_data;
31500+static const struct backlight_ops pmu_backlight_data;
31501 static DEFINE_SPINLOCK(pmu_backlight_lock);
31502 static int sleeping, uses_pmu_bl;
31503 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
31504@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
31505 return bd->props.brightness;
31506 }
31507
31508-static struct backlight_ops pmu_backlight_data = {
31509+static const struct backlight_ops pmu_backlight_data = {
31510 .get_brightness = pmu_backlight_get_brightness,
31511 .update_status = pmu_backlight_update_status,
31512
31513diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu.c linux-2.6.32.45/drivers/macintosh/via-pmu.c
31514--- linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
31515+++ linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
31516@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
31517 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
31518 }
31519
31520-static struct platform_suspend_ops pmu_pm_ops = {
31521+static const struct platform_suspend_ops pmu_pm_ops = {
31522 .enter = powerbook_sleep,
31523 .valid = pmu_sleep_valid,
31524 };
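
via-pmu-backlight.c and via-pmu.c above, like the sysfs_ops changes in drivers/md below, constify operation tables: an ops structure that is only ever read can be declared const, which lets the linker place its function pointers in .rodata where a stray or attacker-controlled kernel write cannot redirect them. The pattern in isolation:

struct my_ops {
	int (*start)(void);
	void (*stop)(void);
};

static int my_start(void) { return 0; }
static void my_stop(void) { }

/* const => the pointer table lands in .rodata instead of writable .data */
static const struct my_ops my_ops = {
	.start	= my_start,
	.stop	= my_stop,
};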
31525diff -urNp linux-2.6.32.45/drivers/md/dm.c linux-2.6.32.45/drivers/md/dm.c
31526--- linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:35:29.000000000 -0400
31527+++ linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:33:59.000000000 -0400
31528@@ -165,9 +165,9 @@ struct mapped_device {
31529 /*
31530 * Event handling.
31531 */
31532- atomic_t event_nr;
31533+ atomic_unchecked_t event_nr;
31534 wait_queue_head_t eventq;
31535- atomic_t uevent_seq;
31536+ atomic_unchecked_t uevent_seq;
31537 struct list_head uevent_list;
31538 spinlock_t uevent_lock; /* Protect access to uevent_list */
31539
31540@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(i
31541 rwlock_init(&md->map_lock);
31542 atomic_set(&md->holders, 1);
31543 atomic_set(&md->open_count, 0);
31544- atomic_set(&md->event_nr, 0);
31545- atomic_set(&md->uevent_seq, 0);
31546+ atomic_set_unchecked(&md->event_nr, 0);
31547+ atomic_set_unchecked(&md->uevent_seq, 0);
31548 INIT_LIST_HEAD(&md->uevent_list);
31549 spin_lock_init(&md->uevent_lock);
31550
31551@@ -1927,7 +1927,7 @@ static void event_callback(void *context
31552
31553 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31554
31555- atomic_inc(&md->event_nr);
31556+ atomic_inc_unchecked(&md->event_nr);
31557 wake_up(&md->eventq);
31558 }
31559
31560@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_dev
31561
31562 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31563 {
31564- return atomic_add_return(1, &md->uevent_seq);
31565+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31566 }
31567
31568 uint32_t dm_get_event_nr(struct mapped_device *md)
31569 {
31570- return atomic_read(&md->event_nr);
31571+ return atomic_read_unchecked(&md->event_nr);
31572 }
31573
31574 int dm_wait_event(struct mapped_device *md, int event_nr)
31575 {
31576 return wait_event_interruptible(md->eventq,
31577- (event_nr != atomic_read(&md->event_nr)));
31578+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31579 }
31580
31581 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31582diff -urNp linux-2.6.32.45/drivers/md/dm-ioctl.c linux-2.6.32.45/drivers/md/dm-ioctl.c
31583--- linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
31584+++ linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
31585@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
31586 cmd == DM_LIST_VERSIONS_CMD)
31587 return 0;
31588
31589- if ((cmd == DM_DEV_CREATE_CMD)) {
31590+ if (cmd == DM_DEV_CREATE_CMD) {
31591 if (!*param->name) {
31592 DMWARN("name not supplied when creating device");
31593 return -EINVAL;
31594diff -urNp linux-2.6.32.45/drivers/md/dm-raid1.c linux-2.6.32.45/drivers/md/dm-raid1.c
31595--- linux-2.6.32.45/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
31596+++ linux-2.6.32.45/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
31597@@ -41,7 +41,7 @@ enum dm_raid1_error {
31598
31599 struct mirror {
31600 struct mirror_set *ms;
31601- atomic_t error_count;
31602+ atomic_unchecked_t error_count;
31603 unsigned long error_type;
31604 struct dm_dev *dev;
31605 sector_t offset;
31606@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
31607 * simple way to tell if a device has encountered
31608 * errors.
31609 */
31610- atomic_inc(&m->error_count);
31611+ atomic_inc_unchecked(&m->error_count);
31612
31613 if (test_and_set_bit(error_type, &m->error_type))
31614 return;
31615@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
31616 }
31617
31618 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
31619- if (!atomic_read(&new->error_count)) {
31620+ if (!atomic_read_unchecked(&new->error_count)) {
31621 set_default_mirror(new);
31622 break;
31623 }
31624@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
31625 struct mirror *m = get_default_mirror(ms);
31626
31627 do {
31628- if (likely(!atomic_read(&m->error_count)))
31629+ if (likely(!atomic_read_unchecked(&m->error_count)))
31630 return m;
31631
31632 if (m-- == ms->mirror)
31633@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
31634 {
31635 struct mirror *default_mirror = get_default_mirror(m->ms);
31636
31637- return !atomic_read(&default_mirror->error_count);
31638+ return !atomic_read_unchecked(&default_mirror->error_count);
31639 }
31640
31641 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31642@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31643 */
31644 if (likely(region_in_sync(ms, region, 1)))
31645 m = choose_mirror(ms, bio->bi_sector);
31646- else if (m && atomic_read(&m->error_count))
31647+ else if (m && atomic_read_unchecked(&m->error_count))
31648 m = NULL;
31649
31650 if (likely(m))
31651@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31652 }
31653
31654 ms->mirror[mirror].ms = ms;
31655- atomic_set(&(ms->mirror[mirror].error_count), 0);
31656+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31657 ms->mirror[mirror].error_type = 0;
31658 ms->mirror[mirror].offset = offset;
31659
31660@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31661 */
31662 static char device_status_char(struct mirror *m)
31663 {
31664- if (!atomic_read(&(m->error_count)))
31665+ if (!atomic_read_unchecked(&(m->error_count)))
31666 return 'A';
31667
31668 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31669diff -urNp linux-2.6.32.45/drivers/md/dm-stripe.c linux-2.6.32.45/drivers/md/dm-stripe.c
31670--- linux-2.6.32.45/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31671+++ linux-2.6.32.45/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31672@@ -20,7 +20,7 @@ struct stripe {
31673 struct dm_dev *dev;
31674 sector_t physical_start;
31675
31676- atomic_t error_count;
31677+ atomic_unchecked_t error_count;
31678 };
31679
31680 struct stripe_c {
31681@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31682 kfree(sc);
31683 return r;
31684 }
31685- atomic_set(&(sc->stripe[i].error_count), 0);
31686+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31687 }
31688
31689 ti->private = sc;
31690@@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31691 DMEMIT("%d ", sc->stripes);
31692 for (i = 0; i < sc->stripes; i++) {
31693 DMEMIT("%s ", sc->stripe[i].dev->name);
31694- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31695+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31696 'D' : 'A';
31697 }
31698 buffer[i] = '\0';
31699@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31700 */
31701 for (i = 0; i < sc->stripes; i++)
31702 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31703- atomic_inc(&(sc->stripe[i].error_count));
31704- if (atomic_read(&(sc->stripe[i].error_count)) <
31705+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31706+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31707 DM_IO_ERROR_THRESHOLD)
31708 queue_work(kstriped, &sc->kstriped_ws);
31709 }
31710diff -urNp linux-2.6.32.45/drivers/md/dm-sysfs.c linux-2.6.32.45/drivers/md/dm-sysfs.c
31711--- linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
31712+++ linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
31713@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
31714 NULL,
31715 };
31716
31717-static struct sysfs_ops dm_sysfs_ops = {
31718+static const struct sysfs_ops dm_sysfs_ops = {
31719 .show = dm_attr_show,
31720 };
31721
31722diff -urNp linux-2.6.32.45/drivers/md/dm-table.c linux-2.6.32.45/drivers/md/dm-table.c
31723--- linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
31724+++ linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
31725@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
31726 if (!dev_size)
31727 return 0;
31728
31729- if ((start >= dev_size) || (start + len > dev_size)) {
31730+ if ((start >= dev_size) || (len > dev_size - start)) {
31731 DMWARN("%s: %s too small for target: "
31732 "start=%llu, len=%llu, dev_size=%llu",
31733 dm_device_name(ti->table->md), bdevname(bdev, b),
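
device_area_is_invalid() above swaps `start + len > dev_size` for `len > dev_size - start`. With 64-bit sector counts coming from user space, `start + len` can wrap and make a bogus region look valid; once `start >= dev_size` has already been rejected, `dev_size - start` cannot underflow, so the rewritten comparison is overflow-proof. The same guard as a standalone sketch:

#include <linux/types.h>

/* true if [start, start + len) lies within a device of dev_size sectors */
static bool range_ok(u64 start, u64 len, u64 dev_size)
{
	if (start >= dev_size)
		return false;
	/* dev_size - start cannot underflow here, and nothing can wrap */
	return len <= dev_size - start;
}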
31734diff -urNp linux-2.6.32.45/drivers/md/md.c linux-2.6.32.45/drivers/md/md.c
31735--- linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
31736+++ linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
31737@@ -153,10 +153,10 @@ static int start_readonly;
31738 * start build, activate spare
31739 */
31740 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31741-static atomic_t md_event_count;
31742+static atomic_unchecked_t md_event_count;
31743 void md_new_event(mddev_t *mddev)
31744 {
31745- atomic_inc(&md_event_count);
31746+ atomic_inc_unchecked(&md_event_count);
31747 wake_up(&md_event_waiters);
31748 }
31749 EXPORT_SYMBOL_GPL(md_new_event);
31750@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31751 */
31752 static void md_new_event_inintr(mddev_t *mddev)
31753 {
31754- atomic_inc(&md_event_count);
31755+ atomic_inc_unchecked(&md_event_count);
31756 wake_up(&md_event_waiters);
31757 }
31758
31759@@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
31760
31761 rdev->preferred_minor = 0xffff;
31762 rdev->data_offset = le64_to_cpu(sb->data_offset);
31763- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31764+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31765
31766 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31767 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31768@@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
31769 else
31770 sb->resync_offset = cpu_to_le64(0);
31771
31772- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31773+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31774
31775 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31776 sb->size = cpu_to_le64(mddev->dev_sectors);
31777@@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
31778 static ssize_t
31779 errors_show(mdk_rdev_t *rdev, char *page)
31780 {
31781- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31782+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31783 }
31784
31785 static ssize_t
31786@@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
31787 char *e;
31788 unsigned long n = simple_strtoul(buf, &e, 10);
31789 if (*buf && (*e == 0 || *e == '\n')) {
31790- atomic_set(&rdev->corrected_errors, n);
31791+ atomic_set_unchecked(&rdev->corrected_errors, n);
31792 return len;
31793 }
31794 return -EINVAL;
31795@@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
31796 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
31797 kfree(rdev);
31798 }
31799-static struct sysfs_ops rdev_sysfs_ops = {
31800+static const struct sysfs_ops rdev_sysfs_ops = {
31801 .show = rdev_attr_show,
31802 .store = rdev_attr_store,
31803 };
31804@@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
31805 rdev->data_offset = 0;
31806 rdev->sb_events = 0;
31807 atomic_set(&rdev->nr_pending, 0);
31808- atomic_set(&rdev->read_errors, 0);
31809- atomic_set(&rdev->corrected_errors, 0);
31810+ atomic_set_unchecked(&rdev->read_errors, 0);
31811+ atomic_set_unchecked(&rdev->corrected_errors, 0);
31812
31813 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
31814 if (!size) {
31815@@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
31816 kfree(mddev);
31817 }
31818
31819-static struct sysfs_ops md_sysfs_ops = {
31820+static const struct sysfs_ops md_sysfs_ops = {
31821 .show = md_attr_show,
31822 .store = md_attr_store,
31823 };
31824@@ -4474,7 +4474,8 @@ out:
31825 err = 0;
31826 blk_integrity_unregister(disk);
31827 md_new_event(mddev);
31828- sysfs_notify_dirent(mddev->sysfs_state);
31829+ if (mddev->sysfs_state)
31830+ sysfs_notify_dirent(mddev->sysfs_state);
31831 return err;
31832 }
31833
31834@@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
31835
31836 spin_unlock(&pers_lock);
31837 seq_printf(seq, "\n");
31838- mi->event = atomic_read(&md_event_count);
31839+ mi->event = atomic_read_unchecked(&md_event_count);
31840 return 0;
31841 }
31842 if (v == (void*)2) {
31843@@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
31844 chunk_kb ? "KB" : "B");
31845 if (bitmap->file) {
31846 seq_printf(seq, ", file: ");
31847- seq_path(seq, &bitmap->file->f_path, " \t\n");
31848+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31849 }
31850
31851 seq_printf(seq, "\n");
31852@@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
31853 else {
31854 struct seq_file *p = file->private_data;
31855 p->private = mi;
31856- mi->event = atomic_read(&md_event_count);
31857+ mi->event = atomic_read_unchecked(&md_event_count);
31858 }
31859 return error;
31860 }
31861@@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
31862 /* always allow read */
31863 mask = POLLIN | POLLRDNORM;
31864
31865- if (mi->event != atomic_read(&md_event_count))
31866+ if (mi->event != atomic_read_unchecked(&md_event_count))
31867 mask |= POLLERR | POLLPRI;
31868 return mask;
31869 }
31870@@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
31871 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31872 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31873 (int)part_stat_read(&disk->part0, sectors[1]) -
31874- atomic_read(&disk->sync_io);
31875+ atomic_read_unchecked(&disk->sync_io);
31876 /* sync IO will cause sync_io to increase before the disk_stats
31877 * as sync_io is counted when a request starts, and
31878 * disk_stats is counted when it completes.
31879diff -urNp linux-2.6.32.45/drivers/md/md.h linux-2.6.32.45/drivers/md/md.h
31880--- linux-2.6.32.45/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
31881+++ linux-2.6.32.45/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
31882@@ -94,10 +94,10 @@ struct mdk_rdev_s
31883 * only maintained for arrays that
31884 * support hot removal
31885 */
31886- atomic_t read_errors; /* number of consecutive read errors that
31887+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
31888 * we have tried to ignore.
31889 */
31890- atomic_t corrected_errors; /* number of corrected read errors,
31891+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31892 * for reporting to userspace and storing
31893 * in superblock.
31894 */
31895@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
31896
31897 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31898 {
31899- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31900+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31901 }
31902
31903 struct mdk_personality
31904diff -urNp linux-2.6.32.45/drivers/md/raid10.c linux-2.6.32.45/drivers/md/raid10.c
31905--- linux-2.6.32.45/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
31906+++ linux-2.6.32.45/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
31907@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
31908 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
31909 set_bit(R10BIO_Uptodate, &r10_bio->state);
31910 else {
31911- atomic_add(r10_bio->sectors,
31912+ atomic_add_unchecked(r10_bio->sectors,
31913 &conf->mirrors[d].rdev->corrected_errors);
31914 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
31915 md_error(r10_bio->mddev,
31916@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
31917 test_bit(In_sync, &rdev->flags)) {
31918 atomic_inc(&rdev->nr_pending);
31919 rcu_read_unlock();
31920- atomic_add(s, &rdev->corrected_errors);
31921+ atomic_add_unchecked(s, &rdev->corrected_errors);
31922 if (sync_page_io(rdev->bdev,
31923 r10_bio->devs[sl].addr +
31924 sect + rdev->data_offset,
31925diff -urNp linux-2.6.32.45/drivers/md/raid1.c linux-2.6.32.45/drivers/md/raid1.c
31926--- linux-2.6.32.45/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
31927+++ linux-2.6.32.45/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
31928@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
31929 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
31930 continue;
31931 rdev = conf->mirrors[d].rdev;
31932- atomic_add(s, &rdev->corrected_errors);
31933+ atomic_add_unchecked(s, &rdev->corrected_errors);
31934 if (sync_page_io(rdev->bdev,
31935 sect + rdev->data_offset,
31936 s<<9,
31937@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
31938 /* Well, this device is dead */
31939 md_error(mddev, rdev);
31940 else {
31941- atomic_add(s, &rdev->corrected_errors);
31942+ atomic_add_unchecked(s, &rdev->corrected_errors);
31943 printk(KERN_INFO
31944 "raid1:%s: read error corrected "
31945 "(%d sectors at %llu on %s)\n",
31946diff -urNp linux-2.6.32.45/drivers/md/raid5.c linux-2.6.32.45/drivers/md/raid5.c
31947--- linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
31948+++ linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
31949@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
31950 bi->bi_next = NULL;
31951 if ((rw & WRITE) &&
31952 test_bit(R5_ReWrite, &sh->dev[i].flags))
31953- atomic_add(STRIPE_SECTORS,
31954+ atomic_add_unchecked(STRIPE_SECTORS,
31955 &rdev->corrected_errors);
31956 generic_make_request(bi);
31957 } else {
31958@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
31959 clear_bit(R5_ReadError, &sh->dev[i].flags);
31960 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31961 }
31962- if (atomic_read(&conf->disks[i].rdev->read_errors))
31963- atomic_set(&conf->disks[i].rdev->read_errors, 0);
31964+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31965+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31966 } else {
31967 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31968 int retry = 0;
31969 rdev = conf->disks[i].rdev;
31970
31971 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31972- atomic_inc(&rdev->read_errors);
31973+ atomic_inc_unchecked(&rdev->read_errors);
31974 if (conf->mddev->degraded >= conf->max_degraded)
31975 printk_rl(KERN_WARNING
31976 "raid5:%s: read error not correctable "
31977@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
31978 (unsigned long long)(sh->sector
31979 + rdev->data_offset),
31980 bdn);
31981- else if (atomic_read(&rdev->read_errors)
31982+ else if (atomic_read_unchecked(&rdev->read_errors)
31983 > conf->max_nr_stripes)
31984 printk(KERN_WARNING
31985 "raid5:%s: Too many read errors, failing device %s.\n",
31986@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
31987 sector_t r_sector;
31988 struct stripe_head sh2;
31989
31990+ pax_track_stack();
31991
31992 chunk_offset = sector_div(new_sector, sectors_per_chunk);
31993 stripe = new_sector;
31994diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_fops.c linux-2.6.32.45/drivers/media/common/saa7146_fops.c
31995--- linux-2.6.32.45/drivers/media/common/saa7146_fops.c 2011-03-27 14:31:47.000000000 -0400
31996+++ linux-2.6.32.45/drivers/media/common/saa7146_fops.c 2011-08-05 20:33:55.000000000 -0400
31997@@ -458,7 +458,7 @@ int saa7146_vv_init(struct saa7146_dev*
31998 ERR(("out of memory. aborting.\n"));
31999 return -ENOMEM;
32000 }
32001- ext_vv->ops = saa7146_video_ioctl_ops;
32002+ memcpy((void *)&ext_vv->ops, &saa7146_video_ioctl_ops, sizeof(saa7146_video_ioctl_ops));
32003 ext_vv->core_ops = &saa7146_video_ioctl_ops;
32004
32005 DEB_EE(("dev:%p\n",dev));
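
saa7146_vv_init() above no longer assigns the whole vidioc ops structure directly; it memcpy()s into it through a cast, and the hexium, av7110 and budget-av hunks below patch individual handlers with `*(void **)&ops.field = fn`. With the constification plugin treating these ops structures as read-only at the C level, a plain member assignment no longer compiles, so the few legitimate run-time initialisations are funnelled through explicit casts that keep the writes intentional and easy to audit. A sketch of the idiom (types illustrative; like the kernel, it assumes object and function pointers share a representation):

#include <linux/string.h>

struct vid_ops {			/* stand-in for the v4l ioctl ops table */
	int (*enum_input)(void *priv, int *idx);
};

struct vid_ext {			/* the plugin would treat .ops as read-only */
	struct vid_ops ops;
};

static int my_enum_input(void *priv, int *idx)
{
	*idx = 0;
	return 0;
}

static void init_ext(struct vid_ext *ext, const struct vid_ops *tmpl)
{
	/* deliberate, greppable writes into a nominally read-only table */
	memcpy((void *)&ext->ops, tmpl, sizeof(ext->ops));
	*(void **)&ext->ops.enum_input = my_enum_input;
}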
32006diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_hlp.c linux-2.6.32.45/drivers/media/common/saa7146_hlp.c
32007--- linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
32008+++ linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
32009@@ -353,6 +353,8 @@ static void calculate_clipping_registers
32010
32011 int x[32], y[32], w[32], h[32];
32012
32013+ pax_track_stack();
32014+
32015 /* clear out memory */
32016 memset(&line_list[0], 0x00, sizeof(u32)*32);
32017 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
32018diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
32019--- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
32020+++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
32021@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
32022 u8 buf[HOST_LINK_BUF_SIZE];
32023 int i;
32024
32025+ pax_track_stack();
32026+
32027 dprintk("%s\n", __func__);
32028
32029 /* check if we have space for a link buf in the rx_buffer */
32030@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
32031 unsigned long timeout;
32032 int written;
32033
32034+ pax_track_stack();
32035+
32036 dprintk("%s\n", __func__);
32037
32038 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
32039diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h
32040--- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-03-27 14:31:47.000000000 -0400
32041+++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:33:55.000000000 -0400
32042@@ -71,7 +71,7 @@ struct dvb_demux_feed {
32043 union {
32044 dmx_ts_cb ts;
32045 dmx_section_cb sec;
32046- } cb;
32047+ } __no_const cb;
32048
32049 struct dvb_demux *demux;
32050 void *priv;
32051diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c
32052--- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
32053+++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:33:55.000000000 -0400
32054@@ -228,8 +228,8 @@ int dvb_register_device(struct dvb_adapt
32055 dvbdev->fops = dvbdevfops;
32056 init_waitqueue_head (&dvbdev->wait_queue);
32057
32058- memcpy(dvbdevfops, template->fops, sizeof(struct file_operations));
32059- dvbdevfops->owner = adap->module;
32060+ memcpy((void *)dvbdevfops, template->fops, sizeof(struct file_operations));
32061+ *(void **)&dvbdevfops->owner = adap->module;
32062
32063 list_add_tail (&dvbdev->list_head, &adap->device_list);
32064
32065diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c
32066--- linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-03-27 14:31:47.000000000 -0400
32067+++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:33:55.000000000 -0400
32068@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_di
32069 struct dib0700_adapter_state {
32070 int (*set_param_save) (struct dvb_frontend *,
32071 struct dvb_frontend_parameters *);
32072-};
32073+} __no_const;
32074
32075 static int dib7070_set_param_override(struct dvb_frontend *fe,
32076 struct dvb_frontend_parameters *fep)
32077diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c
32078--- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
32079+++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
32080@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
32081
32082 u8 buf[260];
32083
32084+ pax_track_stack();
32085+
32086 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
32087 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
32088
32089diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c
32090--- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-05-10 22:12:01.000000000 -0400
32091+++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-08-05 20:33:55.000000000 -0400
32092@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "
32093
32094 struct dib0700_adapter_state {
32095 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
32096-};
32097+} __no_const;
32098
32099 /* Hauppauge Nova-T 500 (aka Bristol)
32100 * has a LNA on GPIO0 which is enabled by setting 1 */
32101diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h
32102--- linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-03-27 14:31:47.000000000 -0400
32103+++ linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:33:55.000000000 -0400
32104@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
32105 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
32106 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
32107 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
32108-};
32109+} __no_const;
32110
32111 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
32112 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
32113diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c
32114--- linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
32115+++ linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
32116@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
32117 u8 tudata[585];
32118 int i;
32119
32120+ pax_track_stack();
32121+
32122 dprintk("Firmware is %zd bytes\n",fw->size);
32123
32124 /* Get eprom data */
32125diff -urNp linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c
32126--- linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c 2011-03-27 14:31:47.000000000 -0400
32127+++ linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c 2011-08-05 20:33:55.000000000 -0400
32128@@ -796,18 +796,18 @@ int av7110_init_v4l(struct av7110 *av711
32129 ERR(("cannot init capture device. skipping.\n"));
32130 return -ENODEV;
32131 }
32132- vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32133- vv_data->ops.vidioc_g_input = vidioc_g_input;
32134- vv_data->ops.vidioc_s_input = vidioc_s_input;
32135- vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32136- vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32137- vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32138- vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32139- vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32140- vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32141- vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32142- vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32143- vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32144+ *(void **)&vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32145+ *(void **)&vv_data->ops.vidioc_g_input = vidioc_g_input;
32146+ *(void **)&vv_data->ops.vidioc_s_input = vidioc_s_input;
32147+ *(void **)&vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32148+ *(void **)&vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32149+ *(void **)&vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32150+ *(void **)&vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32151+ *(void **)&vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32152+ *(void **)&vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32153+ *(void **)&vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32154+ *(void **)&vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32155+ *(void **)&vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32156
32157 if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) {
32158 ERR(("cannot register capture device. skipping.\n"));
32159diff -urNp linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c
32160--- linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c 2011-03-27 14:31:47.000000000 -0400
32161+++ linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c 2011-08-05 20:33:55.000000000 -0400
32162@@ -1477,9 +1477,9 @@ static int budget_av_attach(struct saa71
32163 ERR(("cannot init vv subsystem.\n"));
32164 return err;
32165 }
32166- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32167- vv_data.ops.vidioc_g_input = vidioc_g_input;
32168- vv_data.ops.vidioc_s_input = vidioc_s_input;
32169+ *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32170+ *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32171+ *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32172
32173 if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) {
32174 /* fixme: proper cleanup here */
32175diff -urNp linux-2.6.32.45/drivers/media/radio/radio-cadet.c linux-2.6.32.45/drivers/media/radio/radio-cadet.c
32176--- linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
32177+++ linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
32178@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
32179 while (i < count && dev->rdsin != dev->rdsout)
32180 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
32181
32182- if (copy_to_user(data, readbuf, i))
32183+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
32184 return -EFAULT;
32185 return i;
32186 }
32187diff -urNp linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c
32188--- linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
32189+++ linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
32190@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
32191
32192 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
32193
32194-static atomic_t cx18_instance = ATOMIC_INIT(0);
32195+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
32196
32197 /* Parameter declarations */
32198 static int cardtype[CX18_MAX_CARDS];
32199@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
32200 struct i2c_client c;
32201 u8 eedata[256];
32202
32203+ pax_track_stack();
32204+
32205 memset(&c, 0, sizeof(c));
32206 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
32207 c.adapter = &cx->i2c_adap[0];
32208@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
32209 struct cx18 *cx;
32210
32211 /* FIXME - module parameter arrays constrain max instances */
32212- i = atomic_inc_return(&cx18_instance) - 1;
32213+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
32214 if (i >= CX18_MAX_CARDS) {
32215 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
32216 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
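
[Illustration, not part of the patch] The pax_track_stack() calls sprinkled through these hunks annotate functions with unusually large stack frames (the 256-byte eeprom buffer above, whole videobuf_queue structs, 512-byte command buffers later on) so PaX's stack-tracking instrumentation can account for the deep frame before it is used. The sketch below only shows the shape of the annotation: the real pax_track_stack() is supplied by PaX, so it is stubbed out here to keep the program standalone.

#include <stdio.h>

/* Stub: the real macro records the current stack depth for PaX. */
#define pax_track_stack() do { } while (0)

static int read_eeprom_demo(void)
{
	unsigned char eedata[256];	/* large on-stack buffer, as in cx18_read_eeprom() */

	pax_track_stack();		/* annotation goes first, before the frame is used */

	for (unsigned i = 0; i < sizeof(eedata); i++)
		eedata[i] = (unsigned char)i;
	return eedata[255];
}

int main(void)
{
	printf("%d\n", read_eeprom_demo());	/* prints 255 */
	return 0;
}
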
32217diff -urNp linux-2.6.32.45/drivers/media/video/hexium_gemini.c linux-2.6.32.45/drivers/media/video/hexium_gemini.c
32218--- linux-2.6.32.45/drivers/media/video/hexium_gemini.c 2011-03-27 14:31:47.000000000 -0400
32219+++ linux-2.6.32.45/drivers/media/video/hexium_gemini.c 2011-08-05 20:33:55.000000000 -0400
32220@@ -394,12 +394,12 @@ static int hexium_attach(struct saa7146_
32221 hexium->cur_input = 0;
32222
32223 saa7146_vv_init(dev, &vv_data);
32224- vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32225- vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32226- vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32227- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32228- vv_data.ops.vidioc_g_input = vidioc_g_input;
32229- vv_data.ops.vidioc_s_input = vidioc_s_input;
32230+ *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32231+ *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32232+ *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32233+ *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32234+ *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32235+ *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32236 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER)) {
32237 printk("hexium_gemini: cannot register capture v4l2 device. skipping.\n");
32238 return -1;
32239diff -urNp linux-2.6.32.45/drivers/media/video/hexium_orion.c linux-2.6.32.45/drivers/media/video/hexium_orion.c
32240--- linux-2.6.32.45/drivers/media/video/hexium_orion.c 2011-03-27 14:31:47.000000000 -0400
32241+++ linux-2.6.32.45/drivers/media/video/hexium_orion.c 2011-08-05 20:33:55.000000000 -0400
32242@@ -369,9 +369,9 @@ static int hexium_attach(struct saa7146_
32243 DEB_EE((".\n"));
32244
32245 saa7146_vv_init(dev, &vv_data);
32246- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32247- vv_data.ops.vidioc_g_input = vidioc_g_input;
32248- vv_data.ops.vidioc_s_input = vidioc_s_input;
32249+ *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32250+ *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32251+ *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32252 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
32253 printk("hexium_orion: cannot register capture v4l2 device. skipping.\n");
32254 return -1;
32255diff -urNp linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c
32256--- linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
32257+++ linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
32258@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
32259 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
32260
32261 /* ivtv instance counter */
32262-static atomic_t ivtv_instance = ATOMIC_INIT(0);
32263+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
32264
32265 /* Parameter declarations */
32266 static int cardtype[IVTV_MAX_CARDS];
32267diff -urNp linux-2.6.32.45/drivers/media/video/mxb.c linux-2.6.32.45/drivers/media/video/mxb.c
32268--- linux-2.6.32.45/drivers/media/video/mxb.c 2011-03-27 14:31:47.000000000 -0400
32269+++ linux-2.6.32.45/drivers/media/video/mxb.c 2011-08-05 20:33:55.000000000 -0400
32270@@ -703,23 +703,23 @@ static int mxb_attach(struct saa7146_dev
32271 already did this in "mxb_vl42_probe" */
32272
32273 saa7146_vv_init(dev, &vv_data);
32274- vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32275- vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32276- vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32277- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32278- vv_data.ops.vidioc_g_input = vidioc_g_input;
32279- vv_data.ops.vidioc_s_input = vidioc_s_input;
32280- vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32281- vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32282- vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32283- vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32284- vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32285- vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32286+ *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32287+ *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32288+ *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32289+ *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32290+ *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32291+ *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32292+ *(void **)&vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32293+ *(void **)&vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32294+ *(void **)&vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32295+ *(void **)&vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32296+ *(void **)&vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32297+ *(void **)&vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32298 #ifdef CONFIG_VIDEO_ADV_DEBUG
32299- vv_data.ops.vidioc_g_register = vidioc_g_register;
32300- vv_data.ops.vidioc_s_register = vidioc_s_register;
32301+ *(void **)&vv_data.ops.vidioc_g_register = vidioc_g_register;
32302+ *(void **)&vv_data.ops.vidioc_s_register = vidioc_s_register;
32303 #endif
32304- vv_data.ops.vidioc_default = vidioc_default;
32305+ *(void **)&vv_data.ops.vidioc_default = vidioc_default;
32306 if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
32307 ERR(("cannot register capture v4l2 device. skipping.\n"));
32308 return -1;
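
[Illustration, not part of the patch] The repeated rewrite of "ops.field = fn" into "*(void **)&ops.field = fn" in the saa7146 and usbvideo hunks exists because grsecurity's constify plugin treats ops structures as read-only; the void ** cast keeps the few legitimate runtime assignments explicit and compilable. A hedged userspace sketch of the idiom is below; demo_ops and its members are made up for the example, and the function-pointer/void * round trip relies on POSIX, not ISO C.

#include <stdio.h>

/* Hypothetical ops table, analogous to the vv_data.ops fields above. */
struct demo_ops {
	int (*g_input)(void);
	int (*s_input)(int idx);
};

static int demo_g_input(void) { return 3; }
static int demo_s_input(int idx) { return idx; }

int main(void)
{
	struct demo_ops ops = { 0 };

	/* Writing through a void ** cast, as the patch does, keeps the
	 * struct type constifiable while still allowing these explicit,
	 * auditable assignments. */
	*(void **)&ops.g_input = (void *)demo_g_input;
	*(void **)&ops.s_input = (void *)demo_s_input;

	printf("%d %d\n", ops.g_input(), ops.s_input(1));	/* prints "3 1" */
	return 0;
}
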
32309diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.c linux-2.6.32.45/drivers/media/video/omap24xxcam.c
32310--- linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
32311+++ linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
32312@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
32313 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
32314
32315 do_gettimeofday(&vb->ts);
32316- vb->field_count = atomic_add_return(2, &fh->field_count);
32317+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
32318 if (csr & csr_error) {
32319 vb->state = VIDEOBUF_ERROR;
32320 if (!atomic_read(&fh->cam->in_reset)) {
32321diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.h linux-2.6.32.45/drivers/media/video/omap24xxcam.h
32322--- linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
32323+++ linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
32324@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
32325 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
32326 struct videobuf_queue vbq;
32327 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
32328- atomic_t field_count; /* field counter for videobuf_buffer */
32329+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
32330 /* accessing cam here doesn't need serialisation: it's constant */
32331 struct omap24xxcam_device *cam;
32332 };
32333diff -urNp linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32334--- linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
32335+++ linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
32336@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
32337 u8 *eeprom;
32338 struct tveeprom tvdata;
32339
32340+ pax_track_stack();
32341+
32342 memset(&tvdata,0,sizeof(tvdata));
32343
32344 eeprom = pvr2_eeprom_fetch(hdw);
32345diff -urNp linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c
32346--- linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
32347+++ linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
32348@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
32349 unsigned char localPAT[256];
32350 unsigned char localPMT[256];
32351
32352+ pax_track_stack();
32353+
32354 /* Set video format - must be done first as it resets other settings */
32355 set_reg8(client, 0x41, h->video_format);
32356
32357diff -urNp linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c
32358--- linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
32359+++ linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
32360@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
32361 wait_queue_head_t *q = 0;
32362 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32363
32364+ pax_track_stack();
32365+
32366 /* While any outstand message on the bus exists... */
32367 do {
32368
32369@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
32370 u8 tmp[512];
32371 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32372
32373+ pax_track_stack();
32374+
32375 while (loop) {
32376
32377 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
32378diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c
32379--- linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-03-27 14:31:47.000000000 -0400
32380+++ linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-08-05 20:33:55.000000000 -0400
32381@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] =
32382 static int __init ibmcam_init(void)
32383 {
32384 struct usbvideo_cb cbTbl;
32385- memset(&cbTbl, 0, sizeof(cbTbl));
32386- cbTbl.probe = ibmcam_probe;
32387- cbTbl.setupOnOpen = ibmcam_setup_on_open;
32388- cbTbl.videoStart = ibmcam_video_start;
32389- cbTbl.videoStop = ibmcam_video_stop;
32390- cbTbl.processData = ibmcam_ProcessIsocData;
32391- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32392- cbTbl.adjustPicture = ibmcam_adjust_picture;
32393- cbTbl.getFPS = ibmcam_calculate_fps;
32394+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
32395+ *(void **)&cbTbl.probe = ibmcam_probe;
32396+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
32397+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
32398+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
32399+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
32400+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32401+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
32402+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
32403 return usbvideo_register(
32404 &cams,
32405 MAX_IBMCAM,
32406diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c
32407--- linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
32408+++ linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-08-05 20:33:55.000000000 -0400
32409@@ -225,7 +225,7 @@ static void konicawc_register_input(stru
32410 int error;
32411
32412 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32413- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32414+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32415
32416 cam->input = input_dev = input_allocate_device();
32417 if (!input_dev) {
32418@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
32419 struct usbvideo_cb cbTbl;
32420 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
32421 DRIVER_DESC "\n");
32422- memset(&cbTbl, 0, sizeof(cbTbl));
32423- cbTbl.probe = konicawc_probe;
32424- cbTbl.setupOnOpen = konicawc_setup_on_open;
32425- cbTbl.processData = konicawc_process_isoc;
32426- cbTbl.getFPS = konicawc_calculate_fps;
32427- cbTbl.setVideoMode = konicawc_set_video_mode;
32428- cbTbl.startDataPump = konicawc_start_data;
32429- cbTbl.stopDataPump = konicawc_stop_data;
32430- cbTbl.adjustPicture = konicawc_adjust_picture;
32431- cbTbl.userFree = konicawc_free_uvd;
32432+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
32433+ *(void **)&cbTbl.probe = konicawc_probe;
32434+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
32435+ *(void **)&cbTbl.processData = konicawc_process_isoc;
32436+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
32437+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
32438+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
32439+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
32440+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
32441+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
32442 return usbvideo_register(
32443 &cams,
32444 MAX_CAMERAS,
32445diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c
32446--- linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
32447+++ linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
32448@@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
32449 int error;
32450
32451 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32452- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32453+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32454
32455 cam->input = input_dev = input_allocate_device();
32456 if (!input_dev) {
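
[Illustration, not part of the patch] The strncat -> strlcat swaps in konicawc.c and quickcam_messenger.c fix a classic size-argument mix-up: strncat()'s third argument is the number of bytes to append, not the destination's total size, so passing sizeof(dst) can still overflow; strlcat() is told the full destination size and truncates safely. The sketch below assumes a libc that provides strlcat (the BSDs, musl, or glibc 2.38+; older glibc needs libbsd).

#include <stdio.h>
#include <string.h>

int main(void)
{
	char physname[16];

	strcpy(physname, "usb-0000:00:1d");	/* 14 chars + NUL, fits */

	/* Wrong: strncat(physname, "/input0", sizeof(physname)) would append
	 * up to sizeof(physname) EXTRA bytes and can overflow the buffer.
	 * Right: strlcat() takes the total size and truncates instead. */
	strlcat(physname, "/input0", sizeof(physname));

	printf("%s\n", physname);	/* "usb-0000:00:1d/" -- truncated, no overflow */
	return 0;
}
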
32457diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c
32458--- linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-03-27 14:31:47.000000000 -0400
32459+++ linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-08-05 20:33:55.000000000 -0400
32460@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
32461 {
32462 struct usbvideo_cb cbTbl;
32463 memset(&cbTbl, 0, sizeof(cbTbl));
32464- cbTbl.probe = ultracam_probe;
32465- cbTbl.setupOnOpen = ultracam_setup_on_open;
32466- cbTbl.videoStart = ultracam_video_start;
32467- cbTbl.videoStop = ultracam_video_stop;
32468- cbTbl.processData = ultracam_ProcessIsocData;
32469- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32470- cbTbl.adjustPicture = ultracam_adjust_picture;
32471- cbTbl.getFPS = ultracam_calculate_fps;
32472+ *(void **)&cbTbl.probe = ultracam_probe;
32473+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
32474+ *(void **)&cbTbl.videoStart = ultracam_video_start;
32475+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
32476+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
32477+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32478+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
32479+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
32480 return usbvideo_register(
32481 &cams,
32482 MAX_CAMERAS,
32483diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c
32484--- linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-03-27 14:31:47.000000000 -0400
32485+++ linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-08-05 20:33:55.000000000 -0400
32486@@ -697,15 +697,15 @@ int usbvideo_register(
32487 __func__, cams, base_size, num_cams);
32488
32489 /* Copy callbacks, apply defaults for those that are not set */
32490- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
32491+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
32492 if (cams->cb.getFrame == NULL)
32493- cams->cb.getFrame = usbvideo_GetFrame;
32494+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
32495 if (cams->cb.disconnect == NULL)
32496- cams->cb.disconnect = usbvideo_Disconnect;
32497+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
32498 if (cams->cb.startDataPump == NULL)
32499- cams->cb.startDataPump = usbvideo_StartDataPump;
32500+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
32501 if (cams->cb.stopDataPump == NULL)
32502- cams->cb.stopDataPump = usbvideo_StopDataPump;
32503+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
32504
32505 cams->num_cameras = num_cams;
32506 cams->cam = (struct uvd *) &cams[1];
32507diff -urNp linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c
32508--- linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
32509+++ linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
32510@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
32511 unsigned char rv, gv, bv;
32512 static unsigned char *Y, *U, *V;
32513
32514+ pax_track_stack();
32515+
32516 frame = usbvision->curFrame;
32517 imageSize = frame->frmwidth * frame->frmheight;
32518 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
32519diff -urNp linux-2.6.32.45/drivers/media/video/v4l2-device.c linux-2.6.32.45/drivers/media/video/v4l2-device.c
32520--- linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
32521+++ linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
32522@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
32523 EXPORT_SYMBOL_GPL(v4l2_device_register);
32524
32525 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
32526- atomic_t *instance)
32527+ atomic_unchecked_t *instance)
32528 {
32529- int num = atomic_inc_return(instance) - 1;
32530+ int num = atomic_inc_return_unchecked(instance) - 1;
32531 int len = strlen(basename);
32532
32533 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
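
[Illustration, not part of the patch] The atomic_t -> atomic_unchecked_t conversions in this area (cx18_instance, ivtv_instance, the v4l2_device_set_name() counter, the GRU statistics further down) mark counters whose wraparound is harmless bookkeeping, so PaX's refcount-overflow detection stays armed for real reference counts without tripping here. A minimal C11 sketch of the underlying instance-numbering idiom, with plain fetch-and-add and no overflow policing:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int instance;	/* zero-initialized; stand-in for atomic_unchecked_t */

static int next_instance(void)
{
	/* Same value as the driver's atomic_inc_return_unchecked(&instance) - 1 */
	return atomic_fetch_add(&instance, 1);
}

int main(void)
{
	printf("cx18-%d\n", next_instance());	/* cx18-0 */
	printf("cx18-%d\n", next_instance());	/* cx18-1 */
	return 0;
}
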
32534diff -urNp linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c
32535--- linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
32536+++ linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
32537@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
32538 {
32539 struct videobuf_queue q;
32540
32541+ pax_track_stack();
32542+
32543 /* Required to make generic handler to call __videobuf_alloc */
32544 q.int_ops = &sg_ops;
32545
32546diff -urNp linux-2.6.32.45/drivers/message/fusion/mptbase.c linux-2.6.32.45/drivers/message/fusion/mptbase.c
32547--- linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
32548+++ linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
32549@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
32550 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
32551 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
32552
32553+#ifdef CONFIG_GRKERNSEC_HIDESYM
32554+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32555+ NULL, NULL);
32556+#else
32557 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32558 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
32559+#endif
32560+
32561 /*
32562 * Rounding UP to nearest 4-kB boundary here...
32563 */
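
[Illustration, not part of the patch] Under CONFIG_GRKERNSEC_HIDESYM the mptbase.c /proc output above prints NULL instead of the real kernel virtual and DMA addresses, closing an address leak that would help an attacker locate kernel objects. A trivial userspace sketch of the compile-time switch; HIDESYM and report() are made up for the example.

#include <stdio.h>

#define HIDESYM 1	/* mimics CONFIG_GRKERNSEC_HIDESYM */

static void report(const void *req_frames, unsigned long req_frames_dma)
{
#if HIDESYM
	(void)req_frames;
	(void)req_frames_dma;
	printf("  RequestFrames @ 0x%p (Dma @ 0x%p)\n", (void *)NULL, (void *)NULL);
#else
	printf("  RequestFrames @ 0x%p (Dma @ 0x%p)\n",
	       (void *)req_frames, (void *)req_frames_dma);
#endif
}

int main(void)
{
	int dummy = 0;
	report(&dummy, 0x1000UL);
	return 0;
}
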
32564diff -urNp linux-2.6.32.45/drivers/message/fusion/mptsas.c linux-2.6.32.45/drivers/message/fusion/mptsas.c
32565--- linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
32566+++ linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
32567@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
32568 return 0;
32569 }
32570
32571+static inline void
32572+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32573+{
32574+ if (phy_info->port_details) {
32575+ phy_info->port_details->rphy = rphy;
32576+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32577+ ioc->name, rphy));
32578+ }
32579+
32580+ if (rphy) {
32581+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32582+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32583+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32584+ ioc->name, rphy, rphy->dev.release));
32585+ }
32586+}
32587+
32588 /* no mutex */
32589 static void
32590 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
32591@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
32592 return NULL;
32593 }
32594
32595-static inline void
32596-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32597-{
32598- if (phy_info->port_details) {
32599- phy_info->port_details->rphy = rphy;
32600- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32601- ioc->name, rphy));
32602- }
32603-
32604- if (rphy) {
32605- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32606- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32607- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32608- ioc->name, rphy, rphy->dev.release));
32609- }
32610-}
32611-
32612 static inline struct sas_port *
32613 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32614 {
32615diff -urNp linux-2.6.32.45/drivers/message/fusion/mptscsih.c linux-2.6.32.45/drivers/message/fusion/mptscsih.c
32616--- linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
32617+++ linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
32618@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32619
32620 h = shost_priv(SChost);
32621
32622- if (h) {
32623- if (h->info_kbuf == NULL)
32624- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32625- return h->info_kbuf;
32626- h->info_kbuf[0] = '\0';
32627+ if (!h)
32628+ return NULL;
32629
32630- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32631- h->info_kbuf[size-1] = '\0';
32632- }
32633+ if (h->info_kbuf == NULL)
32634+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32635+ return h->info_kbuf;
32636+ h->info_kbuf[0] = '\0';
32637+
32638+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32639+ h->info_kbuf[size-1] = '\0';
32640
32641 return h->info_kbuf;
32642 }
32643diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_config.c linux-2.6.32.45/drivers/message/i2o/i2o_config.c
32644--- linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
32645+++ linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
32646@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
32647 struct i2o_message *msg;
32648 unsigned int iop;
32649
32650+ pax_track_stack();
32651+
32652 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32653 return -EFAULT;
32654
32655diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_proc.c linux-2.6.32.45/drivers/message/i2o/i2o_proc.c
32656--- linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
32657+++ linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
32658@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
32659 "Array Controller Device"
32660 };
32661
32662-static char *chtostr(u8 * chars, int n)
32663-{
32664- char tmp[256];
32665- tmp[0] = 0;
32666- return strncat(tmp, (char *)chars, n);
32667-}
32668-
32669 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32670 char *group)
32671 {
32672@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
32673
32674 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32675 seq_printf(seq, "%-#8x", ddm_table.module_id);
32676- seq_printf(seq, "%-29s",
32677- chtostr(ddm_table.module_name_version, 28));
32678+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32679 seq_printf(seq, "%9d ", ddm_table.data_size);
32680 seq_printf(seq, "%8d", ddm_table.code_size);
32681
32682@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
32683
32684 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32685 seq_printf(seq, "%-#8x", dst->module_id);
32686- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32687- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32688+ seq_printf(seq, "%-.28s", dst->module_name_version);
32689+ seq_printf(seq, "%-.8s", dst->date);
32690 seq_printf(seq, "%8d ", dst->module_size);
32691 seq_printf(seq, "%8d ", dst->mpb_size);
32692 seq_printf(seq, "0x%04x", dst->module_flags);
32693@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
32694 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32695 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32696 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32697- seq_printf(seq, "Vendor info : %s\n",
32698- chtostr((u8 *) (work32 + 2), 16));
32699- seq_printf(seq, "Product info : %s\n",
32700- chtostr((u8 *) (work32 + 6), 16));
32701- seq_printf(seq, "Description : %s\n",
32702- chtostr((u8 *) (work32 + 10), 16));
32703- seq_printf(seq, "Product rev. : %s\n",
32704- chtostr((u8 *) (work32 + 14), 8));
32705+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32706+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32707+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32708+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32709
32710 seq_printf(seq, "Serial number : ");
32711 print_serial_number(seq, (u8 *) (work32 + 16),
32712@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
32713 }
32714
32715 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32716- seq_printf(seq, "Module name : %s\n",
32717- chtostr(result.module_name, 24));
32718- seq_printf(seq, "Module revision : %s\n",
32719- chtostr(result.module_rev, 8));
32720+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
32721+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32722
32723 seq_printf(seq, "Serial number : ");
32724 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32725@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
32726 return 0;
32727 }
32728
32729- seq_printf(seq, "Device name : %s\n",
32730- chtostr(result.device_name, 64));
32731- seq_printf(seq, "Service name : %s\n",
32732- chtostr(result.service_name, 64));
32733- seq_printf(seq, "Physical name : %s\n",
32734- chtostr(result.physical_location, 64));
32735- seq_printf(seq, "Instance number : %s\n",
32736- chtostr(result.instance_number, 4));
32737+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
32738+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
32739+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32740+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32741
32742 return 0;
32743 }
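
[Illustration, not part of the patch] The i2o_proc.c hunk deletes chtostr(), which funnelled fixed-width, possibly non-NUL-terminated identity fields through a 256-byte stack buffer with strncat, and instead prints them with an explicit precision ("%.28s", "%.8s", ...), which stops at either a NUL or the field width with no intermediate copy. A minimal sketch of why the precision form is safe for unterminated data:

#include <stdio.h>

int main(void)
{
	/* A fixed-width field that is NOT NUL-terminated, like the
	 * module_name_version bytes read from an I2O IOP. */
	char module_name_version[8] = { 'i', '2', 'o', '_', 'd', 'e', 'm', 'o' };

	/* "%.8s" prints at most 8 bytes and needs no terminator,
	 * replacing the old copy-into-a-temp-buffer helper. */
	printf("Module name : %.8s\n", module_name_version);
	return 0;
}
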
32744diff -urNp linux-2.6.32.45/drivers/message/i2o/iop.c linux-2.6.32.45/drivers/message/i2o/iop.c
32745--- linux-2.6.32.45/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
32746+++ linux-2.6.32.45/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
32747@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
32748
32749 spin_lock_irqsave(&c->context_list_lock, flags);
32750
32751- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32752- atomic_inc(&c->context_list_counter);
32753+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32754+ atomic_inc_unchecked(&c->context_list_counter);
32755
32756- entry->context = atomic_read(&c->context_list_counter);
32757+ entry->context = atomic_read_unchecked(&c->context_list_counter);
32758
32759 list_add(&entry->list, &c->context_list);
32760
32761@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
32762
32763 #if BITS_PER_LONG == 64
32764 spin_lock_init(&c->context_list_lock);
32765- atomic_set(&c->context_list_counter, 0);
32766+ atomic_set_unchecked(&c->context_list_counter, 0);
32767 INIT_LIST_HEAD(&c->context_list);
32768 #endif
32769
32770diff -urNp linux-2.6.32.45/drivers/mfd/wm8350-i2c.c linux-2.6.32.45/drivers/mfd/wm8350-i2c.c
32771--- linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
32772+++ linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
32773@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
32774 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32775 int ret;
32776
32777+ pax_track_stack();
32778+
32779 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32780 return -EINVAL;
32781
32782diff -urNp linux-2.6.32.45/drivers/misc/kgdbts.c linux-2.6.32.45/drivers/misc/kgdbts.c
32783--- linux-2.6.32.45/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
32784+++ linux-2.6.32.45/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
32785@@ -118,7 +118,7 @@
32786 } while (0)
32787 #define MAX_CONFIG_LEN 40
32788
32789-static struct kgdb_io kgdbts_io_ops;
32790+static const struct kgdb_io kgdbts_io_ops;
32791 static char get_buf[BUFMAX];
32792 static int get_buf_cnt;
32793 static char put_buf[BUFMAX];
32794@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
32795 module_put(THIS_MODULE);
32796 }
32797
32798-static struct kgdb_io kgdbts_io_ops = {
32799+static const struct kgdb_io kgdbts_io_ops = {
32800 .name = "kgdbts",
32801 .read_char = kgdbts_get_char,
32802 .write_char = kgdbts_put_char,
32803diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c
32804--- linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
32805+++ linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
32806@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
32807
32808 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32809 {
32810- atomic_long_inc(&mcs_op_statistics[op].count);
32811- atomic_long_add(clks, &mcs_op_statistics[op].total);
32812+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32813+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
32814 if (mcs_op_statistics[op].max < clks)
32815 mcs_op_statistics[op].max = clks;
32816 }
32817diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c
32818--- linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
32819+++ linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
32820@@ -32,9 +32,9 @@
32821
32822 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32823
32824-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32825+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32826 {
32827- unsigned long val = atomic_long_read(v);
32828+ unsigned long val = atomic_long_read_unchecked(v);
32829
32830 if (val)
32831 seq_printf(s, "%16lu %s\n", val, id);
32832@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
32833 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
32834
32835 for (op = 0; op < mcsop_last; op++) {
32836- count = atomic_long_read(&mcs_op_statistics[op].count);
32837- total = atomic_long_read(&mcs_op_statistics[op].total);
32838+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32839+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32840 max = mcs_op_statistics[op].max;
32841 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32842 count ? total / count : 0, max);
32843diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h
32844--- linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
32845+++ linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
32846@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
32847 * GRU statistics.
32848 */
32849 struct gru_stats_s {
32850- atomic_long_t vdata_alloc;
32851- atomic_long_t vdata_free;
32852- atomic_long_t gts_alloc;
32853- atomic_long_t gts_free;
32854- atomic_long_t vdata_double_alloc;
32855- atomic_long_t gts_double_allocate;
32856- atomic_long_t assign_context;
32857- atomic_long_t assign_context_failed;
32858- atomic_long_t free_context;
32859- atomic_long_t load_user_context;
32860- atomic_long_t load_kernel_context;
32861- atomic_long_t lock_kernel_context;
32862- atomic_long_t unlock_kernel_context;
32863- atomic_long_t steal_user_context;
32864- atomic_long_t steal_kernel_context;
32865- atomic_long_t steal_context_failed;
32866- atomic_long_t nopfn;
32867- atomic_long_t break_cow;
32868- atomic_long_t asid_new;
32869- atomic_long_t asid_next;
32870- atomic_long_t asid_wrap;
32871- atomic_long_t asid_reuse;
32872- atomic_long_t intr;
32873- atomic_long_t intr_mm_lock_failed;
32874- atomic_long_t call_os;
32875- atomic_long_t call_os_offnode_reference;
32876- atomic_long_t call_os_check_for_bug;
32877- atomic_long_t call_os_wait_queue;
32878- atomic_long_t user_flush_tlb;
32879- atomic_long_t user_unload_context;
32880- atomic_long_t user_exception;
32881- atomic_long_t set_context_option;
32882- atomic_long_t migrate_check;
32883- atomic_long_t migrated_retarget;
32884- atomic_long_t migrated_unload;
32885- atomic_long_t migrated_unload_delay;
32886- atomic_long_t migrated_nopfn_retarget;
32887- atomic_long_t migrated_nopfn_unload;
32888- atomic_long_t tlb_dropin;
32889- atomic_long_t tlb_dropin_fail_no_asid;
32890- atomic_long_t tlb_dropin_fail_upm;
32891- atomic_long_t tlb_dropin_fail_invalid;
32892- atomic_long_t tlb_dropin_fail_range_active;
32893- atomic_long_t tlb_dropin_fail_idle;
32894- atomic_long_t tlb_dropin_fail_fmm;
32895- atomic_long_t tlb_dropin_fail_no_exception;
32896- atomic_long_t tlb_dropin_fail_no_exception_war;
32897- atomic_long_t tfh_stale_on_fault;
32898- atomic_long_t mmu_invalidate_range;
32899- atomic_long_t mmu_invalidate_page;
32900- atomic_long_t mmu_clear_flush_young;
32901- atomic_long_t flush_tlb;
32902- atomic_long_t flush_tlb_gru;
32903- atomic_long_t flush_tlb_gru_tgh;
32904- atomic_long_t flush_tlb_gru_zero_asid;
32905-
32906- atomic_long_t copy_gpa;
32907-
32908- atomic_long_t mesq_receive;
32909- atomic_long_t mesq_receive_none;
32910- atomic_long_t mesq_send;
32911- atomic_long_t mesq_send_failed;
32912- atomic_long_t mesq_noop;
32913- atomic_long_t mesq_send_unexpected_error;
32914- atomic_long_t mesq_send_lb_overflow;
32915- atomic_long_t mesq_send_qlimit_reached;
32916- atomic_long_t mesq_send_amo_nacked;
32917- atomic_long_t mesq_send_put_nacked;
32918- atomic_long_t mesq_qf_not_full;
32919- atomic_long_t mesq_qf_locked;
32920- atomic_long_t mesq_qf_noop_not_full;
32921- atomic_long_t mesq_qf_switch_head_failed;
32922- atomic_long_t mesq_qf_unexpected_error;
32923- atomic_long_t mesq_noop_unexpected_error;
32924- atomic_long_t mesq_noop_lb_overflow;
32925- atomic_long_t mesq_noop_qlimit_reached;
32926- atomic_long_t mesq_noop_amo_nacked;
32927- atomic_long_t mesq_noop_put_nacked;
32928+ atomic_long_unchecked_t vdata_alloc;
32929+ atomic_long_unchecked_t vdata_free;
32930+ atomic_long_unchecked_t gts_alloc;
32931+ atomic_long_unchecked_t gts_free;
32932+ atomic_long_unchecked_t vdata_double_alloc;
32933+ atomic_long_unchecked_t gts_double_allocate;
32934+ atomic_long_unchecked_t assign_context;
32935+ atomic_long_unchecked_t assign_context_failed;
32936+ atomic_long_unchecked_t free_context;
32937+ atomic_long_unchecked_t load_user_context;
32938+ atomic_long_unchecked_t load_kernel_context;
32939+ atomic_long_unchecked_t lock_kernel_context;
32940+ atomic_long_unchecked_t unlock_kernel_context;
32941+ atomic_long_unchecked_t steal_user_context;
32942+ atomic_long_unchecked_t steal_kernel_context;
32943+ atomic_long_unchecked_t steal_context_failed;
32944+ atomic_long_unchecked_t nopfn;
32945+ atomic_long_unchecked_t break_cow;
32946+ atomic_long_unchecked_t asid_new;
32947+ atomic_long_unchecked_t asid_next;
32948+ atomic_long_unchecked_t asid_wrap;
32949+ atomic_long_unchecked_t asid_reuse;
32950+ atomic_long_unchecked_t intr;
32951+ atomic_long_unchecked_t intr_mm_lock_failed;
32952+ atomic_long_unchecked_t call_os;
32953+ atomic_long_unchecked_t call_os_offnode_reference;
32954+ atomic_long_unchecked_t call_os_check_for_bug;
32955+ atomic_long_unchecked_t call_os_wait_queue;
32956+ atomic_long_unchecked_t user_flush_tlb;
32957+ atomic_long_unchecked_t user_unload_context;
32958+ atomic_long_unchecked_t user_exception;
32959+ atomic_long_unchecked_t set_context_option;
32960+ atomic_long_unchecked_t migrate_check;
32961+ atomic_long_unchecked_t migrated_retarget;
32962+ atomic_long_unchecked_t migrated_unload;
32963+ atomic_long_unchecked_t migrated_unload_delay;
32964+ atomic_long_unchecked_t migrated_nopfn_retarget;
32965+ atomic_long_unchecked_t migrated_nopfn_unload;
32966+ atomic_long_unchecked_t tlb_dropin;
32967+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32968+ atomic_long_unchecked_t tlb_dropin_fail_upm;
32969+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
32970+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
32971+ atomic_long_unchecked_t tlb_dropin_fail_idle;
32972+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
32973+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32974+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
32975+ atomic_long_unchecked_t tfh_stale_on_fault;
32976+ atomic_long_unchecked_t mmu_invalidate_range;
32977+ atomic_long_unchecked_t mmu_invalidate_page;
32978+ atomic_long_unchecked_t mmu_clear_flush_young;
32979+ atomic_long_unchecked_t flush_tlb;
32980+ atomic_long_unchecked_t flush_tlb_gru;
32981+ atomic_long_unchecked_t flush_tlb_gru_tgh;
32982+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32983+
32984+ atomic_long_unchecked_t copy_gpa;
32985+
32986+ atomic_long_unchecked_t mesq_receive;
32987+ atomic_long_unchecked_t mesq_receive_none;
32988+ atomic_long_unchecked_t mesq_send;
32989+ atomic_long_unchecked_t mesq_send_failed;
32990+ atomic_long_unchecked_t mesq_noop;
32991+ atomic_long_unchecked_t mesq_send_unexpected_error;
32992+ atomic_long_unchecked_t mesq_send_lb_overflow;
32993+ atomic_long_unchecked_t mesq_send_qlimit_reached;
32994+ atomic_long_unchecked_t mesq_send_amo_nacked;
32995+ atomic_long_unchecked_t mesq_send_put_nacked;
32996+ atomic_long_unchecked_t mesq_qf_not_full;
32997+ atomic_long_unchecked_t mesq_qf_locked;
32998+ atomic_long_unchecked_t mesq_qf_noop_not_full;
32999+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
33000+ atomic_long_unchecked_t mesq_qf_unexpected_error;
33001+ atomic_long_unchecked_t mesq_noop_unexpected_error;
33002+ atomic_long_unchecked_t mesq_noop_lb_overflow;
33003+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
33004+ atomic_long_unchecked_t mesq_noop_amo_nacked;
33005+ atomic_long_unchecked_t mesq_noop_put_nacked;
33006
33007 };
33008
33009@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
33010 cchop_deallocate, tghop_invalidate, mcsop_last};
33011
33012 struct mcs_op_statistic {
33013- atomic_long_t count;
33014- atomic_long_t total;
33015+ atomic_long_unchecked_t count;
33016+ atomic_long_unchecked_t total;
33017 unsigned long max;
33018 };
33019
33020@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
33021
33022 #define STAT(id) do { \
33023 if (gru_options & OPT_STATS) \
33024- atomic_long_inc(&gru_stats.id); \
33025+ atomic_long_inc_unchecked(&gru_stats.id); \
33026 } while (0)
33027
33028 #ifdef CONFIG_SGI_GRU_DEBUG
33029diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h
33030--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-03-27 14:31:47.000000000 -0400
33031+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-08-05 20:33:55.000000000 -0400
33032@@ -876,7 +876,7 @@ extern struct xpc_registration xpc_regis
33033 /* found in xpc_main.c */
33034 extern struct device *xpc_part;
33035 extern struct device *xpc_chan;
33036-extern struct xpc_arch_operations xpc_arch_ops;
33037+extern const struct xpc_arch_operations xpc_arch_ops;
33038 extern int xpc_disengage_timelimit;
33039 extern int xpc_disengage_timedout;
33040 extern int xpc_activate_IRQ_rcvd;
33041diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c
33042--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-03-27 14:31:47.000000000 -0400
33043+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-08-05 20:33:55.000000000 -0400
33044@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_not
33045 .notifier_call = xpc_system_die,
33046 };
33047
33048-struct xpc_arch_operations xpc_arch_ops;
33049+const struct xpc_arch_operations xpc_arch_ops;
33050
33051 /*
33052 * Timer function to enforce the timelimit on the partition disengage.
33053diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c
33054--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-03-27 14:31:47.000000000 -0400
33055+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-08-05 20:33:55.000000000 -0400
33056@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_chan
33057 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
33058 }
33059
33060-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
33061+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
33062 .setup_partitions = xpc_setup_partitions_sn2,
33063 .teardown_partitions = xpc_teardown_partitions_sn2,
33064 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
33065@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
33066 int ret;
33067 size_t buf_size;
33068
33069- xpc_arch_ops = xpc_arch_ops_sn2;
33070+ pax_open_kernel();
33071+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
33072+ pax_close_kernel();
33073
33074 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
33075 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
33076diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c
33077--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-03-27 14:31:47.000000000 -0400
33078+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-08-05 20:33:55.000000000 -0400
33079@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_chann
33080 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
33081 }
33082
33083-static struct xpc_arch_operations xpc_arch_ops_uv = {
33084+static const struct xpc_arch_operations xpc_arch_ops_uv = {
33085 .setup_partitions = xpc_setup_partitions_uv,
33086 .teardown_partitions = xpc_teardown_partitions_uv,
33087 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
33088@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_ar
33089 int
33090 xpc_init_uv(void)
33091 {
33092- xpc_arch_ops = xpc_arch_ops_uv;
33093+ pax_open_kernel();
33094+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
33095+ pax_close_kernel();
33096
33097 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
33098 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
33099diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xp.h linux-2.6.32.45/drivers/misc/sgi-xp/xp.h
33100--- linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-03-27 14:31:47.000000000 -0400
33101+++ linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-08-05 20:33:55.000000000 -0400
33102@@ -289,7 +289,7 @@ struct xpc_interface {
33103 xpc_notify_func, void *);
33104 void (*received) (short, int, void *);
33105 enum xp_retval (*partid_to_nasids) (short, void *);
33106-};
33107+} __no_const;
33108
33109 extern struct xpc_interface xpc_interface;
33110
33111diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c
33112--- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
33113+++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
33114@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
33115 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
33116 unsigned long timeo = jiffies + HZ;
33117
33118+ pax_track_stack();
33119+
33120 /* Prevent setting state FL_SYNCING for chip in suspended state. */
33121 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
33122 goto sleep;
33123@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
33124 unsigned long initial_adr;
33125 int initial_len = len;
33126
33127+ pax_track_stack();
33128+
33129 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
33130 adr += chip->start;
33131 initial_adr = adr;
33132@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
33133 int retries = 3;
33134 int ret;
33135
33136+ pax_track_stack();
33137+
33138 adr += chip->start;
33139
33140 retry:
33141diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c
33142--- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
33143+++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
33144@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
33145 unsigned long cmd_addr;
33146 struct cfi_private *cfi = map->fldrv_priv;
33147
33148+ pax_track_stack();
33149+
33150 adr += chip->start;
33151
33152 /* Ensure cmd read/writes are aligned. */
33153@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
33154 DECLARE_WAITQUEUE(wait, current);
33155 int wbufsize, z;
33156
33157+ pax_track_stack();
33158+
33159 /* M58LW064A requires bus alignment for buffer wriets -- saw */
33160 if (adr & (map_bankwidth(map)-1))
33161 return -EINVAL;
33162@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
33163 DECLARE_WAITQUEUE(wait, current);
33164 int ret = 0;
33165
33166+ pax_track_stack();
33167+
33168 adr += chip->start;
33169
33170 /* Let's determine this according to the interleave only once */
33171@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
33172 unsigned long timeo = jiffies + HZ;
33173 DECLARE_WAITQUEUE(wait, current);
33174
33175+ pax_track_stack();
33176+
33177 adr += chip->start;
33178
33179 /* Let's determine this according to the interleave only once */
33180@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
33181 unsigned long timeo = jiffies + HZ;
33182 DECLARE_WAITQUEUE(wait, current);
33183
33184+ pax_track_stack();
33185+
33186 adr += chip->start;
33187
33188 /* Let's determine this according to the interleave only once */
33189diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2000.c linux-2.6.32.45/drivers/mtd/devices/doc2000.c
33190--- linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
33191+++ linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
33192@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
33193
33194 /* The ECC will not be calculated correctly if less than 512 is written */
33195 /* DBB-
33196- if (len != 0x200 && eccbuf)
33197+ if (len != 0x200)
33198 printk(KERN_WARNING
33199 "ECC needs a full sector write (adr: %lx size %lx)\n",
33200 (long) to, (long) len);
33201diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2001.c linux-2.6.32.45/drivers/mtd/devices/doc2001.c
33202--- linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
33203+++ linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
33204@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
33205 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33206
33207 /* Don't allow read past end of device */
33208- if (from >= this->totlen)
33209+ if (from >= this->totlen || !len)
33210 return -EINVAL;
33211
33212 /* Don't allow a single read to cross a 512-byte block boundary */
33213diff -urNp linux-2.6.32.45/drivers/mtd/ftl.c linux-2.6.32.45/drivers/mtd/ftl.c
33214--- linux-2.6.32.45/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
33215+++ linux-2.6.32.45/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
33216@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
33217 loff_t offset;
33218 uint16_t srcunitswap = cpu_to_le16(srcunit);
33219
33220+ pax_track_stack();
33221+
33222 eun = &part->EUNInfo[srcunit];
33223 xfer = &part->XferInfo[xferunit];
33224 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
33225diff -urNp linux-2.6.32.45/drivers/mtd/inftlcore.c linux-2.6.32.45/drivers/mtd/inftlcore.c
33226--- linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
33227+++ linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
33228@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
33229 struct inftl_oob oob;
33230 size_t retlen;
33231
33232+ pax_track_stack();
33233+
33234 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
33235 "pending=%d)\n", inftl, thisVUC, pendingblock);
33236
33237diff -urNp linux-2.6.32.45/drivers/mtd/inftlmount.c linux-2.6.32.45/drivers/mtd/inftlmount.c
33238--- linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
33239+++ linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
33240@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
33241 struct INFTLPartition *ip;
33242 size_t retlen;
33243
33244+ pax_track_stack();
33245+
33246 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
33247
33248 /*
33249diff -urNp linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c
33250--- linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
33251+++ linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
33252@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
33253 {
33254 map_word pfow_val[4];
33255
33256+ pax_track_stack();
33257+
33258 /* Check identification string */
33259 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
33260 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
33261diff -urNp linux-2.6.32.45/drivers/mtd/mtdchar.c linux-2.6.32.45/drivers/mtd/mtdchar.c
33262--- linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
33263+++ linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
33264@@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
33265 u_long size;
33266 struct mtd_info_user info;
33267
33268+ pax_track_stack();
33269+
33270 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
33271
33272 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
33273diff -urNp linux-2.6.32.45/drivers/mtd/nftlcore.c linux-2.6.32.45/drivers/mtd/nftlcore.c
33274--- linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
33275+++ linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
33276@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
33277 int inplace = 1;
33278 size_t retlen;
33279
33280+ pax_track_stack();
33281+
33282 memset(BlockMap, 0xff, sizeof(BlockMap));
33283 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
33284
33285diff -urNp linux-2.6.32.45/drivers/mtd/nftlmount.c linux-2.6.32.45/drivers/mtd/nftlmount.c
33286--- linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
33287+++ linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
33288@@ -23,6 +23,7 @@
33289 #include <asm/errno.h>
33290 #include <linux/delay.h>
33291 #include <linux/slab.h>
33292+#include <linux/sched.h>
33293 #include <linux/mtd/mtd.h>
33294 #include <linux/mtd/nand.h>
33295 #include <linux/mtd/nftl.h>
33296@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
33297 struct mtd_info *mtd = nftl->mbd.mtd;
33298 unsigned int i;
33299
33300+ pax_track_stack();
33301+
33302 /* Assume logical EraseSize == physical erasesize for starting the scan.
33303 We'll sort it out later if we find a MediaHeader which says otherwise */
33304 /* Actually, we won't. The new DiskOnChip driver has already scanned
33305diff -urNp linux-2.6.32.45/drivers/mtd/ubi/build.c linux-2.6.32.45/drivers/mtd/ubi/build.c
33306--- linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
33307+++ linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
33308@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
33309 static int __init bytes_str_to_int(const char *str)
33310 {
33311 char *endp;
33312- unsigned long result;
33313+ unsigned long result, scale = 1;
33314
33315 result = simple_strtoul(str, &endp, 0);
33316 if (str == endp || result >= INT_MAX) {
33317@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
33318
33319 switch (*endp) {
33320 case 'G':
33321- result *= 1024;
33322+ scale *= 1024;
33323 case 'M':
33324- result *= 1024;
33325+ scale *= 1024;
33326 case 'K':
33327- result *= 1024;
33328+ scale *= 1024;
33329 if (endp[1] == 'i' && endp[2] == 'B')
33330 endp += 2;
33331 case '\0':
33332@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
33333 return -EINVAL;
33334 }
33335
33336- return result;
33337+ if ((intoverflow_t)result*scale >= INT_MAX) {
33338+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33339+ str);
33340+ return -EINVAL;
33341+ }
33342+
33343+ return result*scale;
33344 }
33345
33346 /**
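
[Illustration, not part of the patch] The bytes_str_to_int() hunk above fixes an overflow: the old code multiplied result in place through the fall-through K/M/G cases after the "result >= INT_MAX" check, so a large suffixed value could wrap past it. Accumulating the multiplier in scale and testing result*scale in a wider type catches the overflow before returning. A hedged userspace re-implementation of the same shape (the kernel version uses simple_strtoul and intoverflow_t):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Accepts "123", "128K", "10MiB", "2G"; rejects anything whose scaled
 * value would not fit in a 32-bit int. */
static int bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result, scale = 1;

	result = strtoul(str, &endp, 0);
	if (str == endp || result >= INT32_MAX)
		return -1;

	switch (*endp) {
	case 'G':
		scale *= 1024;	/* fall through */
	case 'M':
		scale *= 1024;	/* fall through */
	case 'K':
		scale *= 1024;
		if (endp[1] == 'i' && endp[2] == 'B')
			endp += 2;
		/* fall through */
	case '\0':
		break;
	default:
		return -1;
	}

	/* Overflow check on the scaled value, done in a 64-bit type. */
	if ((uint64_t)result * scale >= INT32_MAX)
		return -1;

	return (int)(result * scale);
}

int main(void)
{
	printf("%d\n", bytes_str_to_int("128KiB"));	/* 131072 */
	printf("%d\n", bytes_str_to_int("8G"));		/* -1: overflows int */
	return 0;
}
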
33347diff -urNp linux-2.6.32.45/drivers/net/bnx2.c linux-2.6.32.45/drivers/net/bnx2.c
33348--- linux-2.6.32.45/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
33349+++ linux-2.6.32.45/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
33350@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33351 int rc = 0;
33352 u32 magic, csum;
33353
33354+ pax_track_stack();
33355+
33356 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33357 goto test_nvram_done;
33358
33359diff -urNp linux-2.6.32.45/drivers/net/cxgb3/l2t.h linux-2.6.32.45/drivers/net/cxgb3/l2t.h
33360--- linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-03-27 14:31:47.000000000 -0400
33361+++ linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-08-05 20:33:55.000000000 -0400
33362@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
33363 */
33364 struct l2t_skb_cb {
33365 arp_failure_handler_func arp_failure_handler;
33366-};
33367+} __no_const;
33368
33369 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33370
33371diff -urNp linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c
33372--- linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
33373+++ linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
33374@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
33375 int i, addr, ret;
33376 struct t3_vpd vpd;
33377
33378+ pax_track_stack();
33379+
33380 /*
33381 * Card information is normally at VPD_BASE but some early cards had
33382 * it at 0.
33383diff -urNp linux-2.6.32.45/drivers/net/e1000e/82571.c linux-2.6.32.45/drivers/net/e1000e/82571.c
33384--- linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
33385+++ linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-08-05 20:33:55.000000000 -0400
33386@@ -245,22 +245,22 @@ static s32 e1000_init_mac_params_82571(s
33387 /* check for link */
33388 switch (hw->phy.media_type) {
33389 case e1000_media_type_copper:
33390- func->setup_physical_interface = e1000_setup_copper_link_82571;
33391- func->check_for_link = e1000e_check_for_copper_link;
33392- func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33393+ *(void **)&func->setup_physical_interface = e1000_setup_copper_link_82571;
33394+ *(void **)&func->check_for_link = e1000e_check_for_copper_link;
33395+ *(void **)&func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33396 break;
33397 case e1000_media_type_fiber:
33398- func->setup_physical_interface =
33399+ *(void **)&func->setup_physical_interface =
33400 e1000_setup_fiber_serdes_link_82571;
33401- func->check_for_link = e1000e_check_for_fiber_link;
33402- func->get_link_up_info =
33403+ *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
33404+ *(void **)&func->get_link_up_info =
33405 e1000e_get_speed_and_duplex_fiber_serdes;
33406 break;
33407 case e1000_media_type_internal_serdes:
33408- func->setup_physical_interface =
33409+ *(void **)&func->setup_physical_interface =
33410 e1000_setup_fiber_serdes_link_82571;
33411- func->check_for_link = e1000_check_for_serdes_link_82571;
33412- func->get_link_up_info =
33413+ *(void **)&func->check_for_link = e1000_check_for_serdes_link_82571;
33414+ *(void **)&func->get_link_up_info =
33415 e1000e_get_speed_and_duplex_fiber_serdes;
33416 break;
33417 default:
33418@@ -271,12 +271,12 @@ static s32 e1000_init_mac_params_82571(s
33419 switch (hw->mac.type) {
33420 case e1000_82574:
33421 case e1000_82583:
33422- func->check_mng_mode = e1000_check_mng_mode_82574;
33423- func->led_on = e1000_led_on_82574;
33424+ *(void **)&func->check_mng_mode = e1000_check_mng_mode_82574;
33425+ *(void **)&func->led_on = e1000_led_on_82574;
33426 break;
33427 default:
33428- func->check_mng_mode = e1000e_check_mng_mode_generic;
33429- func->led_on = e1000e_led_on_generic;
33430+ *(void **)&func->check_mng_mode = e1000e_check_mng_mode_generic;
33431+ *(void **)&func->led_on = e1000e_led_on_generic;
33432 break;
33433 }
33434
33435@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
33436 temp = er32(ICRXDMTC);
33437 }
33438
33439-static struct e1000_mac_operations e82571_mac_ops = {
33440+static const struct e1000_mac_operations e82571_mac_ops = {
33441 /* .check_mng_mode: mac type dependent */
33442 /* .check_for_link: media type dependent */
33443 .id_led_init = e1000e_id_led_init,
33444@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
33445 .setup_led = e1000e_setup_led_generic,
33446 };
33447
33448-static struct e1000_phy_operations e82_phy_ops_igp = {
33449+static const struct e1000_phy_operations e82_phy_ops_igp = {
33450 .acquire_phy = e1000_get_hw_semaphore_82571,
33451 .check_reset_block = e1000e_check_reset_block_generic,
33452 .commit_phy = NULL,
33453@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
33454 .cfg_on_link_up = NULL,
33455 };
33456
33457-static struct e1000_phy_operations e82_phy_ops_m88 = {
33458+static const struct e1000_phy_operations e82_phy_ops_m88 = {
33459 .acquire_phy = e1000_get_hw_semaphore_82571,
33460 .check_reset_block = e1000e_check_reset_block_generic,
33461 .commit_phy = e1000e_phy_sw_reset,
33462@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
33463 .cfg_on_link_up = NULL,
33464 };
33465
33466-static struct e1000_phy_operations e82_phy_ops_bm = {
33467+static const struct e1000_phy_operations e82_phy_ops_bm = {
33468 .acquire_phy = e1000_get_hw_semaphore_82571,
33469 .check_reset_block = e1000e_check_reset_block_generic,
33470 .commit_phy = e1000e_phy_sw_reset,
33471@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
33472 .cfg_on_link_up = NULL,
33473 };
33474
33475-static struct e1000_nvm_operations e82571_nvm_ops = {
33476+static const struct e1000_nvm_operations e82571_nvm_ops = {
33477 .acquire_nvm = e1000_acquire_nvm_82571,
33478 .read_nvm = e1000e_read_nvm_eerd,
33479 .release_nvm = e1000_release_nvm_82571,
33480diff -urNp linux-2.6.32.45/drivers/net/e1000e/e1000.h linux-2.6.32.45/drivers/net/e1000e/e1000.h
33481--- linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
33482+++ linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
33483@@ -375,9 +375,9 @@ struct e1000_info {
33484 u32 pba;
33485 u32 max_hw_frame_size;
33486 s32 (*get_variants)(struct e1000_adapter *);
33487- struct e1000_mac_operations *mac_ops;
33488- struct e1000_phy_operations *phy_ops;
33489- struct e1000_nvm_operations *nvm_ops;
33490+ const struct e1000_mac_operations *mac_ops;
33491+ const struct e1000_phy_operations *phy_ops;
33492+ const struct e1000_nvm_operations *nvm_ops;
33493 };
33494
33495 /* hardware capability, feature, and workaround flags */
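
The e1000.h hunk above turns the per-board operation tables into pointers to const, so the function-pointer tables can live in read-only data rather than stay writable at run time. A small sketch of that layout under invented names (nic_ops, nic_info, probe_nic are illustrative, not driver symbols):

#include <stdio.h>

/* A table of function pointers, analogous to e1000_mac_operations. */
struct nic_ops {
	int (*reset)(void);
	int (*check_link)(void);
};

static int generic_reset(void)      { puts("reset");      return 0; }
static int generic_check_link(void) { puts("check link"); return 0; }

/* Because the instance is const, the table sits in .rodata and a stray
 * write cannot redirect these pointers later. */
static const struct nic_ops generic_ops = {
	.reset      = generic_reset,
	.check_link = generic_check_link,
};

/* The per-board descriptor only needs a pointer to const, exactly like
 * the constified mac_ops/phy_ops/nvm_ops members of struct e1000_info. */
struct nic_info {
	const char *name;
	const struct nic_ops *ops;
};

static const struct nic_info board = { "demo-nic", &generic_ops };

static int probe_nic(const struct nic_info *info)
{
	info->ops->reset();
	return info->ops->check_link();
}

int main(void)
{
	return probe_nic(&board);
}
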
33496diff -urNp linux-2.6.32.45/drivers/net/e1000e/es2lan.c linux-2.6.32.45/drivers/net/e1000e/es2lan.c
33497--- linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
33498+++ linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-08-05 20:33:55.000000000 -0400
33499@@ -229,16 +229,16 @@ static s32 e1000_init_mac_params_80003es
33500 /* check for link */
33501 switch (hw->phy.media_type) {
33502 case e1000_media_type_copper:
33503- func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
33504- func->check_for_link = e1000e_check_for_copper_link;
33505+ *(void **)&func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
33506+ *(void **)&func->check_for_link = e1000e_check_for_copper_link;
33507 break;
33508 case e1000_media_type_fiber:
33509- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33510- func->check_for_link = e1000e_check_for_fiber_link;
33511+ *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33512+ *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
33513 break;
33514 case e1000_media_type_internal_serdes:
33515- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33516- func->check_for_link = e1000e_check_for_serdes_link;
33517+ *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33518+ *(void **)&func->check_for_link = e1000e_check_for_serdes_link;
33519 break;
33520 default:
33521 return -E1000_ERR_CONFIG;
33522@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
33523 temp = er32(ICRXDMTC);
33524 }
33525
33526-static struct e1000_mac_operations es2_mac_ops = {
33527+static const struct e1000_mac_operations es2_mac_ops = {
33528 .id_led_init = e1000e_id_led_init,
33529 .check_mng_mode = e1000e_check_mng_mode_generic,
33530 /* check_for_link dependent on media type */
33531@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
33532 .setup_led = e1000e_setup_led_generic,
33533 };
33534
33535-static struct e1000_phy_operations es2_phy_ops = {
33536+static const struct e1000_phy_operations es2_phy_ops = {
33537 .acquire_phy = e1000_acquire_phy_80003es2lan,
33538 .check_reset_block = e1000e_check_reset_block_generic,
33539 .commit_phy = e1000e_phy_sw_reset,
33540@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
33541 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
33542 };
33543
33544-static struct e1000_nvm_operations es2_nvm_ops = {
33545+static const struct e1000_nvm_operations es2_nvm_ops = {
33546 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
33547 .read_nvm = e1000e_read_nvm_eerd,
33548 .release_nvm = e1000_release_nvm_80003es2lan,
33549diff -urNp linux-2.6.32.45/drivers/net/e1000e/hw.h linux-2.6.32.45/drivers/net/e1000e/hw.h
33550--- linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
33551+++ linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
33552@@ -756,34 +756,34 @@ struct e1000_mac_operations {
33553
33554 /* Function pointers for the PHY. */
33555 struct e1000_phy_operations {
33556- s32 (*acquire_phy)(struct e1000_hw *);
33557- s32 (*check_polarity)(struct e1000_hw *);
33558- s32 (*check_reset_block)(struct e1000_hw *);
33559- s32 (*commit_phy)(struct e1000_hw *);
33560- s32 (*force_speed_duplex)(struct e1000_hw *);
33561- s32 (*get_cfg_done)(struct e1000_hw *hw);
33562- s32 (*get_cable_length)(struct e1000_hw *);
33563- s32 (*get_phy_info)(struct e1000_hw *);
33564- s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
33565- s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
33566- void (*release_phy)(struct e1000_hw *);
33567- s32 (*reset_phy)(struct e1000_hw *);
33568- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
33569- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33570- s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
33571- s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33572- s32 (*cfg_on_link_up)(struct e1000_hw *);
33573+ s32 (* acquire_phy)(struct e1000_hw *);
33574+ s32 (* check_polarity)(struct e1000_hw *);
33575+ s32 (* check_reset_block)(struct e1000_hw *);
33576+ s32 (* commit_phy)(struct e1000_hw *);
33577+ s32 (* force_speed_duplex)(struct e1000_hw *);
33578+ s32 (* get_cfg_done)(struct e1000_hw *hw);
33579+ s32 (* get_cable_length)(struct e1000_hw *);
33580+ s32 (* get_phy_info)(struct e1000_hw *);
33581+ s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
33582+ s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
33583+ void (* release_phy)(struct e1000_hw *);
33584+ s32 (* reset_phy)(struct e1000_hw *);
33585+ s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
33586+ s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
33587+ s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
33588+ s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33589+ s32 (* cfg_on_link_up)(struct e1000_hw *);
33590 };
33591
33592 /* Function pointers for the NVM. */
33593 struct e1000_nvm_operations {
33594- s32 (*acquire_nvm)(struct e1000_hw *);
33595- s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
33596- void (*release_nvm)(struct e1000_hw *);
33597- s32 (*update_nvm)(struct e1000_hw *);
33598- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
33599- s32 (*validate_nvm)(struct e1000_hw *);
33600- s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33601+ s32 (* const acquire_nvm)(struct e1000_hw *);
33602+ s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
33603+ void (* const release_nvm)(struct e1000_hw *);
33604+ s32 (* const update_nvm)(struct e1000_hw *);
33605+ s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
33606+ s32 (* const validate_nvm)(struct e1000_hw *);
33607+ s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33608 };
33609
33610 struct e1000_mac_info {
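
The hw.h hunk above const-qualifies the individual function-pointer members, so they can only be set by an initializer and never reassigned; that is why the ich8lan.c hunks that follow write through *(void **)& casts wherever an implementation has to be chosen at run time. A standalone sketch of a const pointer member, with invented names (phy_ops, igp_read):

#include <stdio.h>

struct phy_ops {
	int (* const read_reg)(int reg);	/* const member: set once */
};

static int igp_read(int reg) { return reg + 1; }

int main(void)
{
	/* Legal: const members may be set by an initializer. */
	struct phy_ops ops = { .read_reg = igp_read };

	/* ops.read_reg = igp_read;  would not compile: assignment of a
	 * read-only member.  The patch sidesteps this in init code with
	 * *(void **)&ops.read_reg = ..., a cast that steps outside strict C
	 * but leaves every other assignment site a compile-time error. */

	printf("%d\n", ops.read_reg(41));
	return 0;
}
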
33611diff -urNp linux-2.6.32.45/drivers/net/e1000e/ich8lan.c linux-2.6.32.45/drivers/net/e1000e/ich8lan.c
33612--- linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
33613+++ linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-08-05 20:33:55.000000000 -0400
33614@@ -265,13 +265,13 @@ static s32 e1000_init_phy_params_pchlan(
33615 phy->addr = 1;
33616 phy->reset_delay_us = 100;
33617
33618- phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33619- phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
33620- phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
33621- phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
33622- phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
33623- phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
33624- phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
33625+ *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33626+ *(void **)&phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
33627+ *(void **)&phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
33628+ *(void **)&phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
33629+ *(void **)&phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
33630+ *(void **)&phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
33631+ *(void **)&phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
33632 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33633
33634 /*
33635@@ -289,12 +289,12 @@ static s32 e1000_init_phy_params_pchlan(
33636 phy->type = e1000e_get_phy_type_from_id(phy->id);
33637
33638 if (phy->type == e1000_phy_82577) {
33639- phy->ops.check_polarity = e1000_check_polarity_82577;
33640- phy->ops.force_speed_duplex =
33641+ *(void **)&phy->ops.check_polarity = e1000_check_polarity_82577;
33642+ *(void **)&phy->ops.force_speed_duplex =
33643 e1000_phy_force_speed_duplex_82577;
33644- phy->ops.get_cable_length = e1000_get_cable_length_82577;
33645- phy->ops.get_phy_info = e1000_get_phy_info_82577;
33646- phy->ops.commit_phy = e1000e_phy_sw_reset;
33647+ *(void **)&phy->ops.get_cable_length = e1000_get_cable_length_82577;
33648+ *(void **)&phy->ops.get_phy_info = e1000_get_phy_info_82577;
33649+ *(void **)&phy->ops.commit_phy = e1000e_phy_sw_reset;
33650 }
33651
33652 out:
33653@@ -322,8 +322,8 @@ static s32 e1000_init_phy_params_ich8lan
33654 */
33655 ret_val = e1000e_determine_phy_address(hw);
33656 if (ret_val) {
33657- hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33658- hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33659+ *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33660+ *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33661 ret_val = e1000e_determine_phy_address(hw);
33662 if (ret_val)
33663 return ret_val;
33664@@ -343,8 +343,8 @@ static s32 e1000_init_phy_params_ich8lan
33665 case IGP03E1000_E_PHY_ID:
33666 phy->type = e1000_phy_igp_3;
33667 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33668- phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
33669- phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
33670+ *(void **)&phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
33671+ *(void **)&phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
33672 break;
33673 case IFE_E_PHY_ID:
33674 case IFE_PLUS_E_PHY_ID:
33675@@ -355,16 +355,16 @@ static s32 e1000_init_phy_params_ich8lan
33676 case BME1000_E_PHY_ID:
33677 phy->type = e1000_phy_bm;
33678 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33679- hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33680- hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33681- hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
33682+ *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33683+ *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33684+ *(void **)&hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
33685 break;
33686 default:
33687 return -E1000_ERR_PHY;
33688 break;
33689 }
33690
33691- phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33692+ *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33693
33694 return 0;
33695 }
33696@@ -455,25 +455,25 @@ static s32 e1000_init_mac_params_ich8lan
33697 case e1000_ich9lan:
33698 case e1000_ich10lan:
33699 /* ID LED init */
33700- mac->ops.id_led_init = e1000e_id_led_init;
33701+ *(void **)&mac->ops.id_led_init = e1000e_id_led_init;
33702 /* setup LED */
33703- mac->ops.setup_led = e1000e_setup_led_generic;
33704+ *(void **)&mac->ops.setup_led = e1000e_setup_led_generic;
33705 /* cleanup LED */
33706- mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
33707+ *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
33708 /* turn on/off LED */
33709- mac->ops.led_on = e1000_led_on_ich8lan;
33710- mac->ops.led_off = e1000_led_off_ich8lan;
33711+ *(void **)&mac->ops.led_on = e1000_led_on_ich8lan;
33712+ *(void **)&mac->ops.led_off = e1000_led_off_ich8lan;
33713 break;
33714 case e1000_pchlan:
33715 /* ID LED init */
33716- mac->ops.id_led_init = e1000_id_led_init_pchlan;
33717+ *(void **)&mac->ops.id_led_init = e1000_id_led_init_pchlan;
33718 /* setup LED */
33719- mac->ops.setup_led = e1000_setup_led_pchlan;
33720+ *(void **)&mac->ops.setup_led = e1000_setup_led_pchlan;
33721 /* cleanup LED */
33722- mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
33723+ *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
33724 /* turn on/off LED */
33725- mac->ops.led_on = e1000_led_on_pchlan;
33726- mac->ops.led_off = e1000_led_off_pchlan;
33727+ *(void **)&mac->ops.led_on = e1000_led_on_pchlan;
33728+ *(void **)&mac->ops.led_off = e1000_led_off_pchlan;
33729 break;
33730 default:
33731 break;
33732@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
33733 }
33734 }
33735
33736-static struct e1000_mac_operations ich8_mac_ops = {
33737+static const struct e1000_mac_operations ich8_mac_ops = {
33738 .id_led_init = e1000e_id_led_init,
33739 .check_mng_mode = e1000_check_mng_mode_ich8lan,
33740 .check_for_link = e1000_check_for_copper_link_ich8lan,
33741@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
33742 /* id_led_init dependent on mac type */
33743 };
33744
33745-static struct e1000_phy_operations ich8_phy_ops = {
33746+static const struct e1000_phy_operations ich8_phy_ops = {
33747 .acquire_phy = e1000_acquire_swflag_ich8lan,
33748 .check_reset_block = e1000_check_reset_block_ich8lan,
33749 .commit_phy = NULL,
33750@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
33751 .write_phy_reg = e1000e_write_phy_reg_igp,
33752 };
33753
33754-static struct e1000_nvm_operations ich8_nvm_ops = {
33755+static const struct e1000_nvm_operations ich8_nvm_ops = {
33756 .acquire_nvm = e1000_acquire_nvm_ich8lan,
33757 .read_nvm = e1000_read_nvm_ich8lan,
33758 .release_nvm = e1000_release_nvm_ich8lan,
33759diff -urNp linux-2.6.32.45/drivers/net/e1000e/netdev.c linux-2.6.32.45/drivers/net/e1000e/netdev.c
33760--- linux-2.6.32.45/drivers/net/e1000e/netdev.c 2011-03-27 14:31:47.000000000 -0400
33761+++ linux-2.6.32.45/drivers/net/e1000e/netdev.c 2011-08-05 20:33:55.000000000 -0400
33762@@ -5071,9 +5071,9 @@ static int __devinit e1000_probe(struct
33763
33764 err = -EIO;
33765
33766- memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33767- memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33768- memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33769+ memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33770+ memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33771+ memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33772
33773 err = ei->get_variants(adapter);
33774 if (err)
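
In the probe path above the driver copies the board's const operation templates into the per-adapter hw structure, casting the destination to (void *) because its members are now const-qualified. A compact sketch of the template-copy pattern, kept within standard C by leaving the destination members plain (nvm_ops, adapter, eeprom_read are invented names):

#include <stdio.h>
#include <string.h>

struct nvm_ops {
	int (*read)(int offset);
};

static int eeprom_read(int offset) { return offset ^ 0xff; }

/* One shared, read-only template (like ei->nvm_ops in the patch)... */
static const struct nvm_ops nvm_template = { .read = eeprom_read };

/* ...copied into each adapter at probe time.  In the patch the destination
 * members are const-qualified, hence the (void *) cast on the memcpy
 * destination; here the members are plain, which keeps the copy within
 * standard C. */
struct adapter {
	struct nvm_ops nvm;
};

int main(void)
{
	struct adapter a;

	memcpy(&a.nvm, &nvm_template, sizeof(a.nvm));
	printf("%d\n", a.nvm.read(0x10));
	return 0;
}
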
33775diff -urNp linux-2.6.32.45/drivers/net/hamradio/6pack.c linux-2.6.32.45/drivers/net/hamradio/6pack.c
33776--- linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
33777+++ linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
33778@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
33779 unsigned char buf[512];
33780 int count1;
33781
33782+ pax_track_stack();
33783+
33784 if (!count)
33785 return;
33786
33787diff -urNp linux-2.6.32.45/drivers/net/ibmveth.c linux-2.6.32.45/drivers/net/ibmveth.c
33788--- linux-2.6.32.45/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
33789+++ linux-2.6.32.45/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
33790@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
33791 NULL,
33792 };
33793
33794-static struct sysfs_ops veth_pool_ops = {
33795+static const struct sysfs_ops veth_pool_ops = {
33796 .show = veth_pool_show,
33797 .store = veth_pool_store,
33798 };
33799diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_82575.c linux-2.6.32.45/drivers/net/igb/e1000_82575.c
33800--- linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
33801+++ linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-08-05 20:33:55.000000000 -0400
33802@@ -135,7 +135,7 @@ static s32 igb_get_invariants_82575(stru
33803 ? true : false;
33804
33805 /* physical interface link setup */
33806- mac->ops.setup_physical_interface =
33807+ *(void **)&mac->ops.setup_physical_interface =
33808 (hw->phy.media_type == e1000_media_type_copper)
33809 ? igb_setup_copper_link_82575
33810 : igb_setup_serdes_link_82575;
33811@@ -191,13 +191,13 @@ static s32 igb_get_invariants_82575(stru
33812
33813 /* PHY function pointers */
33814 if (igb_sgmii_active_82575(hw)) {
33815- phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
33816- phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
33817- phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
33818+ *(void **)&phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
33819+ *(void **)&phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
33820+ *(void **)&phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
33821 } else {
33822- phy->ops.reset = igb_phy_hw_reset;
33823- phy->ops.read_reg = igb_read_phy_reg_igp;
33824- phy->ops.write_reg = igb_write_phy_reg_igp;
33825+ *(void **)&phy->ops.reset = igb_phy_hw_reset;
33826+ *(void **)&phy->ops.read_reg = igb_read_phy_reg_igp;
33827+ *(void **)&phy->ops.write_reg = igb_write_phy_reg_igp;
33828 }
33829
33830 /* set lan id */
33831@@ -213,17 +213,17 @@ static s32 igb_get_invariants_82575(stru
33832 switch (phy->id) {
33833 case M88E1111_I_PHY_ID:
33834 phy->type = e1000_phy_m88;
33835- phy->ops.get_phy_info = igb_get_phy_info_m88;
33836- phy->ops.get_cable_length = igb_get_cable_length_m88;
33837- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
33838+ *(void **)&phy->ops.get_phy_info = igb_get_phy_info_m88;
33839+ *(void **)&phy->ops.get_cable_length = igb_get_cable_length_m88;
33840+ *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
33841 break;
33842 case IGP03E1000_E_PHY_ID:
33843 phy->type = e1000_phy_igp_3;
33844- phy->ops.get_phy_info = igb_get_phy_info_igp;
33845- phy->ops.get_cable_length = igb_get_cable_length_igp_2;
33846- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
33847- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
33848- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
33849+ *(void **)&phy->ops.get_phy_info = igb_get_phy_info_igp;
33850+ *(void **)&phy->ops.get_cable_length = igb_get_cable_length_igp_2;
33851+ *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
33852+ *(void **)&phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
33853+ *(void **)&phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
33854 break;
33855 default:
33856 return -E1000_ERR_PHY;
33857@@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
33858 wr32(E1000_VT_CTL, vt_ctl);
33859 }
33860
33861-static struct e1000_mac_operations e1000_mac_ops_82575 = {
33862+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
33863 .reset_hw = igb_reset_hw_82575,
33864 .init_hw = igb_init_hw_82575,
33865 .check_for_link = igb_check_for_link_82575,
33866@@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
33867 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
33868 };
33869
33870-static struct e1000_phy_operations e1000_phy_ops_82575 = {
33871+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
33872 .acquire = igb_acquire_phy_82575,
33873 .get_cfg_done = igb_get_cfg_done_82575,
33874 .release = igb_release_phy_82575,
33875 };
33876
33877-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33878+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33879 .acquire = igb_acquire_nvm_82575,
33880 .read = igb_read_nvm_eerd,
33881 .release = igb_release_nvm_82575,
33882diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_hw.h linux-2.6.32.45/drivers/net/igb/e1000_hw.h
33883--- linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
33884+++ linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
33885@@ -305,17 +305,17 @@ struct e1000_phy_operations {
33886 };
33887
33888 struct e1000_nvm_operations {
33889- s32 (*acquire)(struct e1000_hw *);
33890- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
33891- void (*release)(struct e1000_hw *);
33892- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
33893+ s32 (* const acquire)(struct e1000_hw *);
33894+ s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
33895+ void (* const release)(struct e1000_hw *);
33896+ s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
33897 };
33898
33899 struct e1000_info {
33900 s32 (*get_invariants)(struct e1000_hw *);
33901- struct e1000_mac_operations *mac_ops;
33902- struct e1000_phy_operations *phy_ops;
33903- struct e1000_nvm_operations *nvm_ops;
33904+ const struct e1000_mac_operations *mac_ops;
33905+ const struct e1000_phy_operations *phy_ops;
33906+ const struct e1000_nvm_operations *nvm_ops;
33907 };
33908
33909 extern const struct e1000_info e1000_82575_info;
33910diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_mbx.c linux-2.6.32.45/drivers/net/igb/e1000_mbx.c
33911--- linux-2.6.32.45/drivers/net/igb/e1000_mbx.c 2011-03-27 14:31:47.000000000 -0400
33912+++ linux-2.6.32.45/drivers/net/igb/e1000_mbx.c 2011-08-05 20:33:55.000000000 -0400
33913@@ -414,13 +414,13 @@ s32 igb_init_mbx_params_pf(struct e1000_
33914
33915 mbx->size = E1000_VFMAILBOX_SIZE;
33916
33917- mbx->ops.read = igb_read_mbx_pf;
33918- mbx->ops.write = igb_write_mbx_pf;
33919- mbx->ops.read_posted = igb_read_posted_mbx;
33920- mbx->ops.write_posted = igb_write_posted_mbx;
33921- mbx->ops.check_for_msg = igb_check_for_msg_pf;
33922- mbx->ops.check_for_ack = igb_check_for_ack_pf;
33923- mbx->ops.check_for_rst = igb_check_for_rst_pf;
33924+ *(void **)&mbx->ops.read = igb_read_mbx_pf;
33925+ *(void **)&mbx->ops.write = igb_write_mbx_pf;
33926+ *(void **)&mbx->ops.read_posted = igb_read_posted_mbx;
33927+ *(void **)&mbx->ops.write_posted = igb_write_posted_mbx;
33928+ *(void **)&mbx->ops.check_for_msg = igb_check_for_msg_pf;
33929+ *(void **)&mbx->ops.check_for_ack = igb_check_for_ack_pf;
33930+ *(void **)&mbx->ops.check_for_rst = igb_check_for_rst_pf;
33931
33932 mbx->stats.msgs_tx = 0;
33933 mbx->stats.msgs_rx = 0;
33934diff -urNp linux-2.6.32.45/drivers/net/igb/igb_main.c linux-2.6.32.45/drivers/net/igb/igb_main.c
33935--- linux-2.6.32.45/drivers/net/igb/igb_main.c 2011-03-27 14:31:47.000000000 -0400
33936+++ linux-2.6.32.45/drivers/net/igb/igb_main.c 2011-08-05 20:33:55.000000000 -0400
33937@@ -1295,9 +1295,9 @@ static int __devinit igb_probe(struct pc
33938 /* setup the private structure */
33939 hw->back = adapter;
33940 /* Copy the default MAC, PHY and NVM function pointers */
33941- memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33942- memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33943- memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33944+ memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33945+ memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33946+ memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33947 /* Initialize skew-specific constants */
33948 err = ei->get_invariants(hw);
33949 if (err)
33950diff -urNp linux-2.6.32.45/drivers/net/igbvf/mbx.c linux-2.6.32.45/drivers/net/igbvf/mbx.c
33951--- linux-2.6.32.45/drivers/net/igbvf/mbx.c 2011-03-27 14:31:47.000000000 -0400
33952+++ linux-2.6.32.45/drivers/net/igbvf/mbx.c 2011-08-05 20:33:55.000000000 -0400
33953@@ -331,13 +331,13 @@ s32 e1000_init_mbx_params_vf(struct e100
33954
33955 mbx->size = E1000_VFMAILBOX_SIZE;
33956
33957- mbx->ops.read = e1000_read_mbx_vf;
33958- mbx->ops.write = e1000_write_mbx_vf;
33959- mbx->ops.read_posted = e1000_read_posted_mbx;
33960- mbx->ops.write_posted = e1000_write_posted_mbx;
33961- mbx->ops.check_for_msg = e1000_check_for_msg_vf;
33962- mbx->ops.check_for_ack = e1000_check_for_ack_vf;
33963- mbx->ops.check_for_rst = e1000_check_for_rst_vf;
33964+ *(void **)&mbx->ops.read = e1000_read_mbx_vf;
33965+ *(void **)&mbx->ops.write = e1000_write_mbx_vf;
33966+ *(void **)&mbx->ops.read_posted = e1000_read_posted_mbx;
33967+ *(void **)&mbx->ops.write_posted = e1000_write_posted_mbx;
33968+ *(void **)&mbx->ops.check_for_msg = e1000_check_for_msg_vf;
33969+ *(void **)&mbx->ops.check_for_ack = e1000_check_for_ack_vf;
33970+ *(void **)&mbx->ops.check_for_rst = e1000_check_for_rst_vf;
33971
33972 mbx->stats.msgs_tx = 0;
33973 mbx->stats.msgs_rx = 0;
33974diff -urNp linux-2.6.32.45/drivers/net/igbvf/vf.c linux-2.6.32.45/drivers/net/igbvf/vf.c
33975--- linux-2.6.32.45/drivers/net/igbvf/vf.c 2011-03-27 14:31:47.000000000 -0400
33976+++ linux-2.6.32.45/drivers/net/igbvf/vf.c 2011-08-05 20:33:55.000000000 -0400
33977@@ -55,21 +55,21 @@ static s32 e1000_init_mac_params_vf(stru
33978
33979 /* Function pointers */
33980 /* reset */
33981- mac->ops.reset_hw = e1000_reset_hw_vf;
33982+ *(void **)&mac->ops.reset_hw = e1000_reset_hw_vf;
33983 /* hw initialization */
33984- mac->ops.init_hw = e1000_init_hw_vf;
33985+ *(void **)&mac->ops.init_hw = e1000_init_hw_vf;
33986 /* check for link */
33987- mac->ops.check_for_link = e1000_check_for_link_vf;
33988+ *(void **)&mac->ops.check_for_link = e1000_check_for_link_vf;
33989 /* link info */
33990- mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
33991+ *(void **)&mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
33992 /* multicast address update */
33993- mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
33994+ *(void **)&mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
33995 /* set mac address */
33996- mac->ops.rar_set = e1000_rar_set_vf;
33997+ *(void **)&mac->ops.rar_set = e1000_rar_set_vf;
33998 /* read mac address */
33999- mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34000+ *(void **)&mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34001 /* set vlan filter table array */
34002- mac->ops.set_vfta = e1000_set_vfta_vf;
34003+ *(void **)&mac->ops.set_vfta = e1000_set_vfta_vf;
34004
34005 return E1000_SUCCESS;
34006 }
34007@@ -80,8 +80,8 @@ static s32 e1000_init_mac_params_vf(stru
34008 **/
34009 void e1000_init_function_pointers_vf(struct e1000_hw *hw)
34010 {
34011- hw->mac.ops.init_params = e1000_init_mac_params_vf;
34012- hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34013+ *(void **)&hw->mac.ops.init_params = e1000_init_mac_params_vf;
34014+ *(void **)&hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34015 }
34016
34017 /**
34018diff -urNp linux-2.6.32.45/drivers/net/iseries_veth.c linux-2.6.32.45/drivers/net/iseries_veth.c
34019--- linux-2.6.32.45/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
34020+++ linux-2.6.32.45/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
34021@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
34022 NULL
34023 };
34024
34025-static struct sysfs_ops veth_cnx_sysfs_ops = {
34026+static const struct sysfs_ops veth_cnx_sysfs_ops = {
34027 .show = veth_cnx_attribute_show
34028 };
34029
34030@@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
34031 NULL
34032 };
34033
34034-static struct sysfs_ops veth_port_sysfs_ops = {
34035+static const struct sysfs_ops veth_port_sysfs_ops = {
34036 .show = veth_port_attribute_show
34037 };
34038
34039diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c
34040--- linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
34041+++ linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
34042@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
34043 u32 rctl;
34044 int i;
34045
34046+ pax_track_stack();
34047+
34048 /* Check for Promiscuous and All Multicast modes */
34049
34050 rctl = IXGB_READ_REG(hw, RCTL);
34051diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c
34052--- linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
34053+++ linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
34054@@ -260,6 +260,9 @@ void __devinit
34055 ixgb_check_options(struct ixgb_adapter *adapter)
34056 {
34057 int bd = adapter->bd_number;
34058+
34059+ pax_track_stack();
34060+
34061 if (bd >= IXGB_MAX_NIC) {
34062 printk(KERN_NOTICE
34063 "Warning: no configuration for board #%i\n", bd);
34064diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c
34065--- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c 2011-03-27 14:31:47.000000000 -0400
34066+++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c 2011-08-05 20:33:55.000000000 -0400
34067@@ -154,19 +154,19 @@ static s32 ixgbe_init_phy_ops_82598(stru
34068
34069 /* Overwrite the link function pointers if copper PHY */
34070 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34071- mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34072- mac->ops.get_link_capabilities =
34073+ *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34074+ *(void **)&mac->ops.get_link_capabilities =
34075 &ixgbe_get_copper_link_capabilities_82598;
34076 }
34077
34078 switch (hw->phy.type) {
34079 case ixgbe_phy_tn:
34080- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34081- phy->ops.get_firmware_version =
34082+ *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34083+ *(void **)&phy->ops.get_firmware_version =
34084 &ixgbe_get_phy_firmware_version_tnx;
34085 break;
34086 case ixgbe_phy_nl:
34087- phy->ops.reset = &ixgbe_reset_phy_nl;
34088+ *(void **)&phy->ops.reset = &ixgbe_reset_phy_nl;
34089
34090 /* Call SFP+ identify routine to get the SFP+ module type */
34091 ret_val = phy->ops.identify_sfp(hw);
34092diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c
34093--- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c 2011-03-27 14:31:47.000000000 -0400
34094+++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c 2011-08-05 20:33:55.000000000 -0400
34095@@ -62,9 +62,9 @@ static void ixgbe_init_mac_link_ops_8259
34096 struct ixgbe_mac_info *mac = &hw->mac;
34097 if (hw->phy.multispeed_fiber) {
34098 /* Set up dual speed SFP+ support */
34099- mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34100+ *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34101 } else {
34102- mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34103+ *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34104 }
34105 }
34106
34107@@ -76,7 +76,7 @@ static s32 ixgbe_setup_sfp_modules_82599
34108 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
34109 ixgbe_init_mac_link_ops_82599(hw);
34110
34111- hw->phy.ops.reset = NULL;
34112+ *(void **)&hw->phy.ops.reset = NULL;
34113
34114 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
34115 &data_offset);
34116@@ -171,16 +171,16 @@ static s32 ixgbe_init_phy_ops_82599(stru
34117
34118 /* If copper media, overwrite with copper function pointers */
34119 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34120- mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34121- mac->ops.get_link_capabilities =
34122+ *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34123+ *(void **)&mac->ops.get_link_capabilities =
34124 &ixgbe_get_copper_link_capabilities_82599;
34125 }
34126
34127 /* Set necessary function pointers based on phy type */
34128 switch (hw->phy.type) {
34129 case ixgbe_phy_tn:
34130- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34131- phy->ops.get_firmware_version =
34132+ *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34133+ *(void **)&phy->ops.get_firmware_version =
34134 &ixgbe_get_phy_firmware_version_tnx;
34135 break;
34136 default:
34137diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c
34138--- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c 2011-03-27 14:31:47.000000000 -0400
34139+++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c 2011-08-05 20:33:55.000000000 -0400
34140@@ -5638,18 +5638,18 @@ static int __devinit ixgbe_probe(struct
34141 adapter->bd_number = cards_found;
34142
34143 /* Setup hw api */
34144- memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34145+ memcpy((void *)&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34146 hw->mac.type = ii->mac;
34147
34148 /* EEPROM */
34149- memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34150+ memcpy((void *)&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34151 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
34152 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
34153 if (!(eec & (1 << 8)))
34154- hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34155+ *(void **)&hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34156
34157 /* PHY */
34158- memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34159+ memcpy((void *)&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34160 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
34161 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
34162 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
34163diff -urNp linux-2.6.32.45/drivers/net/mlx4/main.c linux-2.6.32.45/drivers/net/mlx4/main.c
34164--- linux-2.6.32.45/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
34165+++ linux-2.6.32.45/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
34166@@ -38,6 +38,7 @@
34167 #include <linux/errno.h>
34168 #include <linux/pci.h>
34169 #include <linux/dma-mapping.h>
34170+#include <linux/sched.h>
34171
34172 #include <linux/mlx4/device.h>
34173 #include <linux/mlx4/doorbell.h>
34174@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
34175 u64 icm_size;
34176 int err;
34177
34178+ pax_track_stack();
34179+
34180 err = mlx4_QUERY_FW(dev);
34181 if (err) {
34182 if (err == -EACCES)
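
Several hunks in this stretch (bnx2, cxgb3 t3_hw, 6pack, ixgb, mlx4, niu) only add a pax_track_stack() call at the top of functions with unusually large on-stack buffers. The sketch below is a toy illustration of the general idea of recording how deep the stack has grown, not the actual PaX implementation; track_stack_depth, deepest_sp and big_stack_user are invented names:

#include <stdint.h>
#include <stdio.h>

/* Lowest stack address observed so far (stacks grow down on x86). */
static uintptr_t deepest_sp = UINTPTR_MAX;

/* Toy stand-in for a stack-tracking hook: remember the deepest point the
 * stack has reached so usage can later be inspected or sanitized. */
static void track_stack_depth(void)
{
	int marker;
	uintptr_t sp = (uintptr_t)&marker;

	if (sp < deepest_sp)
		deepest_sp = sp;
}

static void big_stack_user(void)
{
	unsigned char buf[4096];	/* the kind of frame being annotated */

	track_stack_depth();
	buf[0] = 0;
	(void)buf;
}

int main(void)
{
	int marker;
	uintptr_t top = (uintptr_t)&marker;

	big_stack_user();
	printf("approx. stack used: %lu bytes\n",
	       (unsigned long)(top - deepest_sp));
	return 0;
}
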
34183diff -urNp linux-2.6.32.45/drivers/net/niu.c linux-2.6.32.45/drivers/net/niu.c
34184--- linux-2.6.32.45/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
34185+++ linux-2.6.32.45/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
34186@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
34187 int i, num_irqs, err;
34188 u8 first_ldg;
34189
34190+ pax_track_stack();
34191+
34192 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
34193 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
34194 ldg_num_map[i] = first_ldg + i;
34195diff -urNp linux-2.6.32.45/drivers/net/pcnet32.c linux-2.6.32.45/drivers/net/pcnet32.c
34196--- linux-2.6.32.45/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
34197+++ linux-2.6.32.45/drivers/net/pcnet32.c 2011-08-05 20:33:55.000000000 -0400
34198@@ -79,7 +79,7 @@ static int cards_found;
34199 /*
34200 * VLB I/O addresses
34201 */
34202-static unsigned int pcnet32_portlist[] __initdata =
34203+static unsigned int pcnet32_portlist[] __devinitdata =
34204 { 0x300, 0x320, 0x340, 0x360, 0 };
34205
34206 static int pcnet32_debug = 0;
34207@@ -267,7 +267,7 @@ struct pcnet32_private {
34208 struct sk_buff **rx_skbuff;
34209 dma_addr_t *tx_dma_addr;
34210 dma_addr_t *rx_dma_addr;
34211- struct pcnet32_access a;
34212+ struct pcnet32_access *a;
34213 spinlock_t lock; /* Guard lock */
34214 unsigned int cur_rx, cur_tx; /* The next free ring entry */
34215 unsigned int rx_ring_size; /* current rx ring size */
34216@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct n
34217 u16 val;
34218
34219 netif_wake_queue(dev);
34220- val = lp->a.read_csr(ioaddr, CSR3);
34221+ val = lp->a->read_csr(ioaddr, CSR3);
34222 val &= 0x00ff;
34223- lp->a.write_csr(ioaddr, CSR3, val);
34224+ lp->a->write_csr(ioaddr, CSR3, val);
34225 napi_enable(&lp->napi);
34226 }
34227
34228@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_d
34229 r = mii_link_ok(&lp->mii_if);
34230 } else if (lp->chip_version >= PCNET32_79C970A) {
34231 ulong ioaddr = dev->base_addr; /* card base I/O address */
34232- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34233+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34234 } else { /* can not detect link on really old chips */
34235 r = 1;
34236 }
34237@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct
34238 pcnet32_netif_stop(dev);
34239
34240 spin_lock_irqsave(&lp->lock, flags);
34241- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34242+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34243
34244 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
34245
34246@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct
34247 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
34248 {
34249 struct pcnet32_private *lp = netdev_priv(dev);
34250- struct pcnet32_access *a = &lp->a; /* access to registers */
34251+ struct pcnet32_access *a = lp->a; /* access to registers */
34252 ulong ioaddr = dev->base_addr; /* card base I/O address */
34253 struct sk_buff *skb; /* sk buff */
34254 int x, i; /* counters */
34255@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct
34256 pcnet32_netif_stop(dev);
34257
34258 spin_lock_irqsave(&lp->lock, flags);
34259- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34260+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34261
34262 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
34263
34264 /* Reset the PCNET32 */
34265- lp->a.reset(ioaddr);
34266- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34267+ lp->a->reset(ioaddr);
34268+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34269
34270 /* switch pcnet32 to 32bit mode */
34271- lp->a.write_bcr(ioaddr, 20, 2);
34272+ lp->a->write_bcr(ioaddr, 20, 2);
34273
34274 /* purge & init rings but don't actually restart */
34275 pcnet32_restart(dev, 0x0000);
34276
34277- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34278+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34279
34280 /* Initialize Transmit buffers. */
34281 size = data_len + 15;
34282@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct
34283
34284 /* set int loopback in CSR15 */
34285 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
34286- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
34287+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
34288
34289 teststatus = cpu_to_le16(0x8000);
34290- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34291+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34292
34293 /* Check status of descriptors */
34294 for (x = 0; x < numbuffs; x++) {
34295@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct
34296 }
34297 }
34298
34299- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34300+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34301 wmb();
34302 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
34303 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
34304@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct
34305 pcnet32_restart(dev, CSR0_NORMAL);
34306 } else {
34307 pcnet32_purge_rx_ring(dev);
34308- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34309+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34310 }
34311 spin_unlock_irqrestore(&lp->lock, flags);
34312
34313@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct
34314 static void pcnet32_led_blink_callback(struct net_device *dev)
34315 {
34316 struct pcnet32_private *lp = netdev_priv(dev);
34317- struct pcnet32_access *a = &lp->a;
34318+ struct pcnet32_access *a = lp->a;
34319 ulong ioaddr = dev->base_addr;
34320 unsigned long flags;
34321 int i;
34322@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(s
34323 static int pcnet32_phys_id(struct net_device *dev, u32 data)
34324 {
34325 struct pcnet32_private *lp = netdev_priv(dev);
34326- struct pcnet32_access *a = &lp->a;
34327+ struct pcnet32_access *a = lp->a;
34328 ulong ioaddr = dev->base_addr;
34329 unsigned long flags;
34330 int i, regs[4];
34331@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_de
34332 {
34333 int csr5;
34334 struct pcnet32_private *lp = netdev_priv(dev);
34335- struct pcnet32_access *a = &lp->a;
34336+ struct pcnet32_access *a = lp->a;
34337 ulong ioaddr = dev->base_addr;
34338 int ticks;
34339
34340@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_stru
34341 spin_lock_irqsave(&lp->lock, flags);
34342 if (pcnet32_tx(dev)) {
34343 /* reset the chip to clear the error condition, then restart */
34344- lp->a.reset(ioaddr);
34345- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34346+ lp->a->reset(ioaddr);
34347+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34348 pcnet32_restart(dev, CSR0_START);
34349 netif_wake_queue(dev);
34350 }
34351@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_stru
34352 __napi_complete(napi);
34353
34354 /* clear interrupt masks */
34355- val = lp->a.read_csr(ioaddr, CSR3);
34356+ val = lp->a->read_csr(ioaddr, CSR3);
34357 val &= 0x00ff;
34358- lp->a.write_csr(ioaddr, CSR3, val);
34359+ lp->a->write_csr(ioaddr, CSR3, val);
34360
34361 /* Set interrupt enable. */
34362- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
34363+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
34364
34365 spin_unlock_irqrestore(&lp->lock, flags);
34366 }
34367@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_
34368 int i, csr0;
34369 u16 *buff = ptr;
34370 struct pcnet32_private *lp = netdev_priv(dev);
34371- struct pcnet32_access *a = &lp->a;
34372+ struct pcnet32_access *a = lp->a;
34373 ulong ioaddr = dev->base_addr;
34374 unsigned long flags;
34375
34376@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_
34377 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
34378 if (lp->phymask & (1 << j)) {
34379 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
34380- lp->a.write_bcr(ioaddr, 33,
34381+ lp->a->write_bcr(ioaddr, 33,
34382 (j << 5) | i);
34383- *buff++ = lp->a.read_bcr(ioaddr, 34);
34384+ *buff++ = lp->a->read_bcr(ioaddr, 34);
34385 }
34386 }
34387 }
34388@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34389 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
34390 lp->options |= PCNET32_PORT_FD;
34391
34392- lp->a = *a;
34393+ lp->a = a;
34394
34395 /* prior to register_netdev, dev->name is not yet correct */
34396 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
34397@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34398 if (lp->mii) {
34399 /* lp->phycount and lp->phymask are set to 0 by memset above */
34400
34401- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34402+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34403 /* scan for PHYs */
34404 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34405 unsigned short id1, id2;
34406@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34407 "Found PHY %04x:%04x at address %d.\n",
34408 id1, id2, i);
34409 }
34410- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34411+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34412 if (lp->phycount > 1) {
34413 lp->options |= PCNET32_PORT_MII;
34414 }
34415@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_devic
34416 }
34417
34418 /* Reset the PCNET32 */
34419- lp->a.reset(ioaddr);
34420+ lp->a->reset(ioaddr);
34421
34422 /* switch pcnet32 to 32bit mode */
34423- lp->a.write_bcr(ioaddr, 20, 2);
34424+ lp->a->write_bcr(ioaddr, 20, 2);
34425
34426 if (netif_msg_ifup(lp))
34427 printk(KERN_DEBUG
34428@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_devic
34429 (u32) (lp->init_dma_addr));
34430
34431 /* set/reset autoselect bit */
34432- val = lp->a.read_bcr(ioaddr, 2) & ~2;
34433+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
34434 if (lp->options & PCNET32_PORT_ASEL)
34435 val |= 2;
34436- lp->a.write_bcr(ioaddr, 2, val);
34437+ lp->a->write_bcr(ioaddr, 2, val);
34438
34439 /* handle full duplex setting */
34440 if (lp->mii_if.full_duplex) {
34441- val = lp->a.read_bcr(ioaddr, 9) & ~3;
34442+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
34443 if (lp->options & PCNET32_PORT_FD) {
34444 val |= 1;
34445 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
34446@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_devic
34447 if (lp->chip_version == 0x2627)
34448 val |= 3;
34449 }
34450- lp->a.write_bcr(ioaddr, 9, val);
34451+ lp->a->write_bcr(ioaddr, 9, val);
34452 }
34453
34454 /* set/reset GPSI bit in test register */
34455- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
34456+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
34457 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
34458 val |= 0x10;
34459- lp->a.write_csr(ioaddr, 124, val);
34460+ lp->a->write_csr(ioaddr, 124, val);
34461
34462 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
34463 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
34464@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_devic
34465 * duplex, and/or enable auto negotiation, and clear DANAS
34466 */
34467 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
34468- lp->a.write_bcr(ioaddr, 32,
34469- lp->a.read_bcr(ioaddr, 32) | 0x0080);
34470+ lp->a->write_bcr(ioaddr, 32,
34471+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
34472 /* disable Auto Negotiation, set 10Mpbs, HD */
34473- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
34474+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
34475 if (lp->options & PCNET32_PORT_FD)
34476 val |= 0x10;
34477 if (lp->options & PCNET32_PORT_100)
34478 val |= 0x08;
34479- lp->a.write_bcr(ioaddr, 32, val);
34480+ lp->a->write_bcr(ioaddr, 32, val);
34481 } else {
34482 if (lp->options & PCNET32_PORT_ASEL) {
34483- lp->a.write_bcr(ioaddr, 32,
34484- lp->a.read_bcr(ioaddr,
34485+ lp->a->write_bcr(ioaddr, 32,
34486+ lp->a->read_bcr(ioaddr,
34487 32) | 0x0080);
34488 /* enable auto negotiate, setup, disable fd */
34489- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
34490+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
34491 val |= 0x20;
34492- lp->a.write_bcr(ioaddr, 32, val);
34493+ lp->a->write_bcr(ioaddr, 32, val);
34494 }
34495 }
34496 } else {
34497@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_devic
34498 * There is really no good other way to handle multiple PHYs
34499 * other than turning off all automatics
34500 */
34501- val = lp->a.read_bcr(ioaddr, 2);
34502- lp->a.write_bcr(ioaddr, 2, val & ~2);
34503- val = lp->a.read_bcr(ioaddr, 32);
34504- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34505+ val = lp->a->read_bcr(ioaddr, 2);
34506+ lp->a->write_bcr(ioaddr, 2, val & ~2);
34507+ val = lp->a->read_bcr(ioaddr, 32);
34508+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34509
34510 if (!(lp->options & PCNET32_PORT_ASEL)) {
34511 /* setup ecmd */
34512@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_devic
34513 ecmd.speed =
34514 lp->
34515 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
34516- bcr9 = lp->a.read_bcr(ioaddr, 9);
34517+ bcr9 = lp->a->read_bcr(ioaddr, 9);
34518
34519 if (lp->options & PCNET32_PORT_FD) {
34520 ecmd.duplex = DUPLEX_FULL;
34521@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_devic
34522 ecmd.duplex = DUPLEX_HALF;
34523 bcr9 |= ~(1 << 0);
34524 }
34525- lp->a.write_bcr(ioaddr, 9, bcr9);
34526+ lp->a->write_bcr(ioaddr, 9, bcr9);
34527 }
34528
34529 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34530@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_devic
34531
34532 #ifdef DO_DXSUFLO
34533 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
34534- val = lp->a.read_csr(ioaddr, CSR3);
34535+ val = lp->a->read_csr(ioaddr, CSR3);
34536 val |= 0x40;
34537- lp->a.write_csr(ioaddr, CSR3, val);
34538+ lp->a->write_csr(ioaddr, CSR3, val);
34539 }
34540 #endif
34541
34542@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_devic
34543 napi_enable(&lp->napi);
34544
34545 /* Re-initialize the PCNET32, and start it when done. */
34546- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34547- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34548+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34549+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34550
34551- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34552- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34553+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34554+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34555
34556 netif_start_queue(dev);
34557
34558@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_devic
34559
34560 i = 0;
34561 while (i++ < 100)
34562- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34563+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34564 break;
34565 /*
34566 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
34567 * reports that doing so triggers a bug in the '974.
34568 */
34569- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
34570+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
34571
34572 if (netif_msg_ifup(lp))
34573 printk(KERN_DEBUG
34574 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
34575 dev->name, i,
34576 (u32) (lp->init_dma_addr),
34577- lp->a.read_csr(ioaddr, CSR0));
34578+ lp->a->read_csr(ioaddr, CSR0));
34579
34580 spin_unlock_irqrestore(&lp->lock, flags);
34581
34582@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_devic
34583 * Switch back to 16bit mode to avoid problems with dumb
34584 * DOS packet driver after a warm reboot
34585 */
34586- lp->a.write_bcr(ioaddr, 20, 4);
34587+ lp->a->write_bcr(ioaddr, 20, 4);
34588
34589 err_free_irq:
34590 spin_unlock_irqrestore(&lp->lock, flags);
34591@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_d
34592
34593 /* wait for stop */
34594 for (i = 0; i < 100; i++)
34595- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
34596+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
34597 break;
34598
34599 if (i >= 100 && netif_msg_drv(lp))
34600@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_d
34601 return;
34602
34603 /* ReInit Ring */
34604- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34605+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34606 i = 0;
34607 while (i++ < 1000)
34608- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34609+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34610 break;
34611
34612- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
34613+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
34614 }
34615
34616 static void pcnet32_tx_timeout(struct net_device *dev)
34617@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct ne
34618 if (pcnet32_debug & NETIF_MSG_DRV)
34619 printk(KERN_ERR
34620 "%s: transmit timed out, status %4.4x, resetting.\n",
34621- dev->name, lp->a.read_csr(ioaddr, CSR0));
34622- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34623+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34624+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34625 dev->stats.tx_errors++;
34626 if (netif_msg_tx_err(lp)) {
34627 int i;
34628@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34629 if (netif_msg_tx_queued(lp)) {
34630 printk(KERN_DEBUG
34631 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
34632- dev->name, lp->a.read_csr(ioaddr, CSR0));
34633+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34634 }
34635
34636 /* Default status -- will not enable Successful-TxDone
34637@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34638 dev->stats.tx_bytes += skb->len;
34639
34640 /* Trigger an immediate send poll. */
34641- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34642+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34643
34644 dev->trans_start = jiffies;
34645
34646@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
34647
34648 spin_lock(&lp->lock);
34649
34650- csr0 = lp->a.read_csr(ioaddr, CSR0);
34651+ csr0 = lp->a->read_csr(ioaddr, CSR0);
34652 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
34653 if (csr0 == 0xffff) {
34654 break; /* PCMCIA remove happened */
34655 }
34656 /* Acknowledge all of the current interrupt sources ASAP. */
34657- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34658+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34659
34660 if (netif_msg_intr(lp))
34661 printk(KERN_DEBUG
34662 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
34663- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
34664+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
34665
34666 /* Log misc errors. */
34667 if (csr0 & 0x4000)
34668@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
34669 if (napi_schedule_prep(&lp->napi)) {
34670 u16 val;
34671 /* set interrupt masks */
34672- val = lp->a.read_csr(ioaddr, CSR3);
34673+ val = lp->a->read_csr(ioaddr, CSR3);
34674 val |= 0x5f00;
34675- lp->a.write_csr(ioaddr, CSR3, val);
34676+ lp->a->write_csr(ioaddr, CSR3, val);
34677
34678 __napi_schedule(&lp->napi);
34679 break;
34680 }
34681- csr0 = lp->a.read_csr(ioaddr, CSR0);
34682+ csr0 = lp->a->read_csr(ioaddr, CSR0);
34683 }
34684
34685 if (netif_msg_intr(lp))
34686 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
34687- dev->name, lp->a.read_csr(ioaddr, CSR0));
34688+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34689
34690 spin_unlock(&lp->lock);
34691
34692@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_devi
34693
34694 spin_lock_irqsave(&lp->lock, flags);
34695
34696- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34697+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34698
34699 if (netif_msg_ifdown(lp))
34700 printk(KERN_DEBUG
34701 "%s: Shutting down ethercard, status was %2.2x.\n",
34702- dev->name, lp->a.read_csr(ioaddr, CSR0));
34703+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34704
34705 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
34706- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34707+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34708
34709 /*
34710 * Switch back to 16bit mode to avoid problems with dumb
34711 * DOS packet driver after a warm reboot
34712 */
34713- lp->a.write_bcr(ioaddr, 20, 4);
34714+ lp->a->write_bcr(ioaddr, 20, 4);
34715
34716 spin_unlock_irqrestore(&lp->lock, flags);
34717
34718@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_
34719 unsigned long flags;
34720
34721 spin_lock_irqsave(&lp->lock, flags);
34722- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34723+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34724 spin_unlock_irqrestore(&lp->lock, flags);
34725
34726 return &dev->stats;
34727@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struc
34728 if (dev->flags & IFF_ALLMULTI) {
34729 ib->filter[0] = cpu_to_le32(~0U);
34730 ib->filter[1] = cpu_to_le32(~0U);
34731- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34732- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34733- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34734- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34735+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34736+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34737+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34738+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34739 return;
34740 }
34741 /* clear the multicast filter */
34742@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struc
34743 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
34744 }
34745 for (i = 0; i < 4; i++)
34746- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
34747+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
34748 le16_to_cpu(mcast_table[i]));
34749 return;
34750 }
34751@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(s
34752
34753 spin_lock_irqsave(&lp->lock, flags);
34754 suspended = pcnet32_suspend(dev, &flags, 0);
34755- csr15 = lp->a.read_csr(ioaddr, CSR15);
34756+ csr15 = lp->a->read_csr(ioaddr, CSR15);
34757 if (dev->flags & IFF_PROMISC) {
34758 /* Log any net taps. */
34759 if (netif_msg_hw(lp))
34760@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(s
34761 lp->init_block->mode =
34762 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
34763 7);
34764- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
34765+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
34766 } else {
34767 lp->init_block->mode =
34768 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
34769- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34770+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34771 pcnet32_load_multicast(dev);
34772 }
34773
34774 if (suspended) {
34775 int csr5;
34776 /* clear SUSPEND (SPND) - CSR5 bit 0 */
34777- csr5 = lp->a.read_csr(ioaddr, CSR5);
34778- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34779+ csr5 = lp->a->read_csr(ioaddr, CSR5);
34780+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34781 } else {
34782- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34783+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34784 pcnet32_restart(dev, CSR0_NORMAL);
34785 netif_wake_queue(dev);
34786 }
34787@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *
34788 if (!lp->mii)
34789 return 0;
34790
34791- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34792- val_out = lp->a.read_bcr(ioaddr, 34);
34793+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34794+ val_out = lp->a->read_bcr(ioaddr, 34);
34795
34796 return val_out;
34797 }
34798@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device
34799 if (!lp->mii)
34800 return;
34801
34802- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34803- lp->a.write_bcr(ioaddr, 34, val);
34804+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34805+ lp->a->write_bcr(ioaddr, 34, val);
34806 }
34807
34808 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34809@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct n
34810 curr_link = mii_link_ok(&lp->mii_if);
34811 } else {
34812 ulong ioaddr = dev->base_addr; /* card base I/O address */
34813- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34814+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34815 }
34816 if (!curr_link) {
34817 if (prev_link || verbose) {
34818@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct n
34819 (ecmd.duplex ==
34820 DUPLEX_FULL) ? "full" : "half");
34821 }
34822- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
34823+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
34824 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
34825 if (lp->mii_if.full_duplex)
34826 bcr9 |= (1 << 0);
34827 else
34828 bcr9 &= ~(1 << 0);
34829- lp->a.write_bcr(dev->base_addr, 9, bcr9);
34830+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
34831 }
34832 } else {
34833 if (netif_msg_link(lp))
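
The pcnet32 hunks above are one mechanical change repeated at every call
site: lp->a.read_csr(...), lp->a.write_csr(...), lp->a.read_bcr(...) and
lp->a.write_bcr(...) become lp->a->..., i.e. the per-device access-method
table stops being an embedded, writable struct and becomes a pointer to a
shared table (the matching declaration change sits elsewhere in the patch),
which lets that table be declared const and kept in read-only memory. A
small standalone sketch of the before/after shape; the names and signatures
below are illustrative, not the driver's real ones:

#include <stdio.h>

struct chip_access {
        unsigned short (*read_csr)(unsigned long ioaddr, int idx);
        void (*write_csr)(unsigned long ioaddr, int idx, unsigned short val);
};

static unsigned short fake_read(unsigned long ioaddr, int idx)
{
        (void)ioaddr;
        return (unsigned short)idx;             /* pretend register read */
}

static void fake_write(unsigned long ioaddr, int idx, unsigned short val)
{
        (void)ioaddr; (void)idx; (void)val;     /* pretend register write */
}

/* one shared, read-only method table... */
static const struct chip_access chip_ops = { fake_read, fake_write };

/* ...and per-device private data that now holds only a pointer to it */
struct pcnet_priv {
        const struct chip_access *a;            /* was: struct chip_access a; */
};

int main(void)
{
        struct pcnet_priv lp = { &chip_ops };

        /* every call site changes from a.read_csr(...) to a->read_csr(...) */
        printf("CSR0 = %u\n", lp.a->read_csr(0, 0));
        return 0;
}
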
34834diff -urNp linux-2.6.32.45/drivers/net/tg3.h linux-2.6.32.45/drivers/net/tg3.h
34835--- linux-2.6.32.45/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
34836+++ linux-2.6.32.45/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
34837@@ -95,6 +95,7 @@
34838 #define CHIPREV_ID_5750_A0 0x4000
34839 #define CHIPREV_ID_5750_A1 0x4001
34840 #define CHIPREV_ID_5750_A3 0x4003
34841+#define CHIPREV_ID_5750_C1 0x4201
34842 #define CHIPREV_ID_5750_C2 0x4202
34843 #define CHIPREV_ID_5752_A0_HW 0x5000
34844 #define CHIPREV_ID_5752_A0 0x6000
34845diff -urNp linux-2.6.32.45/drivers/net/tokenring/abyss.c linux-2.6.32.45/drivers/net/tokenring/abyss.c
34846--- linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-03-27 14:31:47.000000000 -0400
34847+++ linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-08-05 20:33:55.000000000 -0400
34848@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
34849
34850 static int __init abyss_init (void)
34851 {
34852- abyss_netdev_ops = tms380tr_netdev_ops;
34853+ pax_open_kernel();
34854+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34855
34856- abyss_netdev_ops.ndo_open = abyss_open;
34857- abyss_netdev_ops.ndo_stop = abyss_close;
34858+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34859+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34860+ pax_close_kernel();
34861
34862 return pci_register_driver(&abyss_driver);
34863 }
34864diff -urNp linux-2.6.32.45/drivers/net/tokenring/madgemc.c linux-2.6.32.45/drivers/net/tokenring/madgemc.c
34865--- linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-03-27 14:31:47.000000000 -0400
34866+++ linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-08-05 20:33:55.000000000 -0400
34867@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver
34868
34869 static int __init madgemc_init (void)
34870 {
34871- madgemc_netdev_ops = tms380tr_netdev_ops;
34872- madgemc_netdev_ops.ndo_open = madgemc_open;
34873- madgemc_netdev_ops.ndo_stop = madgemc_close;
34874+ pax_open_kernel();
34875+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34876+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34877+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34878+ pax_close_kernel();
34879
34880 return mca_register_driver (&madgemc_driver);
34881 }
34882diff -urNp linux-2.6.32.45/drivers/net/tokenring/proteon.c linux-2.6.32.45/drivers/net/tokenring/proteon.c
34883--- linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-03-27 14:31:47.000000000 -0400
34884+++ linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-08-05 20:33:55.000000000 -0400
34885@@ -353,9 +353,11 @@ static int __init proteon_init(void)
34886 struct platform_device *pdev;
34887 int i, num = 0, err = 0;
34888
34889- proteon_netdev_ops = tms380tr_netdev_ops;
34890- proteon_netdev_ops.ndo_open = proteon_open;
34891- proteon_netdev_ops.ndo_stop = tms380tr_close;
34892+ pax_open_kernel();
34893+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34894+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34895+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34896+ pax_close_kernel();
34897
34898 err = platform_driver_register(&proteon_driver);
34899 if (err)
34900diff -urNp linux-2.6.32.45/drivers/net/tokenring/skisa.c linux-2.6.32.45/drivers/net/tokenring/skisa.c
34901--- linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-03-27 14:31:47.000000000 -0400
34902+++ linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-08-05 20:33:55.000000000 -0400
34903@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34904 struct platform_device *pdev;
34905 int i, num = 0, err = 0;
34906
34907- sk_isa_netdev_ops = tms380tr_netdev_ops;
34908- sk_isa_netdev_ops.ndo_open = sk_isa_open;
34909- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34910+ pax_open_kernel();
34911+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34912+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34913+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34914+ pax_close_kernel();
34915
34916 err = platform_driver_register(&sk_isa_driver);
34917 if (err)
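
The four token-ring init hunks above (abyss, madgemc, proteon, skisa) share
one pattern: instead of assigning to a writable *_netdev_ops structure at
module init, the structure is copied from the tms380tr template with
memcpy() and its two driver-specific function pointers are written through
*(void **) casts, all inside a pax_open_kernel()/pax_close_kernel() window;
in the patched kernel those helpers briefly permit writes to data that is
otherwise kept read-only. A hedged, userspace-compilable sketch of the
shape, with the PaX helpers stubbed out and the target left writable so the
example runs as-is:

#include <stdio.h>
#include <string.h>

struct netdev_ops_sketch {
        int (*ndo_open)(void);
        int (*ndo_stop)(void);
};

static void pax_open_kernel(void)  { /* stub: the real helper re-enables writes */ }
static void pax_close_kernel(void) { /* stub: the real helper restores read-only */ }

static int generic_open(void) { return 0; }
static int generic_stop(void) { return 0; }
static int drv_open(void)     { puts("driver open");  return 0; }
static int drv_close(void)    { puts("driver close"); return 0; }

/* shared template (tms380tr_netdev_ops in the drivers)... */
static const struct netdev_ops_sketch template_ops = { generic_open, generic_stop };

/* ...and the per-driver table; the patch declares it const so it lands in
 * read-only memory, it stays writable here only so the sketch can run */
static struct netdev_ops_sketch driver_ops;

static void driver_init(void)
{
        pax_open_kernel();
        memcpy(&driver_ops, &template_ops, sizeof(template_ops));
        *(void **)&driver_ops.ndo_open = (void *)drv_open;      /* mirrors the patch's casts */
        *(void **)&driver_ops.ndo_stop = (void *)drv_close;
        pax_close_kernel();
}

int main(void)
{
        driver_init();
        return driver_ops.ndo_open() + driver_ops.ndo_stop();
}
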
34918diff -urNp linux-2.6.32.45/drivers/net/tulip/de2104x.c linux-2.6.32.45/drivers/net/tulip/de2104x.c
34919--- linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
34920+++ linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
34921@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
34922 struct de_srom_info_leaf *il;
34923 void *bufp;
34924
34925+ pax_track_stack();
34926+
34927 /* download entire eeprom */
34928 for (i = 0; i < DE_EEPROM_WORDS; i++)
34929 ((__le16 *)ee_data)[i] =
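
This de2104x hunk, like the many similar one-line hunks in the WAN and
wireless drivers further down, only adds a pax_track_stack() call at the top
of a function that places a large buffer on the stack; the call feeds PaX's
stack-usage tracking so the depth reached by these heavy frames can be
accounted for and later sanitised. A stubbed sketch of where the annotation
sits; the helper body and buffer sizes below are placeholders:

#include <string.h>

static void pax_track_stack(void)
{
        /* stub: the real helper records the current kernel stack depth */
}

static int dump_eeprom(unsigned char *out, size_t out_len)
{
        unsigned char ee_data[1024];    /* large on-stack buffer -> annotated */

        pax_track_stack();

        memset(ee_data, 0xa5, sizeof(ee_data));     /* stand-in for reading the EEPROM */
        memcpy(out, ee_data, out_len < sizeof(ee_data) ? out_len : sizeof(ee_data));
        return 0;
}

int main(void)
{
        unsigned char buf[16];

        return dump_eeprom(buf, sizeof(buf));
}
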
34930diff -urNp linux-2.6.32.45/drivers/net/tulip/de4x5.c linux-2.6.32.45/drivers/net/tulip/de4x5.c
34931--- linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
34932+++ linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
34933@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
34934 for (i=0; i<ETH_ALEN; i++) {
34935 tmp.addr[i] = dev->dev_addr[i];
34936 }
34937- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34938+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34939 break;
34940
34941 case DE4X5_SET_HWADDR: /* Set the hardware address */
34942@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
34943 spin_lock_irqsave(&lp->lock, flags);
34944 memcpy(&statbuf, &lp->pktStats, ioc->len);
34945 spin_unlock_irqrestore(&lp->lock, flags);
34946- if (copy_to_user(ioc->data, &statbuf, ioc->len))
34947+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34948 return -EFAULT;
34949 break;
34950 }
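
Both de4x5 ioctl hunks add the same guard: the user-controlled ioc->len is
checked against the size of the kernel-side buffer before copy_to_user(), so
an oversized request now fails with -EFAULT instead of reading past the
buffer. A standalone sketch of that ordering (length check first, then the
copy), with a trivial stand-in for copy_to_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* stand-in for copy_to_user(): returns nonzero on failure */
static int copy_to_user_stub(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
        return 0;
}

static int get_hwaddr(void *user_buf, size_t user_len)
{
        unsigned char addr[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

        /* reject oversized requests before touching the buffer */
        if (user_len > sizeof(addr) || copy_to_user_stub(user_buf, addr, user_len))
                return -EFAULT;
        return 0;
}

int main(void)
{
        unsigned char buf[16];

        printf("len 6:  %d\n", get_hwaddr(buf, 6));             /* copied */
        printf("len 16: %d\n", get_hwaddr(buf, sizeof(buf)));   /* rejected */
        return 0;
}
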
34951diff -urNp linux-2.6.32.45/drivers/net/usb/hso.c linux-2.6.32.45/drivers/net/usb/hso.c
34952--- linux-2.6.32.45/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
34953+++ linux-2.6.32.45/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
34954@@ -71,7 +71,7 @@
34955 #include <asm/byteorder.h>
34956 #include <linux/serial_core.h>
34957 #include <linux/serial.h>
34958-
34959+#include <asm/local.h>
34960
34961 #define DRIVER_VERSION "1.2"
34962 #define MOD_AUTHOR "Option Wireless"
34963@@ -258,7 +258,7 @@ struct hso_serial {
34964
34965 /* from usb_serial_port */
34966 struct tty_struct *tty;
34967- int open_count;
34968+ local_t open_count;
34969 spinlock_t serial_lock;
34970
34971 int (*write_data) (struct hso_serial *serial);
34972@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
34973 struct urb *urb;
34974
34975 urb = serial->rx_urb[0];
34976- if (serial->open_count > 0) {
34977+ if (local_read(&serial->open_count) > 0) {
34978 count = put_rxbuf_data(urb, serial);
34979 if (count == -1)
34980 return;
34981@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
34982 DUMP1(urb->transfer_buffer, urb->actual_length);
34983
34984 /* Anyone listening? */
34985- if (serial->open_count == 0)
34986+ if (local_read(&serial->open_count) == 0)
34987 return;
34988
34989 if (status == 0) {
34990@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
34991 spin_unlock_irq(&serial->serial_lock);
34992
34993 /* check for port already opened, if not set the termios */
34994- serial->open_count++;
34995- if (serial->open_count == 1) {
34996+ if (local_inc_return(&serial->open_count) == 1) {
34997 tty->low_latency = 1;
34998 serial->rx_state = RX_IDLE;
34999 /* Force default termio settings */
35000@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
35001 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35002 if (result) {
35003 hso_stop_serial_device(serial->parent);
35004- serial->open_count--;
35005+ local_dec(&serial->open_count);
35006 kref_put(&serial->parent->ref, hso_serial_ref_free);
35007 }
35008 } else {
35009@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
35010
35011 /* reset the rts and dtr */
35012 /* do the actual close */
35013- serial->open_count--;
35014+ local_dec(&serial->open_count);
35015
35016- if (serial->open_count <= 0) {
35017- serial->open_count = 0;
35018+ if (local_read(&serial->open_count) <= 0) {
35019+ local_set(&serial->open_count, 0);
35020 spin_lock_irq(&serial->serial_lock);
35021 if (serial->tty == tty) {
35022 serial->tty->driver_data = NULL;
35023@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
35024
35025 /* the actual setup */
35026 spin_lock_irqsave(&serial->serial_lock, flags);
35027- if (serial->open_count)
35028+ if (local_read(&serial->open_count))
35029 _hso_serial_set_termios(tty, old);
35030 else
35031 tty->termios = old;
35032@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
35033 /* Start all serial ports */
35034 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35035 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35036- if (dev2ser(serial_table[i])->open_count) {
35037+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
35038 result =
35039 hso_start_serial_device(serial_table[i], GFP_NOIO);
35040 hso_kick_transmit(dev2ser(serial_table[i]));
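
The hso hunks above replace the tty open_count, previously a plain int
incremented and decremented in place, with the kernel's local_t type and its
helpers (local_read, local_inc_return, local_dec, local_set); the open/close
logic itself is unchanged, the first opener does the one-time start-up and
the last closer tears things down. An approximate userspace rendering of
that counting flow using C11 atomics in place of local_t (the two are not
equivalent; this only shows the logic):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;

static int serial_open(void)
{
        /* first opener: mirrors "if (local_inc_return(&open_count) == 1)" */
        if (atomic_fetch_add(&open_count, 1) + 1 == 1) {
                /* one-time start-up; on failure the patch rolls back with
                 * local_dec(&open_count) */
        }
        return 0;
}

static void serial_close(void)
{
        /* mirrors local_dec() followed by "if (local_read(...) <= 0)" */
        if (atomic_fetch_sub(&open_count, 1) - 1 <= 0) {
                atomic_store(&open_count, 0);   /* local_set(..., 0) in the patch */
                /* last-close tear-down */
        }
}

int main(void)
{
        serial_open();
        serial_open();
        serial_close();
        serial_close();
        printf("open_count=%d\n", atomic_load(&open_count));
        return 0;
}
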
35041diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-config.h linux-2.6.32.45/drivers/net/vxge/vxge-config.h
35042--- linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-03-27 14:31:47.000000000 -0400
35043+++ linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-08-05 20:33:55.000000000 -0400
35044@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
35045 void (*link_down)(struct __vxge_hw_device *devh);
35046 void (*crit_err)(struct __vxge_hw_device *devh,
35047 enum vxge_hw_event type, u64 ext_data);
35048-};
35049+} __no_const;
35050
35051 /*
35052 * struct __vxge_hw_blockpool_entry - Block private data structure
35053diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-main.c linux-2.6.32.45/drivers/net/vxge/vxge-main.c
35054--- linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
35055+++ linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
35056@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
35057 struct sk_buff *completed[NR_SKB_COMPLETED];
35058 int more;
35059
35060+ pax_track_stack();
35061+
35062 do {
35063 more = 0;
35064 skb_ptr = completed;
35065@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
35066 u8 mtable[256] = {0}; /* CPU to vpath mapping */
35067 int index;
35068
35069+ pax_track_stack();
35070+
35071 /*
35072 * Filling
35073 * - itable with bucket numbers
35074diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h
35075--- linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-03-27 14:31:47.000000000 -0400
35076+++ linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:33:55.000000000 -0400
35077@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
35078 struct vxge_hw_mempool_dma *dma_object,
35079 u32 index,
35080 u32 is_last);
35081-};
35082+} __no_const;
35083
35084 void
35085 __vxge_hw_mempool_destroy(
35086diff -urNp linux-2.6.32.45/drivers/net/wan/cycx_x25.c linux-2.6.32.45/drivers/net/wan/cycx_x25.c
35087--- linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
35088+++ linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
35089@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
35090 unsigned char hex[1024],
35091 * phex = hex;
35092
35093+ pax_track_stack();
35094+
35095 if (len >= (sizeof(hex) / 2))
35096 len = (sizeof(hex) / 2) - 1;
35097
35098diff -urNp linux-2.6.32.45/drivers/net/wan/hdlc_x25.c linux-2.6.32.45/drivers/net/wan/hdlc_x25.c
35099--- linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-03-27 14:31:47.000000000 -0400
35100+++ linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-08-05 20:33:55.000000000 -0400
35101@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
35102
35103 static int x25_open(struct net_device *dev)
35104 {
35105- struct lapb_register_struct cb;
35106+ static struct lapb_register_struct cb = {
35107+ .connect_confirmation = x25_connected,
35108+ .connect_indication = x25_connected,
35109+ .disconnect_confirmation = x25_disconnected,
35110+ .disconnect_indication = x25_disconnected,
35111+ .data_indication = x25_data_indication,
35112+ .data_transmit = x25_data_transmit
35113+ };
35114 int result;
35115
35116- cb.connect_confirmation = x25_connected;
35117- cb.connect_indication = x25_connected;
35118- cb.disconnect_confirmation = x25_disconnected;
35119- cb.disconnect_indication = x25_disconnected;
35120- cb.data_indication = x25_data_indication;
35121- cb.data_transmit = x25_data_transmit;
35122-
35123 result = lapb_register(dev, &cb);
35124 if (result != LAPB_OK)
35125 return result;
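
The x25_open() hunk trades a stack lapb_register_struct that was filled in
field by field on every open for a function-local static table built once
with designated initialisers. The same idiom in isolation, with stand-in
types and callbacks:

#include <stdio.h>

struct callbacks {
        void (*connected)(int reason);
        void (*disconnected)(int reason);
};

static void on_connected(int reason)    { printf("connected (%d)\n", reason); }
static void on_disconnected(int reason) { printf("disconnected (%d)\n", reason); }

/* stand-in for lapb_register(): just exercises one callback */
static int register_callbacks(struct callbacks *cb)
{
        cb->connected(0);
        return 0;
}

static int link_open(void)
{
        /* one table, initialised at compile time, instead of six
         * assignments on every open */
        static struct callbacks cb = {
                .connected    = on_connected,
                .disconnected = on_disconnected,
        };

        return register_callbacks(&cb);
}

int main(void)
{
        return link_open();
}
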
35126diff -urNp linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c
35127--- linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
35128+++ linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
35129@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
35130 int do_autopm = 1;
35131 DECLARE_COMPLETION_ONSTACK(notif_completion);
35132
35133+ pax_track_stack();
35134+
35135 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
35136 i2400m, ack, ack_size);
35137 BUG_ON(_ack == i2400m->bm_ack_buf);
35138diff -urNp linux-2.6.32.45/drivers/net/wireless/airo.c linux-2.6.32.45/drivers/net/wireless/airo.c
35139--- linux-2.6.32.45/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
35140+++ linux-2.6.32.45/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
35141@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
35142 BSSListElement * loop_net;
35143 BSSListElement * tmp_net;
35144
35145+ pax_track_stack();
35146+
35147 /* Blow away current list of scan results */
35148 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
35149 list_move_tail (&loop_net->list, &ai->network_free_list);
35150@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
35151 WepKeyRid wkr;
35152 int rc;
35153
35154+ pax_track_stack();
35155+
35156 memset( &mySsid, 0, sizeof( mySsid ) );
35157 kfree (ai->flash);
35158 ai->flash = NULL;
35159@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
35160 __le32 *vals = stats.vals;
35161 int len;
35162
35163+ pax_track_stack();
35164+
35165 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35166 return -ENOMEM;
35167 data = (struct proc_data *)file->private_data;
35168@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
35169 /* If doLoseSync is not 1, we won't do a Lose Sync */
35170 int doLoseSync = -1;
35171
35172+ pax_track_stack();
35173+
35174 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35175 return -ENOMEM;
35176 data = (struct proc_data *)file->private_data;
35177@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
35178 int i;
35179 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
35180
35181+ pax_track_stack();
35182+
35183 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
35184 if (!qual)
35185 return -ENOMEM;
35186@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
35187 CapabilityRid cap_rid;
35188 __le32 *vals = stats_rid.vals;
35189
35190+ pax_track_stack();
35191+
35192 /* Get stats out of the card */
35193 clear_bit(JOB_WSTATS, &local->jobs);
35194 if (local->power.event) {
35195diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c
35196--- linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
35197+++ linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
35198@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
35199 unsigned int v;
35200 u64 tsf;
35201
35202+ pax_track_stack();
35203+
35204 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
35205 len += snprintf(buf+len, sizeof(buf)-len,
35206 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
35207@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
35208 unsigned int len = 0;
35209 unsigned int i;
35210
35211+ pax_track_stack();
35212+
35213 len += snprintf(buf+len, sizeof(buf)-len,
35214 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
35215
35216diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c
35217--- linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
35218+++ linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
35219@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
35220 char buf[512];
35221 unsigned int len = 0;
35222
35223+ pax_track_stack();
35224+
35225 len += snprintf(buf + len, sizeof(buf) - len,
35226 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
35227 len += snprintf(buf + len, sizeof(buf) - len,
35228@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
35229 int i;
35230 u8 addr[ETH_ALEN];
35231
35232+ pax_track_stack();
35233+
35234 len += snprintf(buf + len, sizeof(buf) - len,
35235 "primary: %s (%s chan=%d ht=%d)\n",
35236 wiphy_name(sc->pri_wiphy->hw->wiphy),
35237diff -urNp linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c
35238--- linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35239+++ linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35240@@ -43,7 +43,7 @@ static struct dentry *rootdir;
35241 struct b43_debugfs_fops {
35242 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
35243 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
35244- struct file_operations fops;
35245+ const struct file_operations fops;
35246 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
35247 size_t file_struct_offset;
35248 };
35249diff -urNp linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c
35250--- linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35251+++ linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35252@@ -44,7 +44,7 @@ static struct dentry *rootdir;
35253 struct b43legacy_debugfs_fops {
35254 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
35255 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
35256- struct file_operations fops;
35257+ const struct file_operations fops;
35258 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
35259 size_t file_struct_offset;
35260 /* Take wl->irq_lock before calling read/write? */
35261diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c
35262--- linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
35263+++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
35264@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
35265 int err;
35266 DECLARE_SSID_BUF(ssid);
35267
35268+ pax_track_stack();
35269+
35270 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
35271
35272 if (ssid_len)
35273@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
35274 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
35275 int err;
35276
35277+ pax_track_stack();
35278+
35279 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
35280 idx, keylen, len);
35281
35282diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c
35283--- linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
35284+++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
35285@@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
35286 unsigned long flags;
35287 DECLARE_SSID_BUF(ssid);
35288
35289+ pax_track_stack();
35290+
35291 LIBIPW_DEBUG_SCAN("'%s' (%pM"
35292 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
35293 print_ssid(ssid, info_element->data, info_element->len),
35294diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c
35295--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
35296+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
35297@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
35298 },
35299 };
35300
35301-static struct iwl_ops iwl1000_ops = {
35302+static const struct iwl_ops iwl1000_ops = {
35303 .ucode = &iwl5000_ucode,
35304 .lib = &iwl1000_lib,
35305 .hcmd = &iwl5000_hcmd,
35306diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c
35307--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-03-27 14:31:47.000000000 -0400
35308+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-08-05 20:33:55.000000000 -0400
35309@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_
35310 */
35311 if (iwl3945_mod_params.disable_hw_scan) {
35312 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
35313- iwl3945_hw_ops.hw_scan = NULL;
35314+ pax_open_kernel();
35315+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
35316+ pax_close_kernel();
35317 }
35318
35319
35320diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c
35321--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
35322+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
35323@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
35324 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
35325 };
35326
35327-static struct iwl_ops iwl3945_ops = {
35328+static const struct iwl_ops iwl3945_ops = {
35329 .ucode = &iwl3945_ucode,
35330 .lib = &iwl3945_lib,
35331 .hcmd = &iwl3945_hcmd,
35332diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c
35333--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
35334+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
35335@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
35336 },
35337 };
35338
35339-static struct iwl_ops iwl4965_ops = {
35340+static const struct iwl_ops iwl4965_ops = {
35341 .ucode = &iwl4965_ucode,
35342 .lib = &iwl4965_lib,
35343 .hcmd = &iwl4965_hcmd,
35344diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c
35345--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
35346+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
35347@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
35348 },
35349 };
35350
35351-struct iwl_ops iwl5000_ops = {
35352+const struct iwl_ops iwl5000_ops = {
35353 .ucode = &iwl5000_ucode,
35354 .lib = &iwl5000_lib,
35355 .hcmd = &iwl5000_hcmd,
35356 .utils = &iwl5000_hcmd_utils,
35357 };
35358
35359-static struct iwl_ops iwl5150_ops = {
35360+static const struct iwl_ops iwl5150_ops = {
35361 .ucode = &iwl5000_ucode,
35362 .lib = &iwl5150_lib,
35363 .hcmd = &iwl5000_hcmd,
35364diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c
35365--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
35366+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
35367@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
35368 .calc_rssi = iwl5000_calc_rssi,
35369 };
35370
35371-static struct iwl_ops iwl6000_ops = {
35372+static const struct iwl_ops iwl6000_ops = {
35373 .ucode = &iwl5000_ucode,
35374 .lib = &iwl6000_lib,
35375 .hcmd = &iwl5000_hcmd,
35376diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c
35377--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-03-27 14:31:47.000000000 -0400
35378+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:33:55.000000000 -0400
35379@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev
35380 if (iwl_debug_level & IWL_DL_INFO)
35381 dev_printk(KERN_DEBUG, &(pdev->dev),
35382 "Disabling hw_scan\n");
35383- iwl_hw_ops.hw_scan = NULL;
35384+ pax_open_kernel();
35385+ *(void **)&iwl_hw_ops.hw_scan = NULL;
35386+ pax_close_kernel();
35387 }
35388
35389 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
35390diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35391--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
35392+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
35393@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
35394 u8 active_index = 0;
35395 s32 tpt = 0;
35396
35397+ pax_track_stack();
35398+
35399 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
35400
35401 if (!ieee80211_is_data(hdr->frame_control) ||
35402@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
35403 u8 valid_tx_ant = 0;
35404 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
35405
35406+ pax_track_stack();
35407+
35408 /* Override starting rate (index 0) if needed for debug purposes */
35409 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
35410
35411diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35412--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
35413+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
35414@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
35415 int pos = 0;
35416 const size_t bufsz = sizeof(buf);
35417
35418+ pax_track_stack();
35419+
35420 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
35421 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
35422 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
35423@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
35424 const size_t bufsz = sizeof(buf);
35425 ssize_t ret;
35426
35427+ pax_track_stack();
35428+
35429 for (i = 0; i < AC_NUM; i++) {
35430 pos += scnprintf(buf + pos, bufsz - pos,
35431 "\tcw_min\tcw_max\taifsn\ttxop\n");
35432diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h
35433--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
35434+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
35435@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
35436 #endif
35437
35438 #else
35439-#define IWL_DEBUG(__priv, level, fmt, args...)
35440-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
35441+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
35442+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
35443 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
35444 void *p, u32 len)
35445 {}
35446diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h
35447--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
35448+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
35449@@ -68,7 +68,7 @@ struct iwl_tx_queue;
35450
35451 /* shared structures from iwl-5000.c */
35452 extern struct iwl_mod_params iwl50_mod_params;
35453-extern struct iwl_ops iwl5000_ops;
35454+extern const struct iwl_ops iwl5000_ops;
35455 extern struct iwl_ucode_ops iwl5000_ucode;
35456 extern struct iwl_lib_ops iwl5000_lib;
35457 extern struct iwl_hcmd_ops iwl5000_hcmd;
35458diff -urNp linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c
35459--- linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35460+++ linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
35461@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
35462 int buf_len = 512;
35463 size_t len = 0;
35464
35465+ pax_track_stack();
35466+
35467 if (*ppos != 0)
35468 return 0;
35469 if (count < sizeof(buf))
35470diff -urNp linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c
35471--- linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35472+++ linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35473@@ -708,7 +708,7 @@ out_unlock:
35474 struct lbs_debugfs_files {
35475 const char *name;
35476 int perm;
35477- struct file_operations fops;
35478+ const struct file_operations fops;
35479 };
35480
35481 static const struct lbs_debugfs_files debugfs_files[] = {
35482diff -urNp linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c
35483--- linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
35484+++ linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
35485@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
35486
35487 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
35488
35489- if (rts_threshold < 0 || rts_threshold > 2347)
35490+ if (rts_threshold > 2347)
35491 rts_threshold = 2347;
35492
35493 tmp = cpu_to_le32(rts_threshold);
35494diff -urNp linux-2.6.32.45/drivers/oprofile/buffer_sync.c linux-2.6.32.45/drivers/oprofile/buffer_sync.c
35495--- linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
35496+++ linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
35497@@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
35498 if (cookie == NO_COOKIE)
35499 offset = pc;
35500 if (cookie == INVALID_COOKIE) {
35501- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35502+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35503 offset = pc;
35504 }
35505 if (cookie != last_cookie) {
35506@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
35507 /* add userspace sample */
35508
35509 if (!mm) {
35510- atomic_inc(&oprofile_stats.sample_lost_no_mm);
35511+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35512 return 0;
35513 }
35514
35515 cookie = lookup_dcookie(mm, s->eip, &offset);
35516
35517 if (cookie == INVALID_COOKIE) {
35518- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35519+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35520 return 0;
35521 }
35522
35523@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
35524 /* ignore backtraces if failed to add a sample */
35525 if (state == sb_bt_start) {
35526 state = sb_bt_ignore;
35527- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35528+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35529 }
35530 }
35531 release_mm(mm);
35532diff -urNp linux-2.6.32.45/drivers/oprofile/event_buffer.c linux-2.6.32.45/drivers/oprofile/event_buffer.c
35533--- linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
35534+++ linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
35535@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
35536 }
35537
35538 if (buffer_pos == buffer_size) {
35539- atomic_inc(&oprofile_stats.event_lost_overflow);
35540+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35541 return;
35542 }
35543
35544diff -urNp linux-2.6.32.45/drivers/oprofile/oprof.c linux-2.6.32.45/drivers/oprofile/oprof.c
35545--- linux-2.6.32.45/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
35546+++ linux-2.6.32.45/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
35547@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
35548 if (oprofile_ops.switch_events())
35549 return;
35550
35551- atomic_inc(&oprofile_stats.multiplex_counter);
35552+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35553 start_switch_worker();
35554 }
35555
35556diff -urNp linux-2.6.32.45/drivers/oprofile/oprofilefs.c linux-2.6.32.45/drivers/oprofile/oprofilefs.c
35557--- linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
35558+++ linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
35559@@ -187,7 +187,7 @@ static const struct file_operations atom
35560
35561
35562 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35563- char const *name, atomic_t *val)
35564+ char const *name, atomic_unchecked_t *val)
35565 {
35566 struct dentry *d = __oprofilefs_create_file(sb, root, name,
35567 &atomic_ro_fops, 0444);
35568diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.c linux-2.6.32.45/drivers/oprofile/oprofile_stats.c
35569--- linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
35570+++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
35571@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35572 cpu_buf->sample_invalid_eip = 0;
35573 }
35574
35575- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35576- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35577- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35578- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35579- atomic_set(&oprofile_stats.multiplex_counter, 0);
35580+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35581+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35582+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35583+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35584+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35585 }
35586
35587
35588diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.h linux-2.6.32.45/drivers/oprofile/oprofile_stats.h
35589--- linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
35590+++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
35591@@ -13,11 +13,11 @@
35592 #include <asm/atomic.h>
35593
35594 struct oprofile_stat_struct {
35595- atomic_t sample_lost_no_mm;
35596- atomic_t sample_lost_no_mapping;
35597- atomic_t bt_lost_no_mapping;
35598- atomic_t event_lost_overflow;
35599- atomic_t multiplex_counter;
35600+ atomic_unchecked_t sample_lost_no_mm;
35601+ atomic_unchecked_t sample_lost_no_mapping;
35602+ atomic_unchecked_t bt_lost_no_mapping;
35603+ atomic_unchecked_t event_lost_overflow;
35604+ atomic_unchecked_t multiplex_counter;
35605 };
35606
35607 extern struct oprofile_stat_struct oprofile_stats;
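
The oprofile hunks above convert the lost-sample and multiplex statistics
from atomic_t to atomic_unchecked_t and switch every call site to the
*_unchecked helpers. Under PaX's REFCOUNT hardening, ordinary atomic_t
increments are checked for overflow; counters like these, which only
accumulate statistics and may legitimately wrap, are moved to the unchecked
variants so they are exempt. A rough standalone model of that split (the
real in-kernel reaction to an overflow differs; this only shows the idea):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

struct counter { unsigned int v; };

/* "checked": refuses to wrap, the property REFCOUNT enforces for atomic_t */
static void inc_checked(struct counter *c)
{
        if (c->v == UINT_MAX) {
                fprintf(stderr, "counter overflow detected\n");
                abort();
        }
        c->v++;
}

/* "unchecked": wrapping is acceptable for pure statistics */
static void inc_unchecked(struct counter *c)
{
        c->v++;
}

int main(void)
{
        struct counter stats = { UINT_MAX };

        inc_unchecked(&stats);          /* wraps to 0 without complaint */
        inc_checked(&stats);            /* fine while far from the limit */
        printf("stats=%u\n", stats.v);
        return 0;
}
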
35608diff -urNp linux-2.6.32.45/drivers/parisc/pdc_stable.c linux-2.6.32.45/drivers/parisc/pdc_stable.c
35609--- linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
35610+++ linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
35611@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
35612 return ret;
35613 }
35614
35615-static struct sysfs_ops pdcspath_attr_ops = {
35616+static const struct sysfs_ops pdcspath_attr_ops = {
35617 .show = pdcspath_attr_show,
35618 .store = pdcspath_attr_store,
35619 };
35620diff -urNp linux-2.6.32.45/drivers/parport/procfs.c linux-2.6.32.45/drivers/parport/procfs.c
35621--- linux-2.6.32.45/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
35622+++ linux-2.6.32.45/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
35623@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
35624
35625 *ppos += len;
35626
35627- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35628+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35629 }
35630
35631 #ifdef CONFIG_PARPORT_1284
35632@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
35633
35634 *ppos += len;
35635
35636- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35637+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35638 }
35639 #endif /* IEEE1284.3 support. */
35640
35641diff -urNp linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c
35642--- linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
35643+++ linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
35644@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
35645 }
35646
35647
35648-static struct acpi_dock_ops acpiphp_dock_ops = {
35649+static const struct acpi_dock_ops acpiphp_dock_ops = {
35650 .handler = handle_hotplug_event_func,
35651 };
35652
35653diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h
35654--- linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-03-27 14:31:47.000000000 -0400
35655+++ linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:33:55.000000000 -0400
35656@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35657 int (*hardware_test) (struct slot* slot, u32 value);
35658 u8 (*get_power) (struct slot* slot);
35659 int (*set_power) (struct slot* slot, int value);
35660-};
35661+} __no_const;
35662
35663 struct cpci_hp_controller {
35664 unsigned int irq;
35665diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c
35666--- linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
35667+++ linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
35668@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
35669
35670 void compaq_nvram_init (void __iomem *rom_start)
35671 {
35672+
35673+#ifndef CONFIG_PAX_KERNEXEC
35674 if (rom_start) {
35675 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35676 }
35677+#endif
35678+
35679 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35680
35681 /* initialize our int15 lock */
35682diff -urNp linux-2.6.32.45/drivers/pci/hotplug/fakephp.c linux-2.6.32.45/drivers/pci/hotplug/fakephp.c
35683--- linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
35684+++ linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
35685@@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
35686 }
35687
35688 static struct kobj_type legacy_ktype = {
35689- .sysfs_ops = &(struct sysfs_ops){
35690+ .sysfs_ops = &(const struct sysfs_ops){
35691 .store = legacy_store, .show = legacy_show
35692 },
35693 .release = &legacy_release,
35694diff -urNp linux-2.6.32.45/drivers/pci/intel-iommu.c linux-2.6.32.45/drivers/pci/intel-iommu.c
35695--- linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
35696+++ linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
35697@@ -2643,7 +2643,7 @@ error:
35698 return 0;
35699 }
35700
35701-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
35702+dma_addr_t intel_map_page(struct device *dev, struct page *page,
35703 unsigned long offset, size_t size,
35704 enum dma_data_direction dir,
35705 struct dma_attrs *attrs)
35706@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
35707 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
35708 }
35709
35710-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35711+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35712 size_t size, enum dma_data_direction dir,
35713 struct dma_attrs *attrs)
35714 {
35715@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
35716 }
35717 }
35718
35719-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
35720+void *intel_alloc_coherent(struct device *hwdev, size_t size,
35721 dma_addr_t *dma_handle, gfp_t flags)
35722 {
35723 void *vaddr;
35724@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
35725 return NULL;
35726 }
35727
35728-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35729+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35730 dma_addr_t dma_handle)
35731 {
35732 int order;
35733@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
35734 free_pages((unsigned long)vaddr, order);
35735 }
35736
35737-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35738+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35739 int nelems, enum dma_data_direction dir,
35740 struct dma_attrs *attrs)
35741 {
35742@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
35743 return nelems;
35744 }
35745
35746-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35747+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35748 enum dma_data_direction dir, struct dma_attrs *attrs)
35749 {
35750 int i;
35751@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
35752 return nelems;
35753 }
35754
35755-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35756+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35757 {
35758 return !dma_addr;
35759 }
35760
35761-struct dma_map_ops intel_dma_ops = {
35762+const struct dma_map_ops intel_dma_ops = {
35763 .alloc_coherent = intel_alloc_coherent,
35764 .free_coherent = intel_free_coherent,
35765 .map_sg = intel_map_sg,
35766diff -urNp linux-2.6.32.45/drivers/pci/pcie/aspm.c linux-2.6.32.45/drivers/pci/pcie/aspm.c
35767--- linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
35768+++ linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
35769@@ -27,9 +27,9 @@
35770 #define MODULE_PARAM_PREFIX "pcie_aspm."
35771
35772 /* Note: those are not register definitions */
35773-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35774-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35775-#define ASPM_STATE_L1 (4) /* L1 state */
35776+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35777+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35778+#define ASPM_STATE_L1 (4U) /* L1 state */
35779 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35780 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35781
35782diff -urNp linux-2.6.32.45/drivers/pci/probe.c linux-2.6.32.45/drivers/pci/probe.c
35783--- linux-2.6.32.45/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
35784+++ linux-2.6.32.45/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
35785@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
35786 return ret;
35787 }
35788
35789-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
35790+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
35791 struct device_attribute *attr,
35792 char *buf)
35793 {
35794 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
35795 }
35796
35797-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
35798+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
35799 struct device_attribute *attr,
35800 char *buf)
35801 {
35802diff -urNp linux-2.6.32.45/drivers/pci/proc.c linux-2.6.32.45/drivers/pci/proc.c
35803--- linux-2.6.32.45/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
35804+++ linux-2.6.32.45/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
35805@@ -480,7 +480,16 @@ static const struct file_operations proc
35806 static int __init pci_proc_init(void)
35807 {
35808 struct pci_dev *dev = NULL;
35809+
35810+#ifdef CONFIG_GRKERNSEC_PROC_ADD
35811+#ifdef CONFIG_GRKERNSEC_PROC_USER
35812+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35813+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35814+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35815+#endif
35816+#else
35817 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35818+#endif
35819 proc_create("devices", 0, proc_bus_pci_dir,
35820 &proc_bus_pci_dev_operations);
35821 proc_initialized = 1;
35822diff -urNp linux-2.6.32.45/drivers/pci/slot.c linux-2.6.32.45/drivers/pci/slot.c
35823--- linux-2.6.32.45/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
35824+++ linux-2.6.32.45/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
35825@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
35826 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
35827 }
35828
35829-static struct sysfs_ops pci_slot_sysfs_ops = {
35830+static const struct sysfs_ops pci_slot_sysfs_ops = {
35831 .show = pci_slot_attr_show,
35832 .store = pci_slot_attr_store,
35833 };
35834diff -urNp linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c
35835--- linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
35836+++ linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
35837@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
35838 return -EFAULT;
35839 }
35840 }
35841- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35842+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35843 if (!buf)
35844 return -ENOMEM;
35845
35846diff -urNp linux-2.6.32.45/drivers/platform/x86/acer-wmi.c linux-2.6.32.45/drivers/platform/x86/acer-wmi.c
35847--- linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
35848+++ linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
35849@@ -918,7 +918,7 @@ static int update_bl_status(struct backl
35850 return 0;
35851 }
35852
35853-static struct backlight_ops acer_bl_ops = {
35854+static const struct backlight_ops acer_bl_ops = {
35855 .get_brightness = read_brightness,
35856 .update_status = update_bl_status,
35857 };
35858diff -urNp linux-2.6.32.45/drivers/platform/x86/asus_acpi.c linux-2.6.32.45/drivers/platform/x86/asus_acpi.c
35859--- linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
35860+++ linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
35861@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
35862 return 0;
35863 }
35864
35865-static struct backlight_ops asus_backlight_data = {
35866+static const struct backlight_ops asus_backlight_data = {
35867 .get_brightness = read_brightness,
35868 .update_status = set_brightness_status,
35869 };
35870diff -urNp linux-2.6.32.45/drivers/platform/x86/asus-laptop.c linux-2.6.32.45/drivers/platform/x86/asus-laptop.c
35871--- linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
35872+++ linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
35873@@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
35874 */
35875 static int read_brightness(struct backlight_device *bd);
35876 static int update_bl_status(struct backlight_device *bd);
35877-static struct backlight_ops asusbl_ops = {
35878+static const struct backlight_ops asusbl_ops = {
35879 .get_brightness = read_brightness,
35880 .update_status = update_bl_status,
35881 };
35882diff -urNp linux-2.6.32.45/drivers/platform/x86/compal-laptop.c linux-2.6.32.45/drivers/platform/x86/compal-laptop.c
35883--- linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
35884+++ linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
35885@@ -163,7 +163,7 @@ static int bl_update_status(struct backl
35886 return set_lcd_level(b->props.brightness);
35887 }
35888
35889-static struct backlight_ops compalbl_ops = {
35890+static const struct backlight_ops compalbl_ops = {
35891 .get_brightness = bl_get_brightness,
35892 .update_status = bl_update_status,
35893 };
35894diff -urNp linux-2.6.32.45/drivers/platform/x86/dell-laptop.c linux-2.6.32.45/drivers/platform/x86/dell-laptop.c
35895--- linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
35896+++ linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
35897@@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
35898 return buffer.output[1];
35899 }
35900
35901-static struct backlight_ops dell_ops = {
35902+static const struct backlight_ops dell_ops = {
35903 .get_brightness = dell_get_intensity,
35904 .update_status = dell_send_intensity,
35905 };
35906diff -urNp linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c
35907--- linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
35908+++ linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
35909@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
35910 */
35911 static int read_brightness(struct backlight_device *bd);
35912 static int update_bl_status(struct backlight_device *bd);
35913-static struct backlight_ops eeepcbl_ops = {
35914+static const struct backlight_ops eeepcbl_ops = {
35915 .get_brightness = read_brightness,
35916 .update_status = update_bl_status,
35917 };
35918diff -urNp linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c
35919--- linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
35920+++ linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
35921@@ -436,7 +436,7 @@ static int bl_update_status(struct backl
35922 return ret;
35923 }
35924
35925-static struct backlight_ops fujitsubl_ops = {
35926+static const struct backlight_ops fujitsubl_ops = {
35927 .get_brightness = bl_get_brightness,
35928 .update_status = bl_update_status,
35929 };
35930diff -urNp linux-2.6.32.45/drivers/platform/x86/msi-laptop.c linux-2.6.32.45/drivers/platform/x86/msi-laptop.c
35931--- linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
35932+++ linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
35933@@ -161,7 +161,7 @@ static int bl_update_status(struct backl
35934 return set_lcd_level(b->props.brightness);
35935 }
35936
35937-static struct backlight_ops msibl_ops = {
35938+static const struct backlight_ops msibl_ops = {
35939 .get_brightness = bl_get_brightness,
35940 .update_status = bl_update_status,
35941 };
35942diff -urNp linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c
35943--- linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
35944+++ linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
35945@@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
35946 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
35947 }
35948
35949-static struct backlight_ops pcc_backlight_ops = {
35950+static const struct backlight_ops pcc_backlight_ops = {
35951 .get_brightness = bl_get,
35952 .update_status = bl_set_status,
35953 };
35954diff -urNp linux-2.6.32.45/drivers/platform/x86/sony-laptop.c linux-2.6.32.45/drivers/platform/x86/sony-laptop.c
35955--- linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
35956+++ linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
35957@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
35958 }
35959
35960 static struct backlight_device *sony_backlight_device;
35961-static struct backlight_ops sony_backlight_ops = {
35962+static const struct backlight_ops sony_backlight_ops = {
35963 .update_status = sony_backlight_update_status,
35964 .get_brightness = sony_backlight_get_brightness,
35965 };
35966diff -urNp linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c
35967--- linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
35968+++ linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:33:55.000000000 -0400
35969@@ -2137,7 +2137,7 @@ static int hotkey_mask_get(void)
35970 return 0;
35971 }
35972
35973-void static hotkey_mask_warn_incomplete_mask(void)
35974+static void hotkey_mask_warn_incomplete_mask(void)
35975 {
35976 /* log only what the user can fix... */
35977 const u32 wantedmask = hotkey_driver_mask &
35978@@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
35979 BACKLIGHT_UPDATE_HOTKEY);
35980 }
35981
35982-static struct backlight_ops ibm_backlight_data = {
35983+static const struct backlight_ops ibm_backlight_data = {
35984 .get_brightness = brightness_get,
35985 .update_status = brightness_update_status,
35986 };
35987diff -urNp linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c
35988--- linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
35989+++ linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
35990@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
35991 return AE_OK;
35992 }
35993
35994-static struct backlight_ops toshiba_backlight_data = {
35995+static const struct backlight_ops toshiba_backlight_data = {
35996 .get_brightness = get_lcd,
35997 .update_status = set_lcd_status,
35998 };
35999diff -urNp linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c
36000--- linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
36001+++ linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
36002@@ -60,7 +60,7 @@ do { \
36003 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36004 } while(0)
36005
36006-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36007+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36008 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36009
36010 /*
36011@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
36012
36013 cpu = get_cpu();
36014 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36015+
36016+ pax_open_kernel();
36017 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36018+ pax_close_kernel();
36019
36020 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36021 spin_lock_irqsave(&pnp_bios_lock, flags);
36022@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
36023 :"memory");
36024 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36025
36026+ pax_open_kernel();
36027 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36028+ pax_close_kernel();
36029+
36030 put_cpu();
36031
36032 /* If we get here and this is set then the PnP BIOS faulted on us. */
36033@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
36034 return status;
36035 }
36036
36037-void pnpbios_calls_init(union pnp_bios_install_struct *header)
36038+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36039 {
36040 int i;
36041
36042@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
36043 pnp_bios_callpoint.offset = header->fields.pm16offset;
36044 pnp_bios_callpoint.segment = PNP_CS16;
36045
36046+ pax_open_kernel();
36047+
36048 for_each_possible_cpu(i) {
36049 struct desc_struct *gdt = get_cpu_gdt_table(i);
36050 if (!gdt)
36051@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
36052 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36053 (unsigned long)__va(header->fields.pm16dseg));
36054 }
36055+
36056+ pax_close_kernel();
36057 }
36058diff -urNp linux-2.6.32.45/drivers/pnp/resource.c linux-2.6.32.45/drivers/pnp/resource.c
36059--- linux-2.6.32.45/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
36060+++ linux-2.6.32.45/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
36061@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
36062 return 1;
36063
36064 /* check if the resource is valid */
36065- if (*irq < 0 || *irq > 15)
36066+ if (*irq > 15)
36067 return 0;
36068
36069 /* check if the resource is reserved */
36070@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
36071 return 1;
36072
36073 /* check if the resource is valid */
36074- if (*dma < 0 || *dma == 4 || *dma > 7)
36075+ if (*dma == 4 || *dma > 7)
36076 return 0;
36077
36078 /* check if the resource is reserved */
36079diff -urNp linux-2.6.32.45/drivers/power/bq27x00_battery.c linux-2.6.32.45/drivers/power/bq27x00_battery.c
36080--- linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-03-27 14:31:47.000000000 -0400
36081+++ linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-08-05 20:33:55.000000000 -0400
36082@@ -44,7 +44,7 @@ struct bq27x00_device_info;
36083 struct bq27x00_access_methods {
36084 int (*read)(u8 reg, int *rt_value, int b_single,
36085 struct bq27x00_device_info *di);
36086-};
36087+} __no_const;
36088
36089 struct bq27x00_device_info {
36090 struct device *dev;
36091diff -urNp linux-2.6.32.45/drivers/rtc/rtc-dev.c linux-2.6.32.45/drivers/rtc/rtc-dev.c
36092--- linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
36093+++ linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
36094@@ -14,6 +14,7 @@
36095 #include <linux/module.h>
36096 #include <linux/rtc.h>
36097 #include <linux/sched.h>
36098+#include <linux/grsecurity.h>
36099 #include "rtc-core.h"
36100
36101 static dev_t rtc_devt;
36102@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
36103 if (copy_from_user(&tm, uarg, sizeof(tm)))
36104 return -EFAULT;
36105
36106+ gr_log_timechange();
36107+
36108 return rtc_set_time(rtc, &tm);
36109
36110 case RTC_PIE_ON:
36111diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.c linux-2.6.32.45/drivers/s390/cio/qdio_perf.c
36112--- linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
36113+++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
36114@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
36115 static int qdio_perf_proc_show(struct seq_file *m, void *v)
36116 {
36117 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
36118- (long)atomic_long_read(&perf_stats.qdio_int));
36119+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
36120 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
36121- (long)atomic_long_read(&perf_stats.pci_int));
36122+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
36123 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
36124- (long)atomic_long_read(&perf_stats.thin_int));
36125+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
36126 seq_printf(m, "\n");
36127 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
36128- (long)atomic_long_read(&perf_stats.tasklet_inbound));
36129+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
36130 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
36131- (long)atomic_long_read(&perf_stats.tasklet_outbound));
36132+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
36133 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
36134- (long)atomic_long_read(&perf_stats.tasklet_thinint),
36135- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
36136+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
36137+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
36138 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
36139- (long)atomic_long_read(&perf_stats.thinint_inbound),
36140- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
36141+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
36142+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
36143 seq_printf(m, "\n");
36144 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
36145- (long)atomic_long_read(&perf_stats.siga_in));
36146+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
36147 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
36148- (long)atomic_long_read(&perf_stats.siga_out));
36149+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
36150 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
36151- (long)atomic_long_read(&perf_stats.siga_sync));
36152+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
36153 seq_printf(m, "\n");
36154 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
36155- (long)atomic_long_read(&perf_stats.inbound_handler));
36156+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
36157 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
36158- (long)atomic_long_read(&perf_stats.outbound_handler));
36159+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
36160 seq_printf(m, "\n");
36161 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
36162- (long)atomic_long_read(&perf_stats.fast_requeue));
36163+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
36164 seq_printf(m, "Number of outbound target full condition\t: %li\n",
36165- (long)atomic_long_read(&perf_stats.outbound_target_full));
36166+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
36167 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
36168- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
36169+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
36170 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
36171- (long)atomic_long_read(&perf_stats.debug_stop_polling));
36172+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
36173 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
36174- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
36175+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
36176 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
36177- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
36178- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
36179+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
36180+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
36181 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
36182- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
36183- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
36184+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
36185+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
36186 seq_printf(m, "\n");
36187 return 0;
36188 }
36189diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.h linux-2.6.32.45/drivers/s390/cio/qdio_perf.h
36190--- linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
36191+++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
36192@@ -13,46 +13,46 @@
36193
36194 struct qdio_perf_stats {
36195 /* interrupt handler calls */
36196- atomic_long_t qdio_int;
36197- atomic_long_t pci_int;
36198- atomic_long_t thin_int;
36199+ atomic_long_unchecked_t qdio_int;
36200+ atomic_long_unchecked_t pci_int;
36201+ atomic_long_unchecked_t thin_int;
36202
36203 /* tasklet runs */
36204- atomic_long_t tasklet_inbound;
36205- atomic_long_t tasklet_outbound;
36206- atomic_long_t tasklet_thinint;
36207- atomic_long_t tasklet_thinint_loop;
36208- atomic_long_t thinint_inbound;
36209- atomic_long_t thinint_inbound_loop;
36210- atomic_long_t thinint_inbound_loop2;
36211+ atomic_long_unchecked_t tasklet_inbound;
36212+ atomic_long_unchecked_t tasklet_outbound;
36213+ atomic_long_unchecked_t tasklet_thinint;
36214+ atomic_long_unchecked_t tasklet_thinint_loop;
36215+ atomic_long_unchecked_t thinint_inbound;
36216+ atomic_long_unchecked_t thinint_inbound_loop;
36217+ atomic_long_unchecked_t thinint_inbound_loop2;
36218
36219 /* signal adapter calls */
36220- atomic_long_t siga_out;
36221- atomic_long_t siga_in;
36222- atomic_long_t siga_sync;
36223+ atomic_long_unchecked_t siga_out;
36224+ atomic_long_unchecked_t siga_in;
36225+ atomic_long_unchecked_t siga_sync;
36226
36227 /* misc */
36228- atomic_long_t inbound_handler;
36229- atomic_long_t outbound_handler;
36230- atomic_long_t fast_requeue;
36231- atomic_long_t outbound_target_full;
36232+ atomic_long_unchecked_t inbound_handler;
36233+ atomic_long_unchecked_t outbound_handler;
36234+ atomic_long_unchecked_t fast_requeue;
36235+ atomic_long_unchecked_t outbound_target_full;
36236
36237 /* for debugging */
36238- atomic_long_t debug_tl_out_timer;
36239- atomic_long_t debug_stop_polling;
36240- atomic_long_t debug_eqbs_all;
36241- atomic_long_t debug_eqbs_incomplete;
36242- atomic_long_t debug_sqbs_all;
36243- atomic_long_t debug_sqbs_incomplete;
36244+ atomic_long_unchecked_t debug_tl_out_timer;
36245+ atomic_long_unchecked_t debug_stop_polling;
36246+ atomic_long_unchecked_t debug_eqbs_all;
36247+ atomic_long_unchecked_t debug_eqbs_incomplete;
36248+ atomic_long_unchecked_t debug_sqbs_all;
36249+ atomic_long_unchecked_t debug_sqbs_incomplete;
36250 };
36251
36252 extern struct qdio_perf_stats perf_stats;
36253 extern int qdio_performance_stats;
36254
36255-static inline void qdio_perf_stat_inc(atomic_long_t *count)
36256+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
36257 {
36258 if (qdio_performance_stats)
36259- atomic_long_inc(count);
36260+ atomic_long_inc_unchecked(count);
36261 }
36262
36263 int qdio_setup_perf_stats(void);
36264diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h
36265--- linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-03-27 14:31:47.000000000 -0400
36266+++ linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:33:55.000000000 -0400
36267@@ -471,7 +471,7 @@ struct adapter_ops
36268 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36269 /* Administrative operations */
36270 int (*adapter_comm)(struct aac_dev * dev, int comm);
36271-};
36272+} __no_const;
36273
36274 /*
36275 * Define which interrupt handler needs to be installed
36276diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c
36277--- linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
36278+++ linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
36279@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
36280 u32 actual_fibsize64, actual_fibsize = 0;
36281 int i;
36282
36283+ pax_track_stack();
36284
36285 if (dev->in_reset) {
36286 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
36287diff -urNp linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c
36288--- linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
36289+++ linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
36290@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
36291 flash_error_table[i].reason);
36292 }
36293
36294-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
36295+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
36296 asd_show_update_bios, asd_store_update_bios);
36297
36298 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
36299diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h
36300--- linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-03-27 14:31:47.000000000 -0400
36301+++ linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-08-05 20:33:55.000000000 -0400
36302@@ -61,7 +61,7 @@ struct bfa_hwif_s {
36303 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
36304 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
36305 u32 *nvecs, u32 *maxvec);
36306-};
36307+} __no_const;
36308 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36309
36310 struct bfa_iocfc_s {
36311diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h
36312--- linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-03-27 14:31:47.000000000 -0400
36313+++ linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:33:55.000000000 -0400
36314@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
36315 bfa_ioc_disable_cbfn_t disable_cbfn;
36316 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36317 bfa_ioc_reset_cbfn_t reset_cbfn;
36318-};
36319+} __no_const;
36320
36321 /**
36322 * Heartbeat failure notification queue element.
36323diff -urNp linux-2.6.32.45/drivers/scsi/BusLogic.c linux-2.6.32.45/drivers/scsi/BusLogic.c
36324--- linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
36325+++ linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
36326@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
36327 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
36328 *PrototypeHostAdapter)
36329 {
36330+ pax_track_stack();
36331+
36332 /*
36333 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
36334 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
36335diff -urNp linux-2.6.32.45/drivers/scsi/dpt_i2o.c linux-2.6.32.45/drivers/scsi/dpt_i2o.c
36336--- linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
36337+++ linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
36338@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
36339 dma_addr_t addr;
36340 ulong flags = 0;
36341
36342+ pax_track_stack();
36343+
36344 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
36345 // get user msg size in u32s
36346 if(get_user(size, &user_msg[0])){
36347@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
36348 s32 rcode;
36349 dma_addr_t addr;
36350
36351+ pax_track_stack();
36352+
36353 memset(msg, 0 , sizeof(msg));
36354 len = scsi_bufflen(cmd);
36355 direction = 0x00000000;
36356diff -urNp linux-2.6.32.45/drivers/scsi/eata.c linux-2.6.32.45/drivers/scsi/eata.c
36357--- linux-2.6.32.45/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
36358+++ linux-2.6.32.45/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
36359@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
36360 struct hostdata *ha;
36361 char name[16];
36362
36363+ pax_track_stack();
36364+
36365 sprintf(name, "%s%d", driver_name, j);
36366
36367 if (!request_region(port_base, REGION_SIZE, driver_name)) {
36368diff -urNp linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c
36369--- linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
36370+++ linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
36371@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
36372 size_t rlen;
36373 size_t dlen;
36374
36375+ pax_track_stack();
36376+
36377 fiph = (struct fip_header *)skb->data;
36378 sub = fiph->fip_subcode;
36379 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
36380diff -urNp linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c
36381--- linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-03-27 14:31:47.000000000 -0400
36382+++ linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-08-05 20:33:55.000000000 -0400
36383@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct p
36384 /* Start local port initiatialization */
36385
36386 lp->link_up = 0;
36387- lp->tt = fnic_transport_template;
36388+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
36389
36390 lp->max_retry_count = fnic->config.flogi_retries;
36391 lp->max_rport_retry_count = fnic->config.plogi_retries;
36392diff -urNp linux-2.6.32.45/drivers/scsi/gdth.c linux-2.6.32.45/drivers/scsi/gdth.c
36393--- linux-2.6.32.45/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
36394+++ linux-2.6.32.45/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
36395@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
36396 ulong flags;
36397 gdth_ha_str *ha;
36398
36399+ pax_track_stack();
36400+
36401 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
36402 return -EFAULT;
36403 ha = gdth_find_ha(ldrv.ionode);
36404@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
36405 gdth_ha_str *ha;
36406 int rval;
36407
36408+ pax_track_stack();
36409+
36410 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
36411 res.number >= MAX_HDRIVES)
36412 return -EFAULT;
36413@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
36414 gdth_ha_str *ha;
36415 int rval;
36416
36417+ pax_track_stack();
36418+
36419 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
36420 return -EFAULT;
36421 ha = gdth_find_ha(gen.ionode);
36422@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
36423 int i;
36424 gdth_cmd_str gdtcmd;
36425 char cmnd[MAX_COMMAND_SIZE];
36426+
36427+ pax_track_stack();
36428+
36429 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
36430
36431 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
36432diff -urNp linux-2.6.32.45/drivers/scsi/gdth_proc.c linux-2.6.32.45/drivers/scsi/gdth_proc.c
36433--- linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
36434+++ linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
36435@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
36436 ulong64 paddr;
36437
36438 char cmnd[MAX_COMMAND_SIZE];
36439+
36440+ pax_track_stack();
36441+
36442 memset(cmnd, 0xff, 12);
36443 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
36444
36445@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
36446 gdth_hget_str *phg;
36447 char cmnd[MAX_COMMAND_SIZE];
36448
36449+ pax_track_stack();
36450+
36451 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
36452 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
36453 if (!gdtcmd || !estr)
36454diff -urNp linux-2.6.32.45/drivers/scsi/hosts.c linux-2.6.32.45/drivers/scsi/hosts.c
36455--- linux-2.6.32.45/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
36456+++ linux-2.6.32.45/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
36457@@ -40,7 +40,7 @@
36458 #include "scsi_logging.h"
36459
36460
36461-static atomic_t scsi_host_next_hn; /* host_no for next new host */
36462+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36463
36464
36465 static void scsi_host_cls_release(struct device *dev)
36466@@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
36467 * subtract one because we increment first then return, but we need to
36468 * know what the next host number was before increment
36469 */
36470- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36471+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36472 shost->dma_channel = 0xff;
36473
36474 /* These three are default values which can be overridden */
36475diff -urNp linux-2.6.32.45/drivers/scsi/ipr.c linux-2.6.32.45/drivers/scsi/ipr.c
36476--- linux-2.6.32.45/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
36477+++ linux-2.6.32.45/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
36478@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
36479 return true;
36480 }
36481
36482-static struct ata_port_operations ipr_sata_ops = {
36483+static const struct ata_port_operations ipr_sata_ops = {
36484 .phy_reset = ipr_ata_phy_reset,
36485 .hardreset = ipr_sata_reset,
36486 .post_internal_cmd = ipr_ata_post_internal,
36487diff -urNp linux-2.6.32.45/drivers/scsi/ips.h linux-2.6.32.45/drivers/scsi/ips.h
36488--- linux-2.6.32.45/drivers/scsi/ips.h 2011-03-27 14:31:47.000000000 -0400
36489+++ linux-2.6.32.45/drivers/scsi/ips.h 2011-08-05 20:33:55.000000000 -0400
36490@@ -1027,7 +1027,7 @@ typedef struct {
36491 int (*intr)(struct ips_ha *);
36492 void (*enableint)(struct ips_ha *);
36493 uint32_t (*statupd)(struct ips_ha *);
36494-} ips_hw_func_t;
36495+} __no_const ips_hw_func_t;
36496
36497 typedef struct ips_ha {
36498 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36499diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c
36500--- linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c 2011-03-27 14:31:47.000000000 -0400
36501+++ linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c 2011-08-05 20:33:55.000000000 -0400
36502@@ -715,16 +715,16 @@ int fc_disc_init(struct fc_lport *lport)
36503 struct fc_disc *disc;
36504
36505 if (!lport->tt.disc_start)
36506- lport->tt.disc_start = fc_disc_start;
36507+ *(void **)&lport->tt.disc_start = fc_disc_start;
36508
36509 if (!lport->tt.disc_stop)
36510- lport->tt.disc_stop = fc_disc_stop;
36511+ *(void **)&lport->tt.disc_stop = fc_disc_stop;
36512
36513 if (!lport->tt.disc_stop_final)
36514- lport->tt.disc_stop_final = fc_disc_stop_final;
36515+ *(void **)&lport->tt.disc_stop_final = fc_disc_stop_final;
36516
36517 if (!lport->tt.disc_recv_req)
36518- lport->tt.disc_recv_req = fc_disc_recv_req;
36519+ *(void **)&lport->tt.disc_recv_req = fc_disc_recv_req;
36520
36521 disc = &lport->disc;
36522 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
36523diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c
36524--- linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c 2011-03-27 14:31:47.000000000 -0400
36525+++ linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c 2011-08-05 20:33:55.000000000 -0400
36526@@ -67,7 +67,7 @@ static struct fc_seq *fc_elsct_send(stru
36527 int fc_elsct_init(struct fc_lport *lport)
36528 {
36529 if (!lport->tt.elsct_send)
36530- lport->tt.elsct_send = fc_elsct_send;
36531+ *(void **)&lport->tt.elsct_send = fc_elsct_send;
36532
36533 return 0;
36534 }
36535diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c
36536--- linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
36537+++ linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-08-05 20:33:55.000000000 -0400
36538@@ -86,12 +86,12 @@ struct fc_exch_mgr {
36539 * all together if not used XXX
36540 */
36541 struct {
36542- atomic_t no_free_exch;
36543- atomic_t no_free_exch_xid;
36544- atomic_t xid_not_found;
36545- atomic_t xid_busy;
36546- atomic_t seq_not_found;
36547- atomic_t non_bls_resp;
36548+ atomic_unchecked_t no_free_exch;
36549+ atomic_unchecked_t no_free_exch_xid;
36550+ atomic_unchecked_t xid_not_found;
36551+ atomic_unchecked_t xid_busy;
36552+ atomic_unchecked_t seq_not_found;
36553+ atomic_unchecked_t non_bls_resp;
36554 } stats;
36555 };
36556 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
36557@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
36558 /* allocate memory for exchange */
36559 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36560 if (!ep) {
36561- atomic_inc(&mp->stats.no_free_exch);
36562+ atomic_inc_unchecked(&mp->stats.no_free_exch);
36563 goto out;
36564 }
36565 memset(ep, 0, sizeof(*ep));
36566@@ -557,7 +557,7 @@ out:
36567 return ep;
36568 err:
36569 spin_unlock_bh(&pool->lock);
36570- atomic_inc(&mp->stats.no_free_exch_xid);
36571+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36572 mempool_free(ep, mp->ep_pool);
36573 return NULL;
36574 }
36575@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36576 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36577 ep = fc_exch_find(mp, xid);
36578 if (!ep) {
36579- atomic_inc(&mp->stats.xid_not_found);
36580+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36581 reject = FC_RJT_OX_ID;
36582 goto out;
36583 }
36584@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36585 ep = fc_exch_find(mp, xid);
36586 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36587 if (ep) {
36588- atomic_inc(&mp->stats.xid_busy);
36589+ atomic_inc_unchecked(&mp->stats.xid_busy);
36590 reject = FC_RJT_RX_ID;
36591 goto rel;
36592 }
36593@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36594 }
36595 xid = ep->xid; /* get our XID */
36596 } else if (!ep) {
36597- atomic_inc(&mp->stats.xid_not_found);
36598+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36599 reject = FC_RJT_RX_ID; /* XID not found */
36600 goto out;
36601 }
36602@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36603 } else {
36604 sp = &ep->seq;
36605 if (sp->id != fh->fh_seq_id) {
36606- atomic_inc(&mp->stats.seq_not_found);
36607+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36608 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
36609 goto rel;
36610 }
36611@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
36612
36613 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36614 if (!ep) {
36615- atomic_inc(&mp->stats.xid_not_found);
36616+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36617 goto out;
36618 }
36619 if (ep->esb_stat & ESB_ST_COMPLETE) {
36620- atomic_inc(&mp->stats.xid_not_found);
36621+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36622 goto out;
36623 }
36624 if (ep->rxid == FC_XID_UNKNOWN)
36625 ep->rxid = ntohs(fh->fh_rx_id);
36626 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36627- atomic_inc(&mp->stats.xid_not_found);
36628+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36629 goto rel;
36630 }
36631 if (ep->did != ntoh24(fh->fh_s_id) &&
36632 ep->did != FC_FID_FLOGI) {
36633- atomic_inc(&mp->stats.xid_not_found);
36634+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36635 goto rel;
36636 }
36637 sof = fr_sof(fp);
36638@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
36639 } else {
36640 sp = &ep->seq;
36641 if (sp->id != fh->fh_seq_id) {
36642- atomic_inc(&mp->stats.seq_not_found);
36643+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36644 goto rel;
36645 }
36646 }
36647@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
36648 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36649
36650 if (!sp)
36651- atomic_inc(&mp->stats.xid_not_found);
36652+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36653 else
36654- atomic_inc(&mp->stats.non_bls_resp);
36655+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
36656
36657 fc_frame_free(fp);
36658 }
36659@@ -2027,25 +2027,25 @@ EXPORT_SYMBOL(fc_exch_recv);
36660 int fc_exch_init(struct fc_lport *lp)
36661 {
36662 if (!lp->tt.seq_start_next)
36663- lp->tt.seq_start_next = fc_seq_start_next;
36664+ *(void **)&lp->tt.seq_start_next = fc_seq_start_next;
36665
36666 if (!lp->tt.exch_seq_send)
36667- lp->tt.exch_seq_send = fc_exch_seq_send;
36668+ *(void **)&lp->tt.exch_seq_send = fc_exch_seq_send;
36669
36670 if (!lp->tt.seq_send)
36671- lp->tt.seq_send = fc_seq_send;
36672+ *(void **)&lp->tt.seq_send = fc_seq_send;
36673
36674 if (!lp->tt.seq_els_rsp_send)
36675- lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
36676+ *(void **)&lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
36677
36678 if (!lp->tt.exch_done)
36679- lp->tt.exch_done = fc_exch_done;
36680+ *(void **)&lp->tt.exch_done = fc_exch_done;
36681
36682 if (!lp->tt.exch_mgr_reset)
36683- lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
36684+ *(void **)&lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
36685
36686 if (!lp->tt.seq_exch_abort)
36687- lp->tt.seq_exch_abort = fc_seq_exch_abort;
36688+ *(void **)&lp->tt.seq_exch_abort = fc_seq_exch_abort;
36689
36690 /*
36691 * Initialize fc_cpu_mask and fc_cpu_order. The
36692diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c
36693--- linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c 2011-03-27 14:31:47.000000000 -0400
36694+++ linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c 2011-08-05 20:33:55.000000000 -0400
36695@@ -2105,13 +2105,13 @@ int fc_fcp_init(struct fc_lport *lp)
36696 struct fc_fcp_internal *si;
36697
36698 if (!lp->tt.fcp_cmd_send)
36699- lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
36700+ *(void **)&lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
36701
36702 if (!lp->tt.fcp_cleanup)
36703- lp->tt.fcp_cleanup = fc_fcp_cleanup;
36704+ *(void **)&lp->tt.fcp_cleanup = fc_fcp_cleanup;
36705
36706 if (!lp->tt.fcp_abort_io)
36707- lp->tt.fcp_abort_io = fc_fcp_abort_io;
36708+ *(void **)&lp->tt.fcp_abort_io = fc_fcp_abort_io;
36709
36710 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
36711 if (!si)
36712diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c
36713--- linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c 2011-03-27 14:31:47.000000000 -0400
36714+++ linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c 2011-08-05 20:33:55.000000000 -0400
36715@@ -569,7 +569,7 @@ int fc_lport_destroy(struct fc_lport *lp
36716 mutex_lock(&lport->lp_mutex);
36717 lport->state = LPORT_ST_DISABLED;
36718 lport->link_up = 0;
36719- lport->tt.frame_send = fc_frame_drop;
36720+ *(void **)&lport->tt.frame_send = fc_frame_drop;
36721 mutex_unlock(&lport->lp_mutex);
36722
36723 lport->tt.fcp_abort_io(lport);
36724@@ -1477,10 +1477,10 @@ EXPORT_SYMBOL(fc_lport_config);
36725 int fc_lport_init(struct fc_lport *lport)
36726 {
36727 if (!lport->tt.lport_recv)
36728- lport->tt.lport_recv = fc_lport_recv_req;
36729+ *(void **)&lport->tt.lport_recv = fc_lport_recv_req;
36730
36731 if (!lport->tt.lport_reset)
36732- lport->tt.lport_reset = fc_lport_reset;
36733+ *(void **)&lport->tt.lport_reset = fc_lport_reset;
36734
36735 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
36736 fc_host_node_name(lport->host) = lport->wwnn;
36737diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c
36738--- linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c 2011-03-27 14:31:47.000000000 -0400
36739+++ linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c 2011-08-05 20:33:55.000000000 -0400
36740@@ -1566,25 +1566,25 @@ static void fc_rport_flush_queue(void)
36741 int fc_rport_init(struct fc_lport *lport)
36742 {
36743 if (!lport->tt.rport_lookup)
36744- lport->tt.rport_lookup = fc_rport_lookup;
36745+ *(void **)&lport->tt.rport_lookup = fc_rport_lookup;
36746
36747 if (!lport->tt.rport_create)
36748- lport->tt.rport_create = fc_rport_create;
36749+ *(void **)&lport->tt.rport_create = fc_rport_create;
36750
36751 if (!lport->tt.rport_login)
36752- lport->tt.rport_login = fc_rport_login;
36753+ *(void **)&lport->tt.rport_login = fc_rport_login;
36754
36755 if (!lport->tt.rport_logoff)
36756- lport->tt.rport_logoff = fc_rport_logoff;
36757+ *(void **)&lport->tt.rport_logoff = fc_rport_logoff;
36758
36759 if (!lport->tt.rport_recv_req)
36760- lport->tt.rport_recv_req = fc_rport_recv_req;
36761+ *(void **)&lport->tt.rport_recv_req = fc_rport_recv_req;
36762
36763 if (!lport->tt.rport_flush_queue)
36764- lport->tt.rport_flush_queue = fc_rport_flush_queue;
36765+ *(void **)&lport->tt.rport_flush_queue = fc_rport_flush_queue;
36766
36767 if (!lport->tt.rport_destroy)
36768- lport->tt.rport_destroy = fc_rport_destroy;
36769+ *(void **)&lport->tt.rport_destroy = fc_rport_destroy;
36770
36771 return 0;
36772 }
36773diff -urNp linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c
36774--- linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
36775+++ linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
36776@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
36777 }
36778 }
36779
36780-static struct ata_port_operations sas_sata_ops = {
36781+static const struct ata_port_operations sas_sata_ops = {
36782 .phy_reset = sas_ata_phy_reset,
36783 .post_internal_cmd = sas_ata_post_internal,
36784 .qc_defer = ata_std_qc_defer,
36785diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c
36786--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
36787+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
36788@@ -124,7 +124,7 @@ struct lpfc_debug {
36789 int len;
36790 };
36791
36792-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36793+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36794 static unsigned long lpfc_debugfs_start_time = 0L;
36795
36796 /**
36797@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
36798 lpfc_debugfs_enable = 0;
36799
36800 len = 0;
36801- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36802+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36803 (lpfc_debugfs_max_disc_trc - 1);
36804 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36805 dtp = vport->disc_trc + i;
36806@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
36807 lpfc_debugfs_enable = 0;
36808
36809 len = 0;
36810- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36811+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36812 (lpfc_debugfs_max_slow_ring_trc - 1);
36813 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36814 dtp = phba->slow_ring_trc + i;
36815@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
36816 uint32_t *ptr;
36817 char buffer[1024];
36818
36819+ pax_track_stack();
36820+
36821 off = 0;
36822 spin_lock_irq(&phba->hbalock);
36823
36824@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
36825 !vport || !vport->disc_trc)
36826 return;
36827
36828- index = atomic_inc_return(&vport->disc_trc_cnt) &
36829+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36830 (lpfc_debugfs_max_disc_trc - 1);
36831 dtp = vport->disc_trc + index;
36832 dtp->fmt = fmt;
36833 dtp->data1 = data1;
36834 dtp->data2 = data2;
36835 dtp->data3 = data3;
36836- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36837+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36838 dtp->jif = jiffies;
36839 #endif
36840 return;
36841@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
36842 !phba || !phba->slow_ring_trc)
36843 return;
36844
36845- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36846+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36847 (lpfc_debugfs_max_slow_ring_trc - 1);
36848 dtp = phba->slow_ring_trc + index;
36849 dtp->fmt = fmt;
36850 dtp->data1 = data1;
36851 dtp->data2 = data2;
36852 dtp->data3 = data3;
36853- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36854+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36855 dtp->jif = jiffies;
36856 #endif
36857 return;
36858@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36859 "slow_ring buffer\n");
36860 goto debug_failed;
36861 }
36862- atomic_set(&phba->slow_ring_trc_cnt, 0);
36863+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36864 memset(phba->slow_ring_trc, 0,
36865 (sizeof(struct lpfc_debugfs_trc) *
36866 lpfc_debugfs_max_slow_ring_trc));
36867@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36868 "buffer\n");
36869 goto debug_failed;
36870 }
36871- atomic_set(&vport->disc_trc_cnt, 0);
36872+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36873
36874 snprintf(name, sizeof(name), "discovery_trace");
36875 vport->debug_disc_trc =
36876diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h
36877--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
36878+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
36879@@ -400,7 +400,7 @@ struct lpfc_vport {
36880 struct dentry *debug_nodelist;
36881 struct dentry *vport_debugfs_root;
36882 struct lpfc_debugfs_trc *disc_trc;
36883- atomic_t disc_trc_cnt;
36884+ atomic_unchecked_t disc_trc_cnt;
36885 #endif
36886 uint8_t stat_data_enabled;
36887 uint8_t stat_data_blocked;
36888@@ -725,8 +725,8 @@ struct lpfc_hba {
36889 struct timer_list fabric_block_timer;
36890 unsigned long bit_flags;
36891 #define FABRIC_COMANDS_BLOCKED 0
36892- atomic_t num_rsrc_err;
36893- atomic_t num_cmd_success;
36894+ atomic_unchecked_t num_rsrc_err;
36895+ atomic_unchecked_t num_cmd_success;
36896 unsigned long last_rsrc_error_time;
36897 unsigned long last_ramp_down_time;
36898 unsigned long last_ramp_up_time;
36899@@ -740,7 +740,7 @@ struct lpfc_hba {
36900 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
36901 struct dentry *debug_slow_ring_trc;
36902 struct lpfc_debugfs_trc *slow_ring_trc;
36903- atomic_t slow_ring_trc_cnt;
36904+ atomic_unchecked_t slow_ring_trc_cnt;
36905 #endif
36906
36907 /* Used for deferred freeing of ELS data buffers */
36908diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c
36909--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-03-27 14:31:47.000000000 -0400
36910+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:33:55.000000000 -0400
36911@@ -8021,8 +8021,10 @@ lpfc_init(void)
36912 printk(LPFC_COPYRIGHT "\n");
36913
36914 if (lpfc_enable_npiv) {
36915- lpfc_transport_functions.vport_create = lpfc_vport_create;
36916- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36917+ pax_open_kernel();
36918+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36919+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36920+ pax_close_kernel();
36921 }
36922 lpfc_transport_template =
36923 fc_attach_transport(&lpfc_transport_functions);
36924diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c
36925--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
36926+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
36927@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
36928 uint32_t evt_posted;
36929
36930 spin_lock_irqsave(&phba->hbalock, flags);
36931- atomic_inc(&phba->num_rsrc_err);
36932+ atomic_inc_unchecked(&phba->num_rsrc_err);
36933 phba->last_rsrc_error_time = jiffies;
36934
36935 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36936@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
36937 unsigned long flags;
36938 struct lpfc_hba *phba = vport->phba;
36939 uint32_t evt_posted;
36940- atomic_inc(&phba->num_cmd_success);
36941+ atomic_inc_unchecked(&phba->num_cmd_success);
36942
36943 if (vport->cfg_lun_queue_depth <= queue_depth)
36944 return;
36945@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36946 int i;
36947 struct lpfc_rport_data *rdata;
36948
36949- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36950- num_cmd_success = atomic_read(&phba->num_cmd_success);
36951+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36952+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36953
36954 vports = lpfc_create_vport_work_array(phba);
36955 if (vports != NULL)
36956@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36957 }
36958 }
36959 lpfc_destroy_vport_work_array(phba, vports);
36960- atomic_set(&phba->num_rsrc_err, 0);
36961- atomic_set(&phba->num_cmd_success, 0);
36962+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36963+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36964 }
36965
36966 /**
36967@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
36968 }
36969 }
36970 lpfc_destroy_vport_work_array(phba, vports);
36971- atomic_set(&phba->num_rsrc_err, 0);
36972- atomic_set(&phba->num_cmd_success, 0);
36973+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36974+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36975 }
36976
36977 /**
36978diff -urNp linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c
36979--- linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
36980+++ linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
36981@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
36982 int rval;
36983 int i;
36984
36985+ pax_track_stack();
36986+
36987 // Allocate memory for the base list of scb for management module.
36988 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
36989
36990diff -urNp linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c
36991--- linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
36992+++ linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
36993@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
36994 int nelem = ARRAY_SIZE(get_attrs), a = 0;
36995 int ret;
36996
36997+ pax_track_stack();
36998+
36999 or = osd_start_request(od, GFP_KERNEL);
37000 if (!or)
37001 return -ENOMEM;
37002diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.c linux-2.6.32.45/drivers/scsi/pmcraid.c
37003--- linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:35:29.000000000 -0400
37004+++ linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:33:59.000000000 -0400
37005@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
37006 res->scsi_dev = scsi_dev;
37007 scsi_dev->hostdata = res;
37008 res->change_detected = 0;
37009- atomic_set(&res->read_failures, 0);
37010- atomic_set(&res->write_failures, 0);
37011+ atomic_set_unchecked(&res->read_failures, 0);
37012+ atomic_set_unchecked(&res->write_failures, 0);
37013 rc = 0;
37014 }
37015 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37016@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
37017
37018 /* If this was a SCSI read/write command keep count of errors */
37019 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37020- atomic_inc(&res->read_failures);
37021+ atomic_inc_unchecked(&res->read_failures);
37022 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37023- atomic_inc(&res->write_failures);
37024+ atomic_inc_unchecked(&res->write_failures);
37025
37026 if (!RES_IS_GSCSI(res->cfg_entry) &&
37027 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37028@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(stru
37029
37030 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37031 /* add resources only after host is added into system */
37032- if (!atomic_read(&pinstance->expose_resources))
37033+ if (!atomic_read_unchecked(&pinstance->expose_resources))
37034 return;
37035
37036 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
37037@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instan
37038 init_waitqueue_head(&pinstance->reset_wait_q);
37039
37040 atomic_set(&pinstance->outstanding_cmds, 0);
37041- atomic_set(&pinstance->expose_resources, 0);
37042+ atomic_set_unchecked(&pinstance->expose_resources, 0);
37043
37044 INIT_LIST_HEAD(&pinstance->free_res_q);
37045 INIT_LIST_HEAD(&pinstance->used_res_q);
37046@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
37047 /* Schedule worker thread to handle CCN and take care of adding and
37048 * removing devices to OS
37049 */
37050- atomic_set(&pinstance->expose_resources, 1);
37051+ atomic_set_unchecked(&pinstance->expose_resources, 1);
37052 schedule_work(&pinstance->worker_q);
37053 return rc;
37054
37055diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.h linux-2.6.32.45/drivers/scsi/pmcraid.h
37056--- linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
37057+++ linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
37058@@ -690,7 +690,7 @@ struct pmcraid_instance {
37059 atomic_t outstanding_cmds;
37060
37061 /* should add/delete resources to mid-layer now ?*/
37062- atomic_t expose_resources;
37063+ atomic_unchecked_t expose_resources;
37064
37065 /* Tasklet to handle deferred processing */
37066 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
37067@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
37068 struct list_head queue; /* link to "to be exposed" resources */
37069 struct pmcraid_config_table_entry cfg_entry;
37070 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37071- atomic_t read_failures; /* count of failed READ commands */
37072- atomic_t write_failures; /* count of failed WRITE commands */
37073+ atomic_unchecked_t read_failures; /* count of failed READ commands */
37074+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37075
37076 /* To indicate add/delete/modify during CCN */
37077 u8 change_detected;
37078diff -urNp linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h
37079--- linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-03-27 14:31:47.000000000 -0400
37080+++ linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:33:55.000000000 -0400
37081@@ -2089,7 +2089,7 @@ struct isp_operations {
37082
37083 int (*get_flash_version) (struct scsi_qla_host *, void *);
37084 int (*start_scsi) (srb_t *);
37085-};
37086+} __no_const;
37087
37088 /* MSI-X Support *************************************************************/
37089
37090diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h
37091--- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
37092+++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
37093@@ -240,7 +240,7 @@ struct ddb_entry {
37094 atomic_t retry_relogin_timer; /* Min Time between relogins
37095 * (4000 only) */
37096 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
37097- atomic_t relogin_retry_count; /* Num of times relogin has been
37098+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37099 * retried */
37100
37101 uint16_t port;
37102diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c
37103--- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
37104+++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
37105@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
37106 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
37107 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37108 atomic_set(&ddb_entry->relogin_timer, 0);
37109- atomic_set(&ddb_entry->relogin_retry_count, 0);
37110+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37111 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37112 list_add_tail(&ddb_entry->list, &ha->ddb_list);
37113 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
37114@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
37115 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37116 atomic_set(&ddb_entry->port_down_timer,
37117 ha->port_down_retry_count);
37118- atomic_set(&ddb_entry->relogin_retry_count, 0);
37119+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37120 atomic_set(&ddb_entry->relogin_timer, 0);
37121 clear_bit(DF_RELOGIN, &ddb_entry->flags);
37122 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
37123diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c
37124--- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
37125+++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
37126@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
37127 ddb_entry->fw_ddb_device_state ==
37128 DDB_DS_SESSION_FAILED) {
37129 /* Reset retry relogin timer */
37130- atomic_inc(&ddb_entry->relogin_retry_count);
37131+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37132 DEBUG2(printk("scsi%ld: index[%d] relogin"
37133 " timed out-retrying"
37134 " relogin (%d)\n",
37135 ha->host_no,
37136 ddb_entry->fw_ddb_index,
37137- atomic_read(&ddb_entry->
37138+ atomic_read_unchecked(&ddb_entry->
37139 relogin_retry_count))
37140 );
37141 start_dpc++;
37142diff -urNp linux-2.6.32.45/drivers/scsi/scsi.c linux-2.6.32.45/drivers/scsi/scsi.c
37143--- linux-2.6.32.45/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
37144+++ linux-2.6.32.45/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
37145@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
37146 unsigned long timeout;
37147 int rtn = 0;
37148
37149- atomic_inc(&cmd->device->iorequest_cnt);
37150+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37151
37152 /* check if the device is still usable */
37153 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37154diff -urNp linux-2.6.32.45/drivers/scsi/scsi_debug.c linux-2.6.32.45/drivers/scsi/scsi_debug.c
37155--- linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
37156+++ linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
37157@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
37158 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
37159 unsigned char *cmd = (unsigned char *)scp->cmnd;
37160
37161+ pax_track_stack();
37162+
37163 if ((errsts = check_readiness(scp, 1, devip)))
37164 return errsts;
37165 memset(arr, 0, sizeof(arr));
37166@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
37167 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
37168 unsigned char *cmd = (unsigned char *)scp->cmnd;
37169
37170+ pax_track_stack();
37171+
37172 if ((errsts = check_readiness(scp, 1, devip)))
37173 return errsts;
37174 memset(arr, 0, sizeof(arr));
37175diff -urNp linux-2.6.32.45/drivers/scsi/scsi_lib.c linux-2.6.32.45/drivers/scsi/scsi_lib.c
37176--- linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
37177+++ linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
37178@@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
37179
37180 scsi_init_cmd_errh(cmd);
37181 cmd->result = DID_NO_CONNECT << 16;
37182- atomic_inc(&cmd->device->iorequest_cnt);
37183+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37184
37185 /*
37186 * SCSI request completion path will do scsi_device_unbusy(),
37187@@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
37188 */
37189 cmd->serial_number = 0;
37190
37191- atomic_inc(&cmd->device->iodone_cnt);
37192+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
37193 if (cmd->result)
37194- atomic_inc(&cmd->device->ioerr_cnt);
37195+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37196
37197 disposition = scsi_decide_disposition(cmd);
37198 if (disposition != SUCCESS &&
37199diff -urNp linux-2.6.32.45/drivers/scsi/scsi_sysfs.c linux-2.6.32.45/drivers/scsi/scsi_sysfs.c
37200--- linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
37201+++ linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
37202@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
37203 char *buf) \
37204 { \
37205 struct scsi_device *sdev = to_scsi_device(dev); \
37206- unsigned long long count = atomic_read(&sdev->field); \
37207+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
37208 return snprintf(buf, 20, "0x%llx\n", count); \
37209 } \
37210 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37211diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c
37212--- linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
37213+++ linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
37214@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
37215 * Netlink Infrastructure
37216 */
37217
37218-static atomic_t fc_event_seq;
37219+static atomic_unchecked_t fc_event_seq;
37220
37221 /**
37222 * fc_get_event_number - Obtain the next sequential FC event number
37223@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
37224 u32
37225 fc_get_event_number(void)
37226 {
37227- return atomic_add_return(1, &fc_event_seq);
37228+ return atomic_add_return_unchecked(1, &fc_event_seq);
37229 }
37230 EXPORT_SYMBOL(fc_get_event_number);
37231
37232@@ -641,7 +641,7 @@ static __init int fc_transport_init(void
37233 {
37234 int error;
37235
37236- atomic_set(&fc_event_seq, 0);
37237+ atomic_set_unchecked(&fc_event_seq, 0);
37238
37239 error = transport_class_register(&fc_host_class);
37240 if (error)
37241diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c
37242--- linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
37243+++ linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
37244@@ -81,7 +81,7 @@ struct iscsi_internal {
37245 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
37246 };
37247
37248-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37249+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37250 static struct workqueue_struct *iscsi_eh_timer_workq;
37251
37252 /*
37253@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
37254 int err;
37255
37256 ihost = shost->shost_data;
37257- session->sid = atomic_add_return(1, &iscsi_session_nr);
37258+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37259
37260 if (id == ISCSI_MAX_TARGET) {
37261 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
37262@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
37263 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37264 ISCSI_TRANSPORT_VERSION);
37265
37266- atomic_set(&iscsi_session_nr, 0);
37267+ atomic_set_unchecked(&iscsi_session_nr, 0);
37268
37269 err = class_register(&iscsi_transport_class);
37270 if (err)
37271diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c
37272--- linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
37273+++ linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
37274@@ -33,7 +33,7 @@
37275 #include "scsi_transport_srp_internal.h"
37276
37277 struct srp_host_attrs {
37278- atomic_t next_port_id;
37279+ atomic_unchecked_t next_port_id;
37280 };
37281 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37282
37283@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
37284 struct Scsi_Host *shost = dev_to_shost(dev);
37285 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37286
37287- atomic_set(&srp_host->next_port_id, 0);
37288+ atomic_set_unchecked(&srp_host->next_port_id, 0);
37289 return 0;
37290 }
37291
37292@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
37293 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37294 rport->roles = ids->roles;
37295
37296- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37297+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37298 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37299
37300 transport_setup_device(&rport->dev);
37301diff -urNp linux-2.6.32.45/drivers/scsi/sg.c linux-2.6.32.45/drivers/scsi/sg.c
37302--- linux-2.6.32.45/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
37303+++ linux-2.6.32.45/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
37304@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
37305 const struct file_operations * fops;
37306 };
37307
37308-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37309+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37310 {"allow_dio", &adio_fops},
37311 {"debug", &debug_fops},
37312 {"def_reserved_size", &dressz_fops},
37313@@ -2307,7 +2307,7 @@ sg_proc_init(void)
37314 {
37315 int k, mask;
37316 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
37317- struct sg_proc_leaf * leaf;
37318+ const struct sg_proc_leaf * leaf;
37319
37320 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
37321 if (!sg_proc_sgp)
37322diff -urNp linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c
37323--- linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
37324+++ linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
37325@@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
37326 int do_iounmap = 0;
37327 int do_disable_device = 1;
37328
37329+ pax_track_stack();
37330+
37331 memset(&sym_dev, 0, sizeof(sym_dev));
37332 memset(&nvram, 0, sizeof(nvram));
37333 sym_dev.pdev = pdev;
37334diff -urNp linux-2.6.32.45/drivers/serial/kgdboc.c linux-2.6.32.45/drivers/serial/kgdboc.c
37335--- linux-2.6.32.45/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
37336+++ linux-2.6.32.45/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
37337@@ -18,7 +18,7 @@
37338
37339 #define MAX_CONFIG_LEN 40
37340
37341-static struct kgdb_io kgdboc_io_ops;
37342+static const struct kgdb_io kgdboc_io_ops;
37343
37344 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37345 static int configured = -1;
37346@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
37347 module_put(THIS_MODULE);
37348 }
37349
37350-static struct kgdb_io kgdboc_io_ops = {
37351+static const struct kgdb_io kgdboc_io_ops = {
37352 .name = "kgdboc",
37353 .read_char = kgdboc_get_char,
37354 .write_char = kgdboc_put_char,
37355diff -urNp linux-2.6.32.45/drivers/spi/spi.c linux-2.6.32.45/drivers/spi/spi.c
37356--- linux-2.6.32.45/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
37357+++ linux-2.6.32.45/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
37358@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
37359 EXPORT_SYMBOL_GPL(spi_sync);
37360
37361 /* portable code must never pass more than 32 bytes */
37362-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37363+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
37364
37365 static u8 *buf;
37366
37367diff -urNp linux-2.6.32.45/drivers/ssb/driver_gige.c linux-2.6.32.45/drivers/ssb/driver_gige.c
37368--- linux-2.6.32.45/drivers/ssb/driver_gige.c 2011-03-27 14:31:47.000000000 -0400
37369+++ linux-2.6.32.45/drivers/ssb/driver_gige.c 2011-08-05 20:33:55.000000000 -0400
37370@@ -180,8 +180,8 @@ static int ssb_gige_probe(struct ssb_dev
37371 dev->pci_controller.io_resource = &dev->io_resource;
37372 dev->pci_controller.mem_resource = &dev->mem_resource;
37373 dev->pci_controller.io_map_base = 0x800;
37374- dev->pci_ops.read = ssb_gige_pci_read_config;
37375- dev->pci_ops.write = ssb_gige_pci_write_config;
37376+ *(void **)&dev->pci_ops.read = ssb_gige_pci_read_config;
37377+ *(void **)&dev->pci_ops.write = ssb_gige_pci_write_config;
37378
37379 dev->io_resource.name = SSB_GIGE_IO_RES_NAME;
37380 dev->io_resource.start = 0x800;
37381diff -urNp linux-2.6.32.45/drivers/staging/android/binder.c linux-2.6.32.45/drivers/staging/android/binder.c
37382--- linux-2.6.32.45/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
37383+++ linux-2.6.32.45/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
37384@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
37385 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
37386 }
37387
37388-static struct vm_operations_struct binder_vm_ops = {
37389+static const struct vm_operations_struct binder_vm_ops = {
37390 .open = binder_vma_open,
37391 .close = binder_vma_close,
37392 };
37393diff -urNp linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c
37394--- linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
37395+++ linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
37396@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
37397 return VM_FAULT_NOPAGE;
37398 }
37399
37400-static struct vm_operations_struct b3dfg_vm_ops = {
37401+static const struct vm_operations_struct b3dfg_vm_ops = {
37402 .fault = b3dfg_vma_fault,
37403 };
37404
37405@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
37406 return r;
37407 }
37408
37409-static struct file_operations b3dfg_fops = {
37410+static const struct file_operations b3dfg_fops = {
37411 .owner = THIS_MODULE,
37412 .open = b3dfg_open,
37413 .release = b3dfg_release,
37414diff -urNp linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c
37415--- linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:35:29.000000000 -0400
37416+++ linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:34:00.000000000 -0400
37417@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
37418 mutex_unlock(&dev->mutex);
37419 }
37420
37421-static struct vm_operations_struct comedi_vm_ops = {
37422+static const struct vm_operations_struct comedi_vm_ops = {
37423 .close = comedi_unmap,
37424 };
37425
37426diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c
37427--- linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
37428+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
37429@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
37430 static dev_t adsp_devno;
37431 static struct class *adsp_class;
37432
37433-static struct file_operations adsp_fops = {
37434+static const struct file_operations adsp_fops = {
37435 .owner = THIS_MODULE,
37436 .open = adsp_open,
37437 .unlocked_ioctl = adsp_ioctl,
37438diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c
37439--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
37440+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
37441@@ -1022,7 +1022,7 @@ done:
37442 return rc;
37443 }
37444
37445-static struct file_operations audio_aac_fops = {
37446+static const struct file_operations audio_aac_fops = {
37447 .owner = THIS_MODULE,
37448 .open = audio_open,
37449 .release = audio_release,
37450diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c
37451--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
37452+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
37453@@ -833,7 +833,7 @@ done:
37454 return rc;
37455 }
37456
37457-static struct file_operations audio_amrnb_fops = {
37458+static const struct file_operations audio_amrnb_fops = {
37459 .owner = THIS_MODULE,
37460 .open = audamrnb_open,
37461 .release = audamrnb_release,
37462diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c
37463--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
37464+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
37465@@ -805,7 +805,7 @@ dma_fail:
37466 return rc;
37467 }
37468
37469-static struct file_operations audio_evrc_fops = {
37470+static const struct file_operations audio_evrc_fops = {
37471 .owner = THIS_MODULE,
37472 .open = audevrc_open,
37473 .release = audevrc_release,
37474diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c
37475--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
37476+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
37477@@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
37478 return 0;
37479 }
37480
37481-static struct file_operations audio_fops = {
37482+static const struct file_operations audio_fops = {
37483 .owner = THIS_MODULE,
37484 .open = audio_in_open,
37485 .release = audio_in_release,
37486@@ -922,7 +922,7 @@ static struct file_operations audio_fops
37487 .unlocked_ioctl = audio_in_ioctl,
37488 };
37489
37490-static struct file_operations audpre_fops = {
37491+static const struct file_operations audpre_fops = {
37492 .owner = THIS_MODULE,
37493 .open = audpre_open,
37494 .unlocked_ioctl = audpre_ioctl,
37495diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c
37496--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
37497+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
37498@@ -941,7 +941,7 @@ done:
37499 return rc;
37500 }
37501
37502-static struct file_operations audio_mp3_fops = {
37503+static const struct file_operations audio_mp3_fops = {
37504 .owner = THIS_MODULE,
37505 .open = audio_open,
37506 .release = audio_release,
37507diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c
37508--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
37509+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
37510@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
37511 return 0;
37512 }
37513
37514-static struct file_operations audio_fops = {
37515+static const struct file_operations audio_fops = {
37516 .owner = THIS_MODULE,
37517 .open = audio_open,
37518 .release = audio_release,
37519@@ -819,7 +819,7 @@ static struct file_operations audio_fops
37520 .unlocked_ioctl = audio_ioctl,
37521 };
37522
37523-static struct file_operations audpp_fops = {
37524+static const struct file_operations audpp_fops = {
37525 .owner = THIS_MODULE,
37526 .open = audpp_open,
37527 .unlocked_ioctl = audpp_ioctl,
37528diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c
37529--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
37530+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
37531@@ -816,7 +816,7 @@ err:
37532 return rc;
37533 }
37534
37535-static struct file_operations audio_qcelp_fops = {
37536+static const struct file_operations audio_qcelp_fops = {
37537 .owner = THIS_MODULE,
37538 .open = audqcelp_open,
37539 .release = audqcelp_release,
37540diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c
37541--- linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
37542+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
37543@@ -242,7 +242,7 @@ err:
37544 return rc;
37545 }
37546
37547-static struct file_operations snd_fops = {
37548+static const struct file_operations snd_fops = {
37549 .owner = THIS_MODULE,
37550 .open = snd_open,
37551 .release = snd_release,
37552diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c
37553--- linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
37554+++ linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
37555@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
37556 return 0;
37557 }
37558
37559-static struct file_operations qmi_fops = {
37560+static const struct file_operations qmi_fops = {
37561 .owner = THIS_MODULE,
37562 .read = qmi_read,
37563 .write = qmi_write,
37564diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c
37565--- linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
37566+++ linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
37567@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
37568 return rc;
37569 }
37570
37571-static struct file_operations rpcrouter_server_fops = {
37572+static const struct file_operations rpcrouter_server_fops = {
37573 .owner = THIS_MODULE,
37574 .open = rpcrouter_open,
37575 .release = rpcrouter_release,
37576@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
37577 .unlocked_ioctl = rpcrouter_ioctl,
37578 };
37579
37580-static struct file_operations rpcrouter_router_fops = {
37581+static const struct file_operations rpcrouter_router_fops = {
37582 .owner = THIS_MODULE,
37583 .open = rpcrouter_open,
37584 .release = rpcrouter_release,
37585diff -urNp linux-2.6.32.45/drivers/staging/dst/dcore.c linux-2.6.32.45/drivers/staging/dst/dcore.c
37586--- linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
37587+++ linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
37588@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
37589 return 0;
37590 }
37591
37592-static struct block_device_operations dst_blk_ops = {
37593+static const struct block_device_operations dst_blk_ops = {
37594 .open = dst_bdev_open,
37595 .release = dst_bdev_release,
37596 .owner = THIS_MODULE,
37597@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
37598 n->size = ctl->size;
37599
37600 atomic_set(&n->refcnt, 1);
37601- atomic_long_set(&n->gen, 0);
37602+ atomic_long_set_unchecked(&n->gen, 0);
37603 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
37604
37605 err = dst_node_sysfs_init(n);
37606diff -urNp linux-2.6.32.45/drivers/staging/dst/trans.c linux-2.6.32.45/drivers/staging/dst/trans.c
37607--- linux-2.6.32.45/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
37608+++ linux-2.6.32.45/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
37609@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
37610 t->error = 0;
37611 t->retries = 0;
37612 atomic_set(&t->refcnt, 1);
37613- t->gen = atomic_long_inc_return(&n->gen);
37614+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
37615
37616 t->enc = bio_data_dir(bio);
37617 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
37618diff -urNp linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c
37619--- linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
37620+++ linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
37621@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
37622 struct net_device_stats *stats = &etdev->net_stats;
37623
37624 if (pMpTcb->Flags & fMP_DEST_BROAD)
37625- atomic_inc(&etdev->Stats.brdcstxmt);
37626+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
37627 else if (pMpTcb->Flags & fMP_DEST_MULTI)
37628- atomic_inc(&etdev->Stats.multixmt);
37629+ atomic_inc_unchecked(&etdev->Stats.multixmt);
37630 else
37631- atomic_inc(&etdev->Stats.unixmt);
37632+ atomic_inc_unchecked(&etdev->Stats.unixmt);
37633
37634 if (pMpTcb->Packet) {
37635 stats->tx_bytes += pMpTcb->Packet->len;
37636diff -urNp linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h
37637--- linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
37638+++ linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
37639@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
37640 * operations
37641 */
37642 u32 unircv; /* # multicast packets received */
37643- atomic_t unixmt; /* # multicast packets for Tx */
37644+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
37645 u32 multircv; /* # multicast packets received */
37646- atomic_t multixmt; /* # multicast packets for Tx */
37647+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
37648 u32 brdcstrcv; /* # broadcast packets received */
37649- atomic_t brdcstxmt; /* # broadcast packets for Tx */
37650+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
37651 u32 norcvbuf; /* # Rx packets discarded */
37652 u32 noxmtbuf; /* # Tx packets discarded */
37653
37654diff -urNp linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c
37655--- linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
37656+++ linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
37657@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
37658 return 0;
37659 }
37660
37661-static struct vm_operations_struct go7007_vm_ops = {
37662+static const struct vm_operations_struct go7007_vm_ops = {
37663 .open = go7007_vm_open,
37664 .close = go7007_vm_close,
37665 .fault = go7007_vm_fault,
37666diff -urNp linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c
37667--- linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
37668+++ linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
37669@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
37670 /* The one and only one */
37671 static struct blkvsc_driver_context g_blkvsc_drv;
37672
37673-static struct block_device_operations block_ops = {
37674+static const struct block_device_operations block_ops = {
37675 .owner = THIS_MODULE,
37676 .open = blkvsc_open,
37677 .release = blkvsc_release,
37678diff -urNp linux-2.6.32.45/drivers/staging/hv/Channel.c linux-2.6.32.45/drivers/staging/hv/Channel.c
37679--- linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
37680+++ linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
37681@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
37682
37683 DPRINT_ENTER(VMBUS);
37684
37685- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
37686- atomic_inc(&gVmbusConnection.NextGpadlHandle);
37687+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
37688+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
37689
37690 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
37691 ASSERT(msgInfo != NULL);
37692diff -urNp linux-2.6.32.45/drivers/staging/hv/Hv.c linux-2.6.32.45/drivers/staging/hv/Hv.c
37693--- linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
37694+++ linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
37695@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
37696 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
37697 u32 outputAddressHi = outputAddress >> 32;
37698 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
37699- volatile void *hypercallPage = gHvContext.HypercallPage;
37700+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
37701
37702 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
37703 Control, Input, Output);
37704diff -urNp linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c
37705--- linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
37706+++ linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
37707@@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
37708 to_device_context(root_device_obj);
37709 struct device_context *child_device_ctx =
37710 to_device_context(child_device_obj);
37711- static atomic_t device_num = ATOMIC_INIT(0);
37712+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
37713
37714 DPRINT_ENTER(VMBUS_DRV);
37715
37716@@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
37717
37718 /* Set the device name. Otherwise, device_register() will fail. */
37719 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
37720- atomic_inc_return(&device_num));
37721+ atomic_inc_return_unchecked(&device_num));
37722
37723 /* The new device belongs to this bus */
37724 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
37725diff -urNp linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h
37726--- linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
37727+++ linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
37728@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
37729 struct VMBUS_CONNECTION {
37730 enum VMBUS_CONNECT_STATE ConnectState;
37731
37732- atomic_t NextGpadlHandle;
37733+ atomic_unchecked_t NextGpadlHandle;
37734
37735 /*
37736 * Represents channel interrupts. Each bit position represents a
37737diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet.c linux-2.6.32.45/drivers/staging/octeon/ethernet.c
37738--- linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
37739+++ linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
37740@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
37741 * since the RX tasklet also increments it.
37742 */
37743 #ifdef CONFIG_64BIT
37744- atomic64_add(rx_status.dropped_packets,
37745- (atomic64_t *)&priv->stats.rx_dropped);
37746+ atomic64_add_unchecked(rx_status.dropped_packets,
37747+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37748 #else
37749- atomic_add(rx_status.dropped_packets,
37750- (atomic_t *)&priv->stats.rx_dropped);
37751+ atomic_add_unchecked(rx_status.dropped_packets,
37752+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
37753 #endif
37754 }
37755
37756diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c
37757--- linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
37758+++ linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
37759@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
37760 /* Increment RX stats for virtual ports */
37761 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37762 #ifdef CONFIG_64BIT
37763- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37764- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37765+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37766+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37767 #else
37768- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37769- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37770+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37771+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37772 #endif
37773 }
37774 netif_receive_skb(skb);
37775@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
37776 dev->name);
37777 */
37778 #ifdef CONFIG_64BIT
37779- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37780+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
37781 #else
37782- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37783+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
37784 #endif
37785 dev_kfree_skb_irq(skb);
37786 }
37787diff -urNp linux-2.6.32.45/drivers/staging/panel/panel.c linux-2.6.32.45/drivers/staging/panel/panel.c
37788--- linux-2.6.32.45/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
37789+++ linux-2.6.32.45/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
37790@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
37791 return 0;
37792 }
37793
37794-static struct file_operations lcd_fops = {
37795+static const struct file_operations lcd_fops = {
37796 .write = lcd_write,
37797 .open = lcd_open,
37798 .release = lcd_release,
37799@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
37800 return 0;
37801 }
37802
37803-static struct file_operations keypad_fops = {
37804+static const struct file_operations keypad_fops = {
37805 .read = keypad_read, /* read */
37806 .open = keypad_open, /* open */
37807 .release = keypad_release, /* close */
37808diff -urNp linux-2.6.32.45/drivers/staging/phison/phison.c linux-2.6.32.45/drivers/staging/phison/phison.c
37809--- linux-2.6.32.45/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
37810+++ linux-2.6.32.45/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
37811@@ -43,7 +43,7 @@ static struct scsi_host_template phison_
37812 ATA_BMDMA_SHT(DRV_NAME),
37813 };
37814
37815-static struct ata_port_operations phison_ops = {
37816+static const struct ata_port_operations phison_ops = {
37817 .inherits = &ata_bmdma_port_ops,
37818 .prereset = phison_pre_reset,
37819 };
37820diff -urNp linux-2.6.32.45/drivers/staging/poch/poch.c linux-2.6.32.45/drivers/staging/poch/poch.c
37821--- linux-2.6.32.45/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
37822+++ linux-2.6.32.45/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
37823@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
37824 return 0;
37825 }
37826
37827-static struct file_operations poch_fops = {
37828+static const struct file_operations poch_fops = {
37829 .owner = THIS_MODULE,
37830 .open = poch_open,
37831 .release = poch_release,
37832diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/inode.c linux-2.6.32.45/drivers/staging/pohmelfs/inode.c
37833--- linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37834+++ linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
37835@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
37836 mutex_init(&psb->mcache_lock);
37837 psb->mcache_root = RB_ROOT;
37838 psb->mcache_timeout = msecs_to_jiffies(5000);
37839- atomic_long_set(&psb->mcache_gen, 0);
37840+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
37841
37842 psb->trans_max_pages = 100;
37843
37844@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
37845 INIT_LIST_HEAD(&psb->crypto_ready_list);
37846 INIT_LIST_HEAD(&psb->crypto_active_list);
37847
37848- atomic_set(&psb->trans_gen, 1);
37849+ atomic_set_unchecked(&psb->trans_gen, 1);
37850 atomic_long_set(&psb->total_inodes, 0);
37851
37852 mutex_init(&psb->state_lock);
37853diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c
37854--- linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
37855+++ linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
37856@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
37857 m->data = data;
37858 m->start = start;
37859 m->size = size;
37860- m->gen = atomic_long_inc_return(&psb->mcache_gen);
37861+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
37862
37863 mutex_lock(&psb->mcache_lock);
37864 err = pohmelfs_mcache_insert(psb, m);
37865diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h
37866--- linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
37867+++ linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
37868@@ -570,14 +570,14 @@ struct pohmelfs_config;
37869 struct pohmelfs_sb {
37870 struct rb_root mcache_root;
37871 struct mutex mcache_lock;
37872- atomic_long_t mcache_gen;
37873+ atomic_long_unchecked_t mcache_gen;
37874 unsigned long mcache_timeout;
37875
37876 unsigned int idx;
37877
37878 unsigned int trans_retries;
37879
37880- atomic_t trans_gen;
37881+ atomic_unchecked_t trans_gen;
37882
37883 unsigned int crypto_attached_size;
37884 unsigned int crypto_align_size;
37885diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/trans.c linux-2.6.32.45/drivers/staging/pohmelfs/trans.c
37886--- linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
37887+++ linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
37888@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
37889 int err;
37890 struct netfs_cmd *cmd = t->iovec.iov_base;
37891
37892- t->gen = atomic_inc_return(&psb->trans_gen);
37893+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
37894
37895 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
37896 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
37897diff -urNp linux-2.6.32.45/drivers/staging/sep/sep_driver.c linux-2.6.32.45/drivers/staging/sep/sep_driver.c
37898--- linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
37899+++ linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
37900@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
37901 static dev_t sep_devno;
37902
37903 /* the files operations structure of the driver */
37904-static struct file_operations sep_file_operations = {
37905+static const struct file_operations sep_file_operations = {
37906 .owner = THIS_MODULE,
37907 .ioctl = sep_ioctl,
37908 .poll = sep_poll,
37909diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci.h linux-2.6.32.45/drivers/staging/usbip/vhci.h
37910--- linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
37911+++ linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
37912@@ -92,7 +92,7 @@ struct vhci_hcd {
37913 unsigned resuming:1;
37914 unsigned long re_timeout;
37915
37916- atomic_t seqnum;
37917+ atomic_unchecked_t seqnum;
37918
37919 /*
37920 * NOTE:
37921diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c
37922--- linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
37923+++ linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
37924@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
37925 return;
37926 }
37927
37928- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37929+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37930 if (priv->seqnum == 0xffff)
37931 usbip_uinfo("seqnum max\n");
37932
37933@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
37934 return -ENOMEM;
37935 }
37936
37937- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37938+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37939 if (unlink->seqnum == 0xffff)
37940 usbip_uinfo("seqnum max\n");
37941
37942@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
37943 vdev->rhport = rhport;
37944 }
37945
37946- atomic_set(&vhci->seqnum, 0);
37947+ atomic_set_unchecked(&vhci->seqnum, 0);
37948 spin_lock_init(&vhci->lock);
37949
37950
37951diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c
37952--- linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
37953+++ linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
37954@@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
37955 usbip_uerr("cannot find a urb of seqnum %u\n",
37956 pdu->base.seqnum);
37957 usbip_uinfo("max seqnum %d\n",
37958- atomic_read(&the_controller->seqnum));
37959+ atomic_read_unchecked(&the_controller->seqnum));
37960 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37961 return;
37962 }
37963diff -urNp linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c
37964--- linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
37965+++ linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
37966@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
37967 static int __init vme_user_probe(struct device *, int, int);
37968 static int __exit vme_user_remove(struct device *, int, int);
37969
37970-static struct file_operations vme_user_fops = {
37971+static const struct file_operations vme_user_fops = {
37972 .open = vme_user_open,
37973 .release = vme_user_release,
37974 .read = vme_user_read,
37975diff -urNp linux-2.6.32.45/drivers/telephony/ixj.c linux-2.6.32.45/drivers/telephony/ixj.c
37976--- linux-2.6.32.45/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
37977+++ linux-2.6.32.45/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
37978@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
37979 bool mContinue;
37980 char *pIn, *pOut;
37981
37982+ pax_track_stack();
37983+
37984 if (!SCI_Prepare(j))
37985 return 0;
37986
37987diff -urNp linux-2.6.32.45/drivers/uio/uio.c linux-2.6.32.45/drivers/uio/uio.c
37988--- linux-2.6.32.45/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
37989+++ linux-2.6.32.45/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
37990@@ -23,6 +23,7 @@
37991 #include <linux/string.h>
37992 #include <linux/kobject.h>
37993 #include <linux/uio_driver.h>
37994+#include <asm/local.h>
37995
37996 #define UIO_MAX_DEVICES 255
37997
37998@@ -30,10 +31,10 @@ struct uio_device {
37999 struct module *owner;
38000 struct device *dev;
38001 int minor;
38002- atomic_t event;
38003+ atomic_unchecked_t event;
38004 struct fasync_struct *async_queue;
38005 wait_queue_head_t wait;
38006- int vma_count;
38007+ local_t vma_count;
38008 struct uio_info *info;
38009 struct kobject *map_dir;
38010 struct kobject *portio_dir;
38011@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
38012 return entry->show(mem, buf);
38013 }
38014
38015-static struct sysfs_ops map_sysfs_ops = {
38016+static const struct sysfs_ops map_sysfs_ops = {
38017 .show = map_type_show,
38018 };
38019
38020@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
38021 return entry->show(port, buf);
38022 }
38023
38024-static struct sysfs_ops portio_sysfs_ops = {
38025+static const struct sysfs_ops portio_sysfs_ops = {
38026 .show = portio_type_show,
38027 };
38028
38029@@ -255,7 +256,7 @@ static ssize_t show_event(struct device
38030 struct uio_device *idev = dev_get_drvdata(dev);
38031 if (idev)
38032 return sprintf(buf, "%u\n",
38033- (unsigned int)atomic_read(&idev->event));
38034+ (unsigned int)atomic_read_unchecked(&idev->event));
38035 else
38036 return -ENODEV;
38037 }
38038@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
38039 {
38040 struct uio_device *idev = info->uio_dev;
38041
38042- atomic_inc(&idev->event);
38043+ atomic_inc_unchecked(&idev->event);
38044 wake_up_interruptible(&idev->wait);
38045 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38046 }
38047@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
38048 }
38049
38050 listener->dev = idev;
38051- listener->event_count = atomic_read(&idev->event);
38052+ listener->event_count = atomic_read_unchecked(&idev->event);
38053 filep->private_data = listener;
38054
38055 if (idev->info->open) {
38056@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
38057 return -EIO;
38058
38059 poll_wait(filep, &idev->wait, wait);
38060- if (listener->event_count != atomic_read(&idev->event))
38061+ if (listener->event_count != atomic_read_unchecked(&idev->event))
38062 return POLLIN | POLLRDNORM;
38063 return 0;
38064 }
38065@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
38066 do {
38067 set_current_state(TASK_INTERRUPTIBLE);
38068
38069- event_count = atomic_read(&idev->event);
38070+ event_count = atomic_read_unchecked(&idev->event);
38071 if (event_count != listener->event_count) {
38072 if (copy_to_user(buf, &event_count, count))
38073 retval = -EFAULT;
38074@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
38075 static void uio_vma_open(struct vm_area_struct *vma)
38076 {
38077 struct uio_device *idev = vma->vm_private_data;
38078- idev->vma_count++;
38079+ local_inc(&idev->vma_count);
38080 }
38081
38082 static void uio_vma_close(struct vm_area_struct *vma)
38083 {
38084 struct uio_device *idev = vma->vm_private_data;
38085- idev->vma_count--;
38086+ local_dec(&idev->vma_count);
38087 }
38088
38089 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38090@@ -840,7 +841,7 @@ int __uio_register_device(struct module
38091 idev->owner = owner;
38092 idev->info = info;
38093 init_waitqueue_head(&idev->wait);
38094- atomic_set(&idev->event, 0);
38095+ atomic_set_unchecked(&idev->event, 0);
38096
38097 ret = uio_get_minor(idev);
38098 if (ret)
38099diff -urNp linux-2.6.32.45/drivers/usb/atm/usbatm.c linux-2.6.32.45/drivers/usb/atm/usbatm.c
38100--- linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
38101+++ linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
38102@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
38103 if (printk_ratelimit())
38104 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38105 __func__, vpi, vci);
38106- atomic_inc(&vcc->stats->rx_err);
38107+ atomic_inc_unchecked(&vcc->stats->rx_err);
38108 return;
38109 }
38110
38111@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
38112 if (length > ATM_MAX_AAL5_PDU) {
38113 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38114 __func__, length, vcc);
38115- atomic_inc(&vcc->stats->rx_err);
38116+ atomic_inc_unchecked(&vcc->stats->rx_err);
38117 goto out;
38118 }
38119
38120@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
38121 if (sarb->len < pdu_length) {
38122 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38123 __func__, pdu_length, sarb->len, vcc);
38124- atomic_inc(&vcc->stats->rx_err);
38125+ atomic_inc_unchecked(&vcc->stats->rx_err);
38126 goto out;
38127 }
38128
38129 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38130 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38131 __func__, vcc);
38132- atomic_inc(&vcc->stats->rx_err);
38133+ atomic_inc_unchecked(&vcc->stats->rx_err);
38134 goto out;
38135 }
38136
38137@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
38138 if (printk_ratelimit())
38139 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38140 __func__, length);
38141- atomic_inc(&vcc->stats->rx_drop);
38142+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38143 goto out;
38144 }
38145
38146@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
38147
38148 vcc->push(vcc, skb);
38149
38150- atomic_inc(&vcc->stats->rx);
38151+ atomic_inc_unchecked(&vcc->stats->rx);
38152 out:
38153 skb_trim(sarb, 0);
38154 }
38155@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
38156 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38157
38158 usbatm_pop(vcc, skb);
38159- atomic_inc(&vcc->stats->tx);
38160+ atomic_inc_unchecked(&vcc->stats->tx);
38161
38162 skb = skb_dequeue(&instance->sndqueue);
38163 }
38164@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
38165 if (!left--)
38166 return sprintf(page,
38167 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38168- atomic_read(&atm_dev->stats.aal5.tx),
38169- atomic_read(&atm_dev->stats.aal5.tx_err),
38170- atomic_read(&atm_dev->stats.aal5.rx),
38171- atomic_read(&atm_dev->stats.aal5.rx_err),
38172- atomic_read(&atm_dev->stats.aal5.rx_drop));
38173+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38174+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38175+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38176+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38177+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38178
38179 if (!left--) {
38180 if (instance->disconnected)
38181diff -urNp linux-2.6.32.45/drivers/usb/class/cdc-wdm.c linux-2.6.32.45/drivers/usb/class/cdc-wdm.c
38182--- linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
38183+++ linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
38184@@ -314,7 +314,7 @@ static ssize_t wdm_write
38185 if (r < 0)
38186 goto outnp;
38187
38188- if (!file->f_flags && O_NONBLOCK)
38189+ if (!(file->f_flags & O_NONBLOCK))
38190 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
38191 &desc->flags));
38192 else
38193diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.c linux-2.6.32.45/drivers/usb/core/hcd.c
38194--- linux-2.6.32.45/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
38195+++ linux-2.6.32.45/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
38196@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
38197
38198 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38199
38200-struct usb_mon_operations *mon_ops;
38201+const struct usb_mon_operations *mon_ops;
38202
38203 /*
38204 * The registration is unlocked.
38205@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
38206 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
38207 */
38208
38209-int usb_mon_register (struct usb_mon_operations *ops)
38210+int usb_mon_register (const struct usb_mon_operations *ops)
38211 {
38212
38213 if (mon_ops)
38214diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.h linux-2.6.32.45/drivers/usb/core/hcd.h
38215--- linux-2.6.32.45/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
38216+++ linux-2.6.32.45/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
38217@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
38218 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38219
38220 struct usb_mon_operations {
38221- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
38222- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38223- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38224+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
38225+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38226+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38227 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
38228 };
38229
38230-extern struct usb_mon_operations *mon_ops;
38231+extern const struct usb_mon_operations *mon_ops;
38232
38233 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
38234 {
38235@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
38236 (*mon_ops->urb_complete)(bus, urb, status);
38237 }
38238
38239-int usb_mon_register(struct usb_mon_operations *ops);
38240+int usb_mon_register(const struct usb_mon_operations *ops);
38241 void usb_mon_deregister(void);
38242
38243 #else
38244diff -urNp linux-2.6.32.45/drivers/usb/core/message.c linux-2.6.32.45/drivers/usb/core/message.c
38245--- linux-2.6.32.45/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
38246+++ linux-2.6.32.45/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
38247@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
38248 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
38249 if (buf) {
38250 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
38251- if (len > 0) {
38252- smallbuf = kmalloc(++len, GFP_NOIO);
38253+ if (len++ > 0) {
38254+ smallbuf = kmalloc(len, GFP_NOIO);
38255 if (!smallbuf)
38256 return buf;
38257 memcpy(smallbuf, buf, len);
38258diff -urNp linux-2.6.32.45/drivers/usb/misc/appledisplay.c linux-2.6.32.45/drivers/usb/misc/appledisplay.c
38259--- linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
38260+++ linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
38261@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
38262 return pdata->msgdata[1];
38263 }
38264
38265-static struct backlight_ops appledisplay_bl_data = {
38266+static const struct backlight_ops appledisplay_bl_data = {
38267 .get_brightness = appledisplay_bl_get_brightness,
38268 .update_status = appledisplay_bl_update_status,
38269 };
38270diff -urNp linux-2.6.32.45/drivers/usb/mon/mon_main.c linux-2.6.32.45/drivers/usb/mon/mon_main.c
38271--- linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
38272+++ linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
38273@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
38274 /*
38275 * Ops
38276 */
38277-static struct usb_mon_operations mon_ops_0 = {
38278+static const struct usb_mon_operations mon_ops_0 = {
38279 .urb_submit = mon_submit,
38280 .urb_submit_error = mon_submit_error,
38281 .urb_complete = mon_complete,
38282diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h
38283--- linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
38284+++ linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
38285@@ -192,7 +192,7 @@ struct wahc {
38286 struct list_head xfer_delayed_list;
38287 spinlock_t xfer_list_lock;
38288 struct work_struct xfer_work;
38289- atomic_t xfer_id_count;
38290+ atomic_unchecked_t xfer_id_count;
38291 };
38292
38293
38294@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
38295 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38296 spin_lock_init(&wa->xfer_list_lock);
38297 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38298- atomic_set(&wa->xfer_id_count, 1);
38299+ atomic_set_unchecked(&wa->xfer_id_count, 1);
38300 }
38301
38302 /**
38303diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c
38304--- linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
38305+++ linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
38306@@ -293,7 +293,7 @@ out:
38307 */
38308 static void wa_xfer_id_init(struct wa_xfer *xfer)
38309 {
38310- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38311+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38312 }
38313
38314 /*
38315diff -urNp linux-2.6.32.45/drivers/uwb/wlp/messages.c linux-2.6.32.45/drivers/uwb/wlp/messages.c
38316--- linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
38317+++ linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
38318@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
38319 size_t len = skb->len;
38320 size_t used;
38321 ssize_t result;
38322- struct wlp_nonce enonce, rnonce;
38323+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
38324 enum wlp_assc_error assc_err;
38325 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
38326 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
38327diff -urNp linux-2.6.32.45/drivers/uwb/wlp/sysfs.c linux-2.6.32.45/drivers/uwb/wlp/sysfs.c
38328--- linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
38329+++ linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
38330@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
38331 return ret;
38332 }
38333
38334-static
38335-struct sysfs_ops wss_sysfs_ops = {
38336+static const struct sysfs_ops wss_sysfs_ops = {
38337 .show = wlp_wss_attr_show,
38338 .store = wlp_wss_attr_store,
38339 };
38340diff -urNp linux-2.6.32.45/drivers/video/atmel_lcdfb.c linux-2.6.32.45/drivers/video/atmel_lcdfb.c
38341--- linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
38342+++ linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
38343@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
38344 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
38345 }
38346
38347-static struct backlight_ops atmel_lcdc_bl_ops = {
38348+static const struct backlight_ops atmel_lcdc_bl_ops = {
38349 .update_status = atmel_bl_update_status,
38350 .get_brightness = atmel_bl_get_brightness,
38351 };
38352diff -urNp linux-2.6.32.45/drivers/video/aty/aty128fb.c linux-2.6.32.45/drivers/video/aty/aty128fb.c
38353--- linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
38354+++ linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
38355@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
38356 return bd->props.brightness;
38357 }
38358
38359-static struct backlight_ops aty128_bl_data = {
38360+static const struct backlight_ops aty128_bl_data = {
38361 .get_brightness = aty128_bl_get_brightness,
38362 .update_status = aty128_bl_update_status,
38363 };
38364diff -urNp linux-2.6.32.45/drivers/video/aty/atyfb_base.c linux-2.6.32.45/drivers/video/aty/atyfb_base.c
38365--- linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
38366+++ linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
38367@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
38368 return bd->props.brightness;
38369 }
38370
38371-static struct backlight_ops aty_bl_data = {
38372+static const struct backlight_ops aty_bl_data = {
38373 .get_brightness = aty_bl_get_brightness,
38374 .update_status = aty_bl_update_status,
38375 };
38376diff -urNp linux-2.6.32.45/drivers/video/aty/radeon_backlight.c linux-2.6.32.45/drivers/video/aty/radeon_backlight.c
38377--- linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
38378+++ linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
38379@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
38380 return bd->props.brightness;
38381 }
38382
38383-static struct backlight_ops radeon_bl_data = {
38384+static const struct backlight_ops radeon_bl_data = {
38385 .get_brightness = radeon_bl_get_brightness,
38386 .update_status = radeon_bl_update_status,
38387 };
38388diff -urNp linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c
38389--- linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
38390+++ linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
38391@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
38392 return error ? data->current_brightness : reg_val;
38393 }
38394
38395-static struct backlight_ops adp5520_bl_ops = {
38396+static const struct backlight_ops adp5520_bl_ops = {
38397 .update_status = adp5520_bl_update_status,
38398 .get_brightness = adp5520_bl_get_brightness,
38399 };
38400diff -urNp linux-2.6.32.45/drivers/video/backlight/adx_bl.c linux-2.6.32.45/drivers/video/backlight/adx_bl.c
38401--- linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
38402+++ linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
38403@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
38404 return 1;
38405 }
38406
38407-static struct backlight_ops adx_backlight_ops = {
38408+static const struct backlight_ops adx_backlight_ops = {
38409 .options = 0,
38410 .update_status = adx_backlight_update_status,
38411 .get_brightness = adx_backlight_get_brightness,
38412diff -urNp linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c
38413--- linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
38414+++ linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
38415@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
38416 return pwm_channel_enable(&pwmbl->pwmc);
38417 }
38418
38419-static struct backlight_ops atmel_pwm_bl_ops = {
38420+static const struct backlight_ops atmel_pwm_bl_ops = {
38421 .get_brightness = atmel_pwm_bl_get_intensity,
38422 .update_status = atmel_pwm_bl_set_intensity,
38423 };
38424diff -urNp linux-2.6.32.45/drivers/video/backlight/backlight.c linux-2.6.32.45/drivers/video/backlight/backlight.c
38425--- linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
38426+++ linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
38427@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
38428 * ERR_PTR() or a pointer to the newly allocated device.
38429 */
38430 struct backlight_device *backlight_device_register(const char *name,
38431- struct device *parent, void *devdata, struct backlight_ops *ops)
38432+ struct device *parent, void *devdata, const struct backlight_ops *ops)
38433 {
38434 struct backlight_device *new_bd;
38435 int rc;
38436diff -urNp linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c
38437--- linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
38438+++ linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
38439@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
38440 }
38441 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
38442
38443-static struct backlight_ops corgi_bl_ops = {
38444+static const struct backlight_ops corgi_bl_ops = {
38445 .get_brightness = corgi_bl_get_intensity,
38446 .update_status = corgi_bl_update_status,
38447 };
38448diff -urNp linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c
38449--- linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
38450+++ linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
38451@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
38452 return intensity;
38453 }
38454
38455-static struct backlight_ops cr_backlight_ops = {
38456+static const struct backlight_ops cr_backlight_ops = {
38457 .get_brightness = cr_backlight_get_intensity,
38458 .update_status = cr_backlight_set_intensity,
38459 };
38460diff -urNp linux-2.6.32.45/drivers/video/backlight/da903x_bl.c linux-2.6.32.45/drivers/video/backlight/da903x_bl.c
38461--- linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
38462+++ linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
38463@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
38464 return data->current_brightness;
38465 }
38466
38467-static struct backlight_ops da903x_backlight_ops = {
38468+static const struct backlight_ops da903x_backlight_ops = {
38469 .update_status = da903x_backlight_update_status,
38470 .get_brightness = da903x_backlight_get_brightness,
38471 };
38472diff -urNp linux-2.6.32.45/drivers/video/backlight/generic_bl.c linux-2.6.32.45/drivers/video/backlight/generic_bl.c
38473--- linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
38474+++ linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
38475@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
38476 }
38477 EXPORT_SYMBOL(corgibl_limit_intensity);
38478
38479-static struct backlight_ops genericbl_ops = {
38480+static const struct backlight_ops genericbl_ops = {
38481 .options = BL_CORE_SUSPENDRESUME,
38482 .get_brightness = genericbl_get_intensity,
38483 .update_status = genericbl_send_intensity,
38484diff -urNp linux-2.6.32.45/drivers/video/backlight/hp680_bl.c linux-2.6.32.45/drivers/video/backlight/hp680_bl.c
38485--- linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
38486+++ linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
38487@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
38488 return current_intensity;
38489 }
38490
38491-static struct backlight_ops hp680bl_ops = {
38492+static const struct backlight_ops hp680bl_ops = {
38493 .get_brightness = hp680bl_get_intensity,
38494 .update_status = hp680bl_set_intensity,
38495 };
38496diff -urNp linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c
38497--- linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
38498+++ linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
38499@@ -93,7 +93,7 @@ out:
38500 return ret;
38501 }
38502
38503-static struct backlight_ops jornada_bl_ops = {
38504+static const struct backlight_ops jornada_bl_ops = {
38505 .get_brightness = jornada_bl_get_brightness,
38506 .update_status = jornada_bl_update_status,
38507 .options = BL_CORE_SUSPENDRESUME,
38508diff -urNp linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c
38509--- linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
38510+++ linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
38511@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
38512 return kb3886bl_intensity;
38513 }
38514
38515-static struct backlight_ops kb3886bl_ops = {
38516+static const struct backlight_ops kb3886bl_ops = {
38517 .get_brightness = kb3886bl_get_intensity,
38518 .update_status = kb3886bl_send_intensity,
38519 };
38520diff -urNp linux-2.6.32.45/drivers/video/backlight/locomolcd.c linux-2.6.32.45/drivers/video/backlight/locomolcd.c
38521--- linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
38522+++ linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
38523@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
38524 return current_intensity;
38525 }
38526
38527-static struct backlight_ops locomobl_data = {
38528+static const struct backlight_ops locomobl_data = {
38529 .get_brightness = locomolcd_get_intensity,
38530 .update_status = locomolcd_set_intensity,
38531 };
38532diff -urNp linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c
38533--- linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
38534+++ linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
38535@@ -33,7 +33,7 @@ struct dmi_match_data {
38536 unsigned long iostart;
38537 unsigned long iolen;
38538 /* Backlight operations structure. */
38539- struct backlight_ops backlight_ops;
38540+ const struct backlight_ops backlight_ops;
38541 };
38542
38543 /* Module parameters. */
38544diff -urNp linux-2.6.32.45/drivers/video/backlight/omap1_bl.c linux-2.6.32.45/drivers/video/backlight/omap1_bl.c
38545--- linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
38546+++ linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
38547@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
38548 return bl->current_intensity;
38549 }
38550
38551-static struct backlight_ops omapbl_ops = {
38552+static const struct backlight_ops omapbl_ops = {
38553 .get_brightness = omapbl_get_intensity,
38554 .update_status = omapbl_update_status,
38555 };
38556diff -urNp linux-2.6.32.45/drivers/video/backlight/progear_bl.c linux-2.6.32.45/drivers/video/backlight/progear_bl.c
38557--- linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
38558+++ linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
38559@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
38560 return intensity - HW_LEVEL_MIN;
38561 }
38562
38563-static struct backlight_ops progearbl_ops = {
38564+static const struct backlight_ops progearbl_ops = {
38565 .get_brightness = progearbl_get_intensity,
38566 .update_status = progearbl_set_intensity,
38567 };
38568diff -urNp linux-2.6.32.45/drivers/video/backlight/pwm_bl.c linux-2.6.32.45/drivers/video/backlight/pwm_bl.c
38569--- linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
38570+++ linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
38571@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
38572 return bl->props.brightness;
38573 }
38574
38575-static struct backlight_ops pwm_backlight_ops = {
38576+static const struct backlight_ops pwm_backlight_ops = {
38577 .update_status = pwm_backlight_update_status,
38578 .get_brightness = pwm_backlight_get_brightness,
38579 };
38580diff -urNp linux-2.6.32.45/drivers/video/backlight/tosa_bl.c linux-2.6.32.45/drivers/video/backlight/tosa_bl.c
38581--- linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
38582+++ linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
38583@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
38584 return props->brightness;
38585 }
38586
38587-static struct backlight_ops bl_ops = {
38588+static const struct backlight_ops bl_ops = {
38589 .get_brightness = tosa_bl_get_brightness,
38590 .update_status = tosa_bl_update_status,
38591 };
38592diff -urNp linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c
38593--- linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
38594+++ linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
38595@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
38596 return data->current_brightness;
38597 }
38598
38599-static struct backlight_ops wm831x_backlight_ops = {
38600+static const struct backlight_ops wm831x_backlight_ops = {
38601 .options = BL_CORE_SUSPENDRESUME,
38602 .update_status = wm831x_backlight_update_status,
38603 .get_brightness = wm831x_backlight_get_brightness,
38604diff -urNp linux-2.6.32.45/drivers/video/bf54x-lq043fb.c linux-2.6.32.45/drivers/video/bf54x-lq043fb.c
38605--- linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
38606+++ linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
38607@@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
38608 return 0;
38609 }
38610
38611-static struct backlight_ops bfin_lq043fb_bl_ops = {
38612+static const struct backlight_ops bfin_lq043fb_bl_ops = {
38613 .get_brightness = bl_get_brightness,
38614 };
38615
38616diff -urNp linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c
38617--- linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
38618+++ linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
38619@@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
38620 return 0;
38621 }
38622
38623-static struct backlight_ops bfin_lq043fb_bl_ops = {
38624+static const struct backlight_ops bfin_lq043fb_bl_ops = {
38625 .get_brightness = bl_get_brightness,
38626 };
38627
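All of the backlight and framebuffer hunks above make the same change: each driver's backlight_ops table is only read after initialization, so it can be declared const to match the const-taking backlight_device_register() prototype updated in the backlight.c hunk. A minimal sketch of the resulting driver-side convention, written against the 2.6.32 API as modified by this patch; the example_bl names are illustrative, not taken from any real driver.

#include <linux/backlight.h>

static int example_bl_get_brightness(struct backlight_device *bd)
{
        return bd->props.brightness;
}

static int example_bl_update_status(struct backlight_device *bd)
{
        /* a real driver would push bd->props.brightness to the hardware here */
        return 0;
}

/* const: the table can live in read-only memory and matches the new
 * backlight_device_register(..., const struct backlight_ops *) prototype */
static const struct backlight_ops example_bl_ops = {
        .get_brightness = example_bl_get_brightness,
        .update_status  = example_bl_update_status,
};

/* registration is unchanged apart from the const qualifier:
 *      bd = backlight_device_register("example-bl", dev, NULL, &example_bl_ops);
 */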
38628diff -urNp linux-2.6.32.45/drivers/video/fbcmap.c linux-2.6.32.45/drivers/video/fbcmap.c
38629--- linux-2.6.32.45/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
38630+++ linux-2.6.32.45/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
38631@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
38632 rc = -ENODEV;
38633 goto out;
38634 }
38635- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38636- !info->fbops->fb_setcmap)) {
38637+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38638 rc = -EINVAL;
38639 goto out1;
38640 }
38641diff -urNp linux-2.6.32.45/drivers/video/fbmem.c linux-2.6.32.45/drivers/video/fbmem.c
38642--- linux-2.6.32.45/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
38643+++ linux-2.6.32.45/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
38644@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
38645 image->dx += image->width + 8;
38646 }
38647 } else if (rotate == FB_ROTATE_UD) {
38648- for (x = 0; x < num && image->dx >= 0; x++) {
38649+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38650 info->fbops->fb_imageblit(info, image);
38651 image->dx -= image->width + 8;
38652 }
38653@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
38654 image->dy += image->height + 8;
38655 }
38656 } else if (rotate == FB_ROTATE_CCW) {
38657- for (x = 0; x < num && image->dy >= 0; x++) {
38658+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38659 info->fbops->fb_imageblit(info, image);
38660 image->dy -= image->height + 8;
38661 }
38662@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
38663 int flags = info->flags;
38664 int ret = 0;
38665
38666+ pax_track_stack();
38667+
38668 if (var->activate & FB_ACTIVATE_INV_MODE) {
38669 struct fb_videomode mode1, mode2;
38670
38671@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
38672 void __user *argp = (void __user *)arg;
38673 long ret = 0;
38674
38675+ pax_track_stack();
38676+
38677 switch (cmd) {
38678 case FBIOGET_VSCREENINFO:
38679 if (!lock_fb_info(info))
38680@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
38681 return -EFAULT;
38682 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38683 return -EINVAL;
38684- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38685+ if (con2fb.framebuffer >= FB_MAX)
38686 return -EINVAL;
38687 if (!registered_fb[con2fb.framebuffer])
38688 request_module("fb%d", con2fb.framebuffer);
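Both fbmem.c hunks above fix loop guards that compare the unsigned dx/dy fields of struct fb_image against zero, which is a tautology until the value is reinterpreted as signed; the fbcmap.c hunk a little earlier drops an always-false cmap->start < 0 test for the same underlying reason (the field is unsigned). The stand-alone user-space program below reproduces the failure mode with made-up numbers; it is an illustration, not kernel code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t dx = 20;                /* like image->dx: an unsigned field */
        const uint32_t step = 16;        /* stand-in for image->width + 8 */
        int runs = 0;
        int x;

        /* buggy guard: "dx >= 0" can never be false for an unsigned type,
         * so the loop keeps going after dx wraps past zero */
        for (x = 0; x < 8 && dx >= 0; x++) {
                runs++;
                dx -= step;
        }
        printf("unsigned guard: %d runs, dx ended at %u\n", runs, dx);   /* 8 runs */

        dx = 20;
        runs = 0;
        /* the patch's fix: evaluate the bound as signed */
        for (x = 0; x < 8 && (int32_t)dx >= 0; x++) {
                runs++;
                dx -= step;
        }
        printf("signed guard:   %d runs, dx ended at %u\n", runs, dx);   /* 2 runs */
        return 0;
}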
38689diff -urNp linux-2.6.32.45/drivers/video/i810/i810_accel.c linux-2.6.32.45/drivers/video/i810/i810_accel.c
38690--- linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
38691+++ linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
38692@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
38693 }
38694 }
38695 printk("ringbuffer lockup!!!\n");
38696+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38697 i810_report_error(mmio);
38698 par->dev_flags |= LOCKUP;
38699 info->pixmap.scan_align = 1;
38700diff -urNp linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c
38701--- linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
38702+++ linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
38703@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
38704 return bd->props.brightness;
38705 }
38706
38707-static struct backlight_ops nvidia_bl_ops = {
38708+static const struct backlight_ops nvidia_bl_ops = {
38709 .get_brightness = nvidia_bl_get_brightness,
38710 .update_status = nvidia_bl_update_status,
38711 };
38712diff -urNp linux-2.6.32.45/drivers/video/riva/fbdev.c linux-2.6.32.45/drivers/video/riva/fbdev.c
38713--- linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
38714+++ linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
38715@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
38716 return bd->props.brightness;
38717 }
38718
38719-static struct backlight_ops riva_bl_ops = {
38720+static const struct backlight_ops riva_bl_ops = {
38721 .get_brightness = riva_bl_get_brightness,
38722 .update_status = riva_bl_update_status,
38723 };
38724diff -urNp linux-2.6.32.45/drivers/video/uvesafb.c linux-2.6.32.45/drivers/video/uvesafb.c
38725--- linux-2.6.32.45/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
38726+++ linux-2.6.32.45/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
38727@@ -18,6 +18,7 @@
38728 #include <linux/fb.h>
38729 #include <linux/io.h>
38730 #include <linux/mutex.h>
38731+#include <linux/moduleloader.h>
38732 #include <video/edid.h>
38733 #include <video/uvesafb.h>
38734 #ifdef CONFIG_X86
38735@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
38736 NULL,
38737 };
38738
38739- return call_usermodehelper(v86d_path, argv, envp, 1);
38740+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38741 }
38742
38743 /*
38744@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
38745 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38746 par->pmi_setpal = par->ypan = 0;
38747 } else {
38748+
38749+#ifdef CONFIG_PAX_KERNEXEC
38750+#ifdef CONFIG_MODULES
38751+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38752+#endif
38753+ if (!par->pmi_code) {
38754+ par->pmi_setpal = par->ypan = 0;
38755+ return 0;
38756+ }
38757+#endif
38758+
38759 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38760 + task->t.regs.edi);
38761+
38762+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38763+ pax_open_kernel();
38764+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
38765+ pax_close_kernel();
38766+
38767+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
38768+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
38769+#else
38770 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
38771 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
38772+#endif
38773+
38774 printk(KERN_INFO "uvesafb: protected mode interface info at "
38775 "%04x:%04x\n",
38776 (u16)task->t.regs.es, (u16)task->t.regs.edi);
38777@@ -1799,6 +1822,11 @@ out:
38778 if (par->vbe_modes)
38779 kfree(par->vbe_modes);
38780
38781+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38782+ if (par->pmi_code)
38783+ module_free_exec(NULL, par->pmi_code);
38784+#endif
38785+
38786 framebuffer_release(info);
38787 return err;
38788 }
38789@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
38790 kfree(par->vbe_state_orig);
38791 if (par->vbe_state_saved)
38792 kfree(par->vbe_state_saved);
38793+
38794+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38795+ if (par->pmi_code)
38796+ module_free_exec(NULL, par->pmi_code);
38797+#endif
38798+
38799 }
38800
38801 framebuffer_release(info);
38802diff -urNp linux-2.6.32.45/drivers/video/vesafb.c linux-2.6.32.45/drivers/video/vesafb.c
38803--- linux-2.6.32.45/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
38804+++ linux-2.6.32.45/drivers/video/vesafb.c 2011-08-05 20:33:55.000000000 -0400
38805@@ -9,6 +9,7 @@
38806 */
38807
38808 #include <linux/module.h>
38809+#include <linux/moduleloader.h>
38810 #include <linux/kernel.h>
38811 #include <linux/errno.h>
38812 #include <linux/string.h>
38813@@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
38814 static int vram_total __initdata; /* Set total amount of memory */
38815 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
38816 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
38817-static void (*pmi_start)(void) __read_mostly;
38818-static void (*pmi_pal) (void) __read_mostly;
38819+static void (*pmi_start)(void) __read_only;
38820+static void (*pmi_pal) (void) __read_only;
38821 static int depth __read_mostly;
38822 static int vga_compat __read_mostly;
38823 /* --------------------------------------------------------------------- */
38824@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
38825 unsigned int size_vmode;
38826 unsigned int size_remap;
38827 unsigned int size_total;
38828+ void *pmi_code = NULL;
38829
38830 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
38831 return -ENODEV;
38832@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
38833 size_remap = size_total;
38834 vesafb_fix.smem_len = size_remap;
38835
38836-#ifndef __i386__
38837- screen_info.vesapm_seg = 0;
38838-#endif
38839-
38840 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
38841 printk(KERN_WARNING
38842 "vesafb: cannot reserve video memory at 0x%lx\n",
38843@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
38844 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
38845 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
38846
38847+#ifdef __i386__
38848+
38849+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38850+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
38851+ if (!pmi_code)
38852+#elif !defined(CONFIG_PAX_KERNEXEC)
38853+ if (0)
38854+#endif
38855+
38856+#endif
38857+ screen_info.vesapm_seg = 0;
38858+
38859 if (screen_info.vesapm_seg) {
38860- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
38861- screen_info.vesapm_seg,screen_info.vesapm_off);
38862+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
38863+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
38864 }
38865
38866 if (screen_info.vesapm_seg < 0xc000)
38867@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
38868
38869 if (ypan || pmi_setpal) {
38870 unsigned short *pmi_base;
38871+
38872 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
38873- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
38874- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
38875+
38876+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38877+ pax_open_kernel();
38878+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
38879+#else
38880+ pmi_code = pmi_base;
38881+#endif
38882+
38883+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
38884+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
38885+
38886+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38887+ pmi_start = ktva_ktla(pmi_start);
38888+ pmi_pal = ktva_ktla(pmi_pal);
38889+ pax_close_kernel();
38890+#endif
38891+
38892 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
38893 if (pmi_base[3]) {
38894 printk(KERN_INFO "vesafb: pmi: ports = ");
38895@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
38896 info->node, info->fix.id);
38897 return 0;
38898 err:
38899+
38900+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38901+ module_free_exec(NULL, pmi_code);
38902+#endif
38903+
38904 if (info->screen_base)
38905 iounmap(info->screen_base);
38906 framebuffer_release(info);
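The uvesafb and vesafb hunks above share one pattern: with CONFIG_PAX_KERNEXEC the BIOS protected-mode interface code is no longer executable in place, so it is copied into an executable allocation and the two entry points are recomputed against that copy. The sketch below condenses that sequence; it relies on the helpers this patch introduces elsewhere (module_alloc_exec, pax_open_kernel/pax_close_kernel, ktva_ktla), the function and parameter names are illustrative, and error handling is reduced to the allocation check.

#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_setup_pmi(const u16 *pmi_base, size_t pmi_len,
                             void **set_start, void **set_pal)
{
        void *pmi_code = module_alloc_exec(pmi_len);    /* executable destination */

        if (!pmi_code)
                return -ENOMEM;         /* callers fall back to pmi_setpal = ypan = 0 */

        pax_open_kernel();              /* the executable region is not writable otherwise */
        memcpy(pmi_code, pmi_base, pmi_len);
        pax_close_kernel();

        /* pmi_base[1] and pmi_base[2] hold the offsets of the two entry points */
        *set_start = ktva_ktla((char *)pmi_code + pmi_base[1]);
        *set_pal   = ktva_ktla((char *)pmi_code + pmi_base[2]);
        return 0;
}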
38907diff -urNp linux-2.6.32.45/drivers/xen/sys-hypervisor.c linux-2.6.32.45/drivers/xen/sys-hypervisor.c
38908--- linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
38909+++ linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
38910@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
38911 return 0;
38912 }
38913
38914-static struct sysfs_ops hyp_sysfs_ops = {
38915+static const struct sysfs_ops hyp_sysfs_ops = {
38916 .show = hyp_sysfs_show,
38917 .store = hyp_sysfs_store,
38918 };
38919diff -urNp linux-2.6.32.45/fs/9p/vfs_inode.c linux-2.6.32.45/fs/9p/vfs_inode.c
38920--- linux-2.6.32.45/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
38921+++ linux-2.6.32.45/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
38922@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
38923 static void
38924 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38925 {
38926- char *s = nd_get_link(nd);
38927+ const char *s = nd_get_link(nd);
38928
38929 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
38930 IS_ERR(s) ? "<error>" : s);
38931diff -urNp linux-2.6.32.45/fs/aio.c linux-2.6.32.45/fs/aio.c
38932--- linux-2.6.32.45/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
38933+++ linux-2.6.32.45/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
38934@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
38935 size += sizeof(struct io_event) * nr_events;
38936 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
38937
38938- if (nr_pages < 0)
38939+ if (nr_pages <= 0)
38940 return -EINVAL;
38941
38942 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
38943@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
38944 struct aio_timeout to;
38945 int retry = 0;
38946
38947+ pax_track_stack();
38948+
38949 /* needed to zero any padding within an entry (there shouldn't be
38950 * any, but C is fun!
38951 */
38952@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
38953 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
38954 {
38955 ssize_t ret;
38956+ struct iovec iovstack;
38957
38958 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
38959 kiocb->ki_nbytes, 1,
38960- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
38961+ &iovstack, &kiocb->ki_iovec);
38962 if (ret < 0)
38963 goto out;
38964
38965+ if (kiocb->ki_iovec == &iovstack) {
38966+ kiocb->ki_inline_vec = iovstack;
38967+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
38968+ }
38969 kiocb->ki_nr_segs = kiocb->ki_nbytes;
38970 kiocb->ki_cur_seg = 0;
38971 /* ki_nbytes/left now reflect bytes instead of segs */
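The first aio.c hunk widens the ring-page sanity check from nr_pages < 0 to nr_pages <= 0; the second routes rw_copy_check_uvector() through a stack iovec and only then copies the result into the kiocb. The stand-alone program below illustrates, with 32-bit arithmetic and made-up constants, the class of problem the wider check also catches: a size computation that wraps so the rounded-up page count lands exactly on zero instead of going negative. The kernel's own types differ, but a zero result is just as invalid as a negative one.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE  4096u
#define PAGE_SHIFT 12

int main(void)
{
        uint32_t event_size = 32;            /* stand-in for sizeof(struct io_event) */
        uint32_t nr_events  = 0x07ffffc0;    /* attacker-chosen request size */
        uint32_t size       = 2048;          /* stand-in for sizeof(struct aio_ring) */
        int nr_pages;

        size += event_size * nr_events;      /* 0xfffff800 + 2048 wraps to 0 */
        nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        printf("size=%u nr_pages=%d\n", size, nr_pages);
        printf("old check (nr_pages < 0):  %s\n", nr_pages < 0  ? "rejected" : "accepted");
        printf("new check (nr_pages <= 0): %s\n", nr_pages <= 0 ? "rejected" : "accepted");
        return 0;
}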
38972diff -urNp linux-2.6.32.45/fs/attr.c linux-2.6.32.45/fs/attr.c
38973--- linux-2.6.32.45/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
38974+++ linux-2.6.32.45/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
38975@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
38976 unsigned long limit;
38977
38978 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
38979+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
38980 if (limit != RLIM_INFINITY && offset > limit)
38981 goto out_sig;
38982 if (offset > inode->i_sb->s_maxbytes)
38983diff -urNp linux-2.6.32.45/fs/autofs/root.c linux-2.6.32.45/fs/autofs/root.c
38984--- linux-2.6.32.45/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
38985+++ linux-2.6.32.45/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
38986@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
38987 set_bit(n,sbi->symlink_bitmap);
38988 sl = &sbi->symlink[n];
38989 sl->len = strlen(symname);
38990- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
38991+ slsize = sl->len+1;
38992+ sl->data = kmalloc(slsize, GFP_KERNEL);
38993 if (!sl->data) {
38994 clear_bit(n,sbi->symlink_bitmap);
38995 unlock_kernel();
38996diff -urNp linux-2.6.32.45/fs/autofs4/symlink.c linux-2.6.32.45/fs/autofs4/symlink.c
38997--- linux-2.6.32.45/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
38998+++ linux-2.6.32.45/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
38999@@ -15,7 +15,7 @@
39000 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
39001 {
39002 struct autofs_info *ino = autofs4_dentry_ino(dentry);
39003- nd_set_link(nd, (char *)ino->u.symlink);
39004+ nd_set_link(nd, ino->u.symlink);
39005 return NULL;
39006 }
39007
39008diff -urNp linux-2.6.32.45/fs/befs/linuxvfs.c linux-2.6.32.45/fs/befs/linuxvfs.c
39009--- linux-2.6.32.45/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
39010+++ linux-2.6.32.45/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
39011@@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
39012 {
39013 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39014 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39015- char *link = nd_get_link(nd);
39016+ const char *link = nd_get_link(nd);
39017 if (!IS_ERR(link))
39018 kfree(link);
39019 }
39020diff -urNp linux-2.6.32.45/fs/binfmt_aout.c linux-2.6.32.45/fs/binfmt_aout.c
39021--- linux-2.6.32.45/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
39022+++ linux-2.6.32.45/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
39023@@ -16,6 +16,7 @@
39024 #include <linux/string.h>
39025 #include <linux/fs.h>
39026 #include <linux/file.h>
39027+#include <linux/security.h>
39028 #include <linux/stat.h>
39029 #include <linux/fcntl.h>
39030 #include <linux/ptrace.h>
39031@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
39032 #endif
39033 # define START_STACK(u) (u.start_stack)
39034
39035+ memset(&dump, 0, sizeof(dump));
39036+
39037 fs = get_fs();
39038 set_fs(KERNEL_DS);
39039 has_dumped = 1;
39040@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
39041
39042 /* If the size of the dump file exceeds the rlimit, then see what would happen
39043 if we wrote the stack, but not the data area. */
39044+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39045 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
39046 dump.u_dsize = 0;
39047
39048 /* Make sure we have enough room to write the stack and data areas. */
39049+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39050 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
39051 dump.u_ssize = 0;
39052
39053@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
39054 dump_size = dump.u_ssize << PAGE_SHIFT;
39055 DUMP_WRITE(dump_start,dump_size);
39056 }
39057-/* Finally dump the task struct. Not be used by gdb, but could be useful */
39058- set_fs(KERNEL_DS);
39059- DUMP_WRITE(current,sizeof(*current));
39060+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
39061 end_coredump:
39062 set_fs(fs);
39063 return has_dumped;
39064@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
39065 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
39066 if (rlim >= RLIM_INFINITY)
39067 rlim = ~0;
39068+
39069+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39070 if (ex.a_data + ex.a_bss > rlim)
39071 return -ENOMEM;
39072
39073@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
39074 install_exec_creds(bprm);
39075 current->flags &= ~PF_FORKNOEXEC;
39076
39077+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39078+ current->mm->pax_flags = 0UL;
39079+#endif
39080+
39081+#ifdef CONFIG_PAX_PAGEEXEC
39082+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39083+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39084+
39085+#ifdef CONFIG_PAX_EMUTRAMP
39086+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39087+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39088+#endif
39089+
39090+#ifdef CONFIG_PAX_MPROTECT
39091+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39092+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39093+#endif
39094+
39095+ }
39096+#endif
39097+
39098 if (N_MAGIC(ex) == OMAGIC) {
39099 unsigned long text_addr, map_size;
39100 loff_t pos;
39101@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
39102
39103 down_write(&current->mm->mmap_sem);
39104 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39105- PROT_READ | PROT_WRITE | PROT_EXEC,
39106+ PROT_READ | PROT_WRITE,
39107 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39108 fd_offset + ex.a_text);
39109 up_write(&current->mm->mmap_sem);
39110diff -urNp linux-2.6.32.45/fs/binfmt_elf.c linux-2.6.32.45/fs/binfmt_elf.c
39111--- linux-2.6.32.45/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
39112+++ linux-2.6.32.45/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
39113@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
39114 #define elf_core_dump NULL
39115 #endif
39116
39117+#ifdef CONFIG_PAX_MPROTECT
39118+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39119+#endif
39120+
39121 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39122 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39123 #else
39124@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
39125 .load_binary = load_elf_binary,
39126 .load_shlib = load_elf_library,
39127 .core_dump = elf_core_dump,
39128+
39129+#ifdef CONFIG_PAX_MPROTECT
39130+ .handle_mprotect= elf_handle_mprotect,
39131+#endif
39132+
39133 .min_coredump = ELF_EXEC_PAGESIZE,
39134 .hasvdso = 1
39135 };
39136@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39137
39138 static int set_brk(unsigned long start, unsigned long end)
39139 {
39140+ unsigned long e = end;
39141+
39142 start = ELF_PAGEALIGN(start);
39143 end = ELF_PAGEALIGN(end);
39144 if (end > start) {
39145@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39146 if (BAD_ADDR(addr))
39147 return addr;
39148 }
39149- current->mm->start_brk = current->mm->brk = end;
39150+ current->mm->start_brk = current->mm->brk = e;
39151 return 0;
39152 }
39153
39154@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39155 elf_addr_t __user *u_rand_bytes;
39156 const char *k_platform = ELF_PLATFORM;
39157 const char *k_base_platform = ELF_BASE_PLATFORM;
39158- unsigned char k_rand_bytes[16];
39159+ u32 k_rand_bytes[4];
39160 int items;
39161 elf_addr_t *elf_info;
39162 int ei_index = 0;
39163 const struct cred *cred = current_cred();
39164 struct vm_area_struct *vma;
39165+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39166+
39167+ pax_track_stack();
39168
39169 /*
39170 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39171@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39172 * Generate 16 random bytes for userspace PRNG seeding.
39173 */
39174 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39175- u_rand_bytes = (elf_addr_t __user *)
39176- STACK_ALLOC(p, sizeof(k_rand_bytes));
39177+ srandom32(k_rand_bytes[0] ^ random32());
39178+ srandom32(k_rand_bytes[1] ^ random32());
39179+ srandom32(k_rand_bytes[2] ^ random32());
39180+ srandom32(k_rand_bytes[3] ^ random32());
39181+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39182+ u_rand_bytes = (elf_addr_t __user *) p;
39183 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39184 return -EFAULT;
39185
39186@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39187 return -EFAULT;
39188 current->mm->env_end = p;
39189
39190+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39191+
39192 /* Put the elf_info on the stack in the right place. */
39193 sp = (elf_addr_t __user *)envp + 1;
39194- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39195+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39196 return -EFAULT;
39197 return 0;
39198 }
39199@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
39200 {
39201 struct elf_phdr *elf_phdata;
39202 struct elf_phdr *eppnt;
39203- unsigned long load_addr = 0;
39204+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39205 int load_addr_set = 0;
39206 unsigned long last_bss = 0, elf_bss = 0;
39207- unsigned long error = ~0UL;
39208+ unsigned long error = -EINVAL;
39209 unsigned long total_size;
39210 int retval, i, size;
39211
39212@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
39213 goto out_close;
39214 }
39215
39216+#ifdef CONFIG_PAX_SEGMEXEC
39217+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39218+ pax_task_size = SEGMEXEC_TASK_SIZE;
39219+#endif
39220+
39221 eppnt = elf_phdata;
39222 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39223 if (eppnt->p_type == PT_LOAD) {
39224@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
39225 k = load_addr + eppnt->p_vaddr;
39226 if (BAD_ADDR(k) ||
39227 eppnt->p_filesz > eppnt->p_memsz ||
39228- eppnt->p_memsz > TASK_SIZE ||
39229- TASK_SIZE - eppnt->p_memsz < k) {
39230+ eppnt->p_memsz > pax_task_size ||
39231+ pax_task_size - eppnt->p_memsz < k) {
39232 error = -ENOMEM;
39233 goto out_close;
39234 }
39235@@ -532,6 +557,194 @@ out:
39236 return error;
39237 }
39238
39239+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39240+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39241+{
39242+ unsigned long pax_flags = 0UL;
39243+
39244+#ifdef CONFIG_PAX_PAGEEXEC
39245+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39246+ pax_flags |= MF_PAX_PAGEEXEC;
39247+#endif
39248+
39249+#ifdef CONFIG_PAX_SEGMEXEC
39250+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39251+ pax_flags |= MF_PAX_SEGMEXEC;
39252+#endif
39253+
39254+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39255+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39256+ if (nx_enabled)
39257+ pax_flags &= ~MF_PAX_SEGMEXEC;
39258+ else
39259+ pax_flags &= ~MF_PAX_PAGEEXEC;
39260+ }
39261+#endif
39262+
39263+#ifdef CONFIG_PAX_EMUTRAMP
39264+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39265+ pax_flags |= MF_PAX_EMUTRAMP;
39266+#endif
39267+
39268+#ifdef CONFIG_PAX_MPROTECT
39269+ if (elf_phdata->p_flags & PF_MPROTECT)
39270+ pax_flags |= MF_PAX_MPROTECT;
39271+#endif
39272+
39273+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39274+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39275+ pax_flags |= MF_PAX_RANDMMAP;
39276+#endif
39277+
39278+ return pax_flags;
39279+}
39280+#endif
39281+
39282+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39283+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39284+{
39285+ unsigned long pax_flags = 0UL;
39286+
39287+#ifdef CONFIG_PAX_PAGEEXEC
39288+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39289+ pax_flags |= MF_PAX_PAGEEXEC;
39290+#endif
39291+
39292+#ifdef CONFIG_PAX_SEGMEXEC
39293+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39294+ pax_flags |= MF_PAX_SEGMEXEC;
39295+#endif
39296+
39297+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39298+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39299+ if (nx_enabled)
39300+ pax_flags &= ~MF_PAX_SEGMEXEC;
39301+ else
39302+ pax_flags &= ~MF_PAX_PAGEEXEC;
39303+ }
39304+#endif
39305+
39306+#ifdef CONFIG_PAX_EMUTRAMP
39307+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39308+ pax_flags |= MF_PAX_EMUTRAMP;
39309+#endif
39310+
39311+#ifdef CONFIG_PAX_MPROTECT
39312+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39313+ pax_flags |= MF_PAX_MPROTECT;
39314+#endif
39315+
39316+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39317+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39318+ pax_flags |= MF_PAX_RANDMMAP;
39319+#endif
39320+
39321+ return pax_flags;
39322+}
39323+#endif
39324+
39325+#ifdef CONFIG_PAX_EI_PAX
39326+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39327+{
39328+ unsigned long pax_flags = 0UL;
39329+
39330+#ifdef CONFIG_PAX_PAGEEXEC
39331+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39332+ pax_flags |= MF_PAX_PAGEEXEC;
39333+#endif
39334+
39335+#ifdef CONFIG_PAX_SEGMEXEC
39336+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39337+ pax_flags |= MF_PAX_SEGMEXEC;
39338+#endif
39339+
39340+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39341+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39342+ if (nx_enabled)
39343+ pax_flags &= ~MF_PAX_SEGMEXEC;
39344+ else
39345+ pax_flags &= ~MF_PAX_PAGEEXEC;
39346+ }
39347+#endif
39348+
39349+#ifdef CONFIG_PAX_EMUTRAMP
39350+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39351+ pax_flags |= MF_PAX_EMUTRAMP;
39352+#endif
39353+
39354+#ifdef CONFIG_PAX_MPROTECT
39355+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39356+ pax_flags |= MF_PAX_MPROTECT;
39357+#endif
39358+
39359+#ifdef CONFIG_PAX_ASLR
39360+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39361+ pax_flags |= MF_PAX_RANDMMAP;
39362+#endif
39363+
39364+ return pax_flags;
39365+}
39366+#endif
39367+
39368+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39369+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39370+{
39371+ unsigned long pax_flags = 0UL;
39372+
39373+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39374+ unsigned long i;
39375+ int found_flags = 0;
39376+#endif
39377+
39378+#ifdef CONFIG_PAX_EI_PAX
39379+ pax_flags = pax_parse_ei_pax(elf_ex);
39380+#endif
39381+
39382+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39383+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39384+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39385+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39386+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39387+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39388+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39389+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39390+ return -EINVAL;
39391+
39392+#ifdef CONFIG_PAX_SOFTMODE
39393+ if (pax_softmode)
39394+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
39395+ else
39396+#endif
39397+
39398+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39399+ found_flags = 1;
39400+ break;
39401+ }
39402+#endif
39403+
39404+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39405+ if (found_flags == 0) {
39406+ struct elf_phdr phdr;
39407+ memset(&phdr, 0, sizeof(phdr));
39408+ phdr.p_flags = PF_NOEMUTRAMP;
39409+#ifdef CONFIG_PAX_SOFTMODE
39410+ if (pax_softmode)
39411+ pax_flags = pax_parse_softmode(&phdr);
39412+ else
39413+#endif
39414+ pax_flags = pax_parse_hardmode(&phdr);
39415+ }
39416+#endif
39417+
39418+
39419+ if (0 > pax_check_flags(&pax_flags))
39420+ return -EINVAL;
39421+
39422+ current->mm->pax_flags = pax_flags;
39423+ return 0;
39424+}
39425+#endif
39426+
39427 /*
39428 * These are the functions used to load ELF style executables and shared
39429 * libraries. There is no binary dependent code anywhere else.
39430@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
39431 {
39432 unsigned int random_variable = 0;
39433
39434+#ifdef CONFIG_PAX_RANDUSTACK
39435+ if (randomize_va_space)
39436+ return stack_top - current->mm->delta_stack;
39437+#endif
39438+
39439 if ((current->flags & PF_RANDOMIZE) &&
39440 !(current->personality & ADDR_NO_RANDOMIZE)) {
39441 random_variable = get_random_int() & STACK_RND_MASK;
39442@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
39443 unsigned long load_addr = 0, load_bias = 0;
39444 int load_addr_set = 0;
39445 char * elf_interpreter = NULL;
39446- unsigned long error;
39447+ unsigned long error = 0;
39448 struct elf_phdr *elf_ppnt, *elf_phdata;
39449 unsigned long elf_bss, elf_brk;
39450 int retval, i;
39451@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
39452 unsigned long start_code, end_code, start_data, end_data;
39453 unsigned long reloc_func_desc = 0;
39454 int executable_stack = EXSTACK_DEFAULT;
39455- unsigned long def_flags = 0;
39456 struct {
39457 struct elfhdr elf_ex;
39458 struct elfhdr interp_elf_ex;
39459 } *loc;
39460+ unsigned long pax_task_size = TASK_SIZE;
39461
39462 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39463 if (!loc) {
39464@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
39465
39466 /* OK, This is the point of no return */
39467 current->flags &= ~PF_FORKNOEXEC;
39468- current->mm->def_flags = def_flags;
39469+
39470+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39471+ current->mm->pax_flags = 0UL;
39472+#endif
39473+
39474+#ifdef CONFIG_PAX_DLRESOLVE
39475+ current->mm->call_dl_resolve = 0UL;
39476+#endif
39477+
39478+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39479+ current->mm->call_syscall = 0UL;
39480+#endif
39481+
39482+#ifdef CONFIG_PAX_ASLR
39483+ current->mm->delta_mmap = 0UL;
39484+ current->mm->delta_stack = 0UL;
39485+#endif
39486+
39487+ current->mm->def_flags = 0;
39488+
39489+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39490+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39491+ send_sig(SIGKILL, current, 0);
39492+ goto out_free_dentry;
39493+ }
39494+#endif
39495+
39496+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39497+ pax_set_initial_flags(bprm);
39498+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39499+ if (pax_set_initial_flags_func)
39500+ (pax_set_initial_flags_func)(bprm);
39501+#endif
39502+
39503+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39504+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
39505+ current->mm->context.user_cs_limit = PAGE_SIZE;
39506+ current->mm->def_flags |= VM_PAGEEXEC;
39507+ }
39508+#endif
39509+
39510+#ifdef CONFIG_PAX_SEGMEXEC
39511+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39512+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39513+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39514+ pax_task_size = SEGMEXEC_TASK_SIZE;
39515+ }
39516+#endif
39517+
39518+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39519+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39520+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39521+ put_cpu();
39522+ }
39523+#endif
39524
39525 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39526 may depend on the personality. */
39527 SET_PERSONALITY(loc->elf_ex);
39528+
39529+#ifdef CONFIG_PAX_ASLR
39530+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39531+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39532+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39533+ }
39534+#endif
39535+
39536+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39537+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39538+ executable_stack = EXSTACK_DISABLE_X;
39539+ current->personality &= ~READ_IMPLIES_EXEC;
39540+ } else
39541+#endif
39542+
39543 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39544 current->personality |= READ_IMPLIES_EXEC;
39545
39546@@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
39547 #else
39548 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39549 #endif
39550+
39551+#ifdef CONFIG_PAX_RANDMMAP
39552+ /* PaX: randomize base address at the default exe base if requested */
39553+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39554+#ifdef CONFIG_SPARC64
39555+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39556+#else
39557+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39558+#endif
39559+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39560+ elf_flags |= MAP_FIXED;
39561+ }
39562+#endif
39563+
39564 }
39565
39566 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39567@@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
39568 * allowed task size. Note that p_filesz must always be
39569 * <= p_memsz so it is only necessary to check p_memsz.
39570 */
39571- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39572- elf_ppnt->p_memsz > TASK_SIZE ||
39573- TASK_SIZE - elf_ppnt->p_memsz < k) {
39574+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39575+ elf_ppnt->p_memsz > pax_task_size ||
39576+ pax_task_size - elf_ppnt->p_memsz < k) {
39577 /* set_brk can never work. Avoid overflows. */
39578 send_sig(SIGKILL, current, 0);
39579 retval = -EINVAL;
39580@@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
39581 start_data += load_bias;
39582 end_data += load_bias;
39583
39584+#ifdef CONFIG_PAX_RANDMMAP
39585+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39586+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39587+#endif
39588+
39589 /* Calling set_brk effectively mmaps the pages that we need
39590 * for the bss and break sections. We must do this before
39591 * mapping in the interpreter, to make sure it doesn't wind
39592@@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
39593 goto out_free_dentry;
39594 }
39595 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39596- send_sig(SIGSEGV, current, 0);
39597- retval = -EFAULT; /* Nobody gets to see this, but.. */
39598- goto out_free_dentry;
39599+ /*
39600+ * This bss-zeroing can fail if the ELF
39601+ * file specifies odd protections. So
39602+ * we don't check the return value
39603+ */
39604 }
39605
39606 if (elf_interpreter) {
39607@@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
39608 unsigned long n = off;
39609 if (n > PAGE_SIZE)
39610 n = PAGE_SIZE;
39611- if (!dump_write(file, buf, n))
39612+ if (!dump_write(file, buf, n)) {
39613+ free_page((unsigned long)buf);
39614 return 0;
39615+ }
39616 off -= n;
39617 }
39618 free_page((unsigned long)buf);
39619@@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
39620 * Decide what to dump of a segment, part, all or none.
39621 */
39622 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39623- unsigned long mm_flags)
39624+ unsigned long mm_flags, long signr)
39625 {
39626 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39627
39628@@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
39629 if (vma->vm_file == NULL)
39630 return 0;
39631
39632- if (FILTER(MAPPED_PRIVATE))
39633+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39634 goto whole;
39635
39636 /*
39637@@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
39638 #undef DUMP_WRITE
39639
39640 #define DUMP_WRITE(addr, nr) \
39641+ do { \
39642+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
39643 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
39644- goto end_coredump;
39645+ goto end_coredump; \
39646+ } while (0);
39647
39648 static void fill_elf_header(struct elfhdr *elf, int segs,
39649 u16 machine, u32 flags, u8 osabi)
39650@@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
39651 {
39652 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39653 int i = 0;
39654- do
39655+ do {
39656 i += 2;
39657- while (auxv[i - 2] != AT_NULL);
39658+ } while (auxv[i - 2] != AT_NULL);
39659 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39660 }
39661
39662@@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
39663 phdr.p_offset = offset;
39664 phdr.p_vaddr = vma->vm_start;
39665 phdr.p_paddr = 0;
39666- phdr.p_filesz = vma_dump_size(vma, mm_flags);
39667+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
39668 phdr.p_memsz = vma->vm_end - vma->vm_start;
39669 offset += phdr.p_filesz;
39670 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39671@@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
39672 unsigned long addr;
39673 unsigned long end;
39674
39675- end = vma->vm_start + vma_dump_size(vma, mm_flags);
39676+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
39677
39678 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39679 struct page *page;
39680@@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
39681 page = get_dump_page(addr);
39682 if (page) {
39683 void *kaddr = kmap(page);
39684+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39685 stop = ((size += PAGE_SIZE) > limit) ||
39686 !dump_write(file, kaddr, PAGE_SIZE);
39687 kunmap(page);
39688@@ -2042,6 +2356,97 @@ out:
39689
39690 #endif /* USE_ELF_CORE_DUMP */
39691
39692+#ifdef CONFIG_PAX_MPROTECT
39693+/* PaX: non-PIC ELF libraries need relocations on their executable segments
39694+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
39695+ * we'll remove VM_MAYWRITE for good on RELRO segments.
39696+ *
39697+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39698+ * basis because we want to allow the common case and not the special ones.
39699+ */
39700+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39701+{
39702+ struct elfhdr elf_h;
39703+ struct elf_phdr elf_p;
39704+ unsigned long i;
39705+ unsigned long oldflags;
39706+ bool is_textrel_rw, is_textrel_rx, is_relro;
39707+
39708+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39709+ return;
39710+
39711+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39712+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39713+
39714+#ifdef CONFIG_PAX_ELFRELOCS
39715+ /* possible TEXTREL */
39716+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39717+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39718+#else
39719+ is_textrel_rw = false;
39720+ is_textrel_rx = false;
39721+#endif
39722+
39723+ /* possible RELRO */
39724+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
39725+
39726+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
39727+ return;
39728+
39729+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
39730+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
39731+
39732+#ifdef CONFIG_PAX_ETEXECRELOCS
39733+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39734+#else
39735+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
39736+#endif
39737+
39738+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39739+ !elf_check_arch(&elf_h) ||
39740+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
39741+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
39742+ return;
39743+
39744+ for (i = 0UL; i < elf_h.e_phnum; i++) {
39745+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
39746+ return;
39747+ switch (elf_p.p_type) {
39748+ case PT_DYNAMIC:
39749+ if (!is_textrel_rw && !is_textrel_rx)
39750+ continue;
39751+ i = 0UL;
39752+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
39753+ elf_dyn dyn;
39754+
39755+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
39756+ return;
39757+ if (dyn.d_tag == DT_NULL)
39758+ return;
39759+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
39760+ gr_log_textrel(vma);
39761+ if (is_textrel_rw)
39762+ vma->vm_flags |= VM_MAYWRITE;
39763+ else
39764+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
39765+ vma->vm_flags &= ~VM_MAYWRITE;
39766+ return;
39767+ }
39768+ i++;
39769+ }
39770+ return;
39771+
39772+ case PT_GNU_RELRO:
39773+ if (!is_relro)
39774+ continue;
39775+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
39776+ vma->vm_flags &= ~VM_MAYWRITE;
39777+ return;
39778+ }
39779+ }
39780+}
39781+#endif
39782+
39783 static int __init init_elf_binfmt(void)
39784 {
39785 return register_binfmt(&elf_format);
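The large block added to binfmt_elf.c above implements the PaX flag resolution for ELF binaries: EI_PAX bits in e_ident supply defaults, an explicit PT_PAX_FLAGS program header overrides them (softmode markings opt features in, hardmode markings opt features out), and a header that both enables and disables the same feature rejects the binary. The stand-alone model below captures that resolution order for a single feature; the PF_/MF_ values are illustrative placeholders, not the real definitions used by this patch.

#include <stdio.h>
#include <stdint.h>

#define PF_PAGEEXEC     0x1u            /* "enable" marking in p_flags */
#define PF_NOPAGEEXEC   0x2u            /* "disable" marking in p_flags */
#define MF_PAX_PAGEEXEC 0x1u            /* resulting per-mm flag */

static int parse_pt_pax(uint32_t p_flags, int softmode, uint32_t *mm_flags)
{
        if ((p_flags & PF_PAGEEXEC) && (p_flags & PF_NOPAGEEXEC))
                return -1;                              /* contradictory marking: reject */
        if (softmode) {
                if (p_flags & PF_PAGEEXEC)              /* softmode: off unless enabled */
                        *mm_flags |= MF_PAX_PAGEEXEC;
        } else {
                if (!(p_flags & PF_NOPAGEEXEC))         /* hardmode: on unless disabled */
                        *mm_flags |= MF_PAX_PAGEEXEC;
        }
        return 0;
}

int main(void)
{
        uint32_t mm_flags = 0;

        parse_pt_pax(0, 0, &mm_flags);                  /* hardmode, unmarked binary */
        printf("hardmode default: %s\n", (mm_flags & MF_PAX_PAGEEXEC) ? "on" : "off");

        mm_flags = 0;
        parse_pt_pax(0, 1, &mm_flags);                  /* softmode, unmarked binary */
        printf("softmode default: %s\n", (mm_flags & MF_PAX_PAGEEXEC) ? "on" : "off");

        if (parse_pt_pax(PF_PAGEEXEC | PF_NOPAGEEXEC, 0, &mm_flags) != 0)
                printf("contradictory PT_PAX_FLAGS: binary rejected\n");
        return 0;
}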
39786diff -urNp linux-2.6.32.45/fs/binfmt_flat.c linux-2.6.32.45/fs/binfmt_flat.c
39787--- linux-2.6.32.45/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
39788+++ linux-2.6.32.45/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
39789@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
39790 realdatastart = (unsigned long) -ENOMEM;
39791 printk("Unable to allocate RAM for process data, errno %d\n",
39792 (int)-realdatastart);
39793+ down_write(&current->mm->mmap_sem);
39794 do_munmap(current->mm, textpos, text_len);
39795+ up_write(&current->mm->mmap_sem);
39796 ret = realdatastart;
39797 goto err;
39798 }
39799@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
39800 }
39801 if (IS_ERR_VALUE(result)) {
39802 printk("Unable to read data+bss, errno %d\n", (int)-result);
39803+ down_write(&current->mm->mmap_sem);
39804 do_munmap(current->mm, textpos, text_len);
39805 do_munmap(current->mm, realdatastart, data_len + extra);
39806+ up_write(&current->mm->mmap_sem);
39807 ret = result;
39808 goto err;
39809 }
39810@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
39811 }
39812 if (IS_ERR_VALUE(result)) {
39813 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
39814+ down_write(&current->mm->mmap_sem);
39815 do_munmap(current->mm, textpos, text_len + data_len + extra +
39816 MAX_SHARED_LIBS * sizeof(unsigned long));
39817+ up_write(&current->mm->mmap_sem);
39818 ret = result;
39819 goto err;
39820 }
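The three binfmt_flat.c hunks fix the same omission on error paths: do_munmap() expects its caller to hold mmap_sem for writing, which the flat-loader cleanup code skipped. A minimal sketch of the locking rule against the 2.6.32 API; the helper name is illustrative.

#include <linux/mm.h>
#include <linux/sched.h>

static void example_unmap_on_error(struct mm_struct *mm,
                                   unsigned long start, size_t len)
{
        down_write(&mm->mmap_sem);      /* do_munmap() requires the write side */
        do_munmap(mm, start, len);
        up_write(&mm->mmap_sem);
}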
39821diff -urNp linux-2.6.32.45/fs/bio.c linux-2.6.32.45/fs/bio.c
39822--- linux-2.6.32.45/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
39823+++ linux-2.6.32.45/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
39824@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
39825
39826 i = 0;
39827 while (i < bio_slab_nr) {
39828- struct bio_slab *bslab = &bio_slabs[i];
39829+ bslab = &bio_slabs[i];
39830
39831 if (!bslab->slab && entry == -1)
39832 entry = i;
39833@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
39834 const int read = bio_data_dir(bio) == READ;
39835 struct bio_map_data *bmd = bio->bi_private;
39836 int i;
39837- char *p = bmd->sgvecs[0].iov_base;
39838+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
39839
39840 __bio_for_each_segment(bvec, bio, i, 0) {
39841 char *addr = page_address(bvec->bv_page);
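The first bio.c hunk drops a re-declaration of bslab inside the while loop that shadowed the outer variable declared earlier in the function; here the shadowing looks harmless, but the pattern silently splits one name across two objects. The small user-space program below shows how that can go wrong in the general case (GCC flags it with -Wshadow).

#include <stdio.h>

int main(void)
{
        const int table[] = { 3, 5, 7 };
        const int *found = NULL;
        int i;

        for (i = 0; i < 3; i++) {
                const int *found = &table[i];   /* shadows the outer 'found' */
                if (*found == 5)
                        break;
        }
        /* still NULL: the match assigned the inner variable, not this one */
        printf("found = %p\n", (void *)found);
        return 0;
}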
39842diff -urNp linux-2.6.32.45/fs/block_dev.c linux-2.6.32.45/fs/block_dev.c
39843--- linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:35:29.000000000 -0400
39844+++ linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:34:00.000000000 -0400
39845@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
39846 else if (bdev->bd_contains == bdev)
39847 res = 0; /* is a whole device which isn't held */
39848
39849- else if (bdev->bd_contains->bd_holder == bd_claim)
39850+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
39851 res = 0; /* is a partition of a device that is being partitioned */
39852 else if (bdev->bd_contains->bd_holder != NULL)
39853 res = -EBUSY; /* is a partition of a held device */
39854diff -urNp linux-2.6.32.45/fs/btrfs/ctree.c linux-2.6.32.45/fs/btrfs/ctree.c
39855--- linux-2.6.32.45/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
39856+++ linux-2.6.32.45/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
39857@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
39858 free_extent_buffer(buf);
39859 add_root_to_dirty_list(root);
39860 } else {
39861- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
39862- parent_start = parent->start;
39863- else
39864+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
39865+ if (parent)
39866+ parent_start = parent->start;
39867+ else
39868+ parent_start = 0;
39869+ } else
39870 parent_start = 0;
39871
39872 WARN_ON(trans->transid != btrfs_header_generation(parent));
39873@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
39874
39875 ret = 0;
39876 if (slot == 0) {
39877- struct btrfs_disk_key disk_key;
39878 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
39879 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
39880 }
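The first ctree.c hunk stops the reloc-tree branch from reading parent->start unconditionally and falls back to 0 when no parent buffer was passed in; the second drops an inner disk_key declaration that shadowed the one already in scope. A tiny self-contained sketch of the added guard, with an illustrative stand-in for the btrfs extent buffer type.

struct example_buf {
        unsigned long long start;       /* stand-in for extent_buffer->start */
};

/* mirrors the guard added above: only trust parent->start when a parent
 * buffer actually exists, otherwise keep the old default of 0 */
static unsigned long long example_parent_start(const struct example_buf *parent,
                                               int is_reloc_root)
{
        if (is_reloc_root && parent)
                return parent->start;
        return 0;
}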
39881diff -urNp linux-2.6.32.45/fs/btrfs/disk-io.c linux-2.6.32.45/fs/btrfs/disk-io.c
39882--- linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
39883+++ linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
39884@@ -39,7 +39,7 @@
39885 #include "tree-log.h"
39886 #include "free-space-cache.h"
39887
39888-static struct extent_io_ops btree_extent_io_ops;
39889+static const struct extent_io_ops btree_extent_io_ops;
39890 static void end_workqueue_fn(struct btrfs_work *work);
39891 static void free_fs_root(struct btrfs_root *root);
39892
39893@@ -2607,7 +2607,7 @@ out:
39894 return 0;
39895 }
39896
39897-static struct extent_io_ops btree_extent_io_ops = {
39898+static const struct extent_io_ops btree_extent_io_ops = {
39899 .write_cache_pages_lock_hook = btree_lock_page_hook,
39900 .readpage_end_io_hook = btree_readpage_end_io_hook,
39901 .submit_bio_hook = btree_submit_bio_hook,
39902diff -urNp linux-2.6.32.45/fs/btrfs/extent_io.h linux-2.6.32.45/fs/btrfs/extent_io.h
39903--- linux-2.6.32.45/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
39904+++ linux-2.6.32.45/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
39905@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
39906 struct bio *bio, int mirror_num,
39907 unsigned long bio_flags);
39908 struct extent_io_ops {
39909- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
39910+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
39911 u64 start, u64 end, int *page_started,
39912 unsigned long *nr_written);
39913- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
39914- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
39915+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
39916+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
39917 extent_submit_bio_hook_t *submit_bio_hook;
39918- int (*merge_bio_hook)(struct page *page, unsigned long offset,
39919+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
39920 size_t size, struct bio *bio,
39921 unsigned long bio_flags);
39922- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
39923- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
39924+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
39925+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
39926 u64 start, u64 end,
39927 struct extent_state *state);
39928- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
39929+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
39930 u64 start, u64 end,
39931 struct extent_state *state);
39932- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39933+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39934 struct extent_state *state);
39935- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39936+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39937 struct extent_state *state, int uptodate);
39938- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
39939+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
39940 unsigned long old, unsigned long bits);
39941- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
39942+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
39943 unsigned long bits);
39944- int (*merge_extent_hook)(struct inode *inode,
39945+ int (* const merge_extent_hook)(struct inode *inode,
39946 struct extent_state *new,
39947 struct extent_state *other);
39948- int (*split_extent_hook)(struct inode *inode,
39949+ int (* const split_extent_hook)(struct inode *inode,
39950 struct extent_state *orig, u64 split);
39951- int (*write_cache_pages_lock_hook)(struct page *page);
39952+ int (* const write_cache_pages_lock_hook)(struct page *page);
39953 };
39954
39955 struct extent_io_tree {
39956@@ -88,7 +88,7 @@ struct extent_io_tree {
39957 u64 dirty_bytes;
39958 spinlock_t lock;
39959 spinlock_t buffer_lock;
39960- struct extent_io_ops *ops;
39961+ const struct extent_io_ops *ops;
39962 };
39963
39964 struct extent_state {
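The extent_io.h hunk above, together with the disk-io.c and inode.c changes, applies one pattern: every member of struct extent_io_ops becomes a const function pointer and every instance of the table becomes const, so the compiler can place these dispatch tables in a read-only section where their hooks cannot be silently redirected at run time. A minimal sketch of the same constification idea, using invented names rather than btrfs code:

/* Illustration only: a hypothetical ops table, not part of the patch. */
struct foo_ops {
	int (* const open)(int id);	/* const members: set once, at build time */
	int (* const close)(int id);
};

static int foo_open(int id)  { return id; }
static int foo_close(int id) { return -id; }

/* "static const" lets the whole table be placed in .rodata. */
static const struct foo_ops default_foo_ops = {
	.open  = foo_open,
	.close = foo_close,
};

struct foo_dev {
	const struct foo_ops *ops;	/* users hold a pointer-to-const */
};

static int foo_dev_open(struct foo_dev *dev, int id)
{
	return dev->ops->open(id);
}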
39965diff -urNp linux-2.6.32.45/fs/btrfs/extent-tree.c linux-2.6.32.45/fs/btrfs/extent-tree.c
39966--- linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
39967+++ linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
39968@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
39969 u64 group_start = group->key.objectid;
39970 new_extents = kmalloc(sizeof(*new_extents),
39971 GFP_NOFS);
39972+ if (!new_extents) {
39973+ ret = -ENOMEM;
39974+ goto out;
39975+ }
39976 nr_extents = 1;
39977 ret = get_new_locations(reloc_inode,
39978 extent_key,
39979diff -urNp linux-2.6.32.45/fs/btrfs/free-space-cache.c linux-2.6.32.45/fs/btrfs/free-space-cache.c
39980--- linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
39981+++ linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
39982@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
39983
39984 while(1) {
39985 if (entry->bytes < bytes || entry->offset < min_start) {
39986- struct rb_node *node;
39987-
39988 node = rb_next(&entry->offset_index);
39989 if (!node)
39990 break;
39991@@ -1226,7 +1224,7 @@ again:
39992 */
39993 while (entry->bitmap || found_bitmap ||
39994 (!entry->bitmap && entry->bytes < min_bytes)) {
39995- struct rb_node *node = rb_next(&entry->offset_index);
39996+ node = rb_next(&entry->offset_index);
39997
39998 if (entry->bitmap && entry->bytes > bytes + empty_size) {
39999 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
40000diff -urNp linux-2.6.32.45/fs/btrfs/inode.c linux-2.6.32.45/fs/btrfs/inode.c
40001--- linux-2.6.32.45/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40002+++ linux-2.6.32.45/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
40003@@ -63,7 +63,7 @@ static const struct inode_operations btr
40004 static const struct address_space_operations btrfs_aops;
40005 static const struct address_space_operations btrfs_symlink_aops;
40006 static const struct file_operations btrfs_dir_file_operations;
40007-static struct extent_io_ops btrfs_extent_io_ops;
40008+static const struct extent_io_ops btrfs_extent_io_ops;
40009
40010 static struct kmem_cache *btrfs_inode_cachep;
40011 struct kmem_cache *btrfs_trans_handle_cachep;
40012@@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
40013 1, 0, NULL, GFP_NOFS);
40014 while (start < end) {
40015 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
40016+ BUG_ON(!async_cow);
40017 async_cow->inode = inode;
40018 async_cow->root = root;
40019 async_cow->locked_page = locked_page;
40020@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
40021 inline_size = btrfs_file_extent_inline_item_len(leaf,
40022 btrfs_item_nr(leaf, path->slots[0]));
40023 tmp = kmalloc(inline_size, GFP_NOFS);
40024+ if (!tmp)
40025+ return -ENOMEM;
40026 ptr = btrfs_file_extent_inline_start(item);
40027
40028 read_extent_buffer(leaf, tmp, ptr, inline_size);
40029@@ -5410,7 +5413,7 @@ fail:
40030 return -ENOMEM;
40031 }
40032
40033-static int btrfs_getattr(struct vfsmount *mnt,
40034+int btrfs_getattr(struct vfsmount *mnt,
40035 struct dentry *dentry, struct kstat *stat)
40036 {
40037 struct inode *inode = dentry->d_inode;
40038@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
40039 return 0;
40040 }
40041
40042+EXPORT_SYMBOL(btrfs_getattr);
40043+
40044+dev_t get_btrfs_dev_from_inode(struct inode *inode)
40045+{
40046+ return BTRFS_I(inode)->root->anon_super.s_dev;
40047+}
40048+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40049+
40050 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
40051 struct inode *new_dir, struct dentry *new_dentry)
40052 {
40053@@ -5972,7 +5983,7 @@ static const struct file_operations btrf
40054 .fsync = btrfs_sync_file,
40055 };
40056
40057-static struct extent_io_ops btrfs_extent_io_ops = {
40058+static const struct extent_io_ops btrfs_extent_io_ops = {
40059 .fill_delalloc = run_delalloc_range,
40060 .submit_bio_hook = btrfs_submit_bio_hook,
40061 .merge_bio_hook = btrfs_merge_bio_hook,
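Besides the constification, the extent-tree.c hunk and two inode.c hunks above harden unchecked kmalloc() calls: uncompress_inline() and relocate_one_extent() now fail with -ENOMEM when a buffer cannot be allocated, while cow_file_range_async() at least turns a silent NULL dereference into an explicit BUG_ON(). The shape of the fix, sketched with invented names:

#include <linux/slab.h>
#include <linux/errno.h>

/* Sketch only, not btrfs code. */
static int demo_read_inline(size_t inline_size)
{
	char *tmp = kmalloc(inline_size, GFP_NOFS);

	if (!tmp)		/* propagate the failure instead of oopsing */
		return -ENOMEM;

	/* ... decompress into tmp ... */
	kfree(tmp);
	return 0;
}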
40062diff -urNp linux-2.6.32.45/fs/btrfs/relocation.c linux-2.6.32.45/fs/btrfs/relocation.c
40063--- linux-2.6.32.45/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
40064+++ linux-2.6.32.45/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
40065@@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
40066 }
40067 spin_unlock(&rc->reloc_root_tree.lock);
40068
40069- BUG_ON((struct btrfs_root *)node->data != root);
40070+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40071
40072 if (!del) {
40073 spin_lock(&rc->reloc_root_tree.lock);
40074diff -urNp linux-2.6.32.45/fs/btrfs/sysfs.c linux-2.6.32.45/fs/btrfs/sysfs.c
40075--- linux-2.6.32.45/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
40076+++ linux-2.6.32.45/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
40077@@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
40078 complete(&root->kobj_unregister);
40079 }
40080
40081-static struct sysfs_ops btrfs_super_attr_ops = {
40082+static const struct sysfs_ops btrfs_super_attr_ops = {
40083 .show = btrfs_super_attr_show,
40084 .store = btrfs_super_attr_store,
40085 };
40086
40087-static struct sysfs_ops btrfs_root_attr_ops = {
40088+static const struct sysfs_ops btrfs_root_attr_ops = {
40089 .show = btrfs_root_attr_show,
40090 .store = btrfs_root_attr_store,
40091 };
40092diff -urNp linux-2.6.32.45/fs/buffer.c linux-2.6.32.45/fs/buffer.c
40093--- linux-2.6.32.45/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
40094+++ linux-2.6.32.45/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
40095@@ -25,6 +25,7 @@
40096 #include <linux/percpu.h>
40097 #include <linux/slab.h>
40098 #include <linux/capability.h>
40099+#include <linux/security.h>
40100 #include <linux/blkdev.h>
40101 #include <linux/file.h>
40102 #include <linux/quotaops.h>
40103diff -urNp linux-2.6.32.45/fs/cachefiles/bind.c linux-2.6.32.45/fs/cachefiles/bind.c
40104--- linux-2.6.32.45/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
40105+++ linux-2.6.32.45/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
40106@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40107 args);
40108
40109 /* start by checking things over */
40110- ASSERT(cache->fstop_percent >= 0 &&
40111- cache->fstop_percent < cache->fcull_percent &&
40112+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40113 cache->fcull_percent < cache->frun_percent &&
40114 cache->frun_percent < 100);
40115
40116- ASSERT(cache->bstop_percent >= 0 &&
40117- cache->bstop_percent < cache->bcull_percent &&
40118+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40119 cache->bcull_percent < cache->brun_percent &&
40120 cache->brun_percent < 100);
40121
40122diff -urNp linux-2.6.32.45/fs/cachefiles/daemon.c linux-2.6.32.45/fs/cachefiles/daemon.c
40123--- linux-2.6.32.45/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
40124+++ linux-2.6.32.45/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
40125@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
40126 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40127 return -EIO;
40128
40129- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40130+ if (datalen > PAGE_SIZE - 1)
40131 return -EOPNOTSUPP;
40132
40133 /* drag the command string into the kernel so we can parse it */
40134@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
40135 if (args[0] != '%' || args[1] != '\0')
40136 return -EINVAL;
40137
40138- if (fstop < 0 || fstop >= cache->fcull_percent)
40139+ if (fstop >= cache->fcull_percent)
40140 return cachefiles_daemon_range_error(cache, args);
40141
40142 cache->fstop_percent = fstop;
40143@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
40144 if (args[0] != '%' || args[1] != '\0')
40145 return -EINVAL;
40146
40147- if (bstop < 0 || bstop >= cache->bcull_percent)
40148+ if (bstop >= cache->bcull_percent)
40149 return cachefiles_daemon_range_error(cache, args);
40150
40151 cache->bstop_percent = bstop;
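The cachefiles hunks above drop tests such as datalen < 0, fstop < 0 and fstop_percent >= 0: the lengths and percentage fields involved are unsigned (see the unsigned *_percent members in internal.h just below), so those comparisons have a constant result and merely provoke compiler warnings; only the upper-bound checks are kept, because only they can ever trigger. In isolation:

#include <linux/errno.h>

/* Sketch: with an unsigned argument, "percent < 0" is always false. */
static int demo_set_percent(unsigned int percent, unsigned int limit)
{
	if (percent >= limit)	/* the only range check that can actually fire */
		return -ERANGE;
	return 0;
}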
40152diff -urNp linux-2.6.32.45/fs/cachefiles/internal.h linux-2.6.32.45/fs/cachefiles/internal.h
40153--- linux-2.6.32.45/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
40154+++ linux-2.6.32.45/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
40155@@ -56,7 +56,7 @@ struct cachefiles_cache {
40156 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40157 struct rb_root active_nodes; /* active nodes (can't be culled) */
40158 rwlock_t active_lock; /* lock for active_nodes */
40159- atomic_t gravecounter; /* graveyard uniquifier */
40160+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40161 unsigned frun_percent; /* when to stop culling (% files) */
40162 unsigned fcull_percent; /* when to start culling (% files) */
40163 unsigned fstop_percent; /* when to stop allocating (% files) */
40164@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
40165 * proc.c
40166 */
40167 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40168-extern atomic_t cachefiles_lookup_histogram[HZ];
40169-extern atomic_t cachefiles_mkdir_histogram[HZ];
40170-extern atomic_t cachefiles_create_histogram[HZ];
40171+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40172+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40173+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40174
40175 extern int __init cachefiles_proc_init(void);
40176 extern void cachefiles_proc_cleanup(void);
40177 static inline
40178-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40179+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40180 {
40181 unsigned long jif = jiffies - start_jif;
40182 if (jif >= HZ)
40183 jif = HZ - 1;
40184- atomic_inc(&histogram[jif]);
40185+ atomic_inc_unchecked(&histogram[jif]);
40186 }
40187
40188 #else
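The gravecounter and histogram conversions above follow the PaX REFCOUNT convention used throughout this patch: under that feature, plain atomic_t operations gain overflow detection (pax_report_refcount_overflow() in the fs/exec.c hunk further down is the reporting side), so counters that are purely statistical, where wrap-around is harmless, are moved to atomic_unchecked_t and its *_unchecked accessors to opt out of the check. A rough sketch of the trivial fallback when the checking feature is disabled; the real definitions are supplied by the arch headers elsewhere in the patch:

#include <asm/atomic.h>

/* Hypothetical fallback, illustration only: with overflow checking off,
 * the "unchecked" flavour is simply the ordinary atomic_t. */
typedef atomic_t atomic_unchecked_t;

#define atomic_read_unchecked(v)		atomic_read(v)
#define atomic_set_unchecked(v, i)		atomic_set((v), (i))
#define atomic_inc_unchecked(v)			atomic_inc(v)
#define atomic_inc_return_unchecked(v)		atomic_inc_return(v)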
40189diff -urNp linux-2.6.32.45/fs/cachefiles/namei.c linux-2.6.32.45/fs/cachefiles/namei.c
40190--- linux-2.6.32.45/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
40191+++ linux-2.6.32.45/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
40192@@ -250,7 +250,7 @@ try_again:
40193 /* first step is to make up a grave dentry in the graveyard */
40194 sprintf(nbuffer, "%08x%08x",
40195 (uint32_t) get_seconds(),
40196- (uint32_t) atomic_inc_return(&cache->gravecounter));
40197+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40198
40199 /* do the multiway lock magic */
40200 trap = lock_rename(cache->graveyard, dir);
40201diff -urNp linux-2.6.32.45/fs/cachefiles/proc.c linux-2.6.32.45/fs/cachefiles/proc.c
40202--- linux-2.6.32.45/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
40203+++ linux-2.6.32.45/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
40204@@ -14,9 +14,9 @@
40205 #include <linux/seq_file.h>
40206 #include "internal.h"
40207
40208-atomic_t cachefiles_lookup_histogram[HZ];
40209-atomic_t cachefiles_mkdir_histogram[HZ];
40210-atomic_t cachefiles_create_histogram[HZ];
40211+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40212+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40213+atomic_unchecked_t cachefiles_create_histogram[HZ];
40214
40215 /*
40216 * display the latency histogram
40217@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40218 return 0;
40219 default:
40220 index = (unsigned long) v - 3;
40221- x = atomic_read(&cachefiles_lookup_histogram[index]);
40222- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40223- z = atomic_read(&cachefiles_create_histogram[index]);
40224+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40225+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40226+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40227 if (x == 0 && y == 0 && z == 0)
40228 return 0;
40229
40230diff -urNp linux-2.6.32.45/fs/cachefiles/rdwr.c linux-2.6.32.45/fs/cachefiles/rdwr.c
40231--- linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
40232+++ linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
40233@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
40234 old_fs = get_fs();
40235 set_fs(KERNEL_DS);
40236 ret = file->f_op->write(
40237- file, (const void __user *) data, len, &pos);
40238+ file, (__force const void __user *) data, len, &pos);
40239 set_fs(old_fs);
40240 kunmap(page);
40241 if (ret != len)
40242diff -urNp linux-2.6.32.45/fs/cifs/cifs_debug.c linux-2.6.32.45/fs/cifs/cifs_debug.c
40243--- linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
40244+++ linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
40245@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
40246 tcon = list_entry(tmp3,
40247 struct cifsTconInfo,
40248 tcon_list);
40249- atomic_set(&tcon->num_smbs_sent, 0);
40250- atomic_set(&tcon->num_writes, 0);
40251- atomic_set(&tcon->num_reads, 0);
40252- atomic_set(&tcon->num_oplock_brks, 0);
40253- atomic_set(&tcon->num_opens, 0);
40254- atomic_set(&tcon->num_posixopens, 0);
40255- atomic_set(&tcon->num_posixmkdirs, 0);
40256- atomic_set(&tcon->num_closes, 0);
40257- atomic_set(&tcon->num_deletes, 0);
40258- atomic_set(&tcon->num_mkdirs, 0);
40259- atomic_set(&tcon->num_rmdirs, 0);
40260- atomic_set(&tcon->num_renames, 0);
40261- atomic_set(&tcon->num_t2renames, 0);
40262- atomic_set(&tcon->num_ffirst, 0);
40263- atomic_set(&tcon->num_fnext, 0);
40264- atomic_set(&tcon->num_fclose, 0);
40265- atomic_set(&tcon->num_hardlinks, 0);
40266- atomic_set(&tcon->num_symlinks, 0);
40267- atomic_set(&tcon->num_locks, 0);
40268+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40269+ atomic_set_unchecked(&tcon->num_writes, 0);
40270+ atomic_set_unchecked(&tcon->num_reads, 0);
40271+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40272+ atomic_set_unchecked(&tcon->num_opens, 0);
40273+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40274+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40275+ atomic_set_unchecked(&tcon->num_closes, 0);
40276+ atomic_set_unchecked(&tcon->num_deletes, 0);
40277+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40278+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40279+ atomic_set_unchecked(&tcon->num_renames, 0);
40280+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40281+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40282+ atomic_set_unchecked(&tcon->num_fnext, 0);
40283+ atomic_set_unchecked(&tcon->num_fclose, 0);
40284+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40285+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40286+ atomic_set_unchecked(&tcon->num_locks, 0);
40287 }
40288 }
40289 }
40290@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
40291 if (tcon->need_reconnect)
40292 seq_puts(m, "\tDISCONNECTED ");
40293 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40294- atomic_read(&tcon->num_smbs_sent),
40295- atomic_read(&tcon->num_oplock_brks));
40296+ atomic_read_unchecked(&tcon->num_smbs_sent),
40297+ atomic_read_unchecked(&tcon->num_oplock_brks));
40298 seq_printf(m, "\nReads: %d Bytes: %lld",
40299- atomic_read(&tcon->num_reads),
40300+ atomic_read_unchecked(&tcon->num_reads),
40301 (long long)(tcon->bytes_read));
40302 seq_printf(m, "\nWrites: %d Bytes: %lld",
40303- atomic_read(&tcon->num_writes),
40304+ atomic_read_unchecked(&tcon->num_writes),
40305 (long long)(tcon->bytes_written));
40306 seq_printf(m, "\nFlushes: %d",
40307- atomic_read(&tcon->num_flushes));
40308+ atomic_read_unchecked(&tcon->num_flushes));
40309 seq_printf(m, "\nLocks: %d HardLinks: %d "
40310 "Symlinks: %d",
40311- atomic_read(&tcon->num_locks),
40312- atomic_read(&tcon->num_hardlinks),
40313- atomic_read(&tcon->num_symlinks));
40314+ atomic_read_unchecked(&tcon->num_locks),
40315+ atomic_read_unchecked(&tcon->num_hardlinks),
40316+ atomic_read_unchecked(&tcon->num_symlinks));
40317 seq_printf(m, "\nOpens: %d Closes: %d "
40318 "Deletes: %d",
40319- atomic_read(&tcon->num_opens),
40320- atomic_read(&tcon->num_closes),
40321- atomic_read(&tcon->num_deletes));
40322+ atomic_read_unchecked(&tcon->num_opens),
40323+ atomic_read_unchecked(&tcon->num_closes),
40324+ atomic_read_unchecked(&tcon->num_deletes));
40325 seq_printf(m, "\nPosix Opens: %d "
40326 "Posix Mkdirs: %d",
40327- atomic_read(&tcon->num_posixopens),
40328- atomic_read(&tcon->num_posixmkdirs));
40329+ atomic_read_unchecked(&tcon->num_posixopens),
40330+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40331 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40332- atomic_read(&tcon->num_mkdirs),
40333- atomic_read(&tcon->num_rmdirs));
40334+ atomic_read_unchecked(&tcon->num_mkdirs),
40335+ atomic_read_unchecked(&tcon->num_rmdirs));
40336 seq_printf(m, "\nRenames: %d T2 Renames %d",
40337- atomic_read(&tcon->num_renames),
40338- atomic_read(&tcon->num_t2renames));
40339+ atomic_read_unchecked(&tcon->num_renames),
40340+ atomic_read_unchecked(&tcon->num_t2renames));
40341 seq_printf(m, "\nFindFirst: %d FNext %d "
40342 "FClose %d",
40343- atomic_read(&tcon->num_ffirst),
40344- atomic_read(&tcon->num_fnext),
40345- atomic_read(&tcon->num_fclose));
40346+ atomic_read_unchecked(&tcon->num_ffirst),
40347+ atomic_read_unchecked(&tcon->num_fnext),
40348+ atomic_read_unchecked(&tcon->num_fclose));
40349 }
40350 }
40351 }
40352diff -urNp linux-2.6.32.45/fs/cifs/cifsglob.h linux-2.6.32.45/fs/cifs/cifsglob.h
40353--- linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:35:29.000000000 -0400
40354+++ linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:34:00.000000000 -0400
40355@@ -252,28 +252,28 @@ struct cifsTconInfo {
40356 __u16 Flags; /* optional support bits */
40357 enum statusEnum tidStatus;
40358 #ifdef CONFIG_CIFS_STATS
40359- atomic_t num_smbs_sent;
40360- atomic_t num_writes;
40361- atomic_t num_reads;
40362- atomic_t num_flushes;
40363- atomic_t num_oplock_brks;
40364- atomic_t num_opens;
40365- atomic_t num_closes;
40366- atomic_t num_deletes;
40367- atomic_t num_mkdirs;
40368- atomic_t num_posixopens;
40369- atomic_t num_posixmkdirs;
40370- atomic_t num_rmdirs;
40371- atomic_t num_renames;
40372- atomic_t num_t2renames;
40373- atomic_t num_ffirst;
40374- atomic_t num_fnext;
40375- atomic_t num_fclose;
40376- atomic_t num_hardlinks;
40377- atomic_t num_symlinks;
40378- atomic_t num_locks;
40379- atomic_t num_acl_get;
40380- atomic_t num_acl_set;
40381+ atomic_unchecked_t num_smbs_sent;
40382+ atomic_unchecked_t num_writes;
40383+ atomic_unchecked_t num_reads;
40384+ atomic_unchecked_t num_flushes;
40385+ atomic_unchecked_t num_oplock_brks;
40386+ atomic_unchecked_t num_opens;
40387+ atomic_unchecked_t num_closes;
40388+ atomic_unchecked_t num_deletes;
40389+ atomic_unchecked_t num_mkdirs;
40390+ atomic_unchecked_t num_posixopens;
40391+ atomic_unchecked_t num_posixmkdirs;
40392+ atomic_unchecked_t num_rmdirs;
40393+ atomic_unchecked_t num_renames;
40394+ atomic_unchecked_t num_t2renames;
40395+ atomic_unchecked_t num_ffirst;
40396+ atomic_unchecked_t num_fnext;
40397+ atomic_unchecked_t num_fclose;
40398+ atomic_unchecked_t num_hardlinks;
40399+ atomic_unchecked_t num_symlinks;
40400+ atomic_unchecked_t num_locks;
40401+ atomic_unchecked_t num_acl_get;
40402+ atomic_unchecked_t num_acl_set;
40403 #ifdef CONFIG_CIFS_STATS2
40404 unsigned long long time_writes;
40405 unsigned long long time_reads;
40406@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
40407 }
40408
40409 #ifdef CONFIG_CIFS_STATS
40410-#define cifs_stats_inc atomic_inc
40411+#define cifs_stats_inc atomic_inc_unchecked
40412
40413 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
40414 unsigned int bytes)
40415diff -urNp linux-2.6.32.45/fs/cifs/link.c linux-2.6.32.45/fs/cifs/link.c
40416--- linux-2.6.32.45/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
40417+++ linux-2.6.32.45/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
40418@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
40419
40420 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40421 {
40422- char *p = nd_get_link(nd);
40423+ const char *p = nd_get_link(nd);
40424 if (!IS_ERR(p))
40425 kfree(p);
40426 }
40427diff -urNp linux-2.6.32.45/fs/coda/cache.c linux-2.6.32.45/fs/coda/cache.c
40428--- linux-2.6.32.45/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
40429+++ linux-2.6.32.45/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
40430@@ -24,14 +24,14 @@
40431 #include <linux/coda_fs_i.h>
40432 #include <linux/coda_cache.h>
40433
40434-static atomic_t permission_epoch = ATOMIC_INIT(0);
40435+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40436
40437 /* replace or extend an acl cache hit */
40438 void coda_cache_enter(struct inode *inode, int mask)
40439 {
40440 struct coda_inode_info *cii = ITOC(inode);
40441
40442- cii->c_cached_epoch = atomic_read(&permission_epoch);
40443+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40444 if (cii->c_uid != current_fsuid()) {
40445 cii->c_uid = current_fsuid();
40446 cii->c_cached_perm = mask;
40447@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
40448 void coda_cache_clear_inode(struct inode *inode)
40449 {
40450 struct coda_inode_info *cii = ITOC(inode);
40451- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40452+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40453 }
40454
40455 /* remove all acl caches */
40456 void coda_cache_clear_all(struct super_block *sb)
40457 {
40458- atomic_inc(&permission_epoch);
40459+ atomic_inc_unchecked(&permission_epoch);
40460 }
40461
40462
40463@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
40464
40465 hit = (mask & cii->c_cached_perm) == mask &&
40466 cii->c_uid == current_fsuid() &&
40467- cii->c_cached_epoch == atomic_read(&permission_epoch);
40468+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40469
40470 return hit;
40471 }
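The coda change keeps the existing epoch trick, where each cached permission entry records the global permission_epoch and coda_cache_clear_all() invalidates every entry at once simply by bumping that counter, and only switches the counter to atomic_unchecked_t, since a wrapped epoch costs nothing worse than an extra cache miss. The invalidation pattern in isolation, with invented names:

#include <linux/types.h>
#include <asm/atomic.h>

/* Sketch of epoch-based cache invalidation; not coda code. */
static atomic_t cache_epoch = ATOMIC_INIT(0);

struct cached_perm {
	int epoch;
	int mask;
};

static void cache_store(struct cached_perm *c, int mask)
{
	c->mask  = mask;
	c->epoch = atomic_read(&cache_epoch);
}

static int cache_valid(const struct cached_perm *c, int mask)
{
	return (c->mask & mask) == mask &&
	       c->epoch == atomic_read(&cache_epoch);
}

static void cache_invalidate_all(void)
{
	atomic_inc(&cache_epoch);	/* every cached entry is now stale */
}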
40472diff -urNp linux-2.6.32.45/fs/compat_binfmt_elf.c linux-2.6.32.45/fs/compat_binfmt_elf.c
40473--- linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
40474+++ linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
40475@@ -29,10 +29,12 @@
40476 #undef elfhdr
40477 #undef elf_phdr
40478 #undef elf_note
40479+#undef elf_dyn
40480 #undef elf_addr_t
40481 #define elfhdr elf32_hdr
40482 #define elf_phdr elf32_phdr
40483 #define elf_note elf32_note
40484+#define elf_dyn Elf32_Dyn
40485 #define elf_addr_t Elf32_Addr
40486
40487 /*
40488diff -urNp linux-2.6.32.45/fs/compat.c linux-2.6.32.45/fs/compat.c
40489--- linux-2.6.32.45/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
40490+++ linux-2.6.32.45/fs/compat.c 2011-08-11 19:56:56.000000000 -0400
40491@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
40492
40493 struct compat_readdir_callback {
40494 struct compat_old_linux_dirent __user *dirent;
40495+ struct file * file;
40496 int result;
40497 };
40498
40499@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
40500 buf->result = -EOVERFLOW;
40501 return -EOVERFLOW;
40502 }
40503+
40504+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40505+ return 0;
40506+
40507 buf->result++;
40508 dirent = buf->dirent;
40509 if (!access_ok(VERIFY_WRITE, dirent,
40510@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
40511
40512 buf.result = 0;
40513 buf.dirent = dirent;
40514+ buf.file = file;
40515
40516 error = vfs_readdir(file, compat_fillonedir, &buf);
40517 if (buf.result)
40518@@ -899,6 +905,7 @@ struct compat_linux_dirent {
40519 struct compat_getdents_callback {
40520 struct compat_linux_dirent __user *current_dir;
40521 struct compat_linux_dirent __user *previous;
40522+ struct file * file;
40523 int count;
40524 int error;
40525 };
40526@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
40527 buf->error = -EOVERFLOW;
40528 return -EOVERFLOW;
40529 }
40530+
40531+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40532+ return 0;
40533+
40534 dirent = buf->previous;
40535 if (dirent) {
40536 if (__put_user(offset, &dirent->d_off))
40537@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
40538 buf.previous = NULL;
40539 buf.count = count;
40540 buf.error = 0;
40541+ buf.file = file;
40542
40543 error = vfs_readdir(file, compat_filldir, &buf);
40544 if (error >= 0)
40545@@ -987,6 +999,7 @@ out:
40546 struct compat_getdents_callback64 {
40547 struct linux_dirent64 __user *current_dir;
40548 struct linux_dirent64 __user *previous;
40549+ struct file * file;
40550 int count;
40551 int error;
40552 };
40553@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
40554 buf->error = -EINVAL; /* only used if we fail.. */
40555 if (reclen > buf->count)
40556 return -EINVAL;
40557+
40558+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40559+ return 0;
40560+
40561 dirent = buf->previous;
40562
40563 if (dirent) {
40564@@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
40565 buf.previous = NULL;
40566 buf.count = count;
40567 buf.error = 0;
40568+ buf.file = file;
40569
40570 error = vfs_readdir(file, compat_filldir64, &buf);
40571 if (error >= 0)
40572@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
40573 * verify all the pointers
40574 */
40575 ret = -EINVAL;
40576- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
40577+ if (nr_segs > UIO_MAXIOV)
40578 goto out;
40579 if (!file->f_op)
40580 goto out;
40581@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
40582 compat_uptr_t __user *envp,
40583 struct pt_regs * regs)
40584 {
40585+#ifdef CONFIG_GRKERNSEC
40586+ struct file *old_exec_file;
40587+ struct acl_subject_label *old_acl;
40588+ struct rlimit old_rlim[RLIM_NLIMITS];
40589+#endif
40590 struct linux_binprm *bprm;
40591 struct file *file;
40592 struct files_struct *displaced;
40593 bool clear_in_exec;
40594 int retval;
40595+ const struct cred *cred = current_cred();
40596+
40597+ /*
40598+ * We move the actual failure in case of RLIMIT_NPROC excess from
40599+ * set*uid() to execve() because too many poorly written programs
40600+ * don't check setuid() return code. Here we additionally recheck
40601+ * whether NPROC limit is still exceeded.
40602+ */
40603+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40604+
40605+ if ((current->flags & PF_NPROC_EXCEEDED) &&
40606+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40607+ retval = -EAGAIN;
40608+ goto out_ret;
40609+ }
40610+
40611+ /* We're below the limit (still or again), so we don't want to make
40612+ * further execve() calls fail. */
40613+ current->flags &= ~PF_NPROC_EXCEEDED;
40614
40615 retval = unshare_files(&displaced);
40616 if (retval)
40617@@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
40618 bprm->filename = filename;
40619 bprm->interp = filename;
40620
40621+ if (gr_process_user_ban()) {
40622+ retval = -EPERM;
40623+ goto out_file;
40624+ }
40625+
40626+ retval = -EACCES;
40627+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
40628+ goto out_file;
40629+
40630 retval = bprm_mm_init(bprm);
40631 if (retval)
40632 goto out_file;
40633@@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
40634 if (retval < 0)
40635 goto out;
40636
40637+ if (!gr_tpe_allow(file)) {
40638+ retval = -EACCES;
40639+ goto out;
40640+ }
40641+
40642+ if (gr_check_crash_exec(file)) {
40643+ retval = -EACCES;
40644+ goto out;
40645+ }
40646+
40647+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40648+
40649+ gr_handle_exec_args_compat(bprm, argv);
40650+
40651+#ifdef CONFIG_GRKERNSEC
40652+ old_acl = current->acl;
40653+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40654+ old_exec_file = current->exec_file;
40655+ get_file(file);
40656+ current->exec_file = file;
40657+#endif
40658+
40659+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40660+ bprm->unsafe & LSM_UNSAFE_SHARE);
40661+ if (retval < 0)
40662+ goto out_fail;
40663+
40664 retval = search_binary_handler(bprm, regs);
40665 if (retval < 0)
40666- goto out;
40667+ goto out_fail;
40668+#ifdef CONFIG_GRKERNSEC
40669+ if (old_exec_file)
40670+ fput(old_exec_file);
40671+#endif
40672
40673 /* execve succeeded */
40674 current->fs->in_exec = 0;
40675@@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
40676 put_files_struct(displaced);
40677 return retval;
40678
40679+out_fail:
40680+#ifdef CONFIG_GRKERNSEC
40681+ current->acl = old_acl;
40682+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40683+ fput(current->exec_file);
40684+ current->exec_file = old_exec_file;
40685+#endif
40686+
40687 out:
40688 if (bprm->mm) {
40689 acct_arg_size(bprm, 0);
40690@@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat
40691 struct fdtable *fdt;
40692 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40693
40694+ pax_track_stack();
40695+
40696 if (n < 0)
40697 goto out_nofds;
40698
40699diff -urNp linux-2.6.32.45/fs/compat_ioctl.c linux-2.6.32.45/fs/compat_ioctl.c
40700--- linux-2.6.32.45/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
40701+++ linux-2.6.32.45/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
40702@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
40703 up = (struct compat_video_spu_palette __user *) arg;
40704 err = get_user(palp, &up->palette);
40705 err |= get_user(length, &up->length);
40706+ if (err)
40707+ return -EFAULT;
40708
40709 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40710 err = put_user(compat_ptr(palp), &up_native->palette);
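The compat_ioctl.c hunk adds the missing error check after the two get_user() calls: err was accumulated but not tested at this point, so a faulting user pointer could let uninitialized palp and length flow onward; now the function bails out with -EFAULT as soon as either copy fails. The idiom, with a hypothetical argument structure:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Sketch only; demo_args is invented, not the video_spu_palette above. */
struct demo_args {
	int a;
	int b;
};

static int demo_fetch(struct demo_args __user *up, int *a, int *b)
{
	int err;

	err  = get_user(*a, &up->a);
	err |= get_user(*b, &up->b);
	if (err)		/* bail out before trusting *a and *b */
		return -EFAULT;

	return 0;
}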
40711diff -urNp linux-2.6.32.45/fs/configfs/dir.c linux-2.6.32.45/fs/configfs/dir.c
40712--- linux-2.6.32.45/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40713+++ linux-2.6.32.45/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
40714@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
40715 }
40716 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40717 struct configfs_dirent *next;
40718- const char * name;
40719+ const unsigned char * name;
40720+ char d_name[sizeof(next->s_dentry->d_iname)];
40721 int len;
40722
40723 next = list_entry(p, struct configfs_dirent,
40724@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
40725 continue;
40726
40727 name = configfs_get_name(next);
40728- len = strlen(name);
40729+ if (next->s_dentry && name == next->s_dentry->d_iname) {
40730+ len = next->s_dentry->d_name.len;
40731+ memcpy(d_name, name, len);
40732+ name = d_name;
40733+ } else
40734+ len = strlen(name);
40735 if (next->s_dentry)
40736 ino = next->s_dentry->d_inode->i_ino;
40737 else
40738diff -urNp linux-2.6.32.45/fs/dcache.c linux-2.6.32.45/fs/dcache.c
40739--- linux-2.6.32.45/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
40740+++ linux-2.6.32.45/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
40741@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
40742
40743 static struct kmem_cache *dentry_cache __read_mostly;
40744
40745-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
40746-
40747 /*
40748 * This is the single most critical data structure when it comes
40749 * to the dcache: the hashtable for lookups. Somebody should try
40750@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
40751 mempages -= reserve;
40752
40753 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40754- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40755+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40756
40757 dcache_init();
40758 inode_init();
40759diff -urNp linux-2.6.32.45/fs/dlm/lockspace.c linux-2.6.32.45/fs/dlm/lockspace.c
40760--- linux-2.6.32.45/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
40761+++ linux-2.6.32.45/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
40762@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
40763 kfree(ls);
40764 }
40765
40766-static struct sysfs_ops dlm_attr_ops = {
40767+static const struct sysfs_ops dlm_attr_ops = {
40768 .show = dlm_attr_show,
40769 .store = dlm_attr_store,
40770 };
40771diff -urNp linux-2.6.32.45/fs/ecryptfs/inode.c linux-2.6.32.45/fs/ecryptfs/inode.c
40772--- linux-2.6.32.45/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40773+++ linux-2.6.32.45/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40774@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
40775 old_fs = get_fs();
40776 set_fs(get_ds());
40777 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40778- (char __user *)lower_buf,
40779+ (__force char __user *)lower_buf,
40780 lower_bufsiz);
40781 set_fs(old_fs);
40782 if (rc < 0)
40783@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
40784 }
40785 old_fs = get_fs();
40786 set_fs(get_ds());
40787- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40788+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
40789 set_fs(old_fs);
40790 if (rc < 0)
40791 goto out_free;
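The cachefiles_write_page, ecryptfs readlink and (further down) kernel_read hunks all annotate the same idiom: the caller temporarily widens the address limit with set_fs(KERNEL_DS) so a method that expects a __user pointer can be handed a kernel buffer, and the added __force casts tell sparse that crossing the address-space boundary here is deliberate rather than a bug. The idiom in isolation, under the old set_fs() API that this 2.6.32 tree uses:

#include <linux/fs.h>
#include <linux/uaccess.h>

/* Sketch of the classic set_fs(KERNEL_DS) pattern; not from the patch. */
static ssize_t demo_kernel_read(struct file *file, void *buf,
				size_t count, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);
	/* The __force cast only silences sparse's address-space warning. */
	ret = vfs_read(file, (__force char __user *)buf, count, pos);
	set_fs(old_fs);

	return ret;
}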
40792diff -urNp linux-2.6.32.45/fs/exec.c linux-2.6.32.45/fs/exec.c
40793--- linux-2.6.32.45/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
40794+++ linux-2.6.32.45/fs/exec.c 2011-08-11 19:56:19.000000000 -0400
40795@@ -56,12 +56,24 @@
40796 #include <linux/fsnotify.h>
40797 #include <linux/fs_struct.h>
40798 #include <linux/pipe_fs_i.h>
40799+#include <linux/random.h>
40800+#include <linux/seq_file.h>
40801+
40802+#ifdef CONFIG_PAX_REFCOUNT
40803+#include <linux/kallsyms.h>
40804+#include <linux/kdebug.h>
40805+#endif
40806
40807 #include <asm/uaccess.h>
40808 #include <asm/mmu_context.h>
40809 #include <asm/tlb.h>
40810 #include "internal.h"
40811
40812+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
40813+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
40814+EXPORT_SYMBOL(pax_set_initial_flags_func);
40815+#endif
40816+
40817 int core_uses_pid;
40818 char core_pattern[CORENAME_MAX_SIZE] = "core";
40819 unsigned int core_pipe_limit;
40820@@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
40821 goto out;
40822
40823 file = do_filp_open(AT_FDCWD, tmp,
40824- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40825+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40826 MAY_READ | MAY_EXEC | MAY_OPEN);
40827 putname(tmp);
40828 error = PTR_ERR(file);
40829@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
40830 int write)
40831 {
40832 struct page *page;
40833- int ret;
40834
40835-#ifdef CONFIG_STACK_GROWSUP
40836- if (write) {
40837- ret = expand_stack_downwards(bprm->vma, pos);
40838- if (ret < 0)
40839- return NULL;
40840- }
40841-#endif
40842- ret = get_user_pages(current, bprm->mm, pos,
40843- 1, write, 1, &page, NULL);
40844- if (ret <= 0)
40845+ if (0 > expand_stack_downwards(bprm->vma, pos))
40846+ return NULL;
40847+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
40848 return NULL;
40849
40850 if (write) {
40851@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
40852 vma->vm_end = STACK_TOP_MAX;
40853 vma->vm_start = vma->vm_end - PAGE_SIZE;
40854 vma->vm_flags = VM_STACK_FLAGS;
40855+
40856+#ifdef CONFIG_PAX_SEGMEXEC
40857+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
40858+#endif
40859+
40860 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
40861
40862 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
40863@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
40864 mm->stack_vm = mm->total_vm = 1;
40865 up_write(&mm->mmap_sem);
40866 bprm->p = vma->vm_end - sizeof(void *);
40867+
40868+#ifdef CONFIG_PAX_RANDUSTACK
40869+ if (randomize_va_space)
40870+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
40871+#endif
40872+
40873 return 0;
40874 err:
40875 up_write(&mm->mmap_sem);
40876@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
40877 int r;
40878 mm_segment_t oldfs = get_fs();
40879 set_fs(KERNEL_DS);
40880- r = copy_strings(argc, (char __user * __user *)argv, bprm);
40881+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
40882 set_fs(oldfs);
40883 return r;
40884 }
40885@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
40886 unsigned long new_end = old_end - shift;
40887 struct mmu_gather *tlb;
40888
40889- BUG_ON(new_start > new_end);
40890+ if (new_start >= new_end || new_start < mmap_min_addr)
40891+ return -ENOMEM;
40892
40893 /*
40894 * ensure there are no vmas between where we want to go
40895@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
40896 if (vma != find_vma(mm, new_start))
40897 return -EFAULT;
40898
40899+#ifdef CONFIG_PAX_SEGMEXEC
40900+ BUG_ON(pax_find_mirror_vma(vma));
40901+#endif
40902+
40903 /*
40904 * cover the whole range: [new_start, old_end)
40905 */
40906@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
40907 stack_top = arch_align_stack(stack_top);
40908 stack_top = PAGE_ALIGN(stack_top);
40909
40910- if (unlikely(stack_top < mmap_min_addr) ||
40911- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
40912- return -ENOMEM;
40913-
40914 stack_shift = vma->vm_end - stack_top;
40915
40916 bprm->p -= stack_shift;
40917@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
40918 bprm->exec -= stack_shift;
40919
40920 down_write(&mm->mmap_sem);
40921+
40922+ /* Move stack pages down in memory. */
40923+ if (stack_shift) {
40924+ ret = shift_arg_pages(vma, stack_shift);
40925+ if (ret)
40926+ goto out_unlock;
40927+ }
40928+
40929 vm_flags = VM_STACK_FLAGS;
40930
40931 /*
40932@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
40933 vm_flags &= ~VM_EXEC;
40934 vm_flags |= mm->def_flags;
40935
40936+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40937+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40938+ vm_flags &= ~VM_EXEC;
40939+
40940+#ifdef CONFIG_PAX_MPROTECT
40941+ if (mm->pax_flags & MF_PAX_MPROTECT)
40942+ vm_flags &= ~VM_MAYEXEC;
40943+#endif
40944+
40945+ }
40946+#endif
40947+
40948 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
40949 vm_flags);
40950 if (ret)
40951 goto out_unlock;
40952 BUG_ON(prev != vma);
40953
40954- /* Move stack pages down in memory. */
40955- if (stack_shift) {
40956- ret = shift_arg_pages(vma, stack_shift);
40957- if (ret)
40958- goto out_unlock;
40959- }
40960-
40961 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
40962 stack_size = vma->vm_end - vma->vm_start;
40963 /*
40964@@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
40965 int err;
40966
40967 file = do_filp_open(AT_FDCWD, name,
40968- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40969+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40970 MAY_EXEC | MAY_OPEN);
40971 if (IS_ERR(file))
40972 goto out;
40973@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
40974 old_fs = get_fs();
40975 set_fs(get_ds());
40976 /* The cast to a user pointer is valid due to the set_fs() */
40977- result = vfs_read(file, (void __user *)addr, count, &pos);
40978+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
40979 set_fs(old_fs);
40980 return result;
40981 }
40982@@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
40983 }
40984 rcu_read_unlock();
40985
40986- if (p->fs->users > n_fs) {
40987+ if (atomic_read(&p->fs->users) > n_fs) {
40988 bprm->unsafe |= LSM_UNSAFE_SHARE;
40989 } else {
40990 res = -EAGAIN;
40991@@ -1347,11 +1376,35 @@ int do_execve(char * filename,
40992 char __user *__user *envp,
40993 struct pt_regs * regs)
40994 {
40995+#ifdef CONFIG_GRKERNSEC
40996+ struct file *old_exec_file;
40997+ struct acl_subject_label *old_acl;
40998+ struct rlimit old_rlim[RLIM_NLIMITS];
40999+#endif
41000 struct linux_binprm *bprm;
41001 struct file *file;
41002 struct files_struct *displaced;
41003 bool clear_in_exec;
41004 int retval;
41005+ const struct cred *cred = current_cred();
41006+
41007+ /*
41008+ * We move the actual failure in case of RLIMIT_NPROC excess from
41009+ * set*uid() to execve() because too many poorly written programs
41010+ * don't check setuid() return code. Here we additionally recheck
41011+ * whether NPROC limit is still exceeded.
41012+ */
41013+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41014+
41015+ if ((current->flags & PF_NPROC_EXCEEDED) &&
41016+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
41017+ retval = -EAGAIN;
41018+ goto out_ret;
41019+ }
41020+
41021+ /* We're below the limit (still or again), so we don't want to make
41022+ * further execve() calls fail. */
41023+ current->flags &= ~PF_NPROC_EXCEEDED;
41024
41025 retval = unshare_files(&displaced);
41026 if (retval)
41027@@ -1383,6 +1436,16 @@ int do_execve(char * filename,
41028 bprm->filename = filename;
41029 bprm->interp = filename;
41030
41031+ if (gr_process_user_ban()) {
41032+ retval = -EPERM;
41033+ goto out_file;
41034+ }
41035+
41036+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41037+ retval = -EACCES;
41038+ goto out_file;
41039+ }
41040+
41041 retval = bprm_mm_init(bprm);
41042 if (retval)
41043 goto out_file;
41044@@ -1412,10 +1475,41 @@ int do_execve(char * filename,
41045 if (retval < 0)
41046 goto out;
41047
41048+ if (!gr_tpe_allow(file)) {
41049+ retval = -EACCES;
41050+ goto out;
41051+ }
41052+
41053+ if (gr_check_crash_exec(file)) {
41054+ retval = -EACCES;
41055+ goto out;
41056+ }
41057+
41058+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41059+
41060+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
41061+
41062+#ifdef CONFIG_GRKERNSEC
41063+ old_acl = current->acl;
41064+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41065+ old_exec_file = current->exec_file;
41066+ get_file(file);
41067+ current->exec_file = file;
41068+#endif
41069+
41070+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41071+ bprm->unsafe & LSM_UNSAFE_SHARE);
41072+ if (retval < 0)
41073+ goto out_fail;
41074+
41075 current->flags &= ~PF_KTHREAD;
41076 retval = search_binary_handler(bprm,regs);
41077 if (retval < 0)
41078- goto out;
41079+ goto out_fail;
41080+#ifdef CONFIG_GRKERNSEC
41081+ if (old_exec_file)
41082+ fput(old_exec_file);
41083+#endif
41084
41085 /* execve succeeded */
41086 current->fs->in_exec = 0;
41087@@ -1426,6 +1520,14 @@ int do_execve(char * filename,
41088 put_files_struct(displaced);
41089 return retval;
41090
41091+out_fail:
41092+#ifdef CONFIG_GRKERNSEC
41093+ current->acl = old_acl;
41094+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41095+ fput(current->exec_file);
41096+ current->exec_file = old_exec_file;
41097+#endif
41098+
41099 out:
41100 if (bprm->mm) {
41101 acct_arg_size(bprm, 0);
41102@@ -1591,6 +1693,220 @@ out:
41103 return ispipe;
41104 }
41105
41106+int pax_check_flags(unsigned long *flags)
41107+{
41108+ int retval = 0;
41109+
41110+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41111+ if (*flags & MF_PAX_SEGMEXEC)
41112+ {
41113+ *flags &= ~MF_PAX_SEGMEXEC;
41114+ retval = -EINVAL;
41115+ }
41116+#endif
41117+
41118+ if ((*flags & MF_PAX_PAGEEXEC)
41119+
41120+#ifdef CONFIG_PAX_PAGEEXEC
41121+ && (*flags & MF_PAX_SEGMEXEC)
41122+#endif
41123+
41124+ )
41125+ {
41126+ *flags &= ~MF_PAX_PAGEEXEC;
41127+ retval = -EINVAL;
41128+ }
41129+
41130+ if ((*flags & MF_PAX_MPROTECT)
41131+
41132+#ifdef CONFIG_PAX_MPROTECT
41133+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41134+#endif
41135+
41136+ )
41137+ {
41138+ *flags &= ~MF_PAX_MPROTECT;
41139+ retval = -EINVAL;
41140+ }
41141+
41142+ if ((*flags & MF_PAX_EMUTRAMP)
41143+
41144+#ifdef CONFIG_PAX_EMUTRAMP
41145+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41146+#endif
41147+
41148+ )
41149+ {
41150+ *flags &= ~MF_PAX_EMUTRAMP;
41151+ retval = -EINVAL;
41152+ }
41153+
41154+ return retval;
41155+}
41156+
41157+EXPORT_SYMBOL(pax_check_flags);
41158+
41159+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41160+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41161+{
41162+ struct task_struct *tsk = current;
41163+ struct mm_struct *mm = current->mm;
41164+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41165+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41166+ char *path_exec = NULL;
41167+ char *path_fault = NULL;
41168+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
41169+
41170+ if (buffer_exec && buffer_fault) {
41171+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41172+
41173+ down_read(&mm->mmap_sem);
41174+ vma = mm->mmap;
41175+ while (vma && (!vma_exec || !vma_fault)) {
41176+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41177+ vma_exec = vma;
41178+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41179+ vma_fault = vma;
41180+ vma = vma->vm_next;
41181+ }
41182+ if (vma_exec) {
41183+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41184+ if (IS_ERR(path_exec))
41185+ path_exec = "<path too long>";
41186+ else {
41187+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41188+ if (path_exec) {
41189+ *path_exec = 0;
41190+ path_exec = buffer_exec;
41191+ } else
41192+ path_exec = "<path too long>";
41193+ }
41194+ }
41195+ if (vma_fault) {
41196+ start = vma_fault->vm_start;
41197+ end = vma_fault->vm_end;
41198+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41199+ if (vma_fault->vm_file) {
41200+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41201+ if (IS_ERR(path_fault))
41202+ path_fault = "<path too long>";
41203+ else {
41204+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41205+ if (path_fault) {
41206+ *path_fault = 0;
41207+ path_fault = buffer_fault;
41208+ } else
41209+ path_fault = "<path too long>";
41210+ }
41211+ } else
41212+ path_fault = "<anonymous mapping>";
41213+ }
41214+ up_read(&mm->mmap_sem);
41215+ }
41216+ if (tsk->signal->curr_ip)
41217+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41218+ else
41219+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41220+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41221+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41222+ task_uid(tsk), task_euid(tsk), pc, sp);
41223+ free_page((unsigned long)buffer_exec);
41224+ free_page((unsigned long)buffer_fault);
41225+ pax_report_insns(pc, sp);
41226+ do_coredump(SIGKILL, SIGKILL, regs);
41227+}
41228+#endif
41229+
41230+#ifdef CONFIG_PAX_REFCOUNT
41231+void pax_report_refcount_overflow(struct pt_regs *regs)
41232+{
41233+ if (current->signal->curr_ip)
41234+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41235+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41236+ else
41237+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41238+ current->comm, task_pid_nr(current), current_uid(), current_euid());
41239+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
41240+ show_regs(regs);
41241+ force_sig_specific(SIGKILL, current);
41242+}
41243+#endif
41244+
41245+#ifdef CONFIG_PAX_USERCOPY
41246+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41247+int object_is_on_stack(const void *obj, unsigned long len)
41248+{
41249+ const void * const stack = task_stack_page(current);
41250+ const void * const stackend = stack + THREAD_SIZE;
41251+
41252+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41253+ const void *frame = NULL;
41254+ const void *oldframe;
41255+#endif
41256+
41257+ if (obj + len < obj)
41258+ return -1;
41259+
41260+ if (obj + len <= stack || stackend <= obj)
41261+ return 0;
41262+
41263+ if (obj < stack || stackend < obj + len)
41264+ return -1;
41265+
41266+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41267+ oldframe = __builtin_frame_address(1);
41268+ if (oldframe)
41269+ frame = __builtin_frame_address(2);
41270+ /*
41271+ low ----------------------------------------------> high
41272+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41273+ ^----------------^
41274+ allow copies only within here
41275+ */
41276+ while (stack <= frame && frame < stackend) {
41277+ /* if obj + len extends past the last frame, this
41278+ check won't pass and the next frame will be 0,
41279+ causing us to bail out and correctly report
41280+ the copy as invalid
41281+ */
41282+ if (obj + len <= frame)
41283+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41284+ oldframe = frame;
41285+ frame = *(const void * const *)frame;
41286+ }
41287+ return -1;
41288+#else
41289+ return 1;
41290+#endif
41291+}
41292+
41293+
41294+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41295+{
41296+ if (current->signal->curr_ip)
41297+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41298+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41299+ else
41300+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41301+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41302+
41303+ dump_stack();
41304+ gr_handle_kernel_exploit();
41305+ do_group_exit(SIGKILL);
41306+}
41307+#endif
41308+
41309+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41310+void pax_track_stack(void)
41311+{
41312+ unsigned long sp = (unsigned long)&sp;
41313+ if (sp < current_thread_info()->lowest_stack &&
41314+ sp > (unsigned long)task_stack_page(current))
41315+ current_thread_info()->lowest_stack = sp;
41316+}
41317+EXPORT_SYMBOL(pax_track_stack);
41318+#endif
41319+
41320 static int zap_process(struct task_struct *start)
41321 {
41322 struct task_struct *t;
41323@@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct
41324 pipe = file->f_path.dentry->d_inode->i_pipe;
41325
41326 pipe_lock(pipe);
41327- pipe->readers++;
41328- pipe->writers--;
41329+ atomic_inc(&pipe->readers);
41330+ atomic_dec(&pipe->writers);
41331
41332- while ((pipe->readers > 1) && (!signal_pending(current))) {
41333+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41334 wake_up_interruptible_sync(&pipe->wait);
41335 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41336 pipe_wait(pipe);
41337 }
41338
41339- pipe->readers--;
41340- pipe->writers++;
41341+ atomic_dec(&pipe->readers);
41342+ atomic_inc(&pipe->writers);
41343 pipe_unlock(pipe);
41344
41345 }
41346@@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_co
41347 char **helper_argv = NULL;
41348 int helper_argc = 0;
41349 int dump_count = 0;
41350- static atomic_t core_dump_count = ATOMIC_INIT(0);
41351+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41352
41353 audit_core_dumps(signr);
41354
41355+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41356+ gr_handle_brute_attach(current, mm->flags);
41357+
41358 binfmt = mm->binfmt;
41359 if (!binfmt || !binfmt->core_dump)
41360 goto fail;
41361@@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_co
41362 */
41363 clear_thread_flag(TIF_SIGPENDING);
41364
41365+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41366+
41367 /*
41368 * lock_kernel() because format_corename() is controlled by sysctl, which
41369 * uses lock_kernel()
41370@@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_co
41371 goto fail_unlock;
41372 }
41373
41374- dump_count = atomic_inc_return(&core_dump_count);
41375+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
41376 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41377 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41378 task_tgid_vnr(current), current->comm);
41379@@ -1972,7 +2293,7 @@ close_fail:
41380 filp_close(file, NULL);
41381 fail_dropcount:
41382 if (dump_count)
41383- atomic_dec(&core_dump_count);
41384+ atomic_dec_unchecked(&core_dump_count);
41385 fail_unlock:
41386 if (helper_argv)
41387 argv_free(helper_argv);
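Among the fs/exec.c additions, the CONFIG_PAX_USERCOPY helper classifies a buffer against the current kernel stack: per its comment, 0 means the object is not on the stack at all, 1 means it is fully on the stack (with no frame data available to narrow it further), 2 means it lies entirely within the permitted frame window, and -1 flags a partial overlap, which is always an error. A hypothetical caller, not taken from this patch, would interpret those results roughly as follows:

#include <linux/types.h>

/* Hypothetical usercopy check built on the patch's object_is_on_stack();
 * sketch only. */
static bool demo_usercopy_stack_ok(const void *obj, unsigned long len)
{
	switch (object_is_on_stack(obj, len)) {
	case 0:		/* not a stack object: nothing to enforce here      */
	case 1:		/* on the stack, but no frame data to check against */
	case 2:		/* wholly inside the allowed frame window           */
		return true;
	case -1:	/* partial overlap: reject the copy                 */
	default:
		return false;
	}
}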
41388diff -urNp linux-2.6.32.45/fs/ext2/balloc.c linux-2.6.32.45/fs/ext2/balloc.c
41389--- linux-2.6.32.45/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
41390+++ linux-2.6.32.45/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
41391@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41392
41393 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41394 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41395- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41396+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41397 sbi->s_resuid != current_fsuid() &&
41398 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41399 return 0;
41400diff -urNp linux-2.6.32.45/fs/ext3/balloc.c linux-2.6.32.45/fs/ext3/balloc.c
41401--- linux-2.6.32.45/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
41402+++ linux-2.6.32.45/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
41403@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
41404
41405 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41406 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41407- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41408+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41409 sbi->s_resuid != current_fsuid() &&
41410 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41411 return 0;
41412diff -urNp linux-2.6.32.45/fs/ext4/balloc.c linux-2.6.32.45/fs/ext4/balloc.c
41413--- linux-2.6.32.45/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
41414+++ linux-2.6.32.45/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
41415@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
41416 /* Hm, nope. Are (enough) root reserved blocks available? */
41417 if (sbi->s_resuid == current_fsuid() ||
41418 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41419- capable(CAP_SYS_RESOURCE)) {
41420+ capable_nolog(CAP_SYS_RESOURCE)) {
41421 if (free_blocks >= (nblocks + dirty_blocks))
41422 return 1;
41423 }
41424diff -urNp linux-2.6.32.45/fs/ext4/ext4.h linux-2.6.32.45/fs/ext4/ext4.h
41425--- linux-2.6.32.45/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
41426+++ linux-2.6.32.45/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
41427@@ -1078,19 +1078,19 @@ struct ext4_sb_info {
41428
41429 /* stats for buddy allocator */
41430 spinlock_t s_mb_pa_lock;
41431- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41432- atomic_t s_bal_success; /* we found long enough chunks */
41433- atomic_t s_bal_allocated; /* in blocks */
41434- atomic_t s_bal_ex_scanned; /* total extents scanned */
41435- atomic_t s_bal_goals; /* goal hits */
41436- atomic_t s_bal_breaks; /* too long searches */
41437- atomic_t s_bal_2orders; /* 2^order hits */
41438+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41439+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41440+ atomic_unchecked_t s_bal_allocated; /* in blocks */
41441+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41442+ atomic_unchecked_t s_bal_goals; /* goal hits */
41443+ atomic_unchecked_t s_bal_breaks; /* too long searches */
41444+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41445 spinlock_t s_bal_lock;
41446 unsigned long s_mb_buddies_generated;
41447 unsigned long long s_mb_generation_time;
41448- atomic_t s_mb_lost_chunks;
41449- atomic_t s_mb_preallocated;
41450- atomic_t s_mb_discarded;
41451+ atomic_unchecked_t s_mb_lost_chunks;
41452+ atomic_unchecked_t s_mb_preallocated;
41453+ atomic_unchecked_t s_mb_discarded;
41454 atomic_t s_lock_busy;
41455
41456 /* locality groups */
41457diff -urNp linux-2.6.32.45/fs/ext4/mballoc.c linux-2.6.32.45/fs/ext4/mballoc.c
41458--- linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
41459+++ linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
41460@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
41461 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41462
41463 if (EXT4_SB(sb)->s_mb_stats)
41464- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41465+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41466
41467 break;
41468 }
41469@@ -2131,7 +2131,7 @@ repeat:
41470 ac->ac_status = AC_STATUS_CONTINUE;
41471 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41472 cr = 3;
41473- atomic_inc(&sbi->s_mb_lost_chunks);
41474+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41475 goto repeat;
41476 }
41477 }
41478@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
41479 ext4_grpblk_t counters[16];
41480 } sg;
41481
41482+ pax_track_stack();
41483+
41484 group--;
41485 if (group == 0)
41486 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41487@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
41488 if (sbi->s_mb_stats) {
41489 printk(KERN_INFO
41490 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41491- atomic_read(&sbi->s_bal_allocated),
41492- atomic_read(&sbi->s_bal_reqs),
41493- atomic_read(&sbi->s_bal_success));
41494+ atomic_read_unchecked(&sbi->s_bal_allocated),
41495+ atomic_read_unchecked(&sbi->s_bal_reqs),
41496+ atomic_read_unchecked(&sbi->s_bal_success));
41497 printk(KERN_INFO
41498 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41499 "%u 2^N hits, %u breaks, %u lost\n",
41500- atomic_read(&sbi->s_bal_ex_scanned),
41501- atomic_read(&sbi->s_bal_goals),
41502- atomic_read(&sbi->s_bal_2orders),
41503- atomic_read(&sbi->s_bal_breaks),
41504- atomic_read(&sbi->s_mb_lost_chunks));
41505+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41506+ atomic_read_unchecked(&sbi->s_bal_goals),
41507+ atomic_read_unchecked(&sbi->s_bal_2orders),
41508+ atomic_read_unchecked(&sbi->s_bal_breaks),
41509+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41510 printk(KERN_INFO
41511 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41512 sbi->s_mb_buddies_generated++,
41513 sbi->s_mb_generation_time);
41514 printk(KERN_INFO
41515 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41516- atomic_read(&sbi->s_mb_preallocated),
41517- atomic_read(&sbi->s_mb_discarded));
41518+ atomic_read_unchecked(&sbi->s_mb_preallocated),
41519+ atomic_read_unchecked(&sbi->s_mb_discarded));
41520 }
41521
41522 free_percpu(sbi->s_locality_groups);
41523@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
41524 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41525
41526 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41527- atomic_inc(&sbi->s_bal_reqs);
41528- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41529+ atomic_inc_unchecked(&sbi->s_bal_reqs);
41530+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41531 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
41532- atomic_inc(&sbi->s_bal_success);
41533- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41534+ atomic_inc_unchecked(&sbi->s_bal_success);
41535+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41536 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41537 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41538- atomic_inc(&sbi->s_bal_goals);
41539+ atomic_inc_unchecked(&sbi->s_bal_goals);
41540 if (ac->ac_found > sbi->s_mb_max_to_scan)
41541- atomic_inc(&sbi->s_bal_breaks);
41542+ atomic_inc_unchecked(&sbi->s_bal_breaks);
41543 }
41544
41545 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41546@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41547 trace_ext4_mb_new_inode_pa(ac, pa);
41548
41549 ext4_mb_use_inode_pa(ac, pa);
41550- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41551+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41552
41553 ei = EXT4_I(ac->ac_inode);
41554 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41555@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41556 trace_ext4_mb_new_group_pa(ac, pa);
41557
41558 ext4_mb_use_group_pa(ac, pa);
41559- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41560+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41561
41562 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41563 lg = ac->ac_lg;
41564@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41565 * from the bitmap and continue.
41566 */
41567 }
41568- atomic_add(free, &sbi->s_mb_discarded);
41569+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
41570
41571 return err;
41572 }
41573@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41574 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41575 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41576 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41577- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41578+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41579
41580 if (ac) {
41581 ac->ac_sb = sb;
41582diff -urNp linux-2.6.32.45/fs/ext4/super.c linux-2.6.32.45/fs/ext4/super.c
41583--- linux-2.6.32.45/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
41584+++ linux-2.6.32.45/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
41585@@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
41586 }
41587
41588
41589-static struct sysfs_ops ext4_attr_ops = {
41590+static const struct sysfs_ops ext4_attr_ops = {
41591 .show = ext4_attr_show,
41592 .store = ext4_attr_store,
41593 };
41594diff -urNp linux-2.6.32.45/fs/fcntl.c linux-2.6.32.45/fs/fcntl.c
41595--- linux-2.6.32.45/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
41596+++ linux-2.6.32.45/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
41597@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
41598 if (err)
41599 return err;
41600
41601+ if (gr_handle_chroot_fowner(pid, type))
41602+ return -ENOENT;
41603+ if (gr_check_protected_task_fowner(pid, type))
41604+ return -EACCES;
41605+
41606 f_modown(filp, pid, type, force);
41607 return 0;
41608 }
41609@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
41610 switch (cmd) {
41611 case F_DUPFD:
41612 case F_DUPFD_CLOEXEC:
41613+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41614 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41615 break;
41616 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41617diff -urNp linux-2.6.32.45/fs/fifo.c linux-2.6.32.45/fs/fifo.c
41618--- linux-2.6.32.45/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
41619+++ linux-2.6.32.45/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
41620@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
41621 */
41622 filp->f_op = &read_pipefifo_fops;
41623 pipe->r_counter++;
41624- if (pipe->readers++ == 0)
41625+ if (atomic_inc_return(&pipe->readers) == 1)
41626 wake_up_partner(inode);
41627
41628- if (!pipe->writers) {
41629+ if (!atomic_read(&pipe->writers)) {
41630 if ((filp->f_flags & O_NONBLOCK)) {
41631 /* suppress POLLHUP until we have
41632 * seen a writer */
41633@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
41634 * errno=ENXIO when there is no process reading the FIFO.
41635 */
41636 ret = -ENXIO;
41637- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
41638+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
41639 goto err;
41640
41641 filp->f_op = &write_pipefifo_fops;
41642 pipe->w_counter++;
41643- if (!pipe->writers++)
41644+ if (atomic_inc_return(&pipe->writers) == 1)
41645 wake_up_partner(inode);
41646
41647- if (!pipe->readers) {
41648+ if (!atomic_read(&pipe->readers)) {
41649 wait_for_partner(inode, &pipe->r_counter);
41650 if (signal_pending(current))
41651 goto err_wr;
41652@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
41653 */
41654 filp->f_op = &rdwr_pipefifo_fops;
41655
41656- pipe->readers++;
41657- pipe->writers++;
41658+ atomic_inc(&pipe->readers);
41659+ atomic_inc(&pipe->writers);
41660 pipe->r_counter++;
41661 pipe->w_counter++;
41662- if (pipe->readers == 1 || pipe->writers == 1)
41663+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
41664 wake_up_partner(inode);
41665 break;
41666
41667@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
41668 return 0;
41669
41670 err_rd:
41671- if (!--pipe->readers)
41672+ if (atomic_dec_and_test(&pipe->readers))
41673 wake_up_interruptible(&pipe->wait);
41674 ret = -ERESTARTSYS;
41675 goto err;
41676
41677 err_wr:
41678- if (!--pipe->writers)
41679+ if (atomic_dec_and_test(&pipe->writers))
41680 wake_up_interruptible(&pipe->wait);
41681 ret = -ERESTARTSYS;
41682 goto err;
41683
41684 err:
41685- if (!pipe->readers && !pipe->writers)
41686+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
41687 free_pipe_info(inode);
41688
41689 err_nocleanup:
41690diff -urNp linux-2.6.32.45/fs/file.c linux-2.6.32.45/fs/file.c
41691--- linux-2.6.32.45/fs/file.c 2011-03-27 14:31:47.000000000 -0400
41692+++ linux-2.6.32.45/fs/file.c 2011-04-17 15:56:46.000000000 -0400
41693@@ -14,6 +14,7 @@
41694 #include <linux/slab.h>
41695 #include <linux/vmalloc.h>
41696 #include <linux/file.h>
41697+#include <linux/security.h>
41698 #include <linux/fdtable.h>
41699 #include <linux/bitops.h>
41700 #include <linux/interrupt.h>
41701@@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
41702 * N.B. For clone tasks sharing a files structure, this test
41703 * will limit the total number of files that can be opened.
41704 */
41705+
41706+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
41707 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41708 return -EMFILE;
41709
41710diff -urNp linux-2.6.32.45/fs/filesystems.c linux-2.6.32.45/fs/filesystems.c
41711--- linux-2.6.32.45/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
41712+++ linux-2.6.32.45/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
41713@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
41714 int len = dot ? dot - name : strlen(name);
41715
41716 fs = __get_fs_type(name, len);
41717+
41718+#ifdef CONFIG_GRKERNSEC_MODHARDEN
41719+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
41720+#else
41721 if (!fs && (request_module("%.*s", len, name) == 0))
41722+#endif
41723 fs = __get_fs_type(name, len);
41724
41725 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
41726diff -urNp linux-2.6.32.45/fs/fscache/cookie.c linux-2.6.32.45/fs/fscache/cookie.c
41727--- linux-2.6.32.45/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
41728+++ linux-2.6.32.45/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
41729@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
41730 parent ? (char *) parent->def->name : "<no-parent>",
41731 def->name, netfs_data);
41732
41733- fscache_stat(&fscache_n_acquires);
41734+ fscache_stat_unchecked(&fscache_n_acquires);
41735
41736 /* if there's no parent cookie, then we don't create one here either */
41737 if (!parent) {
41738- fscache_stat(&fscache_n_acquires_null);
41739+ fscache_stat_unchecked(&fscache_n_acquires_null);
41740 _leave(" [no parent]");
41741 return NULL;
41742 }
41743@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
41744 /* allocate and initialise a cookie */
41745 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
41746 if (!cookie) {
41747- fscache_stat(&fscache_n_acquires_oom);
41748+ fscache_stat_unchecked(&fscache_n_acquires_oom);
41749 _leave(" [ENOMEM]");
41750 return NULL;
41751 }
41752@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
41753
41754 switch (cookie->def->type) {
41755 case FSCACHE_COOKIE_TYPE_INDEX:
41756- fscache_stat(&fscache_n_cookie_index);
41757+ fscache_stat_unchecked(&fscache_n_cookie_index);
41758 break;
41759 case FSCACHE_COOKIE_TYPE_DATAFILE:
41760- fscache_stat(&fscache_n_cookie_data);
41761+ fscache_stat_unchecked(&fscache_n_cookie_data);
41762 break;
41763 default:
41764- fscache_stat(&fscache_n_cookie_special);
41765+ fscache_stat_unchecked(&fscache_n_cookie_special);
41766 break;
41767 }
41768
41769@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
41770 if (fscache_acquire_non_index_cookie(cookie) < 0) {
41771 atomic_dec(&parent->n_children);
41772 __fscache_cookie_put(cookie);
41773- fscache_stat(&fscache_n_acquires_nobufs);
41774+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
41775 _leave(" = NULL");
41776 return NULL;
41777 }
41778 }
41779
41780- fscache_stat(&fscache_n_acquires_ok);
41781+ fscache_stat_unchecked(&fscache_n_acquires_ok);
41782 _leave(" = %p", cookie);
41783 return cookie;
41784 }
41785@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
41786 cache = fscache_select_cache_for_object(cookie->parent);
41787 if (!cache) {
41788 up_read(&fscache_addremove_sem);
41789- fscache_stat(&fscache_n_acquires_no_cache);
41790+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
41791 _leave(" = -ENOMEDIUM [no cache]");
41792 return -ENOMEDIUM;
41793 }
41794@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
41795 object = cache->ops->alloc_object(cache, cookie);
41796 fscache_stat_d(&fscache_n_cop_alloc_object);
41797 if (IS_ERR(object)) {
41798- fscache_stat(&fscache_n_object_no_alloc);
41799+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
41800 ret = PTR_ERR(object);
41801 goto error;
41802 }
41803
41804- fscache_stat(&fscache_n_object_alloc);
41805+ fscache_stat_unchecked(&fscache_n_object_alloc);
41806
41807 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
41808
41809@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
41810 struct fscache_object *object;
41811 struct hlist_node *_p;
41812
41813- fscache_stat(&fscache_n_updates);
41814+ fscache_stat_unchecked(&fscache_n_updates);
41815
41816 if (!cookie) {
41817- fscache_stat(&fscache_n_updates_null);
41818+ fscache_stat_unchecked(&fscache_n_updates_null);
41819 _leave(" [no cookie]");
41820 return;
41821 }
41822@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
41823 struct fscache_object *object;
41824 unsigned long event;
41825
41826- fscache_stat(&fscache_n_relinquishes);
41827+ fscache_stat_unchecked(&fscache_n_relinquishes);
41828 if (retire)
41829- fscache_stat(&fscache_n_relinquishes_retire);
41830+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
41831
41832 if (!cookie) {
41833- fscache_stat(&fscache_n_relinquishes_null);
41834+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
41835 _leave(" [no cookie]");
41836 return;
41837 }
41838@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
41839
41840 /* wait for the cookie to finish being instantiated (or to fail) */
41841 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
41842- fscache_stat(&fscache_n_relinquishes_waitcrt);
41843+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
41844 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
41845 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
41846 }
41847diff -urNp linux-2.6.32.45/fs/fscache/internal.h linux-2.6.32.45/fs/fscache/internal.h
41848--- linux-2.6.32.45/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
41849+++ linux-2.6.32.45/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
41850@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
41851 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
41852 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
41853
41854-extern atomic_t fscache_n_op_pend;
41855-extern atomic_t fscache_n_op_run;
41856-extern atomic_t fscache_n_op_enqueue;
41857-extern atomic_t fscache_n_op_deferred_release;
41858-extern atomic_t fscache_n_op_release;
41859-extern atomic_t fscache_n_op_gc;
41860-extern atomic_t fscache_n_op_cancelled;
41861-extern atomic_t fscache_n_op_rejected;
41862-
41863-extern atomic_t fscache_n_attr_changed;
41864-extern atomic_t fscache_n_attr_changed_ok;
41865-extern atomic_t fscache_n_attr_changed_nobufs;
41866-extern atomic_t fscache_n_attr_changed_nomem;
41867-extern atomic_t fscache_n_attr_changed_calls;
41868-
41869-extern atomic_t fscache_n_allocs;
41870-extern atomic_t fscache_n_allocs_ok;
41871-extern atomic_t fscache_n_allocs_wait;
41872-extern atomic_t fscache_n_allocs_nobufs;
41873-extern atomic_t fscache_n_allocs_intr;
41874-extern atomic_t fscache_n_allocs_object_dead;
41875-extern atomic_t fscache_n_alloc_ops;
41876-extern atomic_t fscache_n_alloc_op_waits;
41877-
41878-extern atomic_t fscache_n_retrievals;
41879-extern atomic_t fscache_n_retrievals_ok;
41880-extern atomic_t fscache_n_retrievals_wait;
41881-extern atomic_t fscache_n_retrievals_nodata;
41882-extern atomic_t fscache_n_retrievals_nobufs;
41883-extern atomic_t fscache_n_retrievals_intr;
41884-extern atomic_t fscache_n_retrievals_nomem;
41885-extern atomic_t fscache_n_retrievals_object_dead;
41886-extern atomic_t fscache_n_retrieval_ops;
41887-extern atomic_t fscache_n_retrieval_op_waits;
41888-
41889-extern atomic_t fscache_n_stores;
41890-extern atomic_t fscache_n_stores_ok;
41891-extern atomic_t fscache_n_stores_again;
41892-extern atomic_t fscache_n_stores_nobufs;
41893-extern atomic_t fscache_n_stores_oom;
41894-extern atomic_t fscache_n_store_ops;
41895-extern atomic_t fscache_n_store_calls;
41896-extern atomic_t fscache_n_store_pages;
41897-extern atomic_t fscache_n_store_radix_deletes;
41898-extern atomic_t fscache_n_store_pages_over_limit;
41899-
41900-extern atomic_t fscache_n_store_vmscan_not_storing;
41901-extern atomic_t fscache_n_store_vmscan_gone;
41902-extern atomic_t fscache_n_store_vmscan_busy;
41903-extern atomic_t fscache_n_store_vmscan_cancelled;
41904-
41905-extern atomic_t fscache_n_marks;
41906-extern atomic_t fscache_n_uncaches;
41907-
41908-extern atomic_t fscache_n_acquires;
41909-extern atomic_t fscache_n_acquires_null;
41910-extern atomic_t fscache_n_acquires_no_cache;
41911-extern atomic_t fscache_n_acquires_ok;
41912-extern atomic_t fscache_n_acquires_nobufs;
41913-extern atomic_t fscache_n_acquires_oom;
41914-
41915-extern atomic_t fscache_n_updates;
41916-extern atomic_t fscache_n_updates_null;
41917-extern atomic_t fscache_n_updates_run;
41918-
41919-extern atomic_t fscache_n_relinquishes;
41920-extern atomic_t fscache_n_relinquishes_null;
41921-extern atomic_t fscache_n_relinquishes_waitcrt;
41922-extern atomic_t fscache_n_relinquishes_retire;
41923-
41924-extern atomic_t fscache_n_cookie_index;
41925-extern atomic_t fscache_n_cookie_data;
41926-extern atomic_t fscache_n_cookie_special;
41927-
41928-extern atomic_t fscache_n_object_alloc;
41929-extern atomic_t fscache_n_object_no_alloc;
41930-extern atomic_t fscache_n_object_lookups;
41931-extern atomic_t fscache_n_object_lookups_negative;
41932-extern atomic_t fscache_n_object_lookups_positive;
41933-extern atomic_t fscache_n_object_lookups_timed_out;
41934-extern atomic_t fscache_n_object_created;
41935-extern atomic_t fscache_n_object_avail;
41936-extern atomic_t fscache_n_object_dead;
41937-
41938-extern atomic_t fscache_n_checkaux_none;
41939-extern atomic_t fscache_n_checkaux_okay;
41940-extern atomic_t fscache_n_checkaux_update;
41941-extern atomic_t fscache_n_checkaux_obsolete;
41942+extern atomic_unchecked_t fscache_n_op_pend;
41943+extern atomic_unchecked_t fscache_n_op_run;
41944+extern atomic_unchecked_t fscache_n_op_enqueue;
41945+extern atomic_unchecked_t fscache_n_op_deferred_release;
41946+extern atomic_unchecked_t fscache_n_op_release;
41947+extern atomic_unchecked_t fscache_n_op_gc;
41948+extern atomic_unchecked_t fscache_n_op_cancelled;
41949+extern atomic_unchecked_t fscache_n_op_rejected;
41950+
41951+extern atomic_unchecked_t fscache_n_attr_changed;
41952+extern atomic_unchecked_t fscache_n_attr_changed_ok;
41953+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
41954+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
41955+extern atomic_unchecked_t fscache_n_attr_changed_calls;
41956+
41957+extern atomic_unchecked_t fscache_n_allocs;
41958+extern atomic_unchecked_t fscache_n_allocs_ok;
41959+extern atomic_unchecked_t fscache_n_allocs_wait;
41960+extern atomic_unchecked_t fscache_n_allocs_nobufs;
41961+extern atomic_unchecked_t fscache_n_allocs_intr;
41962+extern atomic_unchecked_t fscache_n_allocs_object_dead;
41963+extern atomic_unchecked_t fscache_n_alloc_ops;
41964+extern atomic_unchecked_t fscache_n_alloc_op_waits;
41965+
41966+extern atomic_unchecked_t fscache_n_retrievals;
41967+extern atomic_unchecked_t fscache_n_retrievals_ok;
41968+extern atomic_unchecked_t fscache_n_retrievals_wait;
41969+extern atomic_unchecked_t fscache_n_retrievals_nodata;
41970+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
41971+extern atomic_unchecked_t fscache_n_retrievals_intr;
41972+extern atomic_unchecked_t fscache_n_retrievals_nomem;
41973+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
41974+extern atomic_unchecked_t fscache_n_retrieval_ops;
41975+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
41976+
41977+extern atomic_unchecked_t fscache_n_stores;
41978+extern atomic_unchecked_t fscache_n_stores_ok;
41979+extern atomic_unchecked_t fscache_n_stores_again;
41980+extern atomic_unchecked_t fscache_n_stores_nobufs;
41981+extern atomic_unchecked_t fscache_n_stores_oom;
41982+extern atomic_unchecked_t fscache_n_store_ops;
41983+extern atomic_unchecked_t fscache_n_store_calls;
41984+extern atomic_unchecked_t fscache_n_store_pages;
41985+extern atomic_unchecked_t fscache_n_store_radix_deletes;
41986+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
41987+
41988+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
41989+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
41990+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
41991+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
41992+
41993+extern atomic_unchecked_t fscache_n_marks;
41994+extern atomic_unchecked_t fscache_n_uncaches;
41995+
41996+extern atomic_unchecked_t fscache_n_acquires;
41997+extern atomic_unchecked_t fscache_n_acquires_null;
41998+extern atomic_unchecked_t fscache_n_acquires_no_cache;
41999+extern atomic_unchecked_t fscache_n_acquires_ok;
42000+extern atomic_unchecked_t fscache_n_acquires_nobufs;
42001+extern atomic_unchecked_t fscache_n_acquires_oom;
42002+
42003+extern atomic_unchecked_t fscache_n_updates;
42004+extern atomic_unchecked_t fscache_n_updates_null;
42005+extern atomic_unchecked_t fscache_n_updates_run;
42006+
42007+extern atomic_unchecked_t fscache_n_relinquishes;
42008+extern atomic_unchecked_t fscache_n_relinquishes_null;
42009+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42010+extern atomic_unchecked_t fscache_n_relinquishes_retire;
42011+
42012+extern atomic_unchecked_t fscache_n_cookie_index;
42013+extern atomic_unchecked_t fscache_n_cookie_data;
42014+extern atomic_unchecked_t fscache_n_cookie_special;
42015+
42016+extern atomic_unchecked_t fscache_n_object_alloc;
42017+extern atomic_unchecked_t fscache_n_object_no_alloc;
42018+extern atomic_unchecked_t fscache_n_object_lookups;
42019+extern atomic_unchecked_t fscache_n_object_lookups_negative;
42020+extern atomic_unchecked_t fscache_n_object_lookups_positive;
42021+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42022+extern atomic_unchecked_t fscache_n_object_created;
42023+extern atomic_unchecked_t fscache_n_object_avail;
42024+extern atomic_unchecked_t fscache_n_object_dead;
42025+
42026+extern atomic_unchecked_t fscache_n_checkaux_none;
42027+extern atomic_unchecked_t fscache_n_checkaux_okay;
42028+extern atomic_unchecked_t fscache_n_checkaux_update;
42029+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42030
42031 extern atomic_t fscache_n_cop_alloc_object;
42032 extern atomic_t fscache_n_cop_lookup_object;
42033@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
42034 atomic_inc(stat);
42035 }
42036
42037+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42038+{
42039+ atomic_inc_unchecked(stat);
42040+}
42041+
42042 static inline void fscache_stat_d(atomic_t *stat)
42043 {
42044 atomic_dec(stat);
42045@@ -259,6 +264,7 @@ extern const struct file_operations fsca
42046
42047 #define __fscache_stat(stat) (NULL)
42048 #define fscache_stat(stat) do {} while (0)
42049+#define fscache_stat_unchecked(stat) do {} while (0)
42050 #define fscache_stat_d(stat) do {} while (0)
42051 #endif
42052
42053diff -urNp linux-2.6.32.45/fs/fscache/object.c linux-2.6.32.45/fs/fscache/object.c
42054--- linux-2.6.32.45/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
42055+++ linux-2.6.32.45/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
42056@@ -144,7 +144,7 @@ static void fscache_object_state_machine
42057 /* update the object metadata on disk */
42058 case FSCACHE_OBJECT_UPDATING:
42059 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42060- fscache_stat(&fscache_n_updates_run);
42061+ fscache_stat_unchecked(&fscache_n_updates_run);
42062 fscache_stat(&fscache_n_cop_update_object);
42063 object->cache->ops->update_object(object);
42064 fscache_stat_d(&fscache_n_cop_update_object);
42065@@ -233,7 +233,7 @@ static void fscache_object_state_machine
42066 spin_lock(&object->lock);
42067 object->state = FSCACHE_OBJECT_DEAD;
42068 spin_unlock(&object->lock);
42069- fscache_stat(&fscache_n_object_dead);
42070+ fscache_stat_unchecked(&fscache_n_object_dead);
42071 goto terminal_transit;
42072
42073 /* handle the parent cache of this object being withdrawn from
42074@@ -248,7 +248,7 @@ static void fscache_object_state_machine
42075 spin_lock(&object->lock);
42076 object->state = FSCACHE_OBJECT_DEAD;
42077 spin_unlock(&object->lock);
42078- fscache_stat(&fscache_n_object_dead);
42079+ fscache_stat_unchecked(&fscache_n_object_dead);
42080 goto terminal_transit;
42081
42082 /* complain about the object being woken up once it is
42083@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
42084 parent->cookie->def->name, cookie->def->name,
42085 object->cache->tag->name);
42086
42087- fscache_stat(&fscache_n_object_lookups);
42088+ fscache_stat_unchecked(&fscache_n_object_lookups);
42089 fscache_stat(&fscache_n_cop_lookup_object);
42090 ret = object->cache->ops->lookup_object(object);
42091 fscache_stat_d(&fscache_n_cop_lookup_object);
42092@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
42093 if (ret == -ETIMEDOUT) {
42094 /* probably stuck behind another object, so move this one to
42095 * the back of the queue */
42096- fscache_stat(&fscache_n_object_lookups_timed_out);
42097+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42098 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42099 }
42100
42101@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
42102
42103 spin_lock(&object->lock);
42104 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42105- fscache_stat(&fscache_n_object_lookups_negative);
42106+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42107
42108 /* transit here to allow write requests to begin stacking up
42109 * and read requests to begin returning ENODATA */
42110@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
42111 * result, in which case there may be data available */
42112 spin_lock(&object->lock);
42113 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42114- fscache_stat(&fscache_n_object_lookups_positive);
42115+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42116
42117 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42118
42119@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
42120 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42121 } else {
42122 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42123- fscache_stat(&fscache_n_object_created);
42124+ fscache_stat_unchecked(&fscache_n_object_created);
42125
42126 object->state = FSCACHE_OBJECT_AVAILABLE;
42127 spin_unlock(&object->lock);
42128@@ -633,7 +633,7 @@ static void fscache_object_available(str
42129 fscache_enqueue_dependents(object);
42130
42131 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42132- fscache_stat(&fscache_n_object_avail);
42133+ fscache_stat_unchecked(&fscache_n_object_avail);
42134
42135 _leave("");
42136 }
42137@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42138 enum fscache_checkaux result;
42139
42140 if (!object->cookie->def->check_aux) {
42141- fscache_stat(&fscache_n_checkaux_none);
42142+ fscache_stat_unchecked(&fscache_n_checkaux_none);
42143 return FSCACHE_CHECKAUX_OKAY;
42144 }
42145
42146@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42147 switch (result) {
42148 /* entry okay as is */
42149 case FSCACHE_CHECKAUX_OKAY:
42150- fscache_stat(&fscache_n_checkaux_okay);
42151+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
42152 break;
42153
42154 /* entry requires update */
42155 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42156- fscache_stat(&fscache_n_checkaux_update);
42157+ fscache_stat_unchecked(&fscache_n_checkaux_update);
42158 break;
42159
42160 /* entry requires deletion */
42161 case FSCACHE_CHECKAUX_OBSOLETE:
42162- fscache_stat(&fscache_n_checkaux_obsolete);
42163+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42164 break;
42165
42166 default:
42167diff -urNp linux-2.6.32.45/fs/fscache/operation.c linux-2.6.32.45/fs/fscache/operation.c
42168--- linux-2.6.32.45/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
42169+++ linux-2.6.32.45/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
42170@@ -16,7 +16,7 @@
42171 #include <linux/seq_file.h>
42172 #include "internal.h"
42173
42174-atomic_t fscache_op_debug_id;
42175+atomic_unchecked_t fscache_op_debug_id;
42176 EXPORT_SYMBOL(fscache_op_debug_id);
42177
42178 /**
42179@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
42180 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42181 ASSERTCMP(atomic_read(&op->usage), >, 0);
42182
42183- fscache_stat(&fscache_n_op_enqueue);
42184+ fscache_stat_unchecked(&fscache_n_op_enqueue);
42185 switch (op->flags & FSCACHE_OP_TYPE) {
42186 case FSCACHE_OP_FAST:
42187 _debug("queue fast");
42188@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
42189 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42190 if (op->processor)
42191 fscache_enqueue_operation(op);
42192- fscache_stat(&fscache_n_op_run);
42193+ fscache_stat_unchecked(&fscache_n_op_run);
42194 }
42195
42196 /*
42197@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
42198 if (object->n_ops > 0) {
42199 atomic_inc(&op->usage);
42200 list_add_tail(&op->pend_link, &object->pending_ops);
42201- fscache_stat(&fscache_n_op_pend);
42202+ fscache_stat_unchecked(&fscache_n_op_pend);
42203 } else if (!list_empty(&object->pending_ops)) {
42204 atomic_inc(&op->usage);
42205 list_add_tail(&op->pend_link, &object->pending_ops);
42206- fscache_stat(&fscache_n_op_pend);
42207+ fscache_stat_unchecked(&fscache_n_op_pend);
42208 fscache_start_operations(object);
42209 } else {
42210 ASSERTCMP(object->n_in_progress, ==, 0);
42211@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
42212 object->n_exclusive++; /* reads and writes must wait */
42213 atomic_inc(&op->usage);
42214 list_add_tail(&op->pend_link, &object->pending_ops);
42215- fscache_stat(&fscache_n_op_pend);
42216+ fscache_stat_unchecked(&fscache_n_op_pend);
42217 ret = 0;
42218 } else {
42219 /* not allowed to submit ops in any other state */
42220@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
42221 if (object->n_exclusive > 0) {
42222 atomic_inc(&op->usage);
42223 list_add_tail(&op->pend_link, &object->pending_ops);
42224- fscache_stat(&fscache_n_op_pend);
42225+ fscache_stat_unchecked(&fscache_n_op_pend);
42226 } else if (!list_empty(&object->pending_ops)) {
42227 atomic_inc(&op->usage);
42228 list_add_tail(&op->pend_link, &object->pending_ops);
42229- fscache_stat(&fscache_n_op_pend);
42230+ fscache_stat_unchecked(&fscache_n_op_pend);
42231 fscache_start_operations(object);
42232 } else {
42233 ASSERTCMP(object->n_exclusive, ==, 0);
42234@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
42235 object->n_ops++;
42236 atomic_inc(&op->usage);
42237 list_add_tail(&op->pend_link, &object->pending_ops);
42238- fscache_stat(&fscache_n_op_pend);
42239+ fscache_stat_unchecked(&fscache_n_op_pend);
42240 ret = 0;
42241 } else if (object->state == FSCACHE_OBJECT_DYING ||
42242 object->state == FSCACHE_OBJECT_LC_DYING ||
42243 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42244- fscache_stat(&fscache_n_op_rejected);
42245+ fscache_stat_unchecked(&fscache_n_op_rejected);
42246 ret = -ENOBUFS;
42247 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42248 fscache_report_unexpected_submission(object, op, ostate);
42249@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
42250
42251 ret = -EBUSY;
42252 if (!list_empty(&op->pend_link)) {
42253- fscache_stat(&fscache_n_op_cancelled);
42254+ fscache_stat_unchecked(&fscache_n_op_cancelled);
42255 list_del_init(&op->pend_link);
42256 object->n_ops--;
42257 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42258@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
42259 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42260 BUG();
42261
42262- fscache_stat(&fscache_n_op_release);
42263+ fscache_stat_unchecked(&fscache_n_op_release);
42264
42265 if (op->release) {
42266 op->release(op);
42267@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
42268 * lock, and defer it otherwise */
42269 if (!spin_trylock(&object->lock)) {
42270 _debug("defer put");
42271- fscache_stat(&fscache_n_op_deferred_release);
42272+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
42273
42274 cache = object->cache;
42275 spin_lock(&cache->op_gc_list_lock);
42276@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
42277
42278 _debug("GC DEFERRED REL OBJ%x OP%x",
42279 object->debug_id, op->debug_id);
42280- fscache_stat(&fscache_n_op_gc);
42281+ fscache_stat_unchecked(&fscache_n_op_gc);
42282
42283 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42284
42285diff -urNp linux-2.6.32.45/fs/fscache/page.c linux-2.6.32.45/fs/fscache/page.c
42286--- linux-2.6.32.45/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
42287+++ linux-2.6.32.45/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
42288@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
42289 val = radix_tree_lookup(&cookie->stores, page->index);
42290 if (!val) {
42291 rcu_read_unlock();
42292- fscache_stat(&fscache_n_store_vmscan_not_storing);
42293+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42294 __fscache_uncache_page(cookie, page);
42295 return true;
42296 }
42297@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
42298 spin_unlock(&cookie->stores_lock);
42299
42300 if (xpage) {
42301- fscache_stat(&fscache_n_store_vmscan_cancelled);
42302- fscache_stat(&fscache_n_store_radix_deletes);
42303+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42304+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42305 ASSERTCMP(xpage, ==, page);
42306 } else {
42307- fscache_stat(&fscache_n_store_vmscan_gone);
42308+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42309 }
42310
42311 wake_up_bit(&cookie->flags, 0);
42312@@ -106,7 +106,7 @@ page_busy:
42313 /* we might want to wait here, but that could deadlock the allocator as
42314 * the slow-work threads writing to the cache may all end up sleeping
42315 * on memory allocation */
42316- fscache_stat(&fscache_n_store_vmscan_busy);
42317+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42318 return false;
42319 }
42320 EXPORT_SYMBOL(__fscache_maybe_release_page);
42321@@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
42322 FSCACHE_COOKIE_STORING_TAG);
42323 if (!radix_tree_tag_get(&cookie->stores, page->index,
42324 FSCACHE_COOKIE_PENDING_TAG)) {
42325- fscache_stat(&fscache_n_store_radix_deletes);
42326+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42327 xpage = radix_tree_delete(&cookie->stores, page->index);
42328 }
42329 spin_unlock(&cookie->stores_lock);
42330@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
42331
42332 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42333
42334- fscache_stat(&fscache_n_attr_changed_calls);
42335+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42336
42337 if (fscache_object_is_active(object)) {
42338 fscache_set_op_state(op, "CallFS");
42339@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
42340
42341 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42342
42343- fscache_stat(&fscache_n_attr_changed);
42344+ fscache_stat_unchecked(&fscache_n_attr_changed);
42345
42346 op = kzalloc(sizeof(*op), GFP_KERNEL);
42347 if (!op) {
42348- fscache_stat(&fscache_n_attr_changed_nomem);
42349+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42350 _leave(" = -ENOMEM");
42351 return -ENOMEM;
42352 }
42353@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
42354 if (fscache_submit_exclusive_op(object, op) < 0)
42355 goto nobufs;
42356 spin_unlock(&cookie->lock);
42357- fscache_stat(&fscache_n_attr_changed_ok);
42358+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42359 fscache_put_operation(op);
42360 _leave(" = 0");
42361 return 0;
42362@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
42363 nobufs:
42364 spin_unlock(&cookie->lock);
42365 kfree(op);
42366- fscache_stat(&fscache_n_attr_changed_nobufs);
42367+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42368 _leave(" = %d", -ENOBUFS);
42369 return -ENOBUFS;
42370 }
42371@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
42372 /* allocate a retrieval operation and attempt to submit it */
42373 op = kzalloc(sizeof(*op), GFP_NOIO);
42374 if (!op) {
42375- fscache_stat(&fscache_n_retrievals_nomem);
42376+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42377 return NULL;
42378 }
42379
42380@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
42381 return 0;
42382 }
42383
42384- fscache_stat(&fscache_n_retrievals_wait);
42385+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
42386
42387 jif = jiffies;
42388 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42389 fscache_wait_bit_interruptible,
42390 TASK_INTERRUPTIBLE) != 0) {
42391- fscache_stat(&fscache_n_retrievals_intr);
42392+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42393 _leave(" = -ERESTARTSYS");
42394 return -ERESTARTSYS;
42395 }
42396@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
42397 */
42398 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42399 struct fscache_retrieval *op,
42400- atomic_t *stat_op_waits,
42401- atomic_t *stat_object_dead)
42402+ atomic_unchecked_t *stat_op_waits,
42403+ atomic_unchecked_t *stat_object_dead)
42404 {
42405 int ret;
42406
42407@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
42408 goto check_if_dead;
42409
42410 _debug(">>> WT");
42411- fscache_stat(stat_op_waits);
42412+ fscache_stat_unchecked(stat_op_waits);
42413 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42414 fscache_wait_bit_interruptible,
42415 TASK_INTERRUPTIBLE) < 0) {
42416@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
42417
42418 check_if_dead:
42419 if (unlikely(fscache_object_is_dead(object))) {
42420- fscache_stat(stat_object_dead);
42421+ fscache_stat_unchecked(stat_object_dead);
42422 return -ENOBUFS;
42423 }
42424 return 0;
42425@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
42426
42427 _enter("%p,%p,,,", cookie, page);
42428
42429- fscache_stat(&fscache_n_retrievals);
42430+ fscache_stat_unchecked(&fscache_n_retrievals);
42431
42432 if (hlist_empty(&cookie->backing_objects))
42433 goto nobufs;
42434@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
42435 goto nobufs_unlock;
42436 spin_unlock(&cookie->lock);
42437
42438- fscache_stat(&fscache_n_retrieval_ops);
42439+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42440
42441 /* pin the netfs read context in case we need to do the actual netfs
42442 * read because we've encountered a cache read failure */
42443@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
42444
42445 error:
42446 if (ret == -ENOMEM)
42447- fscache_stat(&fscache_n_retrievals_nomem);
42448+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42449 else if (ret == -ERESTARTSYS)
42450- fscache_stat(&fscache_n_retrievals_intr);
42451+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42452 else if (ret == -ENODATA)
42453- fscache_stat(&fscache_n_retrievals_nodata);
42454+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42455 else if (ret < 0)
42456- fscache_stat(&fscache_n_retrievals_nobufs);
42457+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42458 else
42459- fscache_stat(&fscache_n_retrievals_ok);
42460+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42461
42462 fscache_put_retrieval(op);
42463 _leave(" = %d", ret);
42464@@ -453,7 +453,7 @@ nobufs_unlock:
42465 spin_unlock(&cookie->lock);
42466 kfree(op);
42467 nobufs:
42468- fscache_stat(&fscache_n_retrievals_nobufs);
42469+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42470 _leave(" = -ENOBUFS");
42471 return -ENOBUFS;
42472 }
42473@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
42474
42475 _enter("%p,,%d,,,", cookie, *nr_pages);
42476
42477- fscache_stat(&fscache_n_retrievals);
42478+ fscache_stat_unchecked(&fscache_n_retrievals);
42479
42480 if (hlist_empty(&cookie->backing_objects))
42481 goto nobufs;
42482@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
42483 goto nobufs_unlock;
42484 spin_unlock(&cookie->lock);
42485
42486- fscache_stat(&fscache_n_retrieval_ops);
42487+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42488
42489 /* pin the netfs read context in case we need to do the actual netfs
42490 * read because we've encountered a cache read failure */
42491@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
42492
42493 error:
42494 if (ret == -ENOMEM)
42495- fscache_stat(&fscache_n_retrievals_nomem);
42496+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42497 else if (ret == -ERESTARTSYS)
42498- fscache_stat(&fscache_n_retrievals_intr);
42499+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42500 else if (ret == -ENODATA)
42501- fscache_stat(&fscache_n_retrievals_nodata);
42502+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42503 else if (ret < 0)
42504- fscache_stat(&fscache_n_retrievals_nobufs);
42505+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42506 else
42507- fscache_stat(&fscache_n_retrievals_ok);
42508+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42509
42510 fscache_put_retrieval(op);
42511 _leave(" = %d", ret);
42512@@ -570,7 +570,7 @@ nobufs_unlock:
42513 spin_unlock(&cookie->lock);
42514 kfree(op);
42515 nobufs:
42516- fscache_stat(&fscache_n_retrievals_nobufs);
42517+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42518 _leave(" = -ENOBUFS");
42519 return -ENOBUFS;
42520 }
42521@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
42522
42523 _enter("%p,%p,,,", cookie, page);
42524
42525- fscache_stat(&fscache_n_allocs);
42526+ fscache_stat_unchecked(&fscache_n_allocs);
42527
42528 if (hlist_empty(&cookie->backing_objects))
42529 goto nobufs;
42530@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
42531 goto nobufs_unlock;
42532 spin_unlock(&cookie->lock);
42533
42534- fscache_stat(&fscache_n_alloc_ops);
42535+ fscache_stat_unchecked(&fscache_n_alloc_ops);
42536
42537 ret = fscache_wait_for_retrieval_activation(
42538 object, op,
42539@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
42540
42541 error:
42542 if (ret == -ERESTARTSYS)
42543- fscache_stat(&fscache_n_allocs_intr);
42544+ fscache_stat_unchecked(&fscache_n_allocs_intr);
42545 else if (ret < 0)
42546- fscache_stat(&fscache_n_allocs_nobufs);
42547+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42548 else
42549- fscache_stat(&fscache_n_allocs_ok);
42550+ fscache_stat_unchecked(&fscache_n_allocs_ok);
42551
42552 fscache_put_retrieval(op);
42553 _leave(" = %d", ret);
42554@@ -651,7 +651,7 @@ nobufs_unlock:
42555 spin_unlock(&cookie->lock);
42556 kfree(op);
42557 nobufs:
42558- fscache_stat(&fscache_n_allocs_nobufs);
42559+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42560 _leave(" = -ENOBUFS");
42561 return -ENOBUFS;
42562 }
42563@@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
42564
42565 spin_lock(&cookie->stores_lock);
42566
42567- fscache_stat(&fscache_n_store_calls);
42568+ fscache_stat_unchecked(&fscache_n_store_calls);
42569
42570 /* find a page to store */
42571 page = NULL;
42572@@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
42573 page = results[0];
42574 _debug("gang %d [%lx]", n, page->index);
42575 if (page->index > op->store_limit) {
42576- fscache_stat(&fscache_n_store_pages_over_limit);
42577+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
42578 goto superseded;
42579 }
42580
42581@@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
42582
42583 if (page) {
42584 fscache_set_op_state(&op->op, "Store");
42585- fscache_stat(&fscache_n_store_pages);
42586+ fscache_stat_unchecked(&fscache_n_store_pages);
42587 fscache_stat(&fscache_n_cop_write_page);
42588 ret = object->cache->ops->write_page(op, page);
42589 fscache_stat_d(&fscache_n_cop_write_page);
42590@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
42591 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42592 ASSERT(PageFsCache(page));
42593
42594- fscache_stat(&fscache_n_stores);
42595+ fscache_stat_unchecked(&fscache_n_stores);
42596
42597 op = kzalloc(sizeof(*op), GFP_NOIO);
42598 if (!op)
42599@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
42600 spin_unlock(&cookie->stores_lock);
42601 spin_unlock(&object->lock);
42602
42603- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
42604+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
42605 op->store_limit = object->store_limit;
42606
42607 if (fscache_submit_op(object, &op->op) < 0)
42608@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
42609
42610 spin_unlock(&cookie->lock);
42611 radix_tree_preload_end();
42612- fscache_stat(&fscache_n_store_ops);
42613- fscache_stat(&fscache_n_stores_ok);
42614+ fscache_stat_unchecked(&fscache_n_store_ops);
42615+ fscache_stat_unchecked(&fscache_n_stores_ok);
42616
42617 /* the slow work queue now carries its own ref on the object */
42618 fscache_put_operation(&op->op);
42619@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
42620 return 0;
42621
42622 already_queued:
42623- fscache_stat(&fscache_n_stores_again);
42624+ fscache_stat_unchecked(&fscache_n_stores_again);
42625 already_pending:
42626 spin_unlock(&cookie->stores_lock);
42627 spin_unlock(&object->lock);
42628 spin_unlock(&cookie->lock);
42629 radix_tree_preload_end();
42630 kfree(op);
42631- fscache_stat(&fscache_n_stores_ok);
42632+ fscache_stat_unchecked(&fscache_n_stores_ok);
42633 _leave(" = 0");
42634 return 0;
42635
42636@@ -886,14 +886,14 @@ nobufs:
42637 spin_unlock(&cookie->lock);
42638 radix_tree_preload_end();
42639 kfree(op);
42640- fscache_stat(&fscache_n_stores_nobufs);
42641+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
42642 _leave(" = -ENOBUFS");
42643 return -ENOBUFS;
42644
42645 nomem_free:
42646 kfree(op);
42647 nomem:
42648- fscache_stat(&fscache_n_stores_oom);
42649+ fscache_stat_unchecked(&fscache_n_stores_oom);
42650 _leave(" = -ENOMEM");
42651 return -ENOMEM;
42652 }
42653@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
42654 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42655 ASSERTCMP(page, !=, NULL);
42656
42657- fscache_stat(&fscache_n_uncaches);
42658+ fscache_stat_unchecked(&fscache_n_uncaches);
42659
42660 /* cache withdrawal may beat us to it */
42661 if (!PageFsCache(page))
42662@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
42663 unsigned long loop;
42664
42665 #ifdef CONFIG_FSCACHE_STATS
42666- atomic_add(pagevec->nr, &fscache_n_marks);
42667+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
42668 #endif
42669
42670 for (loop = 0; loop < pagevec->nr; loop++) {
42671diff -urNp linux-2.6.32.45/fs/fscache/stats.c linux-2.6.32.45/fs/fscache/stats.c
42672--- linux-2.6.32.45/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
42673+++ linux-2.6.32.45/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
42674@@ -18,95 +18,95 @@
42675 /*
42676 * operation counters
42677 */
42678-atomic_t fscache_n_op_pend;
42679-atomic_t fscache_n_op_run;
42680-atomic_t fscache_n_op_enqueue;
42681-atomic_t fscache_n_op_requeue;
42682-atomic_t fscache_n_op_deferred_release;
42683-atomic_t fscache_n_op_release;
42684-atomic_t fscache_n_op_gc;
42685-atomic_t fscache_n_op_cancelled;
42686-atomic_t fscache_n_op_rejected;
42687-
42688-atomic_t fscache_n_attr_changed;
42689-atomic_t fscache_n_attr_changed_ok;
42690-atomic_t fscache_n_attr_changed_nobufs;
42691-atomic_t fscache_n_attr_changed_nomem;
42692-atomic_t fscache_n_attr_changed_calls;
42693-
42694-atomic_t fscache_n_allocs;
42695-atomic_t fscache_n_allocs_ok;
42696-atomic_t fscache_n_allocs_wait;
42697-atomic_t fscache_n_allocs_nobufs;
42698-atomic_t fscache_n_allocs_intr;
42699-atomic_t fscache_n_allocs_object_dead;
42700-atomic_t fscache_n_alloc_ops;
42701-atomic_t fscache_n_alloc_op_waits;
42702-
42703-atomic_t fscache_n_retrievals;
42704-atomic_t fscache_n_retrievals_ok;
42705-atomic_t fscache_n_retrievals_wait;
42706-atomic_t fscache_n_retrievals_nodata;
42707-atomic_t fscache_n_retrievals_nobufs;
42708-atomic_t fscache_n_retrievals_intr;
42709-atomic_t fscache_n_retrievals_nomem;
42710-atomic_t fscache_n_retrievals_object_dead;
42711-atomic_t fscache_n_retrieval_ops;
42712-atomic_t fscache_n_retrieval_op_waits;
42713-
42714-atomic_t fscache_n_stores;
42715-atomic_t fscache_n_stores_ok;
42716-atomic_t fscache_n_stores_again;
42717-atomic_t fscache_n_stores_nobufs;
42718-atomic_t fscache_n_stores_oom;
42719-atomic_t fscache_n_store_ops;
42720-atomic_t fscache_n_store_calls;
42721-atomic_t fscache_n_store_pages;
42722-atomic_t fscache_n_store_radix_deletes;
42723-atomic_t fscache_n_store_pages_over_limit;
42724-
42725-atomic_t fscache_n_store_vmscan_not_storing;
42726-atomic_t fscache_n_store_vmscan_gone;
42727-atomic_t fscache_n_store_vmscan_busy;
42728-atomic_t fscache_n_store_vmscan_cancelled;
42729-
42730-atomic_t fscache_n_marks;
42731-atomic_t fscache_n_uncaches;
42732-
42733-atomic_t fscache_n_acquires;
42734-atomic_t fscache_n_acquires_null;
42735-atomic_t fscache_n_acquires_no_cache;
42736-atomic_t fscache_n_acquires_ok;
42737-atomic_t fscache_n_acquires_nobufs;
42738-atomic_t fscache_n_acquires_oom;
42739-
42740-atomic_t fscache_n_updates;
42741-atomic_t fscache_n_updates_null;
42742-atomic_t fscache_n_updates_run;
42743-
42744-atomic_t fscache_n_relinquishes;
42745-atomic_t fscache_n_relinquishes_null;
42746-atomic_t fscache_n_relinquishes_waitcrt;
42747-atomic_t fscache_n_relinquishes_retire;
42748-
42749-atomic_t fscache_n_cookie_index;
42750-atomic_t fscache_n_cookie_data;
42751-atomic_t fscache_n_cookie_special;
42752-
42753-atomic_t fscache_n_object_alloc;
42754-atomic_t fscache_n_object_no_alloc;
42755-atomic_t fscache_n_object_lookups;
42756-atomic_t fscache_n_object_lookups_negative;
42757-atomic_t fscache_n_object_lookups_positive;
42758-atomic_t fscache_n_object_lookups_timed_out;
42759-atomic_t fscache_n_object_created;
42760-atomic_t fscache_n_object_avail;
42761-atomic_t fscache_n_object_dead;
42762-
42763-atomic_t fscache_n_checkaux_none;
42764-atomic_t fscache_n_checkaux_okay;
42765-atomic_t fscache_n_checkaux_update;
42766-atomic_t fscache_n_checkaux_obsolete;
42767+atomic_unchecked_t fscache_n_op_pend;
42768+atomic_unchecked_t fscache_n_op_run;
42769+atomic_unchecked_t fscache_n_op_enqueue;
42770+atomic_unchecked_t fscache_n_op_requeue;
42771+atomic_unchecked_t fscache_n_op_deferred_release;
42772+atomic_unchecked_t fscache_n_op_release;
42773+atomic_unchecked_t fscache_n_op_gc;
42774+atomic_unchecked_t fscache_n_op_cancelled;
42775+atomic_unchecked_t fscache_n_op_rejected;
42776+
42777+atomic_unchecked_t fscache_n_attr_changed;
42778+atomic_unchecked_t fscache_n_attr_changed_ok;
42779+atomic_unchecked_t fscache_n_attr_changed_nobufs;
42780+atomic_unchecked_t fscache_n_attr_changed_nomem;
42781+atomic_unchecked_t fscache_n_attr_changed_calls;
42782+
42783+atomic_unchecked_t fscache_n_allocs;
42784+atomic_unchecked_t fscache_n_allocs_ok;
42785+atomic_unchecked_t fscache_n_allocs_wait;
42786+atomic_unchecked_t fscache_n_allocs_nobufs;
42787+atomic_unchecked_t fscache_n_allocs_intr;
42788+atomic_unchecked_t fscache_n_allocs_object_dead;
42789+atomic_unchecked_t fscache_n_alloc_ops;
42790+atomic_unchecked_t fscache_n_alloc_op_waits;
42791+
42792+atomic_unchecked_t fscache_n_retrievals;
42793+atomic_unchecked_t fscache_n_retrievals_ok;
42794+atomic_unchecked_t fscache_n_retrievals_wait;
42795+atomic_unchecked_t fscache_n_retrievals_nodata;
42796+atomic_unchecked_t fscache_n_retrievals_nobufs;
42797+atomic_unchecked_t fscache_n_retrievals_intr;
42798+atomic_unchecked_t fscache_n_retrievals_nomem;
42799+atomic_unchecked_t fscache_n_retrievals_object_dead;
42800+atomic_unchecked_t fscache_n_retrieval_ops;
42801+atomic_unchecked_t fscache_n_retrieval_op_waits;
42802+
42803+atomic_unchecked_t fscache_n_stores;
42804+atomic_unchecked_t fscache_n_stores_ok;
42805+atomic_unchecked_t fscache_n_stores_again;
42806+atomic_unchecked_t fscache_n_stores_nobufs;
42807+atomic_unchecked_t fscache_n_stores_oom;
42808+atomic_unchecked_t fscache_n_store_ops;
42809+atomic_unchecked_t fscache_n_store_calls;
42810+atomic_unchecked_t fscache_n_store_pages;
42811+atomic_unchecked_t fscache_n_store_radix_deletes;
42812+atomic_unchecked_t fscache_n_store_pages_over_limit;
42813+
42814+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42815+atomic_unchecked_t fscache_n_store_vmscan_gone;
42816+atomic_unchecked_t fscache_n_store_vmscan_busy;
42817+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42818+
42819+atomic_unchecked_t fscache_n_marks;
42820+atomic_unchecked_t fscache_n_uncaches;
42821+
42822+atomic_unchecked_t fscache_n_acquires;
42823+atomic_unchecked_t fscache_n_acquires_null;
42824+atomic_unchecked_t fscache_n_acquires_no_cache;
42825+atomic_unchecked_t fscache_n_acquires_ok;
42826+atomic_unchecked_t fscache_n_acquires_nobufs;
42827+atomic_unchecked_t fscache_n_acquires_oom;
42828+
42829+atomic_unchecked_t fscache_n_updates;
42830+atomic_unchecked_t fscache_n_updates_null;
42831+atomic_unchecked_t fscache_n_updates_run;
42832+
42833+atomic_unchecked_t fscache_n_relinquishes;
42834+atomic_unchecked_t fscache_n_relinquishes_null;
42835+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42836+atomic_unchecked_t fscache_n_relinquishes_retire;
42837+
42838+atomic_unchecked_t fscache_n_cookie_index;
42839+atomic_unchecked_t fscache_n_cookie_data;
42840+atomic_unchecked_t fscache_n_cookie_special;
42841+
42842+atomic_unchecked_t fscache_n_object_alloc;
42843+atomic_unchecked_t fscache_n_object_no_alloc;
42844+atomic_unchecked_t fscache_n_object_lookups;
42845+atomic_unchecked_t fscache_n_object_lookups_negative;
42846+atomic_unchecked_t fscache_n_object_lookups_positive;
42847+atomic_unchecked_t fscache_n_object_lookups_timed_out;
42848+atomic_unchecked_t fscache_n_object_created;
42849+atomic_unchecked_t fscache_n_object_avail;
42850+atomic_unchecked_t fscache_n_object_dead;
42851+
42852+atomic_unchecked_t fscache_n_checkaux_none;
42853+atomic_unchecked_t fscache_n_checkaux_okay;
42854+atomic_unchecked_t fscache_n_checkaux_update;
42855+atomic_unchecked_t fscache_n_checkaux_obsolete;
42856
42857 atomic_t fscache_n_cop_alloc_object;
42858 atomic_t fscache_n_cop_lookup_object;
42859@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
42860 seq_puts(m, "FS-Cache statistics\n");
42861
42862 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
42863- atomic_read(&fscache_n_cookie_index),
42864- atomic_read(&fscache_n_cookie_data),
42865- atomic_read(&fscache_n_cookie_special));
42866+ atomic_read_unchecked(&fscache_n_cookie_index),
42867+ atomic_read_unchecked(&fscache_n_cookie_data),
42868+ atomic_read_unchecked(&fscache_n_cookie_special));
42869
42870 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
42871- atomic_read(&fscache_n_object_alloc),
42872- atomic_read(&fscache_n_object_no_alloc),
42873- atomic_read(&fscache_n_object_avail),
42874- atomic_read(&fscache_n_object_dead));
42875+ atomic_read_unchecked(&fscache_n_object_alloc),
42876+ atomic_read_unchecked(&fscache_n_object_no_alloc),
42877+ atomic_read_unchecked(&fscache_n_object_avail),
42878+ atomic_read_unchecked(&fscache_n_object_dead));
42879 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
42880- atomic_read(&fscache_n_checkaux_none),
42881- atomic_read(&fscache_n_checkaux_okay),
42882- atomic_read(&fscache_n_checkaux_update),
42883- atomic_read(&fscache_n_checkaux_obsolete));
42884+ atomic_read_unchecked(&fscache_n_checkaux_none),
42885+ atomic_read_unchecked(&fscache_n_checkaux_okay),
42886+ atomic_read_unchecked(&fscache_n_checkaux_update),
42887+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
42888
42889 seq_printf(m, "Pages : mrk=%u unc=%u\n",
42890- atomic_read(&fscache_n_marks),
42891- atomic_read(&fscache_n_uncaches));
42892+ atomic_read_unchecked(&fscache_n_marks),
42893+ atomic_read_unchecked(&fscache_n_uncaches));
42894
42895 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
42896 " oom=%u\n",
42897- atomic_read(&fscache_n_acquires),
42898- atomic_read(&fscache_n_acquires_null),
42899- atomic_read(&fscache_n_acquires_no_cache),
42900- atomic_read(&fscache_n_acquires_ok),
42901- atomic_read(&fscache_n_acquires_nobufs),
42902- atomic_read(&fscache_n_acquires_oom));
42903+ atomic_read_unchecked(&fscache_n_acquires),
42904+ atomic_read_unchecked(&fscache_n_acquires_null),
42905+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
42906+ atomic_read_unchecked(&fscache_n_acquires_ok),
42907+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
42908+ atomic_read_unchecked(&fscache_n_acquires_oom));
42909
42910 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
42911- atomic_read(&fscache_n_object_lookups),
42912- atomic_read(&fscache_n_object_lookups_negative),
42913- atomic_read(&fscache_n_object_lookups_positive),
42914- atomic_read(&fscache_n_object_lookups_timed_out),
42915- atomic_read(&fscache_n_object_created));
42916+ atomic_read_unchecked(&fscache_n_object_lookups),
42917+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
42918+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
42919+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
42920+ atomic_read_unchecked(&fscache_n_object_created));
42921
42922 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
42923- atomic_read(&fscache_n_updates),
42924- atomic_read(&fscache_n_updates_null),
42925- atomic_read(&fscache_n_updates_run));
42926+ atomic_read_unchecked(&fscache_n_updates),
42927+ atomic_read_unchecked(&fscache_n_updates_null),
42928+ atomic_read_unchecked(&fscache_n_updates_run));
42929
42930 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
42931- atomic_read(&fscache_n_relinquishes),
42932- atomic_read(&fscache_n_relinquishes_null),
42933- atomic_read(&fscache_n_relinquishes_waitcrt),
42934- atomic_read(&fscache_n_relinquishes_retire));
42935+ atomic_read_unchecked(&fscache_n_relinquishes),
42936+ atomic_read_unchecked(&fscache_n_relinquishes_null),
42937+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
42938+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
42939
42940 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
42941- atomic_read(&fscache_n_attr_changed),
42942- atomic_read(&fscache_n_attr_changed_ok),
42943- atomic_read(&fscache_n_attr_changed_nobufs),
42944- atomic_read(&fscache_n_attr_changed_nomem),
42945- atomic_read(&fscache_n_attr_changed_calls));
42946+ atomic_read_unchecked(&fscache_n_attr_changed),
42947+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
42948+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
42949+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
42950+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
42951
42952 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
42953- atomic_read(&fscache_n_allocs),
42954- atomic_read(&fscache_n_allocs_ok),
42955- atomic_read(&fscache_n_allocs_wait),
42956- atomic_read(&fscache_n_allocs_nobufs),
42957- atomic_read(&fscache_n_allocs_intr));
42958+ atomic_read_unchecked(&fscache_n_allocs),
42959+ atomic_read_unchecked(&fscache_n_allocs_ok),
42960+ atomic_read_unchecked(&fscache_n_allocs_wait),
42961+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
42962+ atomic_read_unchecked(&fscache_n_allocs_intr));
42963 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
42964- atomic_read(&fscache_n_alloc_ops),
42965- atomic_read(&fscache_n_alloc_op_waits),
42966- atomic_read(&fscache_n_allocs_object_dead));
42967+ atomic_read_unchecked(&fscache_n_alloc_ops),
42968+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
42969+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
42970
42971 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
42972 " int=%u oom=%u\n",
42973- atomic_read(&fscache_n_retrievals),
42974- atomic_read(&fscache_n_retrievals_ok),
42975- atomic_read(&fscache_n_retrievals_wait),
42976- atomic_read(&fscache_n_retrievals_nodata),
42977- atomic_read(&fscache_n_retrievals_nobufs),
42978- atomic_read(&fscache_n_retrievals_intr),
42979- atomic_read(&fscache_n_retrievals_nomem));
42980+ atomic_read_unchecked(&fscache_n_retrievals),
42981+ atomic_read_unchecked(&fscache_n_retrievals_ok),
42982+ atomic_read_unchecked(&fscache_n_retrievals_wait),
42983+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
42984+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
42985+ atomic_read_unchecked(&fscache_n_retrievals_intr),
42986+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
42987 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
42988- atomic_read(&fscache_n_retrieval_ops),
42989- atomic_read(&fscache_n_retrieval_op_waits),
42990- atomic_read(&fscache_n_retrievals_object_dead));
42991+ atomic_read_unchecked(&fscache_n_retrieval_ops),
42992+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
42993+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
42994
42995 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
42996- atomic_read(&fscache_n_stores),
42997- atomic_read(&fscache_n_stores_ok),
42998- atomic_read(&fscache_n_stores_again),
42999- atomic_read(&fscache_n_stores_nobufs),
43000- atomic_read(&fscache_n_stores_oom));
43001+ atomic_read_unchecked(&fscache_n_stores),
43002+ atomic_read_unchecked(&fscache_n_stores_ok),
43003+ atomic_read_unchecked(&fscache_n_stores_again),
43004+ atomic_read_unchecked(&fscache_n_stores_nobufs),
43005+ atomic_read_unchecked(&fscache_n_stores_oom));
43006 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43007- atomic_read(&fscache_n_store_ops),
43008- atomic_read(&fscache_n_store_calls),
43009- atomic_read(&fscache_n_store_pages),
43010- atomic_read(&fscache_n_store_radix_deletes),
43011- atomic_read(&fscache_n_store_pages_over_limit));
43012+ atomic_read_unchecked(&fscache_n_store_ops),
43013+ atomic_read_unchecked(&fscache_n_store_calls),
43014+ atomic_read_unchecked(&fscache_n_store_pages),
43015+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
43016+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43017
43018 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43019- atomic_read(&fscache_n_store_vmscan_not_storing),
43020- atomic_read(&fscache_n_store_vmscan_gone),
43021- atomic_read(&fscache_n_store_vmscan_busy),
43022- atomic_read(&fscache_n_store_vmscan_cancelled));
43023+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43024+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43025+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43026+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43027
43028 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43029- atomic_read(&fscache_n_op_pend),
43030- atomic_read(&fscache_n_op_run),
43031- atomic_read(&fscache_n_op_enqueue),
43032- atomic_read(&fscache_n_op_cancelled),
43033- atomic_read(&fscache_n_op_rejected));
43034+ atomic_read_unchecked(&fscache_n_op_pend),
43035+ atomic_read_unchecked(&fscache_n_op_run),
43036+ atomic_read_unchecked(&fscache_n_op_enqueue),
43037+ atomic_read_unchecked(&fscache_n_op_cancelled),
43038+ atomic_read_unchecked(&fscache_n_op_rejected));
43039 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43040- atomic_read(&fscache_n_op_deferred_release),
43041- atomic_read(&fscache_n_op_release),
43042- atomic_read(&fscache_n_op_gc));
43043+ atomic_read_unchecked(&fscache_n_op_deferred_release),
43044+ atomic_read_unchecked(&fscache_n_op_release),
43045+ atomic_read_unchecked(&fscache_n_op_gc));
43046
43047 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43048 atomic_read(&fscache_n_cop_alloc_object),
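The fscache hunk above is a mechanical substitution: every statistics counter becomes atomic_unchecked_t and every reader goes through atomic_read_unchecked(), so the reference-count overflow protection added elsewhere in this patch is not applied to counters that are only statistics and may legitimately wrap. As a rough model only (the real definitions live in the per-architecture atomic headers of the patch; the names and builtins below are illustrative), the non-hardened fallback behaves like a plain wrapping counter:

typedef struct {
	int counter;
} atomic_unchecked_sketch_t;

#define ATOMIC_UNCHECKED_SKETCH_INIT(i)	{ (i) }

/* read without any overflow/saturation logic */
static inline int atomic_read_unchecked_sketch(const atomic_unchecked_sketch_t *v)
{
	return v->counter;
}

/* increment that is allowed to wrap; the __sync_* builtin stands in for the
 * architecture's atomic add */
static inline void atomic_inc_unchecked_sketch(atomic_unchecked_sketch_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);
}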
43049diff -urNp linux-2.6.32.45/fs/fs_struct.c linux-2.6.32.45/fs/fs_struct.c
43050--- linux-2.6.32.45/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
43051+++ linux-2.6.32.45/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
43052@@ -4,6 +4,7 @@
43053 #include <linux/path.h>
43054 #include <linux/slab.h>
43055 #include <linux/fs_struct.h>
43056+#include <linux/grsecurity.h>
43057
43058 /*
43059 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
43060@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
43061 old_root = fs->root;
43062 fs->root = *path;
43063 path_get(path);
43064+ gr_set_chroot_entries(current, path);
43065 write_unlock(&fs->lock);
43066 if (old_root.dentry)
43067 path_put(&old_root);
43068@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
43069 && fs->root.mnt == old_root->mnt) {
43070 path_get(new_root);
43071 fs->root = *new_root;
43072+ gr_set_chroot_entries(p, new_root);
43073 count++;
43074 }
43075 if (fs->pwd.dentry == old_root->dentry
43076@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
43077 task_lock(tsk);
43078 write_lock(&fs->lock);
43079 tsk->fs = NULL;
43080- kill = !--fs->users;
43081+ gr_clear_chroot_entries(tsk);
43082+ kill = !atomic_dec_return(&fs->users);
43083 write_unlock(&fs->lock);
43084 task_unlock(tsk);
43085 if (kill)
43086@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
43087 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43088 /* We don't need to lock fs - think why ;-) */
43089 if (fs) {
43090- fs->users = 1;
43091+ atomic_set(&fs->users, 1);
43092 fs->in_exec = 0;
43093 rwlock_init(&fs->lock);
43094 fs->umask = old->umask;
43095@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
43096
43097 task_lock(current);
43098 write_lock(&fs->lock);
43099- kill = !--fs->users;
43100+ kill = !atomic_dec_return(&fs->users);
43101 current->fs = new_fs;
43102+ gr_set_chroot_entries(current, &new_fs->root);
43103 write_unlock(&fs->lock);
43104 task_unlock(current);
43105
43106@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
43107
43108 /* to be mentioned only in INIT_TASK */
43109 struct fs_struct init_fs = {
43110- .users = 1,
43111+ .users = ATOMIC_INIT(1),
43112 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
43113 .umask = 0022,
43114 };
43115@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
43116 task_lock(current);
43117
43118 write_lock(&init_fs.lock);
43119- init_fs.users++;
43120+ atomic_inc(&init_fs.users);
43121 write_unlock(&init_fs.lock);
43122
43123 write_lock(&fs->lock);
43124 current->fs = &init_fs;
43125- kill = !--fs->users;
43126+ gr_set_chroot_entries(current, &current->fs->root);
43127+ kill = !atomic_dec_return(&fs->users);
43128 write_unlock(&fs->lock);
43129
43130 task_unlock(current);
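Two independent changes are interleaved in the fs_struct.c hunks above: chroot bookkeeping calls (gr_set_chroot_entries()/gr_clear_chroot_entries()) are added wherever a task's root changes, and fs->users is converted from a plain integer protected by fs->lock into an atomic_t, with the "last user drops it" test expressed as !atomic_dec_return(). A standalone sketch of that drop pattern, using C11 atomics in place of the kernel's atomic_t (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct fs_struct_sketch {
	atomic_int users;		/* plays the role of the converted fs->users */
};

/* returns true when the caller dropped the final reference,
 * mirroring kill = !atomic_dec_return(&fs->users) in the hunks */
static bool drop_fs_user_sketch(struct fs_struct_sketch *fs)
{
	return atomic_fetch_sub(&fs->users, 1) == 1;
}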
43131diff -urNp linux-2.6.32.45/fs/fuse/cuse.c linux-2.6.32.45/fs/fuse/cuse.c
43132--- linux-2.6.32.45/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
43133+++ linux-2.6.32.45/fs/fuse/cuse.c 2011-08-05 20:33:55.000000000 -0400
43134@@ -576,10 +576,12 @@ static int __init cuse_init(void)
43135 INIT_LIST_HEAD(&cuse_conntbl[i]);
43136
43137 /* inherit and extend fuse_dev_operations */
43138- cuse_channel_fops = fuse_dev_operations;
43139- cuse_channel_fops.owner = THIS_MODULE;
43140- cuse_channel_fops.open = cuse_channel_open;
43141- cuse_channel_fops.release = cuse_channel_release;
43142+ pax_open_kernel();
43143+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43144+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43145+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
43146+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
43147+ pax_close_kernel();
43148
43149 cuse_class = class_create(THIS_MODULE, "cuse");
43150 if (IS_ERR(cuse_class))
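The cuse_init() hunk stops assigning to cuse_channel_fops directly: once this patch constifies file_operations instances, the structure is expected to sit in read-only memory, so the one-time setup writes through casted pointers inside a pax_open_kernel()/pax_close_kernel() window. A userspace analogue of that idiom, with mprotect() standing in for the kernel write window (page size, error handling, and names are simplified assumptions):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

struct ops_sketch {
	int (*open)(void);
	int (*release)(void);
};

static int my_open(void)    { return 1; }
static int my_release(void) { return 2; }

int main(void)
{
	size_t pagesz = 4096;	/* assume 4 KiB pages for the sketch */
	void *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct ops_sketch *fops = page;

	mprotect(page, pagesz, PROT_READ);		/* steady state: read-only       */

	mprotect(page, pagesz, PROT_READ | PROT_WRITE);	/* roughly pax_open_kernel()     */
	memset(fops, 0, sizeof(*fops));
	fops->open = my_open;				/* patch the function table once */
	fops->release = my_release;
	mprotect(page, pagesz, PROT_READ);		/* roughly pax_close_kernel()    */

	printf("open() -> %d\n", fops->open());
	return 0;
}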
43151diff -urNp linux-2.6.32.45/fs/fuse/dev.c linux-2.6.32.45/fs/fuse/dev.c
43152--- linux-2.6.32.45/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
43153+++ linux-2.6.32.45/fs/fuse/dev.c 2011-08-05 20:33:55.000000000 -0400
43154@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
43155 {
43156 struct fuse_notify_inval_entry_out outarg;
43157 int err = -EINVAL;
43158- char buf[FUSE_NAME_MAX+1];
43159+ char *buf = NULL;
43160 struct qstr name;
43161
43162 if (size < sizeof(outarg))
43163@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
43164 if (outarg.namelen > FUSE_NAME_MAX)
43165 goto err;
43166
43167+ err = -ENOMEM;
43168+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
43169+ if (!buf)
43170+ goto err;
43171+
43172 name.name = buf;
43173 name.len = outarg.namelen;
43174 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
43175@@ -910,17 +915,15 @@ static int fuse_notify_inval_entry(struc
43176
43177 down_read(&fc->killsb);
43178 err = -ENOENT;
43179- if (!fc->sb)
43180- goto err_unlock;
43181-
43182- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43183-
43184-err_unlock:
43185+ if (fc->sb)
43186+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43187 up_read(&fc->killsb);
43188+ kfree(buf);
43189 return err;
43190
43191 err:
43192 fuse_copy_finish(cs);
43193+ kfree(buf);
43194 return err;
43195 }
43196
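The fuse_notify_inval_entry() change above replaces a FUSE_NAME_MAX+1 byte array on the kernel stack with a kmalloc()'d buffer and frees it on both the success and the error path; the unwinding is also flattened so a single up_read()/kfree() pair covers both cases. A minimal standalone model of that shape, with malloc()/free() standing in for kmalloc()/kfree():

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_SKETCH 1024		/* stands in for FUSE_NAME_MAX */

static int handle_name_sketch(const char *src, size_t namelen)
{
	char *buf;
	int err;

	if (namelen > NAME_MAX_SKETCH)
		return -EINVAL;

	buf = malloc(NAME_MAX_SKETCH + 1);	/* heap allocation replaces the big stack array */
	if (!buf)
		return -ENOMEM;

	memcpy(buf, src, namelen);
	buf[namelen] = '\0';

	err = 0;			/* the real work with the name would happen here */

	free(buf);			/* released on every exit path, as in the hunk */
	return err;
}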
43197diff -urNp linux-2.6.32.45/fs/fuse/dir.c linux-2.6.32.45/fs/fuse/dir.c
43198--- linux-2.6.32.45/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
43199+++ linux-2.6.32.45/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
43200@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
43201 return link;
43202 }
43203
43204-static void free_link(char *link)
43205+static void free_link(const char *link)
43206 {
43207 if (!IS_ERR(link))
43208 free_page((unsigned long) link);
43209diff -urNp linux-2.6.32.45/fs/gfs2/ops_inode.c linux-2.6.32.45/fs/gfs2/ops_inode.c
43210--- linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
43211+++ linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
43212@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
43213 unsigned int x;
43214 int error;
43215
43216+ pax_track_stack();
43217+
43218 if (ndentry->d_inode) {
43219 nip = GFS2_I(ndentry->d_inode);
43220 if (ip == nip)
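From here on, many hunks consist of a single added pax_track_stack() call at the top of functions with unusually large stack frames (gfs2, hfsplus, jbd, jffs2, lockd, ncpfs, nfsd, ocfs2, and so on). The helper itself is defined elsewhere in the patch; loosely speaking, it records how deeply the kernel stack is currently used so the stack-sanitizing feature knows how much to clear later. A very rough userspace model of that idea (the details are assumptions, not the patch's implementation):

#include <stdint.h>
#include <stdio.h>

static uintptr_t lowest_sp;		/* deepest stack address seen so far */

static void track_stack_sketch(void)
{
	char marker;
	uintptr_t sp = (uintptr_t)&marker;

	if (!lowest_sp || sp < lowest_sp)	/* stacks grow downwards here */
		lowest_sp = sp;
}

static void big_frame_function(void)
{
	volatile char big[4096];	/* the kind of frame these hunks annotate */

	track_stack_sketch();
	big[0] = 0;
}

int main(void)
{
	big_frame_function();
	printf("deepest stack address observed: %#lx\n", (unsigned long)lowest_sp);
	return 0;
}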
43221diff -urNp linux-2.6.32.45/fs/gfs2/sys.c linux-2.6.32.45/fs/gfs2/sys.c
43222--- linux-2.6.32.45/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
43223+++ linux-2.6.32.45/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
43224@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
43225 return a->store ? a->store(sdp, buf, len) : len;
43226 }
43227
43228-static struct sysfs_ops gfs2_attr_ops = {
43229+static const struct sysfs_ops gfs2_attr_ops = {
43230 .show = gfs2_attr_show,
43231 .store = gfs2_attr_store,
43232 };
43233@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
43234 return 0;
43235 }
43236
43237-static struct kset_uevent_ops gfs2_uevent_ops = {
43238+static const struct kset_uevent_ops gfs2_uevent_ops = {
43239 .uevent = gfs2_uevent,
43240 };
43241
43242diff -urNp linux-2.6.32.45/fs/hfsplus/catalog.c linux-2.6.32.45/fs/hfsplus/catalog.c
43243--- linux-2.6.32.45/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
43244+++ linux-2.6.32.45/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
43245@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
43246 int err;
43247 u16 type;
43248
43249+ pax_track_stack();
43250+
43251 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43252 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43253 if (err)
43254@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
43255 int entry_size;
43256 int err;
43257
43258+ pax_track_stack();
43259+
43260 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
43261 sb = dir->i_sb;
43262 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
43263@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
43264 int entry_size, type;
43265 int err = 0;
43266
43267+ pax_track_stack();
43268+
43269 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
43270 dst_dir->i_ino, dst_name->name);
43271 sb = src_dir->i_sb;
43272diff -urNp linux-2.6.32.45/fs/hfsplus/dir.c linux-2.6.32.45/fs/hfsplus/dir.c
43273--- linux-2.6.32.45/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
43274+++ linux-2.6.32.45/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
43275@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
43276 struct hfsplus_readdir_data *rd;
43277 u16 type;
43278
43279+ pax_track_stack();
43280+
43281 if (filp->f_pos >= inode->i_size)
43282 return 0;
43283
43284diff -urNp linux-2.6.32.45/fs/hfsplus/inode.c linux-2.6.32.45/fs/hfsplus/inode.c
43285--- linux-2.6.32.45/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
43286+++ linux-2.6.32.45/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
43287@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
43288 int res = 0;
43289 u16 type;
43290
43291+ pax_track_stack();
43292+
43293 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43294
43295 HFSPLUS_I(inode).dev = 0;
43296@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
43297 struct hfs_find_data fd;
43298 hfsplus_cat_entry entry;
43299
43300+ pax_track_stack();
43301+
43302 if (HFSPLUS_IS_RSRC(inode))
43303 main_inode = HFSPLUS_I(inode).rsrc_inode;
43304
43305diff -urNp linux-2.6.32.45/fs/hfsplus/ioctl.c linux-2.6.32.45/fs/hfsplus/ioctl.c
43306--- linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43307+++ linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
43308@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
43309 struct hfsplus_cat_file *file;
43310 int res;
43311
43312+ pax_track_stack();
43313+
43314 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43315 return -EOPNOTSUPP;
43316
43317@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43318 struct hfsplus_cat_file *file;
43319 ssize_t res = 0;
43320
43321+ pax_track_stack();
43322+
43323 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43324 return -EOPNOTSUPP;
43325
43326diff -urNp linux-2.6.32.45/fs/hfsplus/super.c linux-2.6.32.45/fs/hfsplus/super.c
43327--- linux-2.6.32.45/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
43328+++ linux-2.6.32.45/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
43329@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
43330 struct nls_table *nls = NULL;
43331 int err = -EINVAL;
43332
43333+ pax_track_stack();
43334+
43335 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43336 if (!sbi)
43337 return -ENOMEM;
43338diff -urNp linux-2.6.32.45/fs/hugetlbfs/inode.c linux-2.6.32.45/fs/hugetlbfs/inode.c
43339--- linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43340+++ linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
43341@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
43342 .kill_sb = kill_litter_super,
43343 };
43344
43345-static struct vfsmount *hugetlbfs_vfsmount;
43346+struct vfsmount *hugetlbfs_vfsmount;
43347
43348 static int can_do_hugetlb_shm(void)
43349 {
43350diff -urNp linux-2.6.32.45/fs/ioctl.c linux-2.6.32.45/fs/ioctl.c
43351--- linux-2.6.32.45/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43352+++ linux-2.6.32.45/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
43353@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
43354 u64 phys, u64 len, u32 flags)
43355 {
43356 struct fiemap_extent extent;
43357- struct fiemap_extent *dest = fieinfo->fi_extents_start;
43358+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
43359
43360 /* only count the extents */
43361 if (fieinfo->fi_extents_max == 0) {
43362@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
43363
43364 fieinfo.fi_flags = fiemap.fm_flags;
43365 fieinfo.fi_extents_max = fiemap.fm_extent_count;
43366- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
43367+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
43368
43369 if (fiemap.fm_extent_count != 0 &&
43370 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
43371@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
43372 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
43373 fiemap.fm_flags = fieinfo.fi_flags;
43374 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
43375- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
43376+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
43377 error = -EFAULT;
43378
43379 return error;
43380diff -urNp linux-2.6.32.45/fs/jbd/checkpoint.c linux-2.6.32.45/fs/jbd/checkpoint.c
43381--- linux-2.6.32.45/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
43382+++ linux-2.6.32.45/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
43383@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
43384 tid_t this_tid;
43385 int result;
43386
43387+ pax_track_stack();
43388+
43389 jbd_debug(1, "Start checkpoint\n");
43390
43391 /*
43392diff -urNp linux-2.6.32.45/fs/jffs2/compr_rtime.c linux-2.6.32.45/fs/jffs2/compr_rtime.c
43393--- linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
43394+++ linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
43395@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43396 int outpos = 0;
43397 int pos=0;
43398
43399+ pax_track_stack();
43400+
43401 memset(positions,0,sizeof(positions));
43402
43403 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43404@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
43405 int outpos = 0;
43406 int pos=0;
43407
43408+ pax_track_stack();
43409+
43410 memset(positions,0,sizeof(positions));
43411
43412 while (outpos<destlen) {
43413diff -urNp linux-2.6.32.45/fs/jffs2/compr_rubin.c linux-2.6.32.45/fs/jffs2/compr_rubin.c
43414--- linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
43415+++ linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
43416@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43417 int ret;
43418 uint32_t mysrclen, mydstlen;
43419
43420+ pax_track_stack();
43421+
43422 mysrclen = *sourcelen;
43423 mydstlen = *dstlen - 8;
43424
43425diff -urNp linux-2.6.32.45/fs/jffs2/erase.c linux-2.6.32.45/fs/jffs2/erase.c
43426--- linux-2.6.32.45/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
43427+++ linux-2.6.32.45/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
43428@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
43429 struct jffs2_unknown_node marker = {
43430 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43431 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43432- .totlen = cpu_to_je32(c->cleanmarker_size)
43433+ .totlen = cpu_to_je32(c->cleanmarker_size),
43434+ .hdr_crc = cpu_to_je32(0)
43435 };
43436
43437 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43438diff -urNp linux-2.6.32.45/fs/jffs2/wbuf.c linux-2.6.32.45/fs/jffs2/wbuf.c
43439--- linux-2.6.32.45/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
43440+++ linux-2.6.32.45/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
43441@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43442 {
43443 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43444 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43445- .totlen = constant_cpu_to_je32(8)
43446+ .totlen = constant_cpu_to_je32(8),
43447+ .hdr_crc = constant_cpu_to_je32(0)
43448 };
43449
43450 /*
43451diff -urNp linux-2.6.32.45/fs/jffs2/xattr.c linux-2.6.32.45/fs/jffs2/xattr.c
43452--- linux-2.6.32.45/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
43453+++ linux-2.6.32.45/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
43454@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43455
43456 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43457
43458+ pax_track_stack();
43459+
43460 /* Phase.1 : Merge same xref */
43461 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43462 xref_tmphash[i] = NULL;
43463diff -urNp linux-2.6.32.45/fs/jfs/super.c linux-2.6.32.45/fs/jfs/super.c
43464--- linux-2.6.32.45/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
43465+++ linux-2.6.32.45/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
43466@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
43467
43468 jfs_inode_cachep =
43469 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43470- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43471+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43472 init_once);
43473 if (jfs_inode_cachep == NULL)
43474 return -ENOMEM;
43475diff -urNp linux-2.6.32.45/fs/Kconfig.binfmt linux-2.6.32.45/fs/Kconfig.binfmt
43476--- linux-2.6.32.45/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
43477+++ linux-2.6.32.45/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
43478@@ -86,7 +86,7 @@ config HAVE_AOUT
43479
43480 config BINFMT_AOUT
43481 tristate "Kernel support for a.out and ECOFF binaries"
43482- depends on HAVE_AOUT
43483+ depends on HAVE_AOUT && BROKEN
43484 ---help---
43485 A.out (Assembler.OUTput) is a set of formats for libraries and
43486 executables used in the earliest versions of UNIX. Linux used
43487diff -urNp linux-2.6.32.45/fs/libfs.c linux-2.6.32.45/fs/libfs.c
43488--- linux-2.6.32.45/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
43489+++ linux-2.6.32.45/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
43490@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
43491
43492 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43493 struct dentry *next;
43494+ char d_name[sizeof(next->d_iname)];
43495+ const unsigned char *name;
43496+
43497 next = list_entry(p, struct dentry, d_u.d_child);
43498 if (d_unhashed(next) || !next->d_inode)
43499 continue;
43500
43501 spin_unlock(&dcache_lock);
43502- if (filldir(dirent, next->d_name.name,
43503+ name = next->d_name.name;
43504+ if (name == next->d_iname) {
43505+ memcpy(d_name, name, next->d_name.len);
43506+ name = d_name;
43507+ }
43508+ if (filldir(dirent, name,
43509 next->d_name.len, filp->f_pos,
43510 next->d_inode->i_ino,
43511 dt_type(next->d_inode)) < 0)
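The dcache_readdir() hunk above snapshots names that are stored inline in the dentry (next->d_iname) into a local buffer and points filldir() at the copy, so a concurrent rename that rewrites the inline name in place cannot change the bytes while they are being copied out; names stored out of line are passed through unchanged. A small standalone sketch of that snapshot-before-use pattern (types and sizes are illustrative):

#include <stdio.h>
#include <string.h>

struct dentry_sketch {
	char d_iname[36];		/* short names stored inline, like d_iname */
	const char *name;		/* points either at d_iname or at heap storage */
};

/* consumer that may run for a while, like filldir() copying to userspace */
static void slow_consumer(const char *name)
{
	printf("%s\n", name);
}

static void emit_name_sketch(struct dentry_sketch *d)
{
	char snapshot[sizeof(d->d_iname)];
	const char *name = d->name;

	if (name == d->d_iname) {		/* inline name: take a private copy first */
		memcpy(snapshot, name, sizeof(snapshot));
		name = snapshot;
	}
	slow_consumer(name);	/* a concurrent rename can no longer alter the bytes passed on */
}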
43512diff -urNp linux-2.6.32.45/fs/lockd/clntproc.c linux-2.6.32.45/fs/lockd/clntproc.c
43513--- linux-2.6.32.45/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
43514+++ linux-2.6.32.45/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
43515@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43516 /*
43517 * Cookie counter for NLM requests
43518 */
43519-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43520+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43521
43522 void nlmclnt_next_cookie(struct nlm_cookie *c)
43523 {
43524- u32 cookie = atomic_inc_return(&nlm_cookie);
43525+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43526
43527 memcpy(c->data, &cookie, 4);
43528 c->len=4;
43529@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43530 struct nlm_rqst reqst, *req;
43531 int status;
43532
43533+ pax_track_stack();
43534+
43535 req = &reqst;
43536 memset(req, 0, sizeof(*req));
43537 locks_init_lock(&req->a_args.lock.fl);
43538diff -urNp linux-2.6.32.45/fs/lockd/svc.c linux-2.6.32.45/fs/lockd/svc.c
43539--- linux-2.6.32.45/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
43540+++ linux-2.6.32.45/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
43541@@ -43,7 +43,7 @@
43542
43543 static struct svc_program nlmsvc_program;
43544
43545-struct nlmsvc_binding * nlmsvc_ops;
43546+const struct nlmsvc_binding * nlmsvc_ops;
43547 EXPORT_SYMBOL_GPL(nlmsvc_ops);
43548
43549 static DEFINE_MUTEX(nlmsvc_mutex);
43550diff -urNp linux-2.6.32.45/fs/locks.c linux-2.6.32.45/fs/locks.c
43551--- linux-2.6.32.45/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
43552+++ linux-2.6.32.45/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
43553@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
43554
43555 static struct kmem_cache *filelock_cache __read_mostly;
43556
43557+static void locks_init_lock_always(struct file_lock *fl)
43558+{
43559+ fl->fl_next = NULL;
43560+ fl->fl_fasync = NULL;
43561+ fl->fl_owner = NULL;
43562+ fl->fl_pid = 0;
43563+ fl->fl_nspid = NULL;
43564+ fl->fl_file = NULL;
43565+ fl->fl_flags = 0;
43566+ fl->fl_type = 0;
43567+ fl->fl_start = fl->fl_end = 0;
43568+}
43569+
43570 /* Allocate an empty lock structure. */
43571 static struct file_lock *locks_alloc_lock(void)
43572 {
43573- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43574+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43575+
43576+ if (fl)
43577+ locks_init_lock_always(fl);
43578+
43579+ return fl;
43580 }
43581
43582 void locks_release_private(struct file_lock *fl)
43583@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
43584 INIT_LIST_HEAD(&fl->fl_link);
43585 INIT_LIST_HEAD(&fl->fl_block);
43586 init_waitqueue_head(&fl->fl_wait);
43587- fl->fl_next = NULL;
43588- fl->fl_fasync = NULL;
43589- fl->fl_owner = NULL;
43590- fl->fl_pid = 0;
43591- fl->fl_nspid = NULL;
43592- fl->fl_file = NULL;
43593- fl->fl_flags = 0;
43594- fl->fl_type = 0;
43595- fl->fl_start = fl->fl_end = 0;
43596 fl->fl_ops = NULL;
43597 fl->fl_lmops = NULL;
43598+ locks_init_lock_always(fl);
43599 }
43600
43601 EXPORT_SYMBOL(locks_init_lock);
43602@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
43603 return;
43604
43605 if (filp->f_op && filp->f_op->flock) {
43606- struct file_lock fl = {
43607+ struct file_lock flock = {
43608 .fl_pid = current->tgid,
43609 .fl_file = filp,
43610 .fl_flags = FL_FLOCK,
43611 .fl_type = F_UNLCK,
43612 .fl_end = OFFSET_MAX,
43613 };
43614- filp->f_op->flock(filp, F_SETLKW, &fl);
43615- if (fl.fl_ops && fl.fl_ops->fl_release_private)
43616- fl.fl_ops->fl_release_private(&fl);
43617+ filp->f_op->flock(filp, F_SETLKW, &flock);
43618+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
43619+ flock.fl_ops->fl_release_private(&flock);
43620 }
43621
43622 lock_kernel();
43623diff -urNp linux-2.6.32.45/fs/mbcache.c linux-2.6.32.45/fs/mbcache.c
43624--- linux-2.6.32.45/fs/mbcache.c 2011-03-27 14:31:47.000000000 -0400
43625+++ linux-2.6.32.45/fs/mbcache.c 2011-08-05 20:33:55.000000000 -0400
43626@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct
43627 if (!cache)
43628 goto fail;
43629 cache->c_name = name;
43630- cache->c_op.free = NULL;
43631+ *(void **)&cache->c_op.free = NULL;
43632 if (cache_op)
43633- cache->c_op.free = cache_op->free;
43634+ *(void **)&cache->c_op.free = cache_op->free;
43635 atomic_set(&cache->c_entry_count, 0);
43636 cache->c_bucket_bits = bucket_bits;
43637 #ifdef MB_CACHE_INDEXES_COUNT
43638diff -urNp linux-2.6.32.45/fs/namei.c linux-2.6.32.45/fs/namei.c
43639--- linux-2.6.32.45/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
43640+++ linux-2.6.32.45/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
43641@@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
43642 return ret;
43643
43644 /*
43645- * Read/write DACs are always overridable.
43646- * Executable DACs are overridable if at least one exec bit is set.
43647- */
43648- if (!(mask & MAY_EXEC) || execute_ok(inode))
43649- if (capable(CAP_DAC_OVERRIDE))
43650- return 0;
43651-
43652- /*
43653 * Searching includes executable on directories, else just read.
43654 */
43655 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43656@@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
43657 if (capable(CAP_DAC_READ_SEARCH))
43658 return 0;
43659
43660+ /*
43661+ * Read/write DACs are always overridable.
43662+ * Executable DACs are overridable if at least one exec bit is set.
43663+ */
43664+ if (!(mask & MAY_EXEC) || execute_ok(inode))
43665+ if (capable(CAP_DAC_OVERRIDE))
43666+ return 0;
43667+
43668 return -EACCES;
43669 }
43670
43671@@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
43672 if (!ret)
43673 goto ok;
43674
43675- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
43676+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
43677+ capable(CAP_DAC_OVERRIDE))
43678 goto ok;
43679
43680 return ret;
43681@@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
43682 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
43683 error = PTR_ERR(cookie);
43684 if (!IS_ERR(cookie)) {
43685- char *s = nd_get_link(nd);
43686+ const char *s = nd_get_link(nd);
43687 error = 0;
43688 if (s)
43689 error = __vfs_follow_link(nd, s);
43690@@ -669,6 +670,13 @@ static inline int do_follow_link(struct
43691 err = security_inode_follow_link(path->dentry, nd);
43692 if (err)
43693 goto loop;
43694+
43695+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
43696+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
43697+ err = -EACCES;
43698+ goto loop;
43699+ }
43700+
43701 current->link_count++;
43702 current->total_link_count++;
43703 nd->depth++;
43704@@ -1016,11 +1024,18 @@ return_reval:
43705 break;
43706 }
43707 return_base:
43708+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43709+ path_put(&nd->path);
43710+ return -ENOENT;
43711+ }
43712 return 0;
43713 out_dput:
43714 path_put_conditional(&next, nd);
43715 break;
43716 }
43717+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
43718+ err = -ENOENT;
43719+
43720 path_put(&nd->path);
43721 return_err:
43722 return err;
43723@@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
43724 int retval = path_init(dfd, name, flags, nd);
43725 if (!retval)
43726 retval = path_walk(name, nd);
43727- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
43728- nd->path.dentry->d_inode))
43729- audit_inode(name, nd->path.dentry);
43730+
43731+ if (likely(!retval)) {
43732+ if (nd->path.dentry && nd->path.dentry->d_inode) {
43733+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43734+ retval = -ENOENT;
43735+ if (!audit_dummy_context())
43736+ audit_inode(name, nd->path.dentry);
43737+ }
43738+ }
43739 if (nd->root.mnt) {
43740 path_put(&nd->root);
43741 nd->root.mnt = NULL;
43742 }
43743+
43744 return retval;
43745 }
43746
43747@@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
43748 if (error)
43749 goto err_out;
43750
43751+
43752+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
43753+ error = -EPERM;
43754+ goto err_out;
43755+ }
43756+ if (gr_handle_rawio(inode)) {
43757+ error = -EPERM;
43758+ goto err_out;
43759+ }
43760+ if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
43761+ error = -EACCES;
43762+ goto err_out;
43763+ }
43764+
43765 if (flag & O_TRUNC) {
43766 error = get_write_access(inode);
43767 if (error)
43768@@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
43769 int error;
43770 struct dentry *dir = nd->path.dentry;
43771
43772+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
43773+ error = -EACCES;
43774+ goto out_unlock;
43775+ }
43776+
43777 if (!IS_POSIXACL(dir->d_inode))
43778 mode &= ~current_umask();
43779 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
43780 if (error)
43781 goto out_unlock;
43782 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
43783+ if (!error)
43784+ gr_handle_create(path->dentry, nd->path.mnt);
43785 out_unlock:
43786 mutex_unlock(&dir->d_inode->i_mutex);
43787 dput(nd->path.dentry);
43788@@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
43789 &nd, flag);
43790 if (error)
43791 return ERR_PTR(error);
43792+
43793+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
43794+ error = -EPERM;
43795+ goto exit;
43796+ }
43797+
43798+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
43799+ error = -EPERM;
43800+ goto exit;
43801+ }
43802+
43803+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
43804+ error = -EACCES;
43805+ goto exit;
43806+ }
43807+
43808 goto ok;
43809 }
43810
43811@@ -1795,6 +1854,14 @@ do_last:
43812 /*
43813 * It already exists.
43814 */
43815+
43816+ /* only check if O_CREAT is specified, all other checks need
43817+ to go into may_open */
43818+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
43819+ error = -EACCES;
43820+ goto exit_mutex_unlock;
43821+ }
43822+
43823 mutex_unlock(&dir->d_inode->i_mutex);
43824 audit_inode(pathname, path.dentry);
43825
43826@@ -1887,6 +1954,13 @@ do_link:
43827 error = security_inode_follow_link(path.dentry, &nd);
43828 if (error)
43829 goto exit_dput;
43830+
43831+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
43832+ path.dentry, nd.path.mnt)) {
43833+ error = -EACCES;
43834+ goto exit_dput;
43835+ }
43836+
43837 error = __do_follow_link(&path, &nd);
43838 if (error) {
43839 /* Does someone understand code flow here? Or it is only
43840@@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43841 error = may_mknod(mode);
43842 if (error)
43843 goto out_dput;
43844+
43845+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
43846+ error = -EPERM;
43847+ goto out_dput;
43848+ }
43849+
43850+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
43851+ error = -EACCES;
43852+ goto out_dput;
43853+ }
43854+
43855 error = mnt_want_write(nd.path.mnt);
43856 if (error)
43857 goto out_dput;
43858@@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43859 }
43860 out_drop_write:
43861 mnt_drop_write(nd.path.mnt);
43862+
43863+ if (!error)
43864+ gr_handle_create(dentry, nd.path.mnt);
43865 out_dput:
43866 dput(dentry);
43867 out_unlock:
43868@@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43869 if (IS_ERR(dentry))
43870 goto out_unlock;
43871
43872+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
43873+ error = -EACCES;
43874+ goto out_dput;
43875+ }
43876+
43877 if (!IS_POSIXACL(nd.path.dentry->d_inode))
43878 mode &= ~current_umask();
43879 error = mnt_want_write(nd.path.mnt);
43880@@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43881 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
43882 out_drop_write:
43883 mnt_drop_write(nd.path.mnt);
43884+
43885+ if (!error)
43886+ gr_handle_create(dentry, nd.path.mnt);
43887+
43888 out_dput:
43889 dput(dentry);
43890 out_unlock:
43891@@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
43892 char * name;
43893 struct dentry *dentry;
43894 struct nameidata nd;
43895+ ino_t saved_ino = 0;
43896+ dev_t saved_dev = 0;
43897
43898 error = user_path_parent(dfd, pathname, &nd, &name);
43899 if (error)
43900@@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
43901 error = PTR_ERR(dentry);
43902 if (IS_ERR(dentry))
43903 goto exit2;
43904+
43905+ if (dentry->d_inode != NULL) {
43906+ if (dentry->d_inode->i_nlink <= 1) {
43907+ saved_ino = dentry->d_inode->i_ino;
43908+ saved_dev = gr_get_dev_from_dentry(dentry);
43909+ }
43910+
43911+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
43912+ error = -EACCES;
43913+ goto exit3;
43914+ }
43915+ }
43916+
43917 error = mnt_want_write(nd.path.mnt);
43918 if (error)
43919 goto exit3;
43920@@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
43921 if (error)
43922 goto exit4;
43923 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
43924+ if (!error && (saved_dev || saved_ino))
43925+ gr_handle_delete(saved_ino, saved_dev);
43926 exit4:
43927 mnt_drop_write(nd.path.mnt);
43928 exit3:
43929@@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
43930 struct dentry *dentry;
43931 struct nameidata nd;
43932 struct inode *inode = NULL;
43933+ ino_t saved_ino = 0;
43934+ dev_t saved_dev = 0;
43935
43936 error = user_path_parent(dfd, pathname, &nd, &name);
43937 if (error)
43938@@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
43939 if (nd.last.name[nd.last.len])
43940 goto slashes;
43941 inode = dentry->d_inode;
43942- if (inode)
43943+ if (inode) {
43944+ if (inode->i_nlink <= 1) {
43945+ saved_ino = inode->i_ino;
43946+ saved_dev = gr_get_dev_from_dentry(dentry);
43947+ }
43948+
43949 atomic_inc(&inode->i_count);
43950+
43951+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
43952+ error = -EACCES;
43953+ goto exit2;
43954+ }
43955+ }
43956 error = mnt_want_write(nd.path.mnt);
43957 if (error)
43958 goto exit2;
43959@@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
43960 if (error)
43961 goto exit3;
43962 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
43963+ if (!error && (saved_ino || saved_dev))
43964+ gr_handle_delete(saved_ino, saved_dev);
43965 exit3:
43966 mnt_drop_write(nd.path.mnt);
43967 exit2:
43968@@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
43969 if (IS_ERR(dentry))
43970 goto out_unlock;
43971
43972+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
43973+ error = -EACCES;
43974+ goto out_dput;
43975+ }
43976+
43977 error = mnt_want_write(nd.path.mnt);
43978 if (error)
43979 goto out_dput;
43980@@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
43981 if (error)
43982 goto out_drop_write;
43983 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
43984+ if (!error)
43985+ gr_handle_create(dentry, nd.path.mnt);
43986 out_drop_write:
43987 mnt_drop_write(nd.path.mnt);
43988 out_dput:
43989@@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
43990 error = PTR_ERR(new_dentry);
43991 if (IS_ERR(new_dentry))
43992 goto out_unlock;
43993+
43994+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
43995+ old_path.dentry->d_inode,
43996+ old_path.dentry->d_inode->i_mode, to)) {
43997+ error = -EACCES;
43998+ goto out_dput;
43999+ }
44000+
44001+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
44002+ old_path.dentry, old_path.mnt, to)) {
44003+ error = -EACCES;
44004+ goto out_dput;
44005+ }
44006+
44007 error = mnt_want_write(nd.path.mnt);
44008 if (error)
44009 goto out_dput;
44010@@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44011 if (error)
44012 goto out_drop_write;
44013 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
44014+ if (!error)
44015+ gr_handle_create(new_dentry, nd.path.mnt);
44016 out_drop_write:
44017 mnt_drop_write(nd.path.mnt);
44018 out_dput:
44019@@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44020 char *to;
44021 int error;
44022
44023+ pax_track_stack();
44024+
44025 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44026 if (error)
44027 goto exit;
44028@@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44029 if (new_dentry == trap)
44030 goto exit5;
44031
44032+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44033+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44034+ to);
44035+ if (error)
44036+ goto exit5;
44037+
44038 error = mnt_want_write(oldnd.path.mnt);
44039 if (error)
44040 goto exit5;
44041@@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44042 goto exit6;
44043 error = vfs_rename(old_dir->d_inode, old_dentry,
44044 new_dir->d_inode, new_dentry);
44045+ if (!error)
44046+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44047+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44048 exit6:
44049 mnt_drop_write(oldnd.path.mnt);
44050 exit5:
44051@@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
44052
44053 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44054 {
44055+ char tmpbuf[64];
44056+ const char *newlink;
44057 int len;
44058
44059 len = PTR_ERR(link);
44060@@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
44061 len = strlen(link);
44062 if (len > (unsigned) buflen)
44063 len = buflen;
44064- if (copy_to_user(buffer, link, len))
44065+
44066+ if (len < sizeof(tmpbuf)) {
44067+ memcpy(tmpbuf, link, len);
44068+ newlink = tmpbuf;
44069+ } else
44070+ newlink = link;
44071+
44072+ if (copy_to_user(buffer, newlink, len))
44073 len = -EFAULT;
44074 out:
44075 return len;
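Besides the gr_* policy hooks, the fs/namei.c section ends with a small change to vfs_readlink(): link targets shorter than 64 bytes are first copied into a fixed-size buffer on the kernel stack, and copy_to_user() is pointed at that copy rather than at the original string. A standalone sketch of the bounce-buffer shape (plain memcpy() stands in for copy_to_user(), and the caller's buffer plays the part of the userspace buffer):

#include <string.h>

static long readlink_sketch(char *dst, size_t buflen, const char *link)
{
	char tmpbuf[64];
	const char *src = link;
	size_t len = strlen(link);

	if (len > buflen)
		len = buflen;

	if (len < sizeof(tmpbuf)) {		/* short targets bounce through the stack */
		memcpy(tmpbuf, link, len);
		src = tmpbuf;
	}

	memcpy(dst, src, len);			/* copy_to_user() in the real function */
	return (long)len;
}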
44076diff -urNp linux-2.6.32.45/fs/namespace.c linux-2.6.32.45/fs/namespace.c
44077--- linux-2.6.32.45/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
44078+++ linux-2.6.32.45/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
44079@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
44080 if (!(sb->s_flags & MS_RDONLY))
44081 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44082 up_write(&sb->s_umount);
44083+
44084+ gr_log_remount(mnt->mnt_devname, retval);
44085+
44086 return retval;
44087 }
44088
44089@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
44090 security_sb_umount_busy(mnt);
44091 up_write(&namespace_sem);
44092 release_mounts(&umount_list);
44093+
44094+ gr_log_unmount(mnt->mnt_devname, retval);
44095+
44096 return retval;
44097 }
44098
44099@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
44100 if (retval)
44101 goto dput_out;
44102
44103+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44104+ retval = -EPERM;
44105+ goto dput_out;
44106+ }
44107+
44108+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44109+ retval = -EPERM;
44110+ goto dput_out;
44111+ }
44112+
44113 if (flags & MS_REMOUNT)
44114 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44115 data_page);
44116@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
44117 dev_name, data_page);
44118 dput_out:
44119 path_put(&path);
44120+
44121+ gr_log_mount(dev_name, dir_name, retval);
44122+
44123 return retval;
44124 }
44125
44126@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
44127 goto out1;
44128 }
44129
44130+ if (gr_handle_chroot_pivot()) {
44131+ error = -EPERM;
44132+ path_put(&old);
44133+ goto out1;
44134+ }
44135+
44136 read_lock(&current->fs->lock);
44137 root = current->fs->root;
44138 path_get(&current->fs->root);
44139diff -urNp linux-2.6.32.45/fs/ncpfs/dir.c linux-2.6.32.45/fs/ncpfs/dir.c
44140--- linux-2.6.32.45/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44141+++ linux-2.6.32.45/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
44142@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
44143 int res, val = 0, len;
44144 __u8 __name[NCP_MAXPATHLEN + 1];
44145
44146+ pax_track_stack();
44147+
44148 parent = dget_parent(dentry);
44149 dir = parent->d_inode;
44150
44151@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
44152 int error, res, len;
44153 __u8 __name[NCP_MAXPATHLEN + 1];
44154
44155+ pax_track_stack();
44156+
44157 lock_kernel();
44158 error = -EIO;
44159 if (!ncp_conn_valid(server))
44160@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
44161 int error, result, len;
44162 int opmode;
44163 __u8 __name[NCP_MAXPATHLEN + 1];
44164-
44165+
44166 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44167 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44168
44169+ pax_track_stack();
44170+
44171 error = -EIO;
44172 lock_kernel();
44173 if (!ncp_conn_valid(server))
44174@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
44175 int error, len;
44176 __u8 __name[NCP_MAXPATHLEN + 1];
44177
44178+ pax_track_stack();
44179+
44180 DPRINTK("ncp_mkdir: making %s/%s\n",
44181 dentry->d_parent->d_name.name, dentry->d_name.name);
44182
44183@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
44184 if (!ncp_conn_valid(server))
44185 goto out;
44186
44187+ pax_track_stack();
44188+
44189 ncp_age_dentry(server, dentry);
44190 len = sizeof(__name);
44191 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44192@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
44193 int old_len, new_len;
44194 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44195
44196+ pax_track_stack();
44197+
44198 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44199 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44200 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44201diff -urNp linux-2.6.32.45/fs/ncpfs/inode.c linux-2.6.32.45/fs/ncpfs/inode.c
44202--- linux-2.6.32.45/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
44203+++ linux-2.6.32.45/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
44204@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
44205 #endif
44206 struct ncp_entry_info finfo;
44207
44208+ pax_track_stack();
44209+
44210 data.wdog_pid = NULL;
44211 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44212 if (!server)
44213diff -urNp linux-2.6.32.45/fs/nfs/inode.c linux-2.6.32.45/fs/nfs/inode.c
44214--- linux-2.6.32.45/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
44215+++ linux-2.6.32.45/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
44216@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
44217 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44218 nfsi->attrtimeo_timestamp = jiffies;
44219
44220- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44221+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44222 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44223 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44224 else
44225@@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
44226 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44227 }
44228
44229-static atomic_long_t nfs_attr_generation_counter;
44230+static atomic_long_unchecked_t nfs_attr_generation_counter;
44231
44232 static unsigned long nfs_read_attr_generation_counter(void)
44233 {
44234- return atomic_long_read(&nfs_attr_generation_counter);
44235+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44236 }
44237
44238 unsigned long nfs_inc_attr_generation_counter(void)
44239 {
44240- return atomic_long_inc_return(&nfs_attr_generation_counter);
44241+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44242 }
44243
44244 void nfs_fattr_init(struct nfs_fattr *fattr)
44245diff -urNp linux-2.6.32.45/fs/nfsd/lockd.c linux-2.6.32.45/fs/nfsd/lockd.c
44246--- linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
44247+++ linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
44248@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
44249 fput(filp);
44250 }
44251
44252-static struct nlmsvc_binding nfsd_nlm_ops = {
44253+static const struct nlmsvc_binding nfsd_nlm_ops = {
44254 .fopen = nlm_fopen, /* open file for locking */
44255 .fclose = nlm_fclose, /* close file */
44256 };
44257diff -urNp linux-2.6.32.45/fs/nfsd/nfs4state.c linux-2.6.32.45/fs/nfsd/nfs4state.c
44258--- linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
44259+++ linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
44260@@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44261 unsigned int cmd;
44262 int err;
44263
44264+ pax_track_stack();
44265+
44266 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44267 (long long) lock->lk_offset,
44268 (long long) lock->lk_length);
44269diff -urNp linux-2.6.32.45/fs/nfsd/nfs4xdr.c linux-2.6.32.45/fs/nfsd/nfs4xdr.c
44270--- linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
44271+++ linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
44272@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44273 struct nfsd4_compoundres *resp = rqstp->rq_resp;
44274 u32 minorversion = resp->cstate.minorversion;
44275
44276+ pax_track_stack();
44277+
44278 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44279 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44280 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44281diff -urNp linux-2.6.32.45/fs/nfsd/vfs.c linux-2.6.32.45/fs/nfsd/vfs.c
44282--- linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
44283+++ linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
44284@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44285 } else {
44286 oldfs = get_fs();
44287 set_fs(KERNEL_DS);
44288- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44289+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
44290 set_fs(oldfs);
44291 }
44292
44293@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44294
44295 /* Write the data. */
44296 oldfs = get_fs(); set_fs(KERNEL_DS);
44297- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44298+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
44299 set_fs(oldfs);
44300 if (host_err < 0)
44301 goto out_nfserr;
44302@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44303 */
44304
44305 oldfs = get_fs(); set_fs(KERNEL_DS);
44306- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44307+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
44308 set_fs(oldfs);
44309
44310 if (host_err < 0)
44311diff -urNp linux-2.6.32.45/fs/nilfs2/ioctl.c linux-2.6.32.45/fs/nilfs2/ioctl.c
44312--- linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
44313+++ linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
44314@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
44315 unsigned int cmd, void __user *argp)
44316 {
44317 struct nilfs_argv argv[5];
44318- const static size_t argsz[5] = {
44319+ static const size_t argsz[5] = {
44320 sizeof(struct nilfs_vdesc),
44321 sizeof(struct nilfs_period),
44322 sizeof(__u64),
44323diff -urNp linux-2.6.32.45/fs/notify/dnotify/dnotify.c linux-2.6.32.45/fs/notify/dnotify/dnotify.c
44324--- linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
44325+++ linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
44326@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
44327 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
44328 }
44329
44330-static struct fsnotify_ops dnotify_fsnotify_ops = {
44331+static const struct fsnotify_ops dnotify_fsnotify_ops = {
44332 .handle_event = dnotify_handle_event,
44333 .should_send_event = dnotify_should_send_event,
44334 .free_group_priv = NULL,
44335diff -urNp linux-2.6.32.45/fs/notify/notification.c linux-2.6.32.45/fs/notify/notification.c
44336--- linux-2.6.32.45/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
44337+++ linux-2.6.32.45/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
44338@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44339 * get set to 0 so it will never get 'freed'
44340 */
44341 static struct fsnotify_event q_overflow_event;
44342-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44343+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44344
44345 /**
44346 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44347@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44348 */
44349 u32 fsnotify_get_cookie(void)
44350 {
44351- return atomic_inc_return(&fsnotify_sync_cookie);
44352+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44353 }
44354 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44355
44356diff -urNp linux-2.6.32.45/fs/ntfs/dir.c linux-2.6.32.45/fs/ntfs/dir.c
44357--- linux-2.6.32.45/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44358+++ linux-2.6.32.45/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
44359@@ -1328,7 +1328,7 @@ find_next_index_buffer:
44360 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44361 ~(s64)(ndir->itype.index.block_size - 1)));
44362 /* Bounds checks. */
44363- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44364+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44365 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44366 "inode 0x%lx or driver bug.", vdir->i_ino);
44367 goto err_out;
44368diff -urNp linux-2.6.32.45/fs/ntfs/file.c linux-2.6.32.45/fs/ntfs/file.c
44369--- linux-2.6.32.45/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
44370+++ linux-2.6.32.45/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
44371@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
44372 #endif /* NTFS_RW */
44373 };
44374
44375-const struct file_operations ntfs_empty_file_ops = {};
44376+const struct file_operations ntfs_empty_file_ops __read_only;
44377
44378-const struct inode_operations ntfs_empty_inode_ops = {};
44379+const struct inode_operations ntfs_empty_inode_ops __read_only;
44380diff -urNp linux-2.6.32.45/fs/ocfs2/cluster/masklog.c linux-2.6.32.45/fs/ocfs2/cluster/masklog.c
44381--- linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
44382+++ linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
44383@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
44384 return mlog_mask_store(mlog_attr->mask, buf, count);
44385 }
44386
44387-static struct sysfs_ops mlog_attr_ops = {
44388+static const struct sysfs_ops mlog_attr_ops = {
44389 .show = mlog_show,
44390 .store = mlog_store,
44391 };
44392diff -urNp linux-2.6.32.45/fs/ocfs2/localalloc.c linux-2.6.32.45/fs/ocfs2/localalloc.c
44393--- linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
44394+++ linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
44395@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
44396 goto bail;
44397 }
44398
44399- atomic_inc(&osb->alloc_stats.moves);
44400+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44401
44402 status = 0;
44403 bail:
44404diff -urNp linux-2.6.32.45/fs/ocfs2/namei.c linux-2.6.32.45/fs/ocfs2/namei.c
44405--- linux-2.6.32.45/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
44406+++ linux-2.6.32.45/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
44407@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
44408 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44409 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44410
44411+ pax_track_stack();
44412+
44413 /* At some point it might be nice to break this function up a
44414 * bit. */
44415
44416diff -urNp linux-2.6.32.45/fs/ocfs2/ocfs2.h linux-2.6.32.45/fs/ocfs2/ocfs2.h
44417--- linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
44418+++ linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
44419@@ -217,11 +217,11 @@ enum ocfs2_vol_state
44420
44421 struct ocfs2_alloc_stats
44422 {
44423- atomic_t moves;
44424- atomic_t local_data;
44425- atomic_t bitmap_data;
44426- atomic_t bg_allocs;
44427- atomic_t bg_extends;
44428+ atomic_unchecked_t moves;
44429+ atomic_unchecked_t local_data;
44430+ atomic_unchecked_t bitmap_data;
44431+ atomic_unchecked_t bg_allocs;
44432+ atomic_unchecked_t bg_extends;
44433 };
44434
44435 enum ocfs2_local_alloc_state
44436diff -urNp linux-2.6.32.45/fs/ocfs2/suballoc.c linux-2.6.32.45/fs/ocfs2/suballoc.c
44437--- linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
44438+++ linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
44439@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
44440 mlog_errno(status);
44441 goto bail;
44442 }
44443- atomic_inc(&osb->alloc_stats.bg_extends);
44444+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44445
44446 /* You should never ask for this much metadata */
44447 BUG_ON(bits_wanted >
44448@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
44449 mlog_errno(status);
44450 goto bail;
44451 }
44452- atomic_inc(&osb->alloc_stats.bg_allocs);
44453+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44454
44455 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
44456 ac->ac_bits_given += (*num_bits);
44457@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
44458 mlog_errno(status);
44459 goto bail;
44460 }
44461- atomic_inc(&osb->alloc_stats.bg_allocs);
44462+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44463
44464 BUG_ON(num_bits != 1);
44465
44466@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44467 cluster_start,
44468 num_clusters);
44469 if (!status)
44470- atomic_inc(&osb->alloc_stats.local_data);
44471+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44472 } else {
44473 if (min_clusters > (osb->bitmap_cpg - 1)) {
44474 /* The only paths asking for contiguousness
44475@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44476 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44477 bg_blkno,
44478 bg_bit_off);
44479- atomic_inc(&osb->alloc_stats.bitmap_data);
44480+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44481 }
44482 }
44483 if (status < 0) {
44484diff -urNp linux-2.6.32.45/fs/ocfs2/super.c linux-2.6.32.45/fs/ocfs2/super.c
44485--- linux-2.6.32.45/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
44486+++ linux-2.6.32.45/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
44487@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44488 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44489 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44490 "Stats",
44491- atomic_read(&osb->alloc_stats.bitmap_data),
44492- atomic_read(&osb->alloc_stats.local_data),
44493- atomic_read(&osb->alloc_stats.bg_allocs),
44494- atomic_read(&osb->alloc_stats.moves),
44495- atomic_read(&osb->alloc_stats.bg_extends));
44496+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44497+ atomic_read_unchecked(&osb->alloc_stats.local_data),
44498+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44499+ atomic_read_unchecked(&osb->alloc_stats.moves),
44500+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44501
44502 out += snprintf(buf + out, len - out,
44503 "%10s => State: %u Descriptor: %llu Size: %u bits "
44504@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
44505 spin_lock_init(&osb->osb_xattr_lock);
44506 ocfs2_init_inode_steal_slot(osb);
44507
44508- atomic_set(&osb->alloc_stats.moves, 0);
44509- atomic_set(&osb->alloc_stats.local_data, 0);
44510- atomic_set(&osb->alloc_stats.bitmap_data, 0);
44511- atomic_set(&osb->alloc_stats.bg_allocs, 0);
44512- atomic_set(&osb->alloc_stats.bg_extends, 0);
44513+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44514+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44515+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44516+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44517+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44518
44519 /* Copy the blockcheck stats from the superblock probe */
44520 osb->osb_ecc_stats = *stats;
44521diff -urNp linux-2.6.32.45/fs/open.c linux-2.6.32.45/fs/open.c
44522--- linux-2.6.32.45/fs/open.c 2011-03-27 14:31:47.000000000 -0400
44523+++ linux-2.6.32.45/fs/open.c 2011-04-17 15:56:46.000000000 -0400
44524@@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
44525 error = locks_verify_truncate(inode, NULL, length);
44526 if (!error)
44527 error = security_path_truncate(&path, length, 0);
44528+
44529+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44530+ error = -EACCES;
44531+
44532 if (!error) {
44533 vfs_dq_init(inode);
44534 error = do_truncate(path.dentry, length, 0, NULL);
44535@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44536 if (__mnt_is_readonly(path.mnt))
44537 res = -EROFS;
44538
44539+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44540+ res = -EACCES;
44541+
44542 out_path_release:
44543 path_put(&path);
44544 out:
44545@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44546 if (error)
44547 goto dput_and_out;
44548
44549+ gr_log_chdir(path.dentry, path.mnt);
44550+
44551 set_fs_pwd(current->fs, &path);
44552
44553 dput_and_out:
44554@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44555 goto out_putf;
44556
44557 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
44558+
44559+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44560+ error = -EPERM;
44561+
44562+ if (!error)
44563+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44564+
44565 if (!error)
44566 set_fs_pwd(current->fs, &file->f_path);
44567 out_putf:
44568@@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
44569 if (!capable(CAP_SYS_CHROOT))
44570 goto dput_and_out;
44571
44572+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44573+ goto dput_and_out;
44574+
44575+ if (gr_handle_chroot_caps(&path)) {
44576+ error = -ENOMEM;
44577+ goto dput_and_out;
44578+ }
44579+
44580 set_fs_root(current->fs, &path);
44581+
44582+ gr_handle_chroot_chdir(&path);
44583+
44584 error = 0;
44585 dput_and_out:
44586 path_put(&path);
44587@@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44588 err = mnt_want_write_file(file);
44589 if (err)
44590 goto out_putf;
44591+
44592 mutex_lock(&inode->i_mutex);
44593+
44594+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
44595+ err = -EACCES;
44596+ goto out_unlock;
44597+ }
44598+
44599 if (mode == (mode_t) -1)
44600 mode = inode->i_mode;
44601+
44602+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
44603+ err = -EPERM;
44604+ goto out_unlock;
44605+ }
44606+
44607 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44608 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44609 err = notify_change(dentry, &newattrs);
44610+
44611+out_unlock:
44612 mutex_unlock(&inode->i_mutex);
44613 mnt_drop_write(file->f_path.mnt);
44614 out_putf:
44615@@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44616 error = mnt_want_write(path.mnt);
44617 if (error)
44618 goto dput_and_out;
44619+
44620 mutex_lock(&inode->i_mutex);
44621+
44622+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44623+ error = -EACCES;
44624+ goto out_unlock;
44625+ }
44626+
44627 if (mode == (mode_t) -1)
44628 mode = inode->i_mode;
44629+
44630+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44631+ error = -EACCES;
44632+ goto out_unlock;
44633+ }
44634+
44635 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44636 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44637 error = notify_change(path.dentry, &newattrs);
44638+
44639+out_unlock:
44640 mutex_unlock(&inode->i_mutex);
44641 mnt_drop_write(path.mnt);
44642 dput_and_out:
44643@@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
44644 return sys_fchmodat(AT_FDCWD, filename, mode);
44645 }
44646
44647-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
44648+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
44649 {
44650 struct inode *inode = dentry->d_inode;
44651 int error;
44652 struct iattr newattrs;
44653
44654+ if (!gr_acl_handle_chown(dentry, mnt))
44655+ return -EACCES;
44656+
44657 newattrs.ia_valid = ATTR_CTIME;
44658 if (user != (uid_t) -1) {
44659 newattrs.ia_valid |= ATTR_UID;
44660@@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
44661 error = mnt_want_write(path.mnt);
44662 if (error)
44663 goto out_release;
44664- error = chown_common(path.dentry, user, group);
44665+ error = chown_common(path.dentry, user, group, path.mnt);
44666 mnt_drop_write(path.mnt);
44667 out_release:
44668 path_put(&path);
44669@@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
44670 error = mnt_want_write(path.mnt);
44671 if (error)
44672 goto out_release;
44673- error = chown_common(path.dentry, user, group);
44674+ error = chown_common(path.dentry, user, group, path.mnt);
44675 mnt_drop_write(path.mnt);
44676 out_release:
44677 path_put(&path);
44678@@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
44679 error = mnt_want_write(path.mnt);
44680 if (error)
44681 goto out_release;
44682- error = chown_common(path.dentry, user, group);
44683+ error = chown_common(path.dentry, user, group, path.mnt);
44684 mnt_drop_write(path.mnt);
44685 out_release:
44686 path_put(&path);
44687@@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
44688 goto out_fput;
44689 dentry = file->f_path.dentry;
44690 audit_inode(NULL, dentry);
44691- error = chown_common(dentry, user, group);
44692+ error = chown_common(dentry, user, group, file->f_path.mnt);
44693 mnt_drop_write(file->f_path.mnt);
44694 out_fput:
44695 fput(file);
44696@@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
44697 if (!IS_ERR(tmp)) {
44698 fd = get_unused_fd_flags(flags);
44699 if (fd >= 0) {
44700- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
44701+ struct file *f;
44702+ /* don't allow to be set by userland */
44703+ flags &= ~FMODE_GREXEC;
44704+ f = do_filp_open(dfd, tmp, flags, mode, 0);
44705 if (IS_ERR(f)) {
44706 put_unused_fd(fd);
44707 fd = PTR_ERR(f);
44708diff -urNp linux-2.6.32.45/fs/partitions/ldm.c linux-2.6.32.45/fs/partitions/ldm.c
44709--- linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
44710+++ linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
44711@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44712 ldm_error ("A VBLK claims to have %d parts.", num);
44713 return false;
44714 }
44715+
44716 if (rec >= num) {
44717 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44718 return false;
44719@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44720 goto found;
44721 }
44722
44723- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44724+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44725 if (!f) {
44726 ldm_crit ("Out of memory.");
44727 return false;
44728diff -urNp linux-2.6.32.45/fs/partitions/mac.c linux-2.6.32.45/fs/partitions/mac.c
44729--- linux-2.6.32.45/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
44730+++ linux-2.6.32.45/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
44731@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
44732 return 0; /* not a MacOS disk */
44733 }
44734 blocks_in_map = be32_to_cpu(part->map_count);
44735+ printk(" [mac]");
44736 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
44737 put_dev_sector(sect);
44738 return 0;
44739 }
44740- printk(" [mac]");
44741 for (slot = 1; slot <= blocks_in_map; ++slot) {
44742 int pos = slot * secsize;
44743 put_dev_sector(sect);
44744diff -urNp linux-2.6.32.45/fs/pipe.c linux-2.6.32.45/fs/pipe.c
44745--- linux-2.6.32.45/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
44746+++ linux-2.6.32.45/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
44747@@ -401,9 +401,9 @@ redo:
44748 }
44749 if (bufs) /* More to do? */
44750 continue;
44751- if (!pipe->writers)
44752+ if (!atomic_read(&pipe->writers))
44753 break;
44754- if (!pipe->waiting_writers) {
44755+ if (!atomic_read(&pipe->waiting_writers)) {
44756 /* syscall merging: Usually we must not sleep
44757 * if O_NONBLOCK is set, or if we got some data.
44758 * But if a writer sleeps in kernel space, then
44759@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
44760 mutex_lock(&inode->i_mutex);
44761 pipe = inode->i_pipe;
44762
44763- if (!pipe->readers) {
44764+ if (!atomic_read(&pipe->readers)) {
44765 send_sig(SIGPIPE, current, 0);
44766 ret = -EPIPE;
44767 goto out;
44768@@ -511,7 +511,7 @@ redo1:
44769 for (;;) {
44770 int bufs;
44771
44772- if (!pipe->readers) {
44773+ if (!atomic_read(&pipe->readers)) {
44774 send_sig(SIGPIPE, current, 0);
44775 if (!ret)
44776 ret = -EPIPE;
44777@@ -597,9 +597,9 @@ redo2:
44778 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44779 do_wakeup = 0;
44780 }
44781- pipe->waiting_writers++;
44782+ atomic_inc(&pipe->waiting_writers);
44783 pipe_wait(pipe);
44784- pipe->waiting_writers--;
44785+ atomic_dec(&pipe->waiting_writers);
44786 }
44787 out:
44788 mutex_unlock(&inode->i_mutex);
44789@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
44790 mask = 0;
44791 if (filp->f_mode & FMODE_READ) {
44792 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44793- if (!pipe->writers && filp->f_version != pipe->w_counter)
44794+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44795 mask |= POLLHUP;
44796 }
44797
44798@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
44799 * Most Unices do not set POLLERR for FIFOs but on Linux they
44800 * behave exactly like pipes for poll().
44801 */
44802- if (!pipe->readers)
44803+ if (!atomic_read(&pipe->readers))
44804 mask |= POLLERR;
44805 }
44806
44807@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
44808
44809 mutex_lock(&inode->i_mutex);
44810 pipe = inode->i_pipe;
44811- pipe->readers -= decr;
44812- pipe->writers -= decw;
44813+ atomic_sub(decr, &pipe->readers);
44814+ atomic_sub(decw, &pipe->writers);
44815
44816- if (!pipe->readers && !pipe->writers) {
44817+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
44818 free_pipe_info(inode);
44819 } else {
44820 wake_up_interruptible_sync(&pipe->wait);
44821@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
44822
44823 if (inode->i_pipe) {
44824 ret = 0;
44825- inode->i_pipe->readers++;
44826+ atomic_inc(&inode->i_pipe->readers);
44827 }
44828
44829 mutex_unlock(&inode->i_mutex);
44830@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
44831
44832 if (inode->i_pipe) {
44833 ret = 0;
44834- inode->i_pipe->writers++;
44835+ atomic_inc(&inode->i_pipe->writers);
44836 }
44837
44838 mutex_unlock(&inode->i_mutex);
44839@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
44840 if (inode->i_pipe) {
44841 ret = 0;
44842 if (filp->f_mode & FMODE_READ)
44843- inode->i_pipe->readers++;
44844+ atomic_inc(&inode->i_pipe->readers);
44845 if (filp->f_mode & FMODE_WRITE)
44846- inode->i_pipe->writers++;
44847+ atomic_inc(&inode->i_pipe->writers);
44848 }
44849
44850 mutex_unlock(&inode->i_mutex);
44851@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
44852 inode->i_pipe = NULL;
44853 }
44854
44855-static struct vfsmount *pipe_mnt __read_mostly;
44856+struct vfsmount *pipe_mnt __read_mostly;
44857 static int pipefs_delete_dentry(struct dentry *dentry)
44858 {
44859 /*
44860@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
44861 goto fail_iput;
44862 inode->i_pipe = pipe;
44863
44864- pipe->readers = pipe->writers = 1;
44865+ atomic_set(&pipe->readers, 1);
44866+ atomic_set(&pipe->writers, 1);
44867 inode->i_fop = &rdwr_pipefifo_fops;
44868
44869 /*
44870diff -urNp linux-2.6.32.45/fs/proc/array.c linux-2.6.32.45/fs/proc/array.c
44871--- linux-2.6.32.45/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
44872+++ linux-2.6.32.45/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
44873@@ -60,6 +60,7 @@
44874 #include <linux/tty.h>
44875 #include <linux/string.h>
44876 #include <linux/mman.h>
44877+#include <linux/grsecurity.h>
44878 #include <linux/proc_fs.h>
44879 #include <linux/ioport.h>
44880 #include <linux/uaccess.h>
44881@@ -321,6 +322,21 @@ static inline void task_context_switch_c
44882 p->nivcsw);
44883 }
44884
44885+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44886+static inline void task_pax(struct seq_file *m, struct task_struct *p)
44887+{
44888+ if (p->mm)
44889+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
44890+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
44891+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
44892+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
44893+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
44894+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
44895+ else
44896+ seq_printf(m, "PaX:\t-----\n");
44897+}
44898+#endif
44899+
44900 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
44901 struct pid *pid, struct task_struct *task)
44902 {
44903@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
44904 task_cap(m, task);
44905 cpuset_task_status_allowed(m, task);
44906 task_context_switch_counts(m, task);
44907+
44908+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44909+ task_pax(m, task);
44910+#endif
44911+
44912+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
44913+ task_grsec_rbac(m, task);
44914+#endif
44915+
44916 return 0;
44917 }
44918
44919+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44920+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44921+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
44922+ _mm->pax_flags & MF_PAX_SEGMEXEC))
44923+#endif
44924+
44925 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
44926 struct pid *pid, struct task_struct *task, int whole)
44927 {
44928@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
44929 cputime_t cutime, cstime, utime, stime;
44930 cputime_t cgtime, gtime;
44931 unsigned long rsslim = 0;
44932- char tcomm[sizeof(task->comm)];
44933+ char tcomm[sizeof(task->comm)] = { 0 };
44934 unsigned long flags;
44935
44936+ pax_track_stack();
44937+
44938 state = *get_task_state(task);
44939 vsize = eip = esp = 0;
44940 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
44941@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
44942 gtime = task_gtime(task);
44943 }
44944
44945+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44946+ if (PAX_RAND_FLAGS(mm)) {
44947+ eip = 0;
44948+ esp = 0;
44949+ wchan = 0;
44950+ }
44951+#endif
44952+#ifdef CONFIG_GRKERNSEC_HIDESYM
44953+ wchan = 0;
44954+ eip =0;
44955+ esp =0;
44956+#endif
44957+
44958 /* scale priority and nice values from timeslices to -20..20 */
44959 /* to make it look like a "normal" Unix priority/nice value */
44960 priority = task_prio(task);
44961@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
44962 vsize,
44963 mm ? get_mm_rss(mm) : 0,
44964 rsslim,
44965+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44966+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
44967+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
44968+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
44969+#else
44970 mm ? (permitted ? mm->start_code : 1) : 0,
44971 mm ? (permitted ? mm->end_code : 1) : 0,
44972 (permitted && mm) ? mm->start_stack : 0,
44973+#endif
44974 esp,
44975 eip,
44976 /* The signal information here is obsolete.
44977@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
44978
44979 return 0;
44980 }
44981+
44982+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
44983+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
44984+{
44985+ u32 curr_ip = 0;
44986+ unsigned long flags;
44987+
44988+ if (lock_task_sighand(task, &flags)) {
44989+ curr_ip = task->signal->curr_ip;
44990+ unlock_task_sighand(task, &flags);
44991+ }
44992+
44993+ return sprintf(buffer, "%pI4\n", &curr_ip);
44994+}
44995+#endif
44996diff -urNp linux-2.6.32.45/fs/proc/base.c linux-2.6.32.45/fs/proc/base.c
44997--- linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:35:30.000000000 -0400
44998+++ linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:34:33.000000000 -0400
44999@@ -102,6 +102,22 @@ struct pid_entry {
45000 union proc_op op;
45001 };
45002
45003+struct getdents_callback {
45004+ struct linux_dirent __user * current_dir;
45005+ struct linux_dirent __user * previous;
45006+ struct file * file;
45007+ int count;
45008+ int error;
45009+};
45010+
45011+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45012+ loff_t offset, u64 ino, unsigned int d_type)
45013+{
45014+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45015+ buf->error = -EINVAL;
45016+ return 0;
45017+}
45018+
45019 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45020 .name = (NAME), \
45021 .len = sizeof(NAME) - 1, \
45022@@ -213,6 +229,9 @@ static int check_mem_permission(struct t
45023 if (task == current)
45024 return 0;
45025
45026+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45027+ return -EPERM;
45028+
45029 /*
45030 * If current is actively ptrace'ing, and would also be
45031 * permitted to freshly attach with ptrace now, permit it.
45032@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
45033 if (!mm->arg_end)
45034 goto out_mm; /* Shh! No looking before we're done */
45035
45036+ if (gr_acl_handle_procpidmem(task))
45037+ goto out_mm;
45038+
45039 len = mm->arg_end - mm->arg_start;
45040
45041 if (len > PAGE_SIZE)
45042@@ -287,12 +309,28 @@ out:
45043 return res;
45044 }
45045
45046+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45047+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45048+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45049+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45050+#endif
45051+
45052 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45053 {
45054 int res = 0;
45055 struct mm_struct *mm = get_task_mm(task);
45056 if (mm) {
45057 unsigned int nwords = 0;
45058+
45059+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45060+ /* allow if we're currently ptracing this task */
45061+ if (PAX_RAND_FLAGS(mm) &&
45062+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45063+ mmput(mm);
45064+ return res;
45065+ }
45066+#endif
45067+
45068 do {
45069 nwords += 2;
45070 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45071@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
45072 }
45073
45074
45075-#ifdef CONFIG_KALLSYMS
45076+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45077 /*
45078 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45079 * Returns the resolved symbol. If that fails, simply return the address.
45080@@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
45081 }
45082 #endif /* CONFIG_KALLSYMS */
45083
45084-#ifdef CONFIG_STACKTRACE
45085+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45086
45087 #define MAX_STACK_TRACE_DEPTH 64
45088
45089@@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
45090 return count;
45091 }
45092
45093-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45094+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45095 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45096 {
45097 long nr;
45098@@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
45099 /************************************************************************/
45100
45101 /* permission checks */
45102-static int proc_fd_access_allowed(struct inode *inode)
45103+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45104 {
45105 struct task_struct *task;
45106 int allowed = 0;
45107@@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
45108 */
45109 task = get_proc_task(inode);
45110 if (task) {
45111- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45112+ if (log)
45113+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45114+ else
45115+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45116 put_task_struct(task);
45117 }
45118 return allowed;
45119@@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
45120 if (!task)
45121 goto out_no_task;
45122
45123+ if (gr_acl_handle_procpidmem(task))
45124+ goto out;
45125+
45126 if (!ptrace_may_access(task, PTRACE_MODE_READ))
45127 goto out;
45128
45129@@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
45130 path_put(&nd->path);
45131
45132 /* Are we allowed to snoop on the tasks file descriptors? */
45133- if (!proc_fd_access_allowed(inode))
45134+ if (!proc_fd_access_allowed(inode,0))
45135 goto out;
45136
45137 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45138@@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
45139 struct path path;
45140
45141 /* Are we allowed to snoop on the tasks file descriptors? */
45142- if (!proc_fd_access_allowed(inode))
45143- goto out;
45144+ /* logging this is needed for learning on chromium to work properly,
45145+ but we don't want to flood the logs from 'ps' which does a readlink
45146+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45147+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45148+ */
45149+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45150+ if (!proc_fd_access_allowed(inode,0))
45151+ goto out;
45152+ } else {
45153+ if (!proc_fd_access_allowed(inode,1))
45154+ goto out;
45155+ }
45156
45157 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45158 if (error)
45159@@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
45160 rcu_read_lock();
45161 cred = __task_cred(task);
45162 inode->i_uid = cred->euid;
45163+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45164+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45165+#else
45166 inode->i_gid = cred->egid;
45167+#endif
45168 rcu_read_unlock();
45169 }
45170 security_task_to_inode(task, inode);
45171@@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
45172 struct inode *inode = dentry->d_inode;
45173 struct task_struct *task;
45174 const struct cred *cred;
45175+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45176+ const struct cred *tmpcred = current_cred();
45177+#endif
45178
45179 generic_fillattr(inode, stat);
45180
45181@@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
45182 stat->uid = 0;
45183 stat->gid = 0;
45184 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45185+
45186+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45187+ rcu_read_unlock();
45188+ return -ENOENT;
45189+ }
45190+
45191 if (task) {
45192+ cred = __task_cred(task);
45193+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45194+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45195+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45196+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45197+#endif
45198+ ) {
45199+#endif
45200 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45201+#ifdef CONFIG_GRKERNSEC_PROC_USER
45202+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45203+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45204+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45205+#endif
45206 task_dumpable(task)) {
45207- cred = __task_cred(task);
45208 stat->uid = cred->euid;
45209+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45210+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45211+#else
45212 stat->gid = cred->egid;
45213+#endif
45214 }
45215+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45216+ } else {
45217+ rcu_read_unlock();
45218+ return -ENOENT;
45219+ }
45220+#endif
45221 }
45222 rcu_read_unlock();
45223 return 0;
45224@@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
45225
45226 if (task) {
45227 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45228+#ifdef CONFIG_GRKERNSEC_PROC_USER
45229+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45230+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45231+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45232+#endif
45233 task_dumpable(task)) {
45234 rcu_read_lock();
45235 cred = __task_cred(task);
45236 inode->i_uid = cred->euid;
45237+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45238+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45239+#else
45240 inode->i_gid = cred->egid;
45241+#endif
45242 rcu_read_unlock();
45243 } else {
45244 inode->i_uid = 0;
45245@@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
45246 int fd = proc_fd(inode);
45247
45248 if (task) {
45249- files = get_files_struct(task);
45250+ if (!gr_acl_handle_procpidmem(task))
45251+ files = get_files_struct(task);
45252 put_task_struct(task);
45253 }
45254 if (files) {
45255@@ -1895,12 +1994,22 @@ static const struct file_operations proc
45256 static int proc_fd_permission(struct inode *inode, int mask)
45257 {
45258 int rv;
45259+ struct task_struct *task;
45260
45261 rv = generic_permission(inode, mask, NULL);
45262- if (rv == 0)
45263- return 0;
45264+
45265 if (task_pid(current) == proc_pid(inode))
45266 rv = 0;
45267+
45268+ task = get_proc_task(inode);
45269+ if (task == NULL)
45270+ return rv;
45271+
45272+ if (gr_acl_handle_procpidmem(task))
45273+ rv = -EACCES;
45274+
45275+ put_task_struct(task);
45276+
45277 return rv;
45278 }
45279
45280@@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
45281 if (!task)
45282 goto out_no_task;
45283
45284+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45285+ goto out;
45286+
45287 /*
45288 * Yes, it does not scale. And it should not. Don't add
45289 * new entries into /proc/<tgid>/ without very good reasons.
45290@@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
45291 if (!task)
45292 goto out_no_task;
45293
45294+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45295+ goto out;
45296+
45297 ret = 0;
45298 i = filp->f_pos;
45299 switch (i) {
45300@@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
45301 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45302 void *cookie)
45303 {
45304- char *s = nd_get_link(nd);
45305+ const char *s = nd_get_link(nd);
45306 if (!IS_ERR(s))
45307 __putname(s);
45308 }
45309@@ -2522,7 +2637,7 @@ static const struct pid_entry tgid_base_
45310 #ifdef CONFIG_SCHED_DEBUG
45311 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45312 #endif
45313-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45314+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45315 INF("syscall", S_IRUSR, proc_pid_syscall),
45316 #endif
45317 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45318@@ -2547,10 +2662,10 @@ static const struct pid_entry tgid_base_
45319 #ifdef CONFIG_SECURITY
45320 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45321 #endif
45322-#ifdef CONFIG_KALLSYMS
45323+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45324 INF("wchan", S_IRUGO, proc_pid_wchan),
45325 #endif
45326-#ifdef CONFIG_STACKTRACE
45327+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45328 ONE("stack", S_IRUSR, proc_pid_stack),
45329 #endif
45330 #ifdef CONFIG_SCHEDSTATS
45331@@ -2580,6 +2695,9 @@ static const struct pid_entry tgid_base_
45332 #ifdef CONFIG_TASK_IO_ACCOUNTING
45333 INF("io", S_IRUSR, proc_tgid_io_accounting),
45334 #endif
45335+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45336+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45337+#endif
45338 };
45339
45340 static int proc_tgid_base_readdir(struct file * filp,
45341@@ -2704,7 +2822,14 @@ static struct dentry *proc_pid_instantia
45342 if (!inode)
45343 goto out;
45344
45345+#ifdef CONFIG_GRKERNSEC_PROC_USER
45346+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45347+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45348+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45349+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45350+#else
45351 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45352+#endif
45353 inode->i_op = &proc_tgid_base_inode_operations;
45354 inode->i_fop = &proc_tgid_base_operations;
45355 inode->i_flags|=S_IMMUTABLE;
45356@@ -2746,7 +2871,11 @@ struct dentry *proc_pid_lookup(struct in
45357 if (!task)
45358 goto out;
45359
45360+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45361+ goto out_put_task;
45362+
45363 result = proc_pid_instantiate(dir, dentry, task, NULL);
45364+out_put_task:
45365 put_task_struct(task);
45366 out:
45367 return result;
45368@@ -2811,6 +2940,11 @@ int proc_pid_readdir(struct file * filp,
45369 {
45370 unsigned int nr;
45371 struct task_struct *reaper;
45372+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45373+ const struct cred *tmpcred = current_cred();
45374+ const struct cred *itercred;
45375+#endif
45376+ filldir_t __filldir = filldir;
45377 struct tgid_iter iter;
45378 struct pid_namespace *ns;
45379
45380@@ -2834,8 +2968,27 @@ int proc_pid_readdir(struct file * filp,
45381 for (iter = next_tgid(ns, iter);
45382 iter.task;
45383 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45384+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45385+ rcu_read_lock();
45386+ itercred = __task_cred(iter.task);
45387+#endif
45388+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45389+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45390+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45391+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45392+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45393+#endif
45394+ )
45395+#endif
45396+ )
45397+ __filldir = &gr_fake_filldir;
45398+ else
45399+ __filldir = filldir;
45400+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45401+ rcu_read_unlock();
45402+#endif
45403 filp->f_pos = iter.tgid + TGID_OFFSET;
45404- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45405+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45406 put_task_struct(iter.task);
45407 goto out;
45408 }
45409@@ -2861,7 +3014,7 @@ static const struct pid_entry tid_base_s
45410 #ifdef CONFIG_SCHED_DEBUG
45411 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45412 #endif
45413-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45414+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45415 INF("syscall", S_IRUSR, proc_pid_syscall),
45416 #endif
45417 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45418@@ -2885,10 +3038,10 @@ static const struct pid_entry tid_base_s
45419 #ifdef CONFIG_SECURITY
45420 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45421 #endif
45422-#ifdef CONFIG_KALLSYMS
45423+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45424 INF("wchan", S_IRUGO, proc_pid_wchan),
45425 #endif
45426-#ifdef CONFIG_STACKTRACE
45427+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45428 ONE("stack", S_IRUSR, proc_pid_stack),
45429 #endif
45430 #ifdef CONFIG_SCHEDSTATS
45431diff -urNp linux-2.6.32.45/fs/proc/cmdline.c linux-2.6.32.45/fs/proc/cmdline.c
45432--- linux-2.6.32.45/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
45433+++ linux-2.6.32.45/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
45434@@ -23,7 +23,11 @@ static const struct file_operations cmdl
45435
45436 static int __init proc_cmdline_init(void)
45437 {
45438+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45439+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45440+#else
45441 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45442+#endif
45443 return 0;
45444 }
45445 module_init(proc_cmdline_init);
45446diff -urNp linux-2.6.32.45/fs/proc/devices.c linux-2.6.32.45/fs/proc/devices.c
45447--- linux-2.6.32.45/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
45448+++ linux-2.6.32.45/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
45449@@ -64,7 +64,11 @@ static const struct file_operations proc
45450
45451 static int __init proc_devices_init(void)
45452 {
45453+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45454+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45455+#else
45456 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45457+#endif
45458 return 0;
45459 }
45460 module_init(proc_devices_init);
45461diff -urNp linux-2.6.32.45/fs/proc/inode.c linux-2.6.32.45/fs/proc/inode.c
45462--- linux-2.6.32.45/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
45463+++ linux-2.6.32.45/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
45464@@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
45465 if (de->mode) {
45466 inode->i_mode = de->mode;
45467 inode->i_uid = de->uid;
45468+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45469+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45470+#else
45471 inode->i_gid = de->gid;
45472+#endif
45473 }
45474 if (de->size)
45475 inode->i_size = de->size;
45476diff -urNp linux-2.6.32.45/fs/proc/internal.h linux-2.6.32.45/fs/proc/internal.h
45477--- linux-2.6.32.45/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
45478+++ linux-2.6.32.45/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
45479@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45480 struct pid *pid, struct task_struct *task);
45481 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45482 struct pid *pid, struct task_struct *task);
45483+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45484+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45485+#endif
45486 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45487
45488 extern const struct file_operations proc_maps_operations;
45489diff -urNp linux-2.6.32.45/fs/proc/Kconfig linux-2.6.32.45/fs/proc/Kconfig
45490--- linux-2.6.32.45/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
45491+++ linux-2.6.32.45/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
45492@@ -30,12 +30,12 @@ config PROC_FS
45493
45494 config PROC_KCORE
45495 bool "/proc/kcore support" if !ARM
45496- depends on PROC_FS && MMU
45497+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45498
45499 config PROC_VMCORE
45500 bool "/proc/vmcore support (EXPERIMENTAL)"
45501- depends on PROC_FS && CRASH_DUMP
45502- default y
45503+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45504+ default n
45505 help
45506 Exports the dump image of crashed kernel in ELF format.
45507
45508@@ -59,8 +59,8 @@ config PROC_SYSCTL
45509 limited in memory.
45510
45511 config PROC_PAGE_MONITOR
45512- default y
45513- depends on PROC_FS && MMU
45514+ default n
45515+ depends on PROC_FS && MMU && !GRKERNSEC
45516 bool "Enable /proc page monitoring" if EMBEDDED
45517 help
45518 Various /proc files exist to monitor process memory utilization:
45519diff -urNp linux-2.6.32.45/fs/proc/kcore.c linux-2.6.32.45/fs/proc/kcore.c
45520--- linux-2.6.32.45/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
45521+++ linux-2.6.32.45/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
45522@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
45523 off_t offset = 0;
45524 struct kcore_list *m;
45525
45526+ pax_track_stack();
45527+
45528 /* setup ELF header */
45529 elf = (struct elfhdr *) bufp;
45530 bufp += sizeof(struct elfhdr);
45531@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
45532 * the addresses in the elf_phdr on our list.
45533 */
45534 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45535- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45536+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45537+ if (tsz > buflen)
45538 tsz = buflen;
45539-
45540+
45541 while (buflen) {
45542 struct kcore_list *m;
45543
45544@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
45545 kfree(elf_buf);
45546 } else {
45547 if (kern_addr_valid(start)) {
45548- unsigned long n;
45549+ char *elf_buf;
45550+ mm_segment_t oldfs;
45551
45552- n = copy_to_user(buffer, (char *)start, tsz);
45553- /*
45554- * We cannot distingush between fault on source
45555- * and fault on destination. When this happens
45556- * we clear too and hope it will trigger the
45557- * EFAULT again.
45558- */
45559- if (n) {
45560- if (clear_user(buffer + tsz - n,
45561- n))
45562+ elf_buf = kmalloc(tsz, GFP_KERNEL);
45563+ if (!elf_buf)
45564+ return -ENOMEM;
45565+ oldfs = get_fs();
45566+ set_fs(KERNEL_DS);
45567+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45568+ set_fs(oldfs);
45569+ if (copy_to_user(buffer, elf_buf, tsz)) {
45570+ kfree(elf_buf);
45571 return -EFAULT;
45572+ }
45573 }
45574+ set_fs(oldfs);
45575+ kfree(elf_buf);
45576 } else {
45577 if (clear_user(buffer, tsz))
45578 return -EFAULT;
45579@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
45580
45581 static int open_kcore(struct inode *inode, struct file *filp)
45582 {
45583+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45584+ return -EPERM;
45585+#endif
45586 if (!capable(CAP_SYS_RAWIO))
45587 return -EPERM;
45588 if (kcore_need_update)
45589diff -urNp linux-2.6.32.45/fs/proc/meminfo.c linux-2.6.32.45/fs/proc/meminfo.c
45590--- linux-2.6.32.45/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
45591+++ linux-2.6.32.45/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
45592@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45593 unsigned long pages[NR_LRU_LISTS];
45594 int lru;
45595
45596+ pax_track_stack();
45597+
45598 /*
45599 * display in kilobytes.
45600 */
45601@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
45602 vmi.used >> 10,
45603 vmi.largest_chunk >> 10
45604 #ifdef CONFIG_MEMORY_FAILURE
45605- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45606+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45607 #endif
45608 );
45609
45610diff -urNp linux-2.6.32.45/fs/proc/nommu.c linux-2.6.32.45/fs/proc/nommu.c
45611--- linux-2.6.32.45/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
45612+++ linux-2.6.32.45/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
45613@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
45614 if (len < 1)
45615 len = 1;
45616 seq_printf(m, "%*c", len, ' ');
45617- seq_path(m, &file->f_path, "");
45618+ seq_path(m, &file->f_path, "\n\\");
45619 }
45620
45621 seq_putc(m, '\n');
45622diff -urNp linux-2.6.32.45/fs/proc/proc_net.c linux-2.6.32.45/fs/proc/proc_net.c
45623--- linux-2.6.32.45/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
45624+++ linux-2.6.32.45/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
45625@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
45626 struct task_struct *task;
45627 struct nsproxy *ns;
45628 struct net *net = NULL;
45629+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45630+ const struct cred *cred = current_cred();
45631+#endif
45632+
45633+#ifdef CONFIG_GRKERNSEC_PROC_USER
45634+ if (cred->fsuid)
45635+ return net;
45636+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45637+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45638+ return net;
45639+#endif
45640
45641 rcu_read_lock();
45642 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45643diff -urNp linux-2.6.32.45/fs/proc/proc_sysctl.c linux-2.6.32.45/fs/proc/proc_sysctl.c
45644--- linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
45645+++ linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
45646@@ -7,6 +7,8 @@
45647 #include <linux/security.h>
45648 #include "internal.h"
45649
45650+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45651+
45652 static const struct dentry_operations proc_sys_dentry_operations;
45653 static const struct file_operations proc_sys_file_operations;
45654 static const struct inode_operations proc_sys_inode_operations;
45655@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
45656 if (!p)
45657 goto out;
45658
45659+ if (gr_handle_sysctl(p, MAY_EXEC))
45660+ goto out;
45661+
45662 err = ERR_PTR(-ENOMEM);
45663 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
45664 if (h)
45665@@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
45666 if (*pos < file->f_pos)
45667 continue;
45668
45669+ if (gr_handle_sysctl(table, 0))
45670+ continue;
45671+
45672 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45673 if (res)
45674 return res;
45675@@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
45676 if (IS_ERR(head))
45677 return PTR_ERR(head);
45678
45679+ if (table && gr_handle_sysctl(table, MAY_EXEC))
45680+ return -ENOENT;
45681+
45682 generic_fillattr(inode, stat);
45683 if (table)
45684 stat->mode = (stat->mode & S_IFMT) | table->mode;
45685diff -urNp linux-2.6.32.45/fs/proc/root.c linux-2.6.32.45/fs/proc/root.c
45686--- linux-2.6.32.45/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
45687+++ linux-2.6.32.45/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
45688@@ -134,7 +134,15 @@ void __init proc_root_init(void)
45689 #ifdef CONFIG_PROC_DEVICETREE
45690 proc_device_tree_init();
45691 #endif
45692+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45693+#ifdef CONFIG_GRKERNSEC_PROC_USER
45694+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45695+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45696+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45697+#endif
45698+#else
45699 proc_mkdir("bus", NULL);
45700+#endif
45701 proc_sys_init();
45702 }
45703
45704diff -urNp linux-2.6.32.45/fs/proc/task_mmu.c linux-2.6.32.45/fs/proc/task_mmu.c
45705--- linux-2.6.32.45/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
45706+++ linux-2.6.32.45/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
45707@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
45708 "VmStk:\t%8lu kB\n"
45709 "VmExe:\t%8lu kB\n"
45710 "VmLib:\t%8lu kB\n"
45711- "VmPTE:\t%8lu kB\n",
45712- hiwater_vm << (PAGE_SHIFT-10),
45713+ "VmPTE:\t%8lu kB\n"
45714+
45715+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45716+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45717+#endif
45718+
45719+ ,hiwater_vm << (PAGE_SHIFT-10),
45720 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45721 mm->locked_vm << (PAGE_SHIFT-10),
45722 hiwater_rss << (PAGE_SHIFT-10),
45723 total_rss << (PAGE_SHIFT-10),
45724 data << (PAGE_SHIFT-10),
45725 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
45726- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
45727+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
45728+
45729+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45730+ , mm->context.user_cs_base, mm->context.user_cs_limit
45731+#endif
45732+
45733+ );
45734 }
45735
45736 unsigned long task_vsize(struct mm_struct *mm)
45737@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
45738 struct proc_maps_private *priv = m->private;
45739 struct vm_area_struct *vma = v;
45740
45741- vma_stop(priv, vma);
45742+ if (!IS_ERR(vma))
45743+ vma_stop(priv, vma);
45744 if (priv->task)
45745 put_task_struct(priv->task);
45746 }
45747@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
45748 return ret;
45749 }
45750
45751+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45752+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45753+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45754+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45755+#endif
45756+
45757 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
45758 {
45759 struct mm_struct *mm = vma->vm_mm;
45760@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
45761 int flags = vma->vm_flags;
45762 unsigned long ino = 0;
45763 unsigned long long pgoff = 0;
45764- unsigned long start;
45765 dev_t dev = 0;
45766 int len;
45767
45768@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
45769 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
45770 }
45771
45772- /* We don't show the stack guard page in /proc/maps */
45773- start = vma->vm_start;
45774- if (vma->vm_flags & VM_GROWSDOWN)
45775- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
45776- start += PAGE_SIZE;
45777-
45778 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
45779- start,
45780+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45781+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
45782+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
45783+#else
45784+ vma->vm_start,
45785 vma->vm_end,
45786+#endif
45787 flags & VM_READ ? 'r' : '-',
45788 flags & VM_WRITE ? 'w' : '-',
45789 flags & VM_EXEC ? 'x' : '-',
45790 flags & VM_MAYSHARE ? 's' : 'p',
45791+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45792+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
45793+#else
45794 pgoff,
45795+#endif
45796 MAJOR(dev), MINOR(dev), ino, &len);
45797
45798 /*
45799@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
45800 */
45801 if (file) {
45802 pad_len_spaces(m, len);
45803- seq_path(m, &file->f_path, "\n");
45804+ seq_path(m, &file->f_path, "\n\\");
45805 } else {
45806 const char *name = arch_vma_name(vma);
45807 if (!name) {
45808@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
45809 if (vma->vm_start <= mm->brk &&
45810 vma->vm_end >= mm->start_brk) {
45811 name = "[heap]";
45812- } else if (vma->vm_start <= mm->start_stack &&
45813- vma->vm_end >= mm->start_stack) {
45814+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
45815+ (vma->vm_start <= mm->start_stack &&
45816+ vma->vm_end >= mm->start_stack)) {
45817 name = "[stack]";
45818 }
45819 } else {
45820@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
45821 };
45822
45823 memset(&mss, 0, sizeof mss);
45824- mss.vma = vma;
45825- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45826- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45827+
45828+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45829+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
45830+#endif
45831+ mss.vma = vma;
45832+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45833+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45834+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45835+ }
45836+#endif
45837
45838 show_map_vma(m, vma);
45839
45840@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
45841 "Swap: %8lu kB\n"
45842 "KernelPageSize: %8lu kB\n"
45843 "MMUPageSize: %8lu kB\n",
45844+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45845+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
45846+#else
45847 (vma->vm_end - vma->vm_start) >> 10,
45848+#endif
45849 mss.resident >> 10,
45850 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
45851 mss.shared_clean >> 10,
45852diff -urNp linux-2.6.32.45/fs/proc/task_nommu.c linux-2.6.32.45/fs/proc/task_nommu.c
45853--- linux-2.6.32.45/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
45854+++ linux-2.6.32.45/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
45855@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
45856 else
45857 bytes += kobjsize(mm);
45858
45859- if (current->fs && current->fs->users > 1)
45860+ if (current->fs && atomic_read(&current->fs->users) > 1)
45861 sbytes += kobjsize(current->fs);
45862 else
45863 bytes += kobjsize(current->fs);
45864@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
45865 if (len < 1)
45866 len = 1;
45867 seq_printf(m, "%*c", len, ' ');
45868- seq_path(m, &file->f_path, "");
45869+ seq_path(m, &file->f_path, "\n\\");
45870 }
45871
45872 seq_putc(m, '\n');
45873diff -urNp linux-2.6.32.45/fs/readdir.c linux-2.6.32.45/fs/readdir.c
45874--- linux-2.6.32.45/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
45875+++ linux-2.6.32.45/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
45876@@ -16,6 +16,7 @@
45877 #include <linux/security.h>
45878 #include <linux/syscalls.h>
45879 #include <linux/unistd.h>
45880+#include <linux/namei.h>
45881
45882 #include <asm/uaccess.h>
45883
45884@@ -67,6 +68,7 @@ struct old_linux_dirent {
45885
45886 struct readdir_callback {
45887 struct old_linux_dirent __user * dirent;
45888+ struct file * file;
45889 int result;
45890 };
45891
45892@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
45893 buf->result = -EOVERFLOW;
45894 return -EOVERFLOW;
45895 }
45896+
45897+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45898+ return 0;
45899+
45900 buf->result++;
45901 dirent = buf->dirent;
45902 if (!access_ok(VERIFY_WRITE, dirent,
45903@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
45904
45905 buf.result = 0;
45906 buf.dirent = dirent;
45907+ buf.file = file;
45908
45909 error = vfs_readdir(file, fillonedir, &buf);
45910 if (buf.result)
45911@@ -142,6 +149,7 @@ struct linux_dirent {
45912 struct getdents_callback {
45913 struct linux_dirent __user * current_dir;
45914 struct linux_dirent __user * previous;
45915+ struct file * file;
45916 int count;
45917 int error;
45918 };
45919@@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
45920 buf->error = -EOVERFLOW;
45921 return -EOVERFLOW;
45922 }
45923+
45924+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45925+ return 0;
45926+
45927 dirent = buf->previous;
45928 if (dirent) {
45929 if (__put_user(offset, &dirent->d_off))
45930@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
45931 buf.previous = NULL;
45932 buf.count = count;
45933 buf.error = 0;
45934+ buf.file = file;
45935
45936 error = vfs_readdir(file, filldir, &buf);
45937 if (error >= 0)
45938@@ -228,6 +241,7 @@ out:
45939 struct getdents_callback64 {
45940 struct linux_dirent64 __user * current_dir;
45941 struct linux_dirent64 __user * previous;
45942+ struct file *file;
45943 int count;
45944 int error;
45945 };
45946@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
45947 buf->error = -EINVAL; /* only used if we fail.. */
45948 if (reclen > buf->count)
45949 return -EINVAL;
45950+
45951+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45952+ return 0;
45953+
45954 dirent = buf->previous;
45955 if (dirent) {
45956 if (__put_user(offset, &dirent->d_off))
45957@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
45958
45959 buf.current_dir = dirent;
45960 buf.previous = NULL;
45961+ buf.file = file;
45962 buf.count = count;
45963 buf.error = 0;
45964
45965diff -urNp linux-2.6.32.45/fs/reiserfs/dir.c linux-2.6.32.45/fs/reiserfs/dir.c
45966--- linux-2.6.32.45/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
45967+++ linux-2.6.32.45/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
45968@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
45969 struct reiserfs_dir_entry de;
45970 int ret = 0;
45971
45972+ pax_track_stack();
45973+
45974 reiserfs_write_lock(inode->i_sb);
45975
45976 reiserfs_check_lock_depth(inode->i_sb, "readdir");
45977diff -urNp linux-2.6.32.45/fs/reiserfs/do_balan.c linux-2.6.32.45/fs/reiserfs/do_balan.c
45978--- linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
45979+++ linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
45980@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
45981 return;
45982 }
45983
45984- atomic_inc(&(fs_generation(tb->tb_sb)));
45985+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
45986 do_balance_starts(tb);
45987
45988 /* balance leaf returns 0 except if combining L R and S into
45989diff -urNp linux-2.6.32.45/fs/reiserfs/item_ops.c linux-2.6.32.45/fs/reiserfs/item_ops.c
45990--- linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
45991+++ linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
45992@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
45993 vi->vi_index, vi->vi_type, vi->vi_ih);
45994 }
45995
45996-static struct item_operations stat_data_ops = {
45997+static const struct item_operations stat_data_ops = {
45998 .bytes_number = sd_bytes_number,
45999 .decrement_key = sd_decrement_key,
46000 .is_left_mergeable = sd_is_left_mergeable,
46001@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
46002 vi->vi_index, vi->vi_type, vi->vi_ih);
46003 }
46004
46005-static struct item_operations direct_ops = {
46006+static const struct item_operations direct_ops = {
46007 .bytes_number = direct_bytes_number,
46008 .decrement_key = direct_decrement_key,
46009 .is_left_mergeable = direct_is_left_mergeable,
46010@@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
46011 vi->vi_index, vi->vi_type, vi->vi_ih);
46012 }
46013
46014-static struct item_operations indirect_ops = {
46015+static const struct item_operations indirect_ops = {
46016 .bytes_number = indirect_bytes_number,
46017 .decrement_key = indirect_decrement_key,
46018 .is_left_mergeable = indirect_is_left_mergeable,
46019@@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
46020 printk("\n");
46021 }
46022
46023-static struct item_operations direntry_ops = {
46024+static const struct item_operations direntry_ops = {
46025 .bytes_number = direntry_bytes_number,
46026 .decrement_key = direntry_decrement_key,
46027 .is_left_mergeable = direntry_is_left_mergeable,
46028@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
46029 "Invalid item type observed, run fsck ASAP");
46030 }
46031
46032-static struct item_operations errcatch_ops = {
46033+static const struct item_operations errcatch_ops = {
46034 errcatch_bytes_number,
46035 errcatch_decrement_key,
46036 errcatch_is_left_mergeable,
46037@@ -746,7 +746,7 @@ static struct item_operations errcatch_o
46038 #error Item types must use disk-format assigned values.
46039 #endif
46040
46041-struct item_operations *item_ops[TYPE_ANY + 1] = {
46042+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
46043 &stat_data_ops,
46044 &indirect_ops,
46045 &direct_ops,
46046diff -urNp linux-2.6.32.45/fs/reiserfs/journal.c linux-2.6.32.45/fs/reiserfs/journal.c
46047--- linux-2.6.32.45/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
46048+++ linux-2.6.32.45/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
46049@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
46050 struct buffer_head *bh;
46051 int i, j;
46052
46053+ pax_track_stack();
46054+
46055 bh = __getblk(dev, block, bufsize);
46056 if (buffer_uptodate(bh))
46057 return (bh);
46058diff -urNp linux-2.6.32.45/fs/reiserfs/namei.c linux-2.6.32.45/fs/reiserfs/namei.c
46059--- linux-2.6.32.45/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
46060+++ linux-2.6.32.45/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
46061@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
46062 unsigned long savelink = 1;
46063 struct timespec ctime;
46064
46065+ pax_track_stack();
46066+
46067 /* three balancings: (1) old name removal, (2) new name insertion
46068 and (3) maybe "save" link insertion
46069 stat data updates: (1) old directory,
46070diff -urNp linux-2.6.32.45/fs/reiserfs/procfs.c linux-2.6.32.45/fs/reiserfs/procfs.c
46071--- linux-2.6.32.45/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
46072+++ linux-2.6.32.45/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
46073@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
46074 "SMALL_TAILS " : "NO_TAILS ",
46075 replay_only(sb) ? "REPLAY_ONLY " : "",
46076 convert_reiserfs(sb) ? "CONV " : "",
46077- atomic_read(&r->s_generation_counter),
46078+ atomic_read_unchecked(&r->s_generation_counter),
46079 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46080 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46081 SF(s_good_search_by_key_reada), SF(s_bmaps),
46082@@ -309,6 +309,8 @@ static int show_journal(struct seq_file
46083 struct journal_params *jp = &rs->s_v1.s_journal;
46084 char b[BDEVNAME_SIZE];
46085
46086+ pax_track_stack();
46087+
46088 seq_printf(m, /* on-disk fields */
46089 "jp_journal_1st_block: \t%i\n"
46090 "jp_journal_dev: \t%s[%x]\n"
46091diff -urNp linux-2.6.32.45/fs/reiserfs/stree.c linux-2.6.32.45/fs/reiserfs/stree.c
46092--- linux-2.6.32.45/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
46093+++ linux-2.6.32.45/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
46094@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
46095 int iter = 0;
46096 #endif
46097
46098+ pax_track_stack();
46099+
46100 BUG_ON(!th->t_trans_id);
46101
46102 init_tb_struct(th, &s_del_balance, sb, path,
46103@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
46104 int retval;
46105 int quota_cut_bytes = 0;
46106
46107+ pax_track_stack();
46108+
46109 BUG_ON(!th->t_trans_id);
46110
46111 le_key2cpu_key(&cpu_key, key);
46112@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
46113 int quota_cut_bytes;
46114 loff_t tail_pos = 0;
46115
46116+ pax_track_stack();
46117+
46118 BUG_ON(!th->t_trans_id);
46119
46120 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46121@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
46122 int retval;
46123 int fs_gen;
46124
46125+ pax_track_stack();
46126+
46127 BUG_ON(!th->t_trans_id);
46128
46129 fs_gen = get_generation(inode->i_sb);
46130@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
46131 int fs_gen = 0;
46132 int quota_bytes = 0;
46133
46134+ pax_track_stack();
46135+
46136 BUG_ON(!th->t_trans_id);
46137
46138 if (inode) { /* Do we count quotas for item? */
46139diff -urNp linux-2.6.32.45/fs/reiserfs/super.c linux-2.6.32.45/fs/reiserfs/super.c
46140--- linux-2.6.32.45/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
46141+++ linux-2.6.32.45/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
46142@@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
46143 {.option_name = NULL}
46144 };
46145
46146+ pax_track_stack();
46147+
46148 *blocks = 0;
46149 if (!options || !*options)
46150 /* use default configuration: create tails, journaling on, no
46151diff -urNp linux-2.6.32.45/fs/select.c linux-2.6.32.45/fs/select.c
46152--- linux-2.6.32.45/fs/select.c 2011-03-27 14:31:47.000000000 -0400
46153+++ linux-2.6.32.45/fs/select.c 2011-05-16 21:46:57.000000000 -0400
46154@@ -20,6 +20,7 @@
46155 #include <linux/module.h>
46156 #include <linux/slab.h>
46157 #include <linux/poll.h>
46158+#include <linux/security.h>
46159 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46160 #include <linux/file.h>
46161 #include <linux/fdtable.h>
46162@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
46163 int retval, i, timed_out = 0;
46164 unsigned long slack = 0;
46165
46166+ pax_track_stack();
46167+
46168 rcu_read_lock();
46169 retval = max_select_fd(n, fds);
46170 rcu_read_unlock();
46171@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
46172 /* Allocate small arguments on the stack to save memory and be faster */
46173 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46174
46175+ pax_track_stack();
46176+
46177 ret = -EINVAL;
46178 if (n < 0)
46179 goto out_nofds;
46180@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
46181 struct poll_list *walk = head;
46182 unsigned long todo = nfds;
46183
46184+ pax_track_stack();
46185+
46186+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46187 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
46188 return -EINVAL;
46189
46190diff -urNp linux-2.6.32.45/fs/seq_file.c linux-2.6.32.45/fs/seq_file.c
46191--- linux-2.6.32.45/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
46192+++ linux-2.6.32.45/fs/seq_file.c 2011-08-05 20:33:55.000000000 -0400
46193@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46194 return 0;
46195 }
46196 if (!m->buf) {
46197- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46198+ m->size = PAGE_SIZE;
46199+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46200 if (!m->buf)
46201 return -ENOMEM;
46202 }
46203@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46204 Eoverflow:
46205 m->op->stop(m, p);
46206 kfree(m->buf);
46207- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46208+ m->size <<= 1;
46209+ m->buf = kmalloc(m->size, GFP_KERNEL);
46210 return !m->buf ? -ENOMEM : -EAGAIN;
46211 }
46212
46213@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46214 m->version = file->f_version;
46215 /* grab buffer if we didn't have one */
46216 if (!m->buf) {
46217- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46218+ m->size = PAGE_SIZE;
46219+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46220 if (!m->buf)
46221 goto Enomem;
46222 }
46223@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46224 goto Fill;
46225 m->op->stop(m, p);
46226 kfree(m->buf);
46227- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46228+ m->size <<= 1;
46229+ m->buf = kmalloc(m->size, GFP_KERNEL);
46230 if (!m->buf)
46231 goto Enomem;
46232 m->count = 0;
46233@@ -555,10 +559,10 @@ int single_open(struct file *file, int (
46234 int res = -ENOMEM;
46235
46236 if (op) {
46237- op->start = single_start;
46238- op->next = single_next;
46239- op->stop = single_stop;
46240- op->show = show;
46241+ *(void **)&op->start = single_start;
46242+ *(void **)&op->next = single_next;
46243+ *(void **)&op->stop = single_stop;
46244+ *(void **)&op->show = show;
46245 res = seq_open(file, op);
46246 if (!res)
46247 ((struct seq_file *)file->private_data)->private = data;
46248diff -urNp linux-2.6.32.45/fs/smbfs/proc.c linux-2.6.32.45/fs/smbfs/proc.c
46249--- linux-2.6.32.45/fs/smbfs/proc.c 2011-03-27 14:31:47.000000000 -0400
46250+++ linux-2.6.32.45/fs/smbfs/proc.c 2011-08-05 20:33:55.000000000 -0400
46251@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *
46252
46253 out:
46254 if (server->local_nls != NULL && server->remote_nls != NULL)
46255- server->ops->convert = convert_cp;
46256+ *(void **)&server->ops->convert = convert_cp;
46257 else
46258- server->ops->convert = convert_memcpy;
46259+ *(void **)&server->ops->convert = convert_memcpy;
46260
46261 smb_unlock_server(server);
46262 return n;
46263@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server,
46264
46265 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
46266 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
46267- server->ops->getattr = smb_proc_getattr_core;
46268+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
46269 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
46270- server->ops->getattr = smb_proc_getattr_ff;
46271+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
46272 }
46273
46274 /* Decode server capabilities */
46275@@ -3439,7 +3439,7 @@ out:
46276 static void
46277 install_ops(struct smb_ops *dst, struct smb_ops *src)
46278 {
46279- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46280+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46281 }
46282
46283 /* < LANMAN2 */
46284diff -urNp linux-2.6.32.45/fs/smbfs/symlink.c linux-2.6.32.45/fs/smbfs/symlink.c
46285--- linux-2.6.32.45/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46286+++ linux-2.6.32.45/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46287@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
46288
46289 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46290 {
46291- char *s = nd_get_link(nd);
46292+ const char *s = nd_get_link(nd);
46293 if (!IS_ERR(s))
46294 __putname(s);
46295 }
46296diff -urNp linux-2.6.32.45/fs/splice.c linux-2.6.32.45/fs/splice.c
46297--- linux-2.6.32.45/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
46298+++ linux-2.6.32.45/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
46299@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46300 pipe_lock(pipe);
46301
46302 for (;;) {
46303- if (!pipe->readers) {
46304+ if (!atomic_read(&pipe->readers)) {
46305 send_sig(SIGPIPE, current, 0);
46306 if (!ret)
46307 ret = -EPIPE;
46308@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46309 do_wakeup = 0;
46310 }
46311
46312- pipe->waiting_writers++;
46313+ atomic_inc(&pipe->waiting_writers);
46314 pipe_wait(pipe);
46315- pipe->waiting_writers--;
46316+ atomic_dec(&pipe->waiting_writers);
46317 }
46318
46319 pipe_unlock(pipe);
46320@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
46321 .spd_release = spd_release_page,
46322 };
46323
46324+ pax_track_stack();
46325+
46326 index = *ppos >> PAGE_CACHE_SHIFT;
46327 loff = *ppos & ~PAGE_CACHE_MASK;
46328 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46329@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
46330 old_fs = get_fs();
46331 set_fs(get_ds());
46332 /* The cast to a user pointer is valid due to the set_fs() */
46333- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46334+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
46335 set_fs(old_fs);
46336
46337 return res;
46338@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
46339 old_fs = get_fs();
46340 set_fs(get_ds());
46341 /* The cast to a user pointer is valid due to the set_fs() */
46342- res = vfs_write(file, (const char __user *)buf, count, &pos);
46343+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
46344 set_fs(old_fs);
46345
46346 return res;
46347@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
46348 .spd_release = spd_release_page,
46349 };
46350
46351+ pax_track_stack();
46352+
46353 index = *ppos >> PAGE_CACHE_SHIFT;
46354 offset = *ppos & ~PAGE_CACHE_MASK;
46355 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46356@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
46357 goto err;
46358
46359 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46360- vec[i].iov_base = (void __user *) page_address(page);
46361+ vec[i].iov_base = (__force void __user *) page_address(page);
46362 vec[i].iov_len = this_len;
46363 pages[i] = page;
46364 spd.nr_pages++;
46365@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46366 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46367 {
46368 while (!pipe->nrbufs) {
46369- if (!pipe->writers)
46370+ if (!atomic_read(&pipe->writers))
46371 return 0;
46372
46373- if (!pipe->waiting_writers && sd->num_spliced)
46374+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46375 return 0;
46376
46377 if (sd->flags & SPLICE_F_NONBLOCK)
46378@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
46379 * out of the pipe right after the splice_to_pipe(). So set
46380 * PIPE_READERS appropriately.
46381 */
46382- pipe->readers = 1;
46383+ atomic_set(&pipe->readers, 1);
46384
46385 current->splice_pipe = pipe;
46386 }
46387@@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
46388 .spd_release = spd_release_page,
46389 };
46390
46391+ pax_track_stack();
46392+
46393 pipe = pipe_info(file->f_path.dentry->d_inode);
46394 if (!pipe)
46395 return -EBADF;
46396@@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
46397 ret = -ERESTARTSYS;
46398 break;
46399 }
46400- if (!pipe->writers)
46401+ if (!atomic_read(&pipe->writers))
46402 break;
46403- if (!pipe->waiting_writers) {
46404+ if (!atomic_read(&pipe->waiting_writers)) {
46405 if (flags & SPLICE_F_NONBLOCK) {
46406 ret = -EAGAIN;
46407 break;
46408@@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
46409 pipe_lock(pipe);
46410
46411 while (pipe->nrbufs >= PIPE_BUFFERS) {
46412- if (!pipe->readers) {
46413+ if (!atomic_read(&pipe->readers)) {
46414 send_sig(SIGPIPE, current, 0);
46415 ret = -EPIPE;
46416 break;
46417@@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
46418 ret = -ERESTARTSYS;
46419 break;
46420 }
46421- pipe->waiting_writers++;
46422+ atomic_inc(&pipe->waiting_writers);
46423 pipe_wait(pipe);
46424- pipe->waiting_writers--;
46425+ atomic_dec(&pipe->waiting_writers);
46426 }
46427
46428 pipe_unlock(pipe);
46429@@ -1785,14 +1791,14 @@ retry:
46430 pipe_double_lock(ipipe, opipe);
46431
46432 do {
46433- if (!opipe->readers) {
46434+ if (!atomic_read(&opipe->readers)) {
46435 send_sig(SIGPIPE, current, 0);
46436 if (!ret)
46437 ret = -EPIPE;
46438 break;
46439 }
46440
46441- if (!ipipe->nrbufs && !ipipe->writers)
46442+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46443 break;
46444
46445 /*
46446@@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
46447 pipe_double_lock(ipipe, opipe);
46448
46449 do {
46450- if (!opipe->readers) {
46451+ if (!atomic_read(&opipe->readers)) {
46452 send_sig(SIGPIPE, current, 0);
46453 if (!ret)
46454 ret = -EPIPE;
46455@@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
46456 * return EAGAIN if we have the potential of some data in the
46457 * future, otherwise just return 0
46458 */
46459- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46460+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46461 ret = -EAGAIN;
46462
46463 pipe_unlock(ipipe);
46464diff -urNp linux-2.6.32.45/fs/sysfs/file.c linux-2.6.32.45/fs/sysfs/file.c
46465--- linux-2.6.32.45/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
46466+++ linux-2.6.32.45/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
46467@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46468
46469 struct sysfs_open_dirent {
46470 atomic_t refcnt;
46471- atomic_t event;
46472+ atomic_unchecked_t event;
46473 wait_queue_head_t poll;
46474 struct list_head buffers; /* goes through sysfs_buffer.list */
46475 };
46476@@ -53,7 +53,7 @@ struct sysfs_buffer {
46477 size_t count;
46478 loff_t pos;
46479 char * page;
46480- struct sysfs_ops * ops;
46481+ const struct sysfs_ops * ops;
46482 struct mutex mutex;
46483 int needs_read_fill;
46484 int event;
46485@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
46486 {
46487 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46488 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46489- struct sysfs_ops * ops = buffer->ops;
46490+ const struct sysfs_ops * ops = buffer->ops;
46491 int ret = 0;
46492 ssize_t count;
46493
46494@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
46495 if (!sysfs_get_active_two(attr_sd))
46496 return -ENODEV;
46497
46498- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46499+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46500 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46501
46502 sysfs_put_active_two(attr_sd);
46503@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
46504 {
46505 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46506 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46507- struct sysfs_ops * ops = buffer->ops;
46508+ const struct sysfs_ops * ops = buffer->ops;
46509 int rc;
46510
46511 /* need attr_sd for attr and ops, its parent for kobj */
46512@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
46513 return -ENOMEM;
46514
46515 atomic_set(&new_od->refcnt, 0);
46516- atomic_set(&new_od->event, 1);
46517+ atomic_set_unchecked(&new_od->event, 1);
46518 init_waitqueue_head(&new_od->poll);
46519 INIT_LIST_HEAD(&new_od->buffers);
46520 goto retry;
46521@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
46522 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
46523 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46524 struct sysfs_buffer *buffer;
46525- struct sysfs_ops *ops;
46526+ const struct sysfs_ops *ops;
46527 int error = -EACCES;
46528 char *p;
46529
46530@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
46531
46532 sysfs_put_active_two(attr_sd);
46533
46534- if (buffer->event != atomic_read(&od->event))
46535+ if (buffer->event != atomic_read_unchecked(&od->event))
46536 goto trigger;
46537
46538 return DEFAULT_POLLMASK;
46539@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
46540
46541 od = sd->s_attr.open;
46542 if (od) {
46543- atomic_inc(&od->event);
46544+ atomic_inc_unchecked(&od->event);
46545 wake_up_interruptible(&od->poll);
46546 }
46547
46548diff -urNp linux-2.6.32.45/fs/sysfs/mount.c linux-2.6.32.45/fs/sysfs/mount.c
46549--- linux-2.6.32.45/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
46550+++ linux-2.6.32.45/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
46551@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46552 .s_name = "",
46553 .s_count = ATOMIC_INIT(1),
46554 .s_flags = SYSFS_DIR,
46555+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46556+ .s_mode = S_IFDIR | S_IRWXU,
46557+#else
46558 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46559+#endif
46560 .s_ino = 1,
46561 };
46562
46563diff -urNp linux-2.6.32.45/fs/sysfs/symlink.c linux-2.6.32.45/fs/sysfs/symlink.c
46564--- linux-2.6.32.45/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46565+++ linux-2.6.32.45/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46566@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
46567
46568 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46569 {
46570- char *page = nd_get_link(nd);
46571+ const char *page = nd_get_link(nd);
46572 if (!IS_ERR(page))
46573 free_page((unsigned long)page);
46574 }
46575diff -urNp linux-2.6.32.45/fs/udf/balloc.c linux-2.6.32.45/fs/udf/balloc.c
46576--- linux-2.6.32.45/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
46577+++ linux-2.6.32.45/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
46578@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
46579
46580 mutex_lock(&sbi->s_alloc_mutex);
46581 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46582- if (bloc->logicalBlockNum < 0 ||
46583- (bloc->logicalBlockNum + count) >
46584- partmap->s_partition_len) {
46585+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46586 udf_debug("%d < %d || %d + %d > %d\n",
46587 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
46588 count, partmap->s_partition_len);
46589@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
46590
46591 mutex_lock(&sbi->s_alloc_mutex);
46592 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46593- if (bloc->logicalBlockNum < 0 ||
46594- (bloc->logicalBlockNum + count) >
46595- partmap->s_partition_len) {
46596+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46597 udf_debug("%d < %d || %d + %d > %d\n",
46598 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
46599 partmap->s_partition_len);
46600diff -urNp linux-2.6.32.45/fs/udf/inode.c linux-2.6.32.45/fs/udf/inode.c
46601--- linux-2.6.32.45/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
46602+++ linux-2.6.32.45/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
46603@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
46604 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46605 int lastblock = 0;
46606
46607+ pax_track_stack();
46608+
46609 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46610 prev_epos.block = iinfo->i_location;
46611 prev_epos.bh = NULL;
46612diff -urNp linux-2.6.32.45/fs/udf/misc.c linux-2.6.32.45/fs/udf/misc.c
46613--- linux-2.6.32.45/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
46614+++ linux-2.6.32.45/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
46615@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46616
46617 u8 udf_tag_checksum(const struct tag *t)
46618 {
46619- u8 *data = (u8 *)t;
46620+ const u8 *data = (const u8 *)t;
46621 u8 checksum = 0;
46622 int i;
46623 for (i = 0; i < sizeof(struct tag); ++i)
46624diff -urNp linux-2.6.32.45/fs/utimes.c linux-2.6.32.45/fs/utimes.c
46625--- linux-2.6.32.45/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
46626+++ linux-2.6.32.45/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
46627@@ -1,6 +1,7 @@
46628 #include <linux/compiler.h>
46629 #include <linux/file.h>
46630 #include <linux/fs.h>
46631+#include <linux/security.h>
46632 #include <linux/linkage.h>
46633 #include <linux/mount.h>
46634 #include <linux/namei.h>
46635@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46636 goto mnt_drop_write_and_out;
46637 }
46638 }
46639+
46640+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46641+ error = -EACCES;
46642+ goto mnt_drop_write_and_out;
46643+ }
46644+
46645 mutex_lock(&inode->i_mutex);
46646 error = notify_change(path->dentry, &newattrs);
46647 mutex_unlock(&inode->i_mutex);
46648diff -urNp linux-2.6.32.45/fs/xattr_acl.c linux-2.6.32.45/fs/xattr_acl.c
46649--- linux-2.6.32.45/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
46650+++ linux-2.6.32.45/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
46651@@ -17,8 +17,8 @@
46652 struct posix_acl *
46653 posix_acl_from_xattr(const void *value, size_t size)
46654 {
46655- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46656- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46657+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46658+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46659 int count;
46660 struct posix_acl *acl;
46661 struct posix_acl_entry *acl_e;
46662diff -urNp linux-2.6.32.45/fs/xattr.c linux-2.6.32.45/fs/xattr.c
46663--- linux-2.6.32.45/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
46664+++ linux-2.6.32.45/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
46665@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46666 * Extended attribute SET operations
46667 */
46668 static long
46669-setxattr(struct dentry *d, const char __user *name, const void __user *value,
46670+setxattr(struct path *path, const char __user *name, const void __user *value,
46671 size_t size, int flags)
46672 {
46673 int error;
46674@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
46675 return PTR_ERR(kvalue);
46676 }
46677
46678- error = vfs_setxattr(d, kname, kvalue, size, flags);
46679+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46680+ error = -EACCES;
46681+ goto out;
46682+ }
46683+
46684+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46685+out:
46686 kfree(kvalue);
46687 return error;
46688 }
46689@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46690 return error;
46691 error = mnt_want_write(path.mnt);
46692 if (!error) {
46693- error = setxattr(path.dentry, name, value, size, flags);
46694+ error = setxattr(&path, name, value, size, flags);
46695 mnt_drop_write(path.mnt);
46696 }
46697 path_put(&path);
46698@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46699 return error;
46700 error = mnt_want_write(path.mnt);
46701 if (!error) {
46702- error = setxattr(path.dentry, name, value, size, flags);
46703+ error = setxattr(&path, name, value, size, flags);
46704 mnt_drop_write(path.mnt);
46705 }
46706 path_put(&path);
46707@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46708 const void __user *,value, size_t, size, int, flags)
46709 {
46710 struct file *f;
46711- struct dentry *dentry;
46712 int error = -EBADF;
46713
46714 f = fget(fd);
46715 if (!f)
46716 return error;
46717- dentry = f->f_path.dentry;
46718- audit_inode(NULL, dentry);
46719+ audit_inode(NULL, f->f_path.dentry);
46720 error = mnt_want_write_file(f);
46721 if (!error) {
46722- error = setxattr(dentry, name, value, size, flags);
46723+ error = setxattr(&f->f_path, name, value, size, flags);
46724 mnt_drop_write(f->f_path.mnt);
46725 }
46726 fput(f);
46727diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c
46728--- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
46729+++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
46730@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
46731 xfs_fsop_geom_t fsgeo;
46732 int error;
46733
46734+ memset(&fsgeo, 0, sizeof(fsgeo));
46735 error = xfs_fs_geometry(mp, &fsgeo, 3);
46736 if (error)
46737 return -error;
46738diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c
46739--- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
46740+++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
46741@@ -134,7 +134,7 @@ xfs_find_handle(
46742 }
46743
46744 error = -EFAULT;
46745- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46746+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46747 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46748 goto out_put;
46749
46750@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
46751 if (IS_ERR(dentry))
46752 return PTR_ERR(dentry);
46753
46754- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
46755+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
46756 if (!kbuf)
46757 goto out_dput;
46758
46759@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
46760 xfs_mount_t *mp,
46761 void __user *arg)
46762 {
46763- xfs_fsop_geom_t fsgeo;
46764+ xfs_fsop_geom_t fsgeo;
46765 int error;
46766
46767 error = xfs_fs_geometry(mp, &fsgeo, 3);
46768diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c
46769--- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
46770+++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
46771@@ -468,7 +468,7 @@ xfs_vn_put_link(
46772 struct nameidata *nd,
46773 void *p)
46774 {
46775- char *s = nd_get_link(nd);
46776+ const char *s = nd_get_link(nd);
46777
46778 if (!IS_ERR(s))
46779 kfree(s);
46780diff -urNp linux-2.6.32.45/fs/xfs/xfs_bmap.c linux-2.6.32.45/fs/xfs/xfs_bmap.c
46781--- linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
46782+++ linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
46783@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
46784 int nmap,
46785 int ret_nmap);
46786 #else
46787-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46788+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46789 #endif /* DEBUG */
46790
46791 #if defined(XFS_RW_TRACE)
46792diff -urNp linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c
46793--- linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
46794+++ linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
46795@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
46796 }
46797
46798 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46799- if (filldir(dirent, sfep->name, sfep->namelen,
46800+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46801+ char name[sfep->namelen];
46802+ memcpy(name, sfep->name, sfep->namelen);
46803+ if (filldir(dirent, name, sfep->namelen,
46804+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
46805+ *offset = off & 0x7fffffff;
46806+ return 0;
46807+ }
46808+ } else if (filldir(dirent, sfep->name, sfep->namelen,
46809 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46810 *offset = off & 0x7fffffff;
46811 return 0;
46812diff -urNp linux-2.6.32.45/grsecurity/gracl_alloc.c linux-2.6.32.45/grsecurity/gracl_alloc.c
46813--- linux-2.6.32.45/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
46814+++ linux-2.6.32.45/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
46815@@ -0,0 +1,105 @@
46816+#include <linux/kernel.h>
46817+#include <linux/mm.h>
46818+#include <linux/slab.h>
46819+#include <linux/vmalloc.h>
46820+#include <linux/gracl.h>
46821+#include <linux/grsecurity.h>
46822+
46823+static unsigned long alloc_stack_next = 1;
46824+static unsigned long alloc_stack_size = 1;
46825+static void **alloc_stack;
46826+
46827+static __inline__ int
46828+alloc_pop(void)
46829+{
46830+ if (alloc_stack_next == 1)
46831+ return 0;
46832+
46833+ kfree(alloc_stack[alloc_stack_next - 2]);
46834+
46835+ alloc_stack_next--;
46836+
46837+ return 1;
46838+}
46839+
46840+static __inline__ int
46841+alloc_push(void *buf)
46842+{
46843+ if (alloc_stack_next >= alloc_stack_size)
46844+ return 1;
46845+
46846+ alloc_stack[alloc_stack_next - 1] = buf;
46847+
46848+ alloc_stack_next++;
46849+
46850+ return 0;
46851+}
46852+
46853+void *
46854+acl_alloc(unsigned long len)
46855+{
46856+ void *ret = NULL;
46857+
46858+ if (!len || len > PAGE_SIZE)
46859+ goto out;
46860+
46861+ ret = kmalloc(len, GFP_KERNEL);
46862+
46863+ if (ret) {
46864+ if (alloc_push(ret)) {
46865+ kfree(ret);
46866+ ret = NULL;
46867+ }
46868+ }
46869+
46870+out:
46871+ return ret;
46872+}
46873+
46874+void *
46875+acl_alloc_num(unsigned long num, unsigned long len)
46876+{
46877+ if (!len || (num > (PAGE_SIZE / len)))
46878+ return NULL;
46879+
46880+ return acl_alloc(num * len);
46881+}
46882+
46883+void
46884+acl_free_all(void)
46885+{
46886+ if (gr_acl_is_enabled() || !alloc_stack)
46887+ return;
46888+
46889+ while (alloc_pop()) ;
46890+
46891+ if (alloc_stack) {
46892+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
46893+ kfree(alloc_stack);
46894+ else
46895+ vfree(alloc_stack);
46896+ }
46897+
46898+ alloc_stack = NULL;
46899+ alloc_stack_size = 1;
46900+ alloc_stack_next = 1;
46901+
46902+ return;
46903+}
46904+
46905+int
46906+acl_alloc_stack_init(unsigned long size)
46907+{
46908+ if ((size * sizeof (void *)) <= PAGE_SIZE)
46909+ alloc_stack =
46910+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
46911+ else
46912+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
46913+
46914+ alloc_stack_size = size;
46915+
46916+ if (!alloc_stack)
46917+ return 0;
46918+ else
46919+ return 1;
46920+}
46921diff -urNp linux-2.6.32.45/grsecurity/gracl.c linux-2.6.32.45/grsecurity/gracl.c
46922--- linux-2.6.32.45/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
46923+++ linux-2.6.32.45/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
46924@@ -0,0 +1,4082 @@
46925+#include <linux/kernel.h>
46926+#include <linux/module.h>
46927+#include <linux/sched.h>
46928+#include <linux/mm.h>
46929+#include <linux/file.h>
46930+#include <linux/fs.h>
46931+#include <linux/namei.h>
46932+#include <linux/mount.h>
46933+#include <linux/tty.h>
46934+#include <linux/proc_fs.h>
46935+#include <linux/smp_lock.h>
46936+#include <linux/slab.h>
46937+#include <linux/vmalloc.h>
46938+#include <linux/types.h>
46939+#include <linux/sysctl.h>
46940+#include <linux/netdevice.h>
46941+#include <linux/ptrace.h>
46942+#include <linux/gracl.h>
46943+#include <linux/gralloc.h>
46944+#include <linux/grsecurity.h>
46945+#include <linux/grinternal.h>
46946+#include <linux/pid_namespace.h>
46947+#include <linux/fdtable.h>
46948+#include <linux/percpu.h>
46949+
46950+#include <asm/uaccess.h>
46951+#include <asm/errno.h>
46952+#include <asm/mman.h>
46953+
46954+static struct acl_role_db acl_role_set;
46955+static struct name_db name_set;
46956+static struct inodev_db inodev_set;
46957+
46958+/* for keeping track of userspace pointers used for subjects, so we
46959+ can share references in the kernel as well
46960+*/
46961+
46962+static struct dentry *real_root;
46963+static struct vfsmount *real_root_mnt;
46964+
46965+static struct acl_subj_map_db subj_map_set;
46966+
46967+static struct acl_role_label *default_role;
46968+
46969+static struct acl_role_label *role_list;
46970+
46971+static u16 acl_sp_role_value;
46972+
46973+extern char *gr_shared_page[4];
46974+static DEFINE_MUTEX(gr_dev_mutex);
46975+DEFINE_RWLOCK(gr_inode_lock);
46976+
46977+struct gr_arg *gr_usermode;
46978+
46979+static unsigned int gr_status __read_only = GR_STATUS_INIT;
46980+
46981+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
46982+extern void gr_clear_learn_entries(void);
46983+
46984+#ifdef CONFIG_GRKERNSEC_RESLOG
46985+extern void gr_log_resource(const struct task_struct *task,
46986+ const int res, const unsigned long wanted, const int gt);
46987+#endif
46988+
46989+unsigned char *gr_system_salt;
46990+unsigned char *gr_system_sum;
46991+
46992+static struct sprole_pw **acl_special_roles = NULL;
46993+static __u16 num_sprole_pws = 0;
46994+
46995+static struct acl_role_label *kernel_role = NULL;
46996+
46997+static unsigned int gr_auth_attempts = 0;
46998+static unsigned long gr_auth_expires = 0UL;
46999+
47000+#ifdef CONFIG_NET
47001+extern struct vfsmount *sock_mnt;
47002+#endif
47003+extern struct vfsmount *pipe_mnt;
47004+extern struct vfsmount *shm_mnt;
47005+#ifdef CONFIG_HUGETLBFS
47006+extern struct vfsmount *hugetlbfs_vfsmount;
47007+#endif
47008+
47009+static struct acl_object_label *fakefs_obj_rw;
47010+static struct acl_object_label *fakefs_obj_rwx;
47011+
47012+extern int gr_init_uidset(void);
47013+extern void gr_free_uidset(void);
47014+extern void gr_remove_uid(uid_t uid);
47015+extern int gr_find_uid(uid_t uid);
47016+
47017+__inline__ int
47018+gr_acl_is_enabled(void)
47019+{
47020+ return (gr_status & GR_READY);
47021+}
47022+
47023+#ifdef CONFIG_BTRFS_FS
47024+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
47025+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
47026+#endif
47027+
47028+static inline dev_t __get_dev(const struct dentry *dentry)
47029+{
47030+#ifdef CONFIG_BTRFS_FS
47031+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
47032+ return get_btrfs_dev_from_inode(dentry->d_inode);
47033+ else
47034+#endif
47035+ return dentry->d_inode->i_sb->s_dev;
47036+}
47037+
47038+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47039+{
47040+ return __get_dev(dentry);
47041+}
47042+
47043+static char gr_task_roletype_to_char(struct task_struct *task)
47044+{
47045+ switch (task->role->roletype &
47046+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
47047+ GR_ROLE_SPECIAL)) {
47048+ case GR_ROLE_DEFAULT:
47049+ return 'D';
47050+ case GR_ROLE_USER:
47051+ return 'U';
47052+ case GR_ROLE_GROUP:
47053+ return 'G';
47054+ case GR_ROLE_SPECIAL:
47055+ return 'S';
47056+ }
47057+
47058+ return 'X';
47059+}
47060+
47061+char gr_roletype_to_char(void)
47062+{
47063+ return gr_task_roletype_to_char(current);
47064+}
47065+
47066+__inline__ int
47067+gr_acl_tpe_check(void)
47068+{
47069+ if (unlikely(!(gr_status & GR_READY)))
47070+ return 0;
47071+ if (current->role->roletype & GR_ROLE_TPE)
47072+ return 1;
47073+ else
47074+ return 0;
47075+}
47076+
47077+int
47078+gr_handle_rawio(const struct inode *inode)
47079+{
47080+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47081+ if (inode && S_ISBLK(inode->i_mode) &&
47082+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47083+ !capable(CAP_SYS_RAWIO))
47084+ return 1;
47085+#endif
47086+ return 0;
47087+}
47088+
47089+static int
47090+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
47091+{
47092+ if (likely(lena != lenb))
47093+ return 0;
47094+
47095+ return !memcmp(a, b, lena);
47096+}
47097+
47098+/* this must be called with vfsmount_lock and dcache_lock held */
47099+
47100+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47101+ struct dentry *root, struct vfsmount *rootmnt,
47102+ char *buffer, int buflen)
47103+{
47104+ char * end = buffer+buflen;
47105+ char * retval;
47106+ int namelen;
47107+
47108+ *--end = '\0';
47109+ buflen--;
47110+
47111+ if (buflen < 1)
47112+ goto Elong;
47113+ /* Get '/' right */
47114+ retval = end-1;
47115+ *retval = '/';
47116+
47117+ for (;;) {
47118+ struct dentry * parent;
47119+
47120+ if (dentry == root && vfsmnt == rootmnt)
47121+ break;
47122+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47123+ /* Global root? */
47124+ if (vfsmnt->mnt_parent == vfsmnt)
47125+ goto global_root;
47126+ dentry = vfsmnt->mnt_mountpoint;
47127+ vfsmnt = vfsmnt->mnt_parent;
47128+ continue;
47129+ }
47130+ parent = dentry->d_parent;
47131+ prefetch(parent);
47132+ namelen = dentry->d_name.len;
47133+ buflen -= namelen + 1;
47134+ if (buflen < 0)
47135+ goto Elong;
47136+ end -= namelen;
47137+ memcpy(end, dentry->d_name.name, namelen);
47138+ *--end = '/';
47139+ retval = end;
47140+ dentry = parent;
47141+ }
47142+
47143+out:
47144+ return retval;
47145+
47146+global_root:
47147+ namelen = dentry->d_name.len;
47148+ buflen -= namelen;
47149+ if (buflen < 0)
47150+ goto Elong;
47151+ retval -= namelen-1; /* hit the slash */
47152+ memcpy(retval, dentry->d_name.name, namelen);
47153+ goto out;
47154+Elong:
47155+ retval = ERR_PTR(-ENAMETOOLONG);
47156+ goto out;
47157+}
47158+
47159+static char *
47160+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47161+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
47162+{
47163+ char *retval;
47164+
47165+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
47166+ if (unlikely(IS_ERR(retval)))
47167+ retval = strcpy(buf, "<path too long>");
47168+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47169+ retval[1] = '\0';
47170+
47171+ return retval;
47172+}
47173+
47174+static char *
47175+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47176+ char *buf, int buflen)
47177+{
47178+ char *res;
47179+
47180+ /* we can use real_root, real_root_mnt, because this is only called
47181+ by the RBAC system */
47182+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
47183+
47184+ return res;
47185+}
47186+
47187+static char *
47188+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47189+ char *buf, int buflen)
47190+{
47191+ char *res;
47192+ struct dentry *root;
47193+ struct vfsmount *rootmnt;
47194+ struct task_struct *reaper = &init_task;
47195+
47196+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
47197+ read_lock(&reaper->fs->lock);
47198+ root = dget(reaper->fs->root.dentry);
47199+ rootmnt = mntget(reaper->fs->root.mnt);
47200+ read_unlock(&reaper->fs->lock);
47201+
47202+ spin_lock(&dcache_lock);
47203+ spin_lock(&vfsmount_lock);
47204+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
47205+ spin_unlock(&vfsmount_lock);
47206+ spin_unlock(&dcache_lock);
47207+
47208+ dput(root);
47209+ mntput(rootmnt);
47210+ return res;
47211+}
47212+
47213+static char *
47214+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47215+{
47216+ char *ret;
47217+ spin_lock(&dcache_lock);
47218+ spin_lock(&vfsmount_lock);
47219+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47220+ PAGE_SIZE);
47221+ spin_unlock(&vfsmount_lock);
47222+ spin_unlock(&dcache_lock);
47223+ return ret;
47224+}
47225+
47226+char *
47227+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47228+{
47229+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47230+ PAGE_SIZE);
47231+}
47232+
47233+char *
47234+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47235+{
47236+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47237+ PAGE_SIZE);
47238+}
47239+
47240+char *
47241+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47242+{
47243+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47244+ PAGE_SIZE);
47245+}
47246+
47247+char *
47248+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47249+{
47250+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47251+ PAGE_SIZE);
47252+}
47253+
47254+char *
47255+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47256+{
47257+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47258+ PAGE_SIZE);
47259+}
47260+
47261+__inline__ __u32
47262+to_gr_audit(const __u32 reqmode)
47263+{
47264+ /* masks off auditable permission flags, then shifts them to create
47265+ auditing flags, and adds the special case of append auditing if
47266+ we're requesting write */
47267+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47268+}
47269+
47270+struct acl_subject_label *
47271+lookup_subject_map(const struct acl_subject_label *userp)
47272+{
47273+ unsigned int index = shash(userp, subj_map_set.s_size);
47274+ struct subject_map *match;
47275+
47276+ match = subj_map_set.s_hash[index];
47277+
47278+ while (match && match->user != userp)
47279+ match = match->next;
47280+
47281+ if (match != NULL)
47282+ return match->kernel;
47283+ else
47284+ return NULL;
47285+}
47286+
47287+static void
47288+insert_subj_map_entry(struct subject_map *subjmap)
47289+{
47290+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47291+ struct subject_map **curr;
47292+
47293+ subjmap->prev = NULL;
47294+
47295+ curr = &subj_map_set.s_hash[index];
47296+ if (*curr != NULL)
47297+ (*curr)->prev = subjmap;
47298+
47299+ subjmap->next = *curr;
47300+ *curr = subjmap;
47301+
47302+ return;
47303+}
47304+
47305+static struct acl_role_label *
47306+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47307+ const gid_t gid)
47308+{
47309+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47310+ struct acl_role_label *match;
47311+ struct role_allowed_ip *ipp;
47312+ unsigned int x;
47313+ u32 curr_ip = task->signal->curr_ip;
47314+
47315+ task->signal->saved_ip = curr_ip;
47316+
47317+ match = acl_role_set.r_hash[index];
47318+
47319+ while (match) {
47320+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47321+ for (x = 0; x < match->domain_child_num; x++) {
47322+ if (match->domain_children[x] == uid)
47323+ goto found;
47324+ }
47325+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47326+ break;
47327+ match = match->next;
47328+ }
47329+found:
47330+ if (match == NULL) {
47331+ try_group:
47332+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47333+ match = acl_role_set.r_hash[index];
47334+
47335+ while (match) {
47336+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47337+ for (x = 0; x < match->domain_child_num; x++) {
47338+ if (match->domain_children[x] == gid)
47339+ goto found2;
47340+ }
47341+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47342+ break;
47343+ match = match->next;
47344+ }
47345+found2:
47346+ if (match == NULL)
47347+ match = default_role;
47348+ if (match->allowed_ips == NULL)
47349+ return match;
47350+ else {
47351+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47352+ if (likely
47353+ ((ntohl(curr_ip) & ipp->netmask) ==
47354+ (ntohl(ipp->addr) & ipp->netmask)))
47355+ return match;
47356+ }
47357+ match = default_role;
47358+ }
47359+ } else if (match->allowed_ips == NULL) {
47360+ return match;
47361+ } else {
47362+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47363+ if (likely
47364+ ((ntohl(curr_ip) & ipp->netmask) ==
47365+ (ntohl(ipp->addr) & ipp->netmask)))
47366+ return match;
47367+ }
47368+ goto try_group;
47369+ }
47370+
47371+ return match;
47372+}
47373+
47374+struct acl_subject_label *
47375+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47376+ const struct acl_role_label *role)
47377+{
47378+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47379+ struct acl_subject_label *match;
47380+
47381+ match = role->subj_hash[index];
47382+
47383+ while (match && (match->inode != ino || match->device != dev ||
47384+ (match->mode & GR_DELETED))) {
47385+ match = match->next;
47386+ }
47387+
47388+ if (match && !(match->mode & GR_DELETED))
47389+ return match;
47390+ else
47391+ return NULL;
47392+}
47393+
47394+struct acl_subject_label *
47395+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47396+ const struct acl_role_label *role)
47397+{
47398+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47399+ struct acl_subject_label *match;
47400+
47401+ match = role->subj_hash[index];
47402+
47403+ while (match && (match->inode != ino || match->device != dev ||
47404+ !(match->mode & GR_DELETED))) {
47405+ match = match->next;
47406+ }
47407+
47408+ if (match && (match->mode & GR_DELETED))
47409+ return match;
47410+ else
47411+ return NULL;
47412+}
47413+
47414+static struct acl_object_label *
47415+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47416+ const struct acl_subject_label *subj)
47417+{
47418+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47419+ struct acl_object_label *match;
47420+
47421+ match = subj->obj_hash[index];
47422+
47423+ while (match && (match->inode != ino || match->device != dev ||
47424+ (match->mode & GR_DELETED))) {
47425+ match = match->next;
47426+ }
47427+
47428+ if (match && !(match->mode & GR_DELETED))
47429+ return match;
47430+ else
47431+ return NULL;
47432+}
47433+
47434+static struct acl_object_label *
47435+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47436+ const struct acl_subject_label *subj)
47437+{
47438+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47439+ struct acl_object_label *match;
47440+
47441+ match = subj->obj_hash[index];
47442+
47443+ while (match && (match->inode != ino || match->device != dev ||
47444+ !(match->mode & GR_DELETED))) {
47445+ match = match->next;
47446+ }
47447+
47448+ if (match && (match->mode & GR_DELETED))
47449+ return match;
47450+
47451+ match = subj->obj_hash[index];
47452+
47453+ while (match && (match->inode != ino || match->device != dev ||
47454+ (match->mode & GR_DELETED))) {
47455+ match = match->next;
47456+ }
47457+
47458+ if (match && !(match->mode & GR_DELETED))
47459+ return match;
47460+ else
47461+ return NULL;
47462+}
47463+
47464+static struct name_entry *
47465+lookup_name_entry(const char *name)
47466+{
47467+ unsigned int len = strlen(name);
47468+ unsigned int key = full_name_hash(name, len);
47469+ unsigned int index = key % name_set.n_size;
47470+ struct name_entry *match;
47471+
47472+ match = name_set.n_hash[index];
47473+
47474+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47475+ match = match->next;
47476+
47477+ return match;
47478+}
47479+
47480+static struct name_entry *
47481+lookup_name_entry_create(const char *name)
47482+{
47483+ unsigned int len = strlen(name);
47484+ unsigned int key = full_name_hash(name, len);
47485+ unsigned int index = key % name_set.n_size;
47486+ struct name_entry *match;
47487+
47488+ match = name_set.n_hash[index];
47489+
47490+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47491+ !match->deleted))
47492+ match = match->next;
47493+
47494+ if (match && match->deleted)
47495+ return match;
47496+
47497+ match = name_set.n_hash[index];
47498+
47499+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47500+ match->deleted))
47501+ match = match->next;
47502+
47503+ if (match && !match->deleted)
47504+ return match;
47505+ else
47506+ return NULL;
47507+}
47508+
47509+static struct inodev_entry *
47510+lookup_inodev_entry(const ino_t ino, const dev_t dev)
47511+{
47512+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
47513+ struct inodev_entry *match;
47514+
47515+ match = inodev_set.i_hash[index];
47516+
47517+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47518+ match = match->next;
47519+
47520+ return match;
47521+}
47522+
47523+static void
47524+insert_inodev_entry(struct inodev_entry *entry)
47525+{
47526+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47527+ inodev_set.i_size);
47528+ struct inodev_entry **curr;
47529+
47530+ entry->prev = NULL;
47531+
47532+ curr = &inodev_set.i_hash[index];
47533+ if (*curr != NULL)
47534+ (*curr)->prev = entry;
47535+
47536+ entry->next = *curr;
47537+ *curr = entry;
47538+
47539+ return;
47540+}
47541+
47542+static void
47543+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47544+{
47545+ unsigned int index =
47546+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47547+ struct acl_role_label **curr;
47548+ struct acl_role_label *tmp;
47549+
47550+ curr = &acl_role_set.r_hash[index];
47551+
47552+ /* if role was already inserted due to domains and already has
47553+ a role in the same bucket as it attached, then we need to
47554+ combine these two buckets
47555+ */
47556+ if (role->next) {
47557+ tmp = role->next;
47558+ while (tmp->next)
47559+ tmp = tmp->next;
47560+ tmp->next = *curr;
47561+ } else
47562+ role->next = *curr;
47563+ *curr = role;
47564+
47565+ return;
47566+}
47567+
47568+static void
47569+insert_acl_role_label(struct acl_role_label *role)
47570+{
47571+ int i;
47572+
47573+ if (role_list == NULL) {
47574+ role_list = role;
47575+ role->prev = NULL;
47576+ } else {
47577+ role->prev = role_list;
47578+ role_list = role;
47579+ }
47580+
47581+ /* used for hash chains */
47582+ role->next = NULL;
47583+
47584+ if (role->roletype & GR_ROLE_DOMAIN) {
47585+ for (i = 0; i < role->domain_child_num; i++)
47586+ __insert_acl_role_label(role, role->domain_children[i]);
47587+ } else
47588+ __insert_acl_role_label(role, role->uidgid);
47589+}
47590+
47591+static int
47592+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47593+{
47594+ struct name_entry **curr, *nentry;
47595+ struct inodev_entry *ientry;
47596+ unsigned int len = strlen(name);
47597+ unsigned int key = full_name_hash(name, len);
47598+ unsigned int index = key % name_set.n_size;
47599+
47600+ curr = &name_set.n_hash[index];
47601+
47602+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47603+ curr = &((*curr)->next);
47604+
47605+ if (*curr != NULL)
47606+ return 1;
47607+
47608+ nentry = acl_alloc(sizeof (struct name_entry));
47609+ if (nentry == NULL)
47610+ return 0;
47611+ ientry = acl_alloc(sizeof (struct inodev_entry));
47612+ if (ientry == NULL)
47613+ return 0;
47614+ ientry->nentry = nentry;
47615+
47616+ nentry->key = key;
47617+ nentry->name = name;
47618+ nentry->inode = inode;
47619+ nentry->device = device;
47620+ nentry->len = len;
47621+ nentry->deleted = deleted;
47622+
47623+ nentry->prev = NULL;
47624+ curr = &name_set.n_hash[index];
47625+ if (*curr != NULL)
47626+ (*curr)->prev = nentry;
47627+ nentry->next = *curr;
47628+ *curr = nentry;
47629+
47630+ /* insert us into the table searchable by inode/dev */
47631+ insert_inodev_entry(ientry);
47632+
47633+ return 1;
47634+}
47635+
47636+static void
47637+insert_acl_obj_label(struct acl_object_label *obj,
47638+ struct acl_subject_label *subj)
47639+{
47640+ unsigned int index =
47641+ fhash(obj->inode, obj->device, subj->obj_hash_size);
47642+ struct acl_object_label **curr;
47643+
47644+
47645+ obj->prev = NULL;
47646+
47647+ curr = &subj->obj_hash[index];
47648+ if (*curr != NULL)
47649+ (*curr)->prev = obj;
47650+
47651+ obj->next = *curr;
47652+ *curr = obj;
47653+
47654+ return;
47655+}
47656+
47657+static void
47658+insert_acl_subj_label(struct acl_subject_label *obj,
47659+ struct acl_role_label *role)
47660+{
47661+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47662+ struct acl_subject_label **curr;
47663+
47664+ obj->prev = NULL;
47665+
47666+ curr = &role->subj_hash[index];
47667+ if (*curr != NULL)
47668+ (*curr)->prev = obj;
47669+
47670+ obj->next = *curr;
47671+ *curr = obj;
47672+
47673+ return;
47674+}
47675+
47676+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
47677+
47678+static void *
47679+create_table(__u32 * len, int elementsize)
47680+{
47681+ unsigned int table_sizes[] = {
47682+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
47683+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
47684+ 4194301, 8388593, 16777213, 33554393, 67108859
47685+ };
47686+ void *newtable = NULL;
47687+ unsigned int pwr = 0;
47688+
47689+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
47690+ table_sizes[pwr] <= *len)
47691+ pwr++;
47692+
47693+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
47694+ return newtable;
47695+
47696+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
47697+ newtable =
47698+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
47699+ else
47700+ newtable = vmalloc(table_sizes[pwr] * elementsize);
47701+
47702+ *len = table_sizes[pwr];
47703+
47704+ return newtable;
47705+}
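As the comment above create_table() notes, a chained hash table performs best when the load factor lambda = entries / buckets stays near 1, which is why the table size is chosen as the first prime above the requested element count. A minimal standalone sketch of that size-selection scan (pick_size() and the shortened prime ladder below are illustrative, not part of the patch):

#include <stdio.h>

static const unsigned int table_sizes[] = {
	7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381
};

static unsigned int pick_size(unsigned int want)
{
	unsigned int pwr = 0;

	/* walk up the prime ladder until the size exceeds the request,
	 * the same scan create_table() performs */
	while (pwr < (sizeof(table_sizes) / sizeof(table_sizes[0])) - 1 &&
	       table_sizes[pwr] <= want)
		pwr++;
	return table_sizes[pwr];
}

int main(void)
{
	unsigned int n = 900;
	unsigned int size = pick_size(n);

	printf("%u entries -> table of %u buckets, lambda = %.2f\n",
	       n, size, (double)n / size);
	return 0;
}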
47706+
47707+static int
47708+init_variables(const struct gr_arg *arg)
47709+{
47710+ struct task_struct *reaper = &init_task;
47711+ unsigned int stacksize;
47712+
47713+ subj_map_set.s_size = arg->role_db.num_subjects;
47714+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
47715+ name_set.n_size = arg->role_db.num_objects;
47716+ inodev_set.i_size = arg->role_db.num_objects;
47717+
47718+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
47719+ !name_set.n_size || !inodev_set.i_size)
47720+ return 1;
47721+
47722+ if (!gr_init_uidset())
47723+ return 1;
47724+
47725+ /* set up the stack that holds allocation info */
47726+
47727+ stacksize = arg->role_db.num_pointers + 5;
47728+
47729+ if (!acl_alloc_stack_init(stacksize))
47730+ return 1;
47731+
47732+ /* grab reference for the real root dentry and vfsmount */
47733+ read_lock(&reaper->fs->lock);
47734+ real_root = dget(reaper->fs->root.dentry);
47735+ real_root_mnt = mntget(reaper->fs->root.mnt);
47736+ read_unlock(&reaper->fs->lock);
47737+
47738+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47739+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
47740+#endif
47741+
47742+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
47743+ if (fakefs_obj_rw == NULL)
47744+ return 1;
47745+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
47746+
47747+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
47748+ if (fakefs_obj_rwx == NULL)
47749+ return 1;
47750+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
47751+
47752+ subj_map_set.s_hash =
47753+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
47754+ acl_role_set.r_hash =
47755+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
47756+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
47757+ inodev_set.i_hash =
47758+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
47759+
47760+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
47761+ !name_set.n_hash || !inodev_set.i_hash)
47762+ return 1;
47763+
47764+ memset(subj_map_set.s_hash, 0,
47765+ sizeof(struct subject_map *) * subj_map_set.s_size);
47766+ memset(acl_role_set.r_hash, 0,
47767+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
47768+ memset(name_set.n_hash, 0,
47769+ sizeof (struct name_entry *) * name_set.n_size);
47770+ memset(inodev_set.i_hash, 0,
47771+ sizeof (struct inodev_entry *) * inodev_set.i_size);
47772+
47773+ return 0;
47774+}
47775+
47776+/* free information not needed after startup
47777+ currently contains user->kernel pointer mappings for subjects
47778+*/
47779+
47780+static void
47781+free_init_variables(void)
47782+{
47783+ __u32 i;
47784+
47785+ if (subj_map_set.s_hash) {
47786+ for (i = 0; i < subj_map_set.s_size; i++) {
47787+ if (subj_map_set.s_hash[i]) {
47788+ kfree(subj_map_set.s_hash[i]);
47789+ subj_map_set.s_hash[i] = NULL;
47790+ }
47791+ }
47792+
47793+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
47794+ PAGE_SIZE)
47795+ kfree(subj_map_set.s_hash);
47796+ else
47797+ vfree(subj_map_set.s_hash);
47798+ }
47799+
47800+ return;
47801+}
47802+
47803+static void
47804+free_variables(void)
47805+{
47806+ struct acl_subject_label *s;
47807+ struct acl_role_label *r;
47808+ struct task_struct *task, *task2;
47809+ unsigned int x;
47810+
47811+ gr_clear_learn_entries();
47812+
47813+ read_lock(&tasklist_lock);
47814+ do_each_thread(task2, task) {
47815+ task->acl_sp_role = 0;
47816+ task->acl_role_id = 0;
47817+ task->acl = NULL;
47818+ task->role = NULL;
47819+ } while_each_thread(task2, task);
47820+ read_unlock(&tasklist_lock);
47821+
47822+ /* release the reference to the real root dentry and vfsmount */
47823+ if (real_root)
47824+ dput(real_root);
47825+ real_root = NULL;
47826+ if (real_root_mnt)
47827+ mntput(real_root_mnt);
47828+ real_root_mnt = NULL;
47829+
47830+ /* free all object hash tables */
47831+
47832+ FOR_EACH_ROLE_START(r)
47833+ if (r->subj_hash == NULL)
47834+ goto next_role;
47835+ FOR_EACH_SUBJECT_START(r, s, x)
47836+ if (s->obj_hash == NULL)
47837+ break;
47838+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47839+ kfree(s->obj_hash);
47840+ else
47841+ vfree(s->obj_hash);
47842+ FOR_EACH_SUBJECT_END(s, x)
47843+ FOR_EACH_NESTED_SUBJECT_START(r, s)
47844+ if (s->obj_hash == NULL)
47845+ break;
47846+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47847+ kfree(s->obj_hash);
47848+ else
47849+ vfree(s->obj_hash);
47850+ FOR_EACH_NESTED_SUBJECT_END(s)
47851+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
47852+ kfree(r->subj_hash);
47853+ else
47854+ vfree(r->subj_hash);
47855+ r->subj_hash = NULL;
47856+next_role:
47857+ FOR_EACH_ROLE_END(r)
47858+
47859+ acl_free_all();
47860+
47861+ if (acl_role_set.r_hash) {
47862+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
47863+ PAGE_SIZE)
47864+ kfree(acl_role_set.r_hash);
47865+ else
47866+ vfree(acl_role_set.r_hash);
47867+ }
47868+ if (name_set.n_hash) {
47869+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
47870+ PAGE_SIZE)
47871+ kfree(name_set.n_hash);
47872+ else
47873+ vfree(name_set.n_hash);
47874+ }
47875+
47876+ if (inodev_set.i_hash) {
47877+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
47878+ PAGE_SIZE)
47879+ kfree(inodev_set.i_hash);
47880+ else
47881+ vfree(inodev_set.i_hash);
47882+ }
47883+
47884+ gr_free_uidset();
47885+
47886+ memset(&name_set, 0, sizeof (struct name_db));
47887+ memset(&inodev_set, 0, sizeof (struct inodev_db));
47888+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
47889+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
47890+
47891+ default_role = NULL;
47892+ role_list = NULL;
47893+
47894+ return;
47895+}
47896+
47897+static __u32
47898+count_user_objs(struct acl_object_label *userp)
47899+{
47900+ struct acl_object_label o_tmp;
47901+ __u32 num = 0;
47902+
47903+ while (userp) {
47904+ if (copy_from_user(&o_tmp, userp,
47905+ sizeof (struct acl_object_label)))
47906+ break;
47907+
47908+ userp = o_tmp.prev;
47909+ num++;
47910+ }
47911+
47912+ return num;
47913+}
47914+
47915+static struct acl_subject_label *
47916+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
47917+
47918+static int
47919+copy_user_glob(struct acl_object_label *obj)
47920+{
47921+ struct acl_object_label *g_tmp, **guser;
47922+ unsigned int len;
47923+ char *tmp;
47924+
47925+ if (obj->globbed == NULL)
47926+ return 0;
47927+
47928+ guser = &obj->globbed;
47929+ while (*guser) {
47930+ g_tmp = (struct acl_object_label *)
47931+ acl_alloc(sizeof (struct acl_object_label));
47932+ if (g_tmp == NULL)
47933+ return -ENOMEM;
47934+
47935+ if (copy_from_user(g_tmp, *guser,
47936+ sizeof (struct acl_object_label)))
47937+ return -EFAULT;
47938+
47939+ len = strnlen_user(g_tmp->filename, PATH_MAX);
47940+
47941+ if (!len || len >= PATH_MAX)
47942+ return -EINVAL;
47943+
47944+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47945+ return -ENOMEM;
47946+
47947+ if (copy_from_user(tmp, g_tmp->filename, len))
47948+ return -EFAULT;
47949+ tmp[len-1] = '\0';
47950+ g_tmp->filename = tmp;
47951+
47952+ *guser = g_tmp;
47953+ guser = &(g_tmp->next);
47954+ }
47955+
47956+ return 0;
47957+}
47958+
47959+static int
47960+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
47961+ struct acl_role_label *role)
47962+{
47963+ struct acl_object_label *o_tmp;
47964+ unsigned int len;
47965+ int ret;
47966+ char *tmp;
47967+
47968+ while (userp) {
47969+ if ((o_tmp = (struct acl_object_label *)
47970+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
47971+ return -ENOMEM;
47972+
47973+ if (copy_from_user(o_tmp, userp,
47974+ sizeof (struct acl_object_label)))
47975+ return -EFAULT;
47976+
47977+ userp = o_tmp->prev;
47978+
47979+ len = strnlen_user(o_tmp->filename, PATH_MAX);
47980+
47981+ if (!len || len >= PATH_MAX)
47982+ return -EINVAL;
47983+
47984+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47985+ return -ENOMEM;
47986+
47987+ if (copy_from_user(tmp, o_tmp->filename, len))
47988+ return -EFAULT;
47989+ tmp[len-1] = '\0';
47990+ o_tmp->filename = tmp;
47991+
47992+ insert_acl_obj_label(o_tmp, subj);
47993+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
47994+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
47995+ return -ENOMEM;
47996+
47997+ ret = copy_user_glob(o_tmp);
47998+ if (ret)
47999+ return ret;
48000+
48001+ if (o_tmp->nested) {
48002+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
48003+ if (IS_ERR(o_tmp->nested))
48004+ return PTR_ERR(o_tmp->nested);
48005+
48006+ /* insert into nested subject list */
48007+ o_tmp->nested->next = role->hash->first;
48008+ role->hash->first = o_tmp->nested;
48009+ }
48010+ }
48011+
48012+ return 0;
48013+}
48014+
48015+static __u32
48016+count_user_subjs(struct acl_subject_label *userp)
48017+{
48018+ struct acl_subject_label s_tmp;
48019+ __u32 num = 0;
48020+
48021+ while (userp) {
48022+ if (copy_from_user(&s_tmp, userp,
48023+ sizeof (struct acl_subject_label)))
48024+ break;
48025+
48026+ userp = s_tmp.prev;
48027+ /* do not count nested subjects against this count, since
48028+ they are not included in the hash table, but are
48029+ attached to objects. We have already counted
48030+ the subjects in userspace for the allocation
48031+ stack
48032+ */
48033+ if (!(s_tmp.mode & GR_NESTED))
48034+ num++;
48035+ }
48036+
48037+ return num;
48038+}
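count_user_subjs() walks the user-supplied chain through its prev links and skips nested subjects, since those are attached to objects rather than placed in the role's hash table. A minimal userspace sketch of that counting pattern (the entry structure and the F_NESTED flag here are hypothetical stand-ins):

#include <stdio.h>

#define F_NESTED 0x1

struct entry { unsigned int mode; const struct entry *prev; };

static unsigned int count_top_level(const struct entry *e)
{
	unsigned int num = 0;

	/* walk the chain via prev, counting only non-nested entries */
	for (; e; e = e->prev)
		if (!(e->mode & F_NESTED))
			num++;
	return num;
}

int main(void)
{
	struct entry a = { 0, NULL }, b = { F_NESTED, &a }, c = { 0, &b };

	printf("%u\n", count_top_level(&c));	/* prints 2 */
	return 0;
}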
48039+
48040+static int
48041+copy_user_allowedips(struct acl_role_label *rolep)
48042+{
48043+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
48044+
48045+ ruserip = rolep->allowed_ips;
48046+
48047+ while (ruserip) {
48048+ rlast = rtmp;
48049+
48050+ if ((rtmp = (struct role_allowed_ip *)
48051+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
48052+ return -ENOMEM;
48053+
48054+ if (copy_from_user(rtmp, ruserip,
48055+ sizeof (struct role_allowed_ip)))
48056+ return -EFAULT;
48057+
48058+ ruserip = rtmp->prev;
48059+
48060+ if (!rlast) {
48061+ rtmp->prev = NULL;
48062+ rolep->allowed_ips = rtmp;
48063+ } else {
48064+ rlast->next = rtmp;
48065+ rtmp->prev = rlast;
48066+ }
48067+
48068+ if (!ruserip)
48069+ rtmp->next = NULL;
48070+ }
48071+
48072+ return 0;
48073+}
48074+
48075+static int
48076+copy_user_transitions(struct acl_role_label *rolep)
48077+{
48078+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
48079+
48080+ unsigned int len;
48081+ char *tmp;
48082+
48083+ rusertp = rolep->transitions;
48084+
48085+ while (rusertp) {
48086+ rlast = rtmp;
48087+
48088+ if ((rtmp = (struct role_transition *)
48089+ acl_alloc(sizeof (struct role_transition))) == NULL)
48090+ return -ENOMEM;
48091+
48092+ if (copy_from_user(rtmp, rusertp,
48093+ sizeof (struct role_transition)))
48094+ return -EFAULT;
48095+
48096+ rusertp = rtmp->prev;
48097+
48098+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48099+
48100+ if (!len || len >= GR_SPROLE_LEN)
48101+ return -EINVAL;
48102+
48103+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48104+ return -ENOMEM;
48105+
48106+ if (copy_from_user(tmp, rtmp->rolename, len))
48107+ return -EFAULT;
48108+ tmp[len-1] = '\0';
48109+ rtmp->rolename = tmp;
48110+
48111+ if (!rlast) {
48112+ rtmp->prev = NULL;
48113+ rolep->transitions = rtmp;
48114+ } else {
48115+ rlast->next = rtmp;
48116+ rtmp->prev = rlast;
48117+ }
48118+
48119+ if (!rusertp)
48120+ rtmp->next = NULL;
48121+ }
48122+
48123+ return 0;
48124+}
48125+
48126+static struct acl_subject_label *
48127+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48128+{
48129+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48130+ unsigned int len;
48131+ char *tmp;
48132+ __u32 num_objs;
48133+ struct acl_ip_label **i_tmp, *i_utmp2;
48134+ struct gr_hash_struct ghash;
48135+ struct subject_map *subjmap;
48136+ unsigned int i_num;
48137+ int err;
48138+
48139+ s_tmp = lookup_subject_map(userp);
48140+
48141+ /* we've already copied this subject into the kernel, just return
48142+ the reference to it, and don't copy it over again
48143+ */
48144+ if (s_tmp)
48145+ return(s_tmp);
48146+
48147+ if ((s_tmp = (struct acl_subject_label *)
48148+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48149+ return ERR_PTR(-ENOMEM);
48150+
48151+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48152+ if (subjmap == NULL)
48153+ return ERR_PTR(-ENOMEM);
48154+
48155+ subjmap->user = userp;
48156+ subjmap->kernel = s_tmp;
48157+ insert_subj_map_entry(subjmap);
48158+
48159+ if (copy_from_user(s_tmp, userp,
48160+ sizeof (struct acl_subject_label)))
48161+ return ERR_PTR(-EFAULT);
48162+
48163+ len = strnlen_user(s_tmp->filename, PATH_MAX);
48164+
48165+ if (!len || len >= PATH_MAX)
48166+ return ERR_PTR(-EINVAL);
48167+
48168+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48169+ return ERR_PTR(-ENOMEM);
48170+
48171+ if (copy_from_user(tmp, s_tmp->filename, len))
48172+ return ERR_PTR(-EFAULT);
48173+ tmp[len-1] = '\0';
48174+ s_tmp->filename = tmp;
48175+
48176+ if (!strcmp(s_tmp->filename, "/"))
48177+ role->root_label = s_tmp;
48178+
48179+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48180+ return ERR_PTR(-EFAULT);
48181+
48182+ /* copy user and group transition tables */
48183+
48184+ if (s_tmp->user_trans_num) {
48185+ uid_t *uidlist;
48186+
48187+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48188+ if (uidlist == NULL)
48189+ return ERR_PTR(-ENOMEM);
48190+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48191+ return ERR_PTR(-EFAULT);
48192+
48193+ s_tmp->user_transitions = uidlist;
48194+ }
48195+
48196+ if (s_tmp->group_trans_num) {
48197+ gid_t *gidlist;
48198+
48199+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48200+ if (gidlist == NULL)
48201+ return ERR_PTR(-ENOMEM);
48202+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48203+ return ERR_PTR(-EFAULT);
48204+
48205+ s_tmp->group_transitions = gidlist;
48206+ }
48207+
48208+ /* set up object hash table */
48209+ num_objs = count_user_objs(ghash.first);
48210+
48211+ s_tmp->obj_hash_size = num_objs;
48212+ s_tmp->obj_hash =
48213+ (struct acl_object_label **)
48214+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48215+
48216+ if (!s_tmp->obj_hash)
48217+ return ERR_PTR(-ENOMEM);
48218+
48219+ memset(s_tmp->obj_hash, 0,
48220+ s_tmp->obj_hash_size *
48221+ sizeof (struct acl_object_label *));
48222+
48223+ /* add in objects */
48224+ err = copy_user_objs(ghash.first, s_tmp, role);
48225+
48226+ if (err)
48227+ return ERR_PTR(err);
48228+
48229+ /* set pointer for parent subject */
48230+ if (s_tmp->parent_subject) {
48231+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48232+
48233+ if (IS_ERR(s_tmp2))
48234+ return s_tmp2;
48235+
48236+ s_tmp->parent_subject = s_tmp2;
48237+ }
48238+
48239+ /* add in ip acls */
48240+
48241+ if (!s_tmp->ip_num) {
48242+ s_tmp->ips = NULL;
48243+ goto insert;
48244+ }
48245+
48246+ i_tmp =
48247+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48248+ sizeof (struct acl_ip_label *));
48249+
48250+ if (!i_tmp)
48251+ return ERR_PTR(-ENOMEM);
48252+
48253+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48254+ *(i_tmp + i_num) =
48255+ (struct acl_ip_label *)
48256+ acl_alloc(sizeof (struct acl_ip_label));
48257+ if (!*(i_tmp + i_num))
48258+ return ERR_PTR(-ENOMEM);
48259+
48260+ if (copy_from_user
48261+ (&i_utmp2, s_tmp->ips + i_num,
48262+ sizeof (struct acl_ip_label *)))
48263+ return ERR_PTR(-EFAULT);
48264+
48265+ if (copy_from_user
48266+ (*(i_tmp + i_num), i_utmp2,
48267+ sizeof (struct acl_ip_label)))
48268+ return ERR_PTR(-EFAULT);
48269+
48270+ if ((*(i_tmp + i_num))->iface == NULL)
48271+ continue;
48272+
48273+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48274+ if (!len || len >= IFNAMSIZ)
48275+ return ERR_PTR(-EINVAL);
48276+ tmp = acl_alloc(len);
48277+ if (tmp == NULL)
48278+ return ERR_PTR(-ENOMEM);
48279+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48280+ return ERR_PTR(-EFAULT);
48281+ (*(i_tmp + i_num))->iface = tmp;
48282+ }
48283+
48284+ s_tmp->ips = i_tmp;
48285+
48286+insert:
48287+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48288+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48289+ return ERR_PTR(-ENOMEM);
48290+
48291+ return s_tmp;
48292+}
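do_copy_user_subj() consults lookup_subject_map() first and records the user-to-kernel mapping before recursing into the parent chain, so a subject reachable from several places is copied exactly once. A minimal userspace sketch of the same memoized-copy pattern, assuming hypothetical names (copy_subj, lookup_copy, struct map_ent):

#include <stdio.h>
#include <stdlib.h>

struct subj { const char *name; struct subj *parent; };

struct map_ent { const struct subj *src; struct subj *copy; struct map_ent *next; };
static struct map_ent *map;

static struct subj *lookup_copy(const struct subj *src)
{
	for (struct map_ent *m = map; m; m = m->next)
		if (m->src == src)
			return m->copy;
	return NULL;
}

static struct subj *copy_subj(const struct subj *src)
{
	struct subj *c = lookup_copy(src);
	struct map_ent *m;

	if (c)				/* already copied: reuse the same copy */
		return c;

	c = malloc(sizeof(*c));
	m = malloc(sizeof(*m));
	if (!c || !m)
		return NULL;

	/* record the mapping before recursing, the way the patch calls
	 * insert_subj_map_entry() before copying the parent chain */
	m->src = src; m->copy = c; m->next = map; map = m;

	c->name = src->name;
	c->parent = src->parent ? copy_subj(src->parent) : NULL;
	return c;
}

int main(void)
{
	struct subj root = { "/", NULL };
	struct subj a = { "/bin", &root }, b = { "/sbin", &root };

	struct subj *ca = copy_subj(&a), *cb = copy_subj(&b);

	/* both copies share one copy of the root subject */
	printf("%d\n", ca->parent == cb->parent);
	return 0;
}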
48293+
48294+static int
48295+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48296+{
48297+ struct acl_subject_label s_pre;
48298+ struct acl_subject_label * ret;
48299+ int err;
48300+
48301+ while (userp) {
48302+ if (copy_from_user(&s_pre, userp,
48303+ sizeof (struct acl_subject_label)))
48304+ return -EFAULT;
48305+
48306+ /* do not add nested subjects here, add
48307+ while parsing objects
48308+ */
48309+
48310+ if (s_pre.mode & GR_NESTED) {
48311+ userp = s_pre.prev;
48312+ continue;
48313+ }
48314+
48315+ ret = do_copy_user_subj(userp, role);
48316+
48317+ err = PTR_ERR(ret);
48318+ if (IS_ERR(ret))
48319+ return err;
48320+
48321+ insert_acl_subj_label(ret, role);
48322+
48323+ userp = s_pre.prev;
48324+ }
48325+
48326+ return 0;
48327+}
48328+
48329+static int
48330+copy_user_acl(struct gr_arg *arg)
48331+{
48332+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48333+ struct sprole_pw *sptmp;
48334+ struct gr_hash_struct *ghash;
48335+ uid_t *domainlist;
48336+ unsigned int r_num;
48337+ unsigned int len;
48338+ char *tmp;
48339+ int err = 0;
48340+ __u16 i;
48341+ __u32 num_subjs;
48342+
48343+ /* we need a default and kernel role */
48344+ if (arg->role_db.num_roles < 2)
48345+ return -EINVAL;
48346+
48347+ /* copy special role authentication info from userspace */
48348+
48349+ num_sprole_pws = arg->num_sprole_pws;
48350+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48351+
48352+ if (!acl_special_roles) {
48353+ err = -ENOMEM;
48354+ goto cleanup;
48355+ }
48356+
48357+ for (i = 0; i < num_sprole_pws; i++) {
48358+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48359+ if (!sptmp) {
48360+ err = -ENOMEM;
48361+ goto cleanup;
48362+ }
48363+ if (copy_from_user(sptmp, arg->sprole_pws + i,
48364+ sizeof (struct sprole_pw))) {
48365+ err = -EFAULT;
48366+ goto cleanup;
48367+ }
48368+
48369+ len =
48370+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48371+
48372+ if (!len || len >= GR_SPROLE_LEN) {
48373+ err = -EINVAL;
48374+ goto cleanup;
48375+ }
48376+
48377+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48378+ err = -ENOMEM;
48379+ goto cleanup;
48380+ }
48381+
48382+ if (copy_from_user(tmp, sptmp->rolename, len)) {
48383+ err = -EFAULT;
48384+ goto cleanup;
48385+ }
48386+ tmp[len-1] = '\0';
48387+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48388+ printk(KERN_ALERT "Copying special role %s\n", tmp);
48389+#endif
48390+ sptmp->rolename = tmp;
48391+ acl_special_roles[i] = sptmp;
48392+ }
48393+
48394+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48395+
48396+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48397+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
48398+
48399+ if (!r_tmp) {
48400+ err = -ENOMEM;
48401+ goto cleanup;
48402+ }
48403+
48404+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
48405+ sizeof (struct acl_role_label *))) {
48406+ err = -EFAULT;
48407+ goto cleanup;
48408+ }
48409+
48410+ if (copy_from_user(r_tmp, r_utmp2,
48411+ sizeof (struct acl_role_label))) {
48412+ err = -EFAULT;
48413+ goto cleanup;
48414+ }
48415+
48416+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48417+
48418+		if (!len || len >= GR_SPROLE_LEN) {
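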
48419+ err = -EINVAL;
48420+ goto cleanup;
48421+ }
48422+
48423+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48424+ err = -ENOMEM;
48425+ goto cleanup;
48426+ }
48427+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
48428+ err = -EFAULT;
48429+ goto cleanup;
48430+ }
48431+ tmp[len-1] = '\0';
48432+ r_tmp->rolename = tmp;
48433+
48434+ if (!strcmp(r_tmp->rolename, "default")
48435+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48436+ default_role = r_tmp;
48437+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48438+ kernel_role = r_tmp;
48439+ }
48440+
48441+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48442+ err = -ENOMEM;
48443+ goto cleanup;
48444+ }
48445+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48446+ err = -EFAULT;
48447+ goto cleanup;
48448+ }
48449+
48450+ r_tmp->hash = ghash;
48451+
48452+ num_subjs = count_user_subjs(r_tmp->hash->first);
48453+
48454+ r_tmp->subj_hash_size = num_subjs;
48455+ r_tmp->subj_hash =
48456+ (struct acl_subject_label **)
48457+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48458+
48459+ if (!r_tmp->subj_hash) {
48460+ err = -ENOMEM;
48461+ goto cleanup;
48462+ }
48463+
48464+ err = copy_user_allowedips(r_tmp);
48465+ if (err)
48466+ goto cleanup;
48467+
48468+ /* copy domain info */
48469+ if (r_tmp->domain_children != NULL) {
48470+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48471+ if (domainlist == NULL) {
48472+ err = -ENOMEM;
48473+ goto cleanup;
48474+ }
48475+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48476+ err = -EFAULT;
48477+ goto cleanup;
48478+ }
48479+ r_tmp->domain_children = domainlist;
48480+ }
48481+
48482+ err = copy_user_transitions(r_tmp);
48483+ if (err)
48484+ goto cleanup;
48485+
48486+ memset(r_tmp->subj_hash, 0,
48487+ r_tmp->subj_hash_size *
48488+ sizeof (struct acl_subject_label *));
48489+
48490+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48491+
48492+ if (err)
48493+ goto cleanup;
48494+
48495+ /* set nested subject list to null */
48496+ r_tmp->hash->first = NULL;
48497+
48498+ insert_acl_role_label(r_tmp);
48499+ }
48500+
48501+ goto return_err;
48502+ cleanup:
48503+ free_variables();
48504+ return_err:
48505+ return err;
48506+
48507+}
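copy_user_acl() funnels every failure after partial setup through a single cleanup label. A generic, standalone sketch of that goto-based error-handling idiom (build() and its two allocations are hypothetical, and here the success path also passes through the cleanup label, which the patch's success path does not):

#include <stdio.h>
#include <stdlib.h>

static int build(void)
{
	char *a = NULL, *b = NULL;
	int err = 0;

	a = malloc(64);
	if (a == NULL) {
		err = -1;
		goto cleanup;
	}
	b = malloc(64);
	if (b == NULL) {
		err = -1;
		goto cleanup;
	}
	/* ... use a and b ... */
	snprintf(a, 64, "ok");
	printf("%s\n", a);
cleanup:
	free(b);	/* free(NULL) is a no-op, so one exit path suffices */
	free(a);
	return err;
}

int main(void)
{
	return build() ? 1 : 0;
}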
48508+
48509+static int
48510+gracl_init(struct gr_arg *args)
48511+{
48512+ int error = 0;
48513+
48514+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48515+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48516+
48517+ if (init_variables(args)) {
48518+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48519+ error = -ENOMEM;
48520+ free_variables();
48521+ goto out;
48522+ }
48523+
48524+ error = copy_user_acl(args);
48525+ free_init_variables();
48526+ if (error) {
48527+ free_variables();
48528+ goto out;
48529+ }
48530+
48531+ if ((error = gr_set_acls(0))) {
48532+ free_variables();
48533+ goto out;
48534+ }
48535+
48536+ pax_open_kernel();
48537+ gr_status |= GR_READY;
48538+ pax_close_kernel();
48539+
48540+ out:
48541+ return error;
48542+}
48543+
48544+/* derived from glibc fnmatch(); 0: match, 1: no match */
48545+
48546+static int
48547+glob_match(const char *p, const char *n)
48548+{
48549+ char c;
48550+
48551+ while ((c = *p++) != '\0') {
48552+ switch (c) {
48553+ case '?':
48554+ if (*n == '\0')
48555+ return 1;
48556+ else if (*n == '/')
48557+ return 1;
48558+ break;
48559+ case '\\':
48560+ if (*n != c)
48561+ return 1;
48562+ break;
48563+ case '*':
48564+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
48565+ if (*n == '/')
48566+ return 1;
48567+ else if (c == '?') {
48568+ if (*n == '\0')
48569+ return 1;
48570+ else
48571+ ++n;
48572+ }
48573+ }
48574+ if (c == '\0') {
48575+ return 0;
48576+ } else {
48577+ const char *endp;
48578+
48579+ if ((endp = strchr(n, '/')) == NULL)
48580+ endp = n + strlen(n);
48581+
48582+ if (c == '[') {
48583+ for (--p; n < endp; ++n)
48584+ if (!glob_match(p, n))
48585+ return 0;
48586+ } else if (c == '/') {
48587+ while (*n != '\0' && *n != '/')
48588+ ++n;
48589+ if (*n == '/' && !glob_match(p, n + 1))
48590+ return 0;
48591+ } else {
48592+ for (--p; n < endp; ++n)
48593+ if (*n == c && !glob_match(p, n))
48594+ return 0;
48595+ }
48596+
48597+ return 1;
48598+ }
48599+ case '[':
48600+ {
48601+ int not;
48602+ char cold;
48603+
48604+ if (*n == '\0' || *n == '/')
48605+ return 1;
48606+
48607+ not = (*p == '!' || *p == '^');
48608+ if (not)
48609+ ++p;
48610+
48611+ c = *p++;
48612+ for (;;) {
48613+ unsigned char fn = (unsigned char)*n;
48614+
48615+ if (c == '\0')
48616+ return 1;
48617+ else {
48618+ if (c == fn)
48619+ goto matched;
48620+ cold = c;
48621+ c = *p++;
48622+
48623+ if (c == '-' && *p != ']') {
48624+ unsigned char cend = *p++;
48625+
48626+ if (cend == '\0')
48627+ return 1;
48628+
48629+ if (cold <= fn && fn <= cend)
48630+ goto matched;
48631+
48632+ c = *p++;
48633+ }
48634+ }
48635+
48636+ if (c == ']')
48637+ break;
48638+ }
48639+ if (!not)
48640+ return 1;
48641+ break;
48642+ matched:
48643+ while (c != ']') {
48644+ if (c == '\0')
48645+ return 1;
48646+
48647+ c = *p++;
48648+ }
48649+ if (not)
48650+ return 1;
48651+ }
48652+ break;
48653+ default:
48654+ if (c != *n)
48655+ return 1;
48656+ }
48657+
48658+ ++n;
48659+ }
48660+
48661+ if (*n == '\0')
48662+ return 0;
48663+
48664+ if (*n == '/')
48665+ return 0;
48666+
48667+ return 1;
48668+}
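glob_match() keeps glibc fnmatch()'s convention of returning 0 on a match and refuses to let wildcards cross a '/' path component. A userspace sketch of how such a 0-on-match matcher gets consumed (as chk_glob_label() does over a pattern list), here using the real fnmatch(3) with FNM_PATHNAME as a stand-in for the kernel copy; the pattern list is made up:

#include <stdio.h>
#include <fnmatch.h>

int main(void)
{
	const char *patterns[] = { "/home/*/.ssh", "/tmp/*" };
	const char *path = "/home/alice/.ssh";

	for (unsigned int i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
		/* 0 means "matched", just like glob_match()/chk_glob_label() */
		if (fnmatch(patterns[i], path, FNM_PATHNAME) == 0) {
			printf("matched %s\n", patterns[i]);
			return 0;
		}
	}
	printf("no glob matched\n");
	return 1;
}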
48669+
48670+static struct acl_object_label *
48671+chk_glob_label(struct acl_object_label *globbed,
48672+ struct dentry *dentry, struct vfsmount *mnt, char **path)
48673+{
48674+ struct acl_object_label *tmp;
48675+
48676+ if (*path == NULL)
48677+ *path = gr_to_filename_nolock(dentry, mnt);
48678+
48679+ tmp = globbed;
48680+
48681+ while (tmp) {
48682+ if (!glob_match(tmp->filename, *path))
48683+ return tmp;
48684+ tmp = tmp->next;
48685+ }
48686+
48687+ return NULL;
48688+}
48689+
48690+static struct acl_object_label *
48691+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48692+ const ino_t curr_ino, const dev_t curr_dev,
48693+ const struct acl_subject_label *subj, char **path, const int checkglob)
48694+{
48695+ struct acl_subject_label *tmpsubj;
48696+ struct acl_object_label *retval;
48697+ struct acl_object_label *retval2;
48698+
48699+ tmpsubj = (struct acl_subject_label *) subj;
48700+ read_lock(&gr_inode_lock);
48701+ do {
48702+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
48703+ if (retval) {
48704+ if (checkglob && retval->globbed) {
48705+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
48706+ (struct vfsmount *)orig_mnt, path);
48707+ if (retval2)
48708+ retval = retval2;
48709+ }
48710+ break;
48711+ }
48712+ } while ((tmpsubj = tmpsubj->parent_subject));
48713+ read_unlock(&gr_inode_lock);
48714+
48715+ return retval;
48716+}
48717+
48718+static __inline__ struct acl_object_label *
48719+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48720+ const struct dentry *curr_dentry,
48721+ const struct acl_subject_label *subj, char **path, const int checkglob)
48722+{
48723+ int newglob = checkglob;
48724+
48725+	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
48726+	   as we don't want a / * rule to match instead of the / object.
48727+	   don't do this for create lookups that call this function, though, since they look up
48728+	   on the parent and thus need globbing checks on all paths
48729+	 */
48730+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
48731+ newglob = GR_NO_GLOB;
48732+
48733+ return __full_lookup(orig_dentry, orig_mnt,
48734+ curr_dentry->d_inode->i_ino,
48735+ __get_dev(curr_dentry), subj, path, newglob);
48736+}
48737+
48738+static struct acl_object_label *
48739+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48740+ const struct acl_subject_label *subj, char *path, const int checkglob)
48741+{
48742+ struct dentry *dentry = (struct dentry *) l_dentry;
48743+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48744+ struct acl_object_label *retval;
48745+
48746+ spin_lock(&dcache_lock);
48747+ spin_lock(&vfsmount_lock);
48748+
48749+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
48750+#ifdef CONFIG_NET
48751+ mnt == sock_mnt ||
48752+#endif
48753+#ifdef CONFIG_HUGETLBFS
48754+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
48755+#endif
48756+ /* ignore Eric Biederman */
48757+ IS_PRIVATE(l_dentry->d_inode))) {
48758+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
48759+ goto out;
48760+ }
48761+
48762+ for (;;) {
48763+ if (dentry == real_root && mnt == real_root_mnt)
48764+ break;
48765+
48766+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48767+ if (mnt->mnt_parent == mnt)
48768+ break;
48769+
48770+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48771+ if (retval != NULL)
48772+ goto out;
48773+
48774+ dentry = mnt->mnt_mountpoint;
48775+ mnt = mnt->mnt_parent;
48776+ continue;
48777+ }
48778+
48779+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48780+ if (retval != NULL)
48781+ goto out;
48782+
48783+ dentry = dentry->d_parent;
48784+ }
48785+
48786+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48787+
48788+ if (retval == NULL)
48789+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
48790+out:
48791+ spin_unlock(&vfsmount_lock);
48792+ spin_unlock(&dcache_lock);
48793+
48794+ BUG_ON(retval == NULL);
48795+
48796+ return retval;
48797+}
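__chk_obj_label() resolves an object label by walking from the target dentry toward the real root (crossing mountpoints as it goes) and returning the first, i.e. most specific, label it finds. The same idea on plain path strings, as a standalone sketch with a made-up rule table:

#include <stdio.h>
#include <string.h>

static const char *rules[] = { "/", "/home", "/home/alice" };

static const char *lookup_rule(const char *path)
{
	char buf[256];

	strncpy(buf, path, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	for (;;) {
		for (unsigned int i = 0; i < sizeof(rules) / sizeof(rules[0]); i++)
			if (!strcmp(buf, rules[i]))
				return rules[i];

		if (!strcmp(buf, "/"))	/* reached the root without a match */
			return NULL;

		/* strip the last component, like stepping to d_parent */
		char *slash = strrchr(buf, '/');
		if (slash == buf)
			strcpy(buf, "/");
		else
			*slash = '\0';
	}
}

int main(void)
{
	const char *r1 = lookup_rule("/home/alice/.ssh/config");
	const char *r2 = lookup_rule("/etc/passwd");

	printf("%s\n", r1 ? r1 : "(none)");	/* "/home/alice" */
	printf("%s\n", r2 ? r2 : "(none)");	/* "/" */
	return 0;
}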
48798+
48799+static __inline__ struct acl_object_label *
48800+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48801+ const struct acl_subject_label *subj)
48802+{
48803+ char *path = NULL;
48804+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
48805+}
48806+
48807+static __inline__ struct acl_object_label *
48808+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48809+ const struct acl_subject_label *subj)
48810+{
48811+ char *path = NULL;
48812+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
48813+}
48814+
48815+static __inline__ struct acl_object_label *
48816+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48817+ const struct acl_subject_label *subj, char *path)
48818+{
48819+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
48820+}
48821+
48822+static struct acl_subject_label *
48823+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48824+ const struct acl_role_label *role)
48825+{
48826+ struct dentry *dentry = (struct dentry *) l_dentry;
48827+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48828+ struct acl_subject_label *retval;
48829+
48830+ spin_lock(&dcache_lock);
48831+ spin_lock(&vfsmount_lock);
48832+
48833+ for (;;) {
48834+ if (dentry == real_root && mnt == real_root_mnt)
48835+ break;
48836+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48837+ if (mnt->mnt_parent == mnt)
48838+ break;
48839+
48840+ read_lock(&gr_inode_lock);
48841+ retval =
48842+ lookup_acl_subj_label(dentry->d_inode->i_ino,
48843+ __get_dev(dentry), role);
48844+ read_unlock(&gr_inode_lock);
48845+ if (retval != NULL)
48846+ goto out;
48847+
48848+ dentry = mnt->mnt_mountpoint;
48849+ mnt = mnt->mnt_parent;
48850+ continue;
48851+ }
48852+
48853+ read_lock(&gr_inode_lock);
48854+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48855+ __get_dev(dentry), role);
48856+ read_unlock(&gr_inode_lock);
48857+ if (retval != NULL)
48858+ goto out;
48859+
48860+ dentry = dentry->d_parent;
48861+ }
48862+
48863+ read_lock(&gr_inode_lock);
48864+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48865+ __get_dev(dentry), role);
48866+ read_unlock(&gr_inode_lock);
48867+
48868+ if (unlikely(retval == NULL)) {
48869+ read_lock(&gr_inode_lock);
48870+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
48871+ __get_dev(real_root), role);
48872+ read_unlock(&gr_inode_lock);
48873+ }
48874+out:
48875+ spin_unlock(&vfsmount_lock);
48876+ spin_unlock(&dcache_lock);
48877+
48878+ BUG_ON(retval == NULL);
48879+
48880+ return retval;
48881+}
48882+
48883+static void
48884+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
48885+{
48886+ struct task_struct *task = current;
48887+ const struct cred *cred = current_cred();
48888+
48889+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48890+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48891+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48892+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
48893+
48894+ return;
48895+}
48896+
48897+static void
48898+gr_log_learn_sysctl(const char *path, const __u32 mode)
48899+{
48900+ struct task_struct *task = current;
48901+ const struct cred *cred = current_cred();
48902+
48903+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48904+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48905+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48906+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
48907+
48908+ return;
48909+}
48910+
48911+static void
48912+gr_log_learn_id_change(const char type, const unsigned int real,
48913+ const unsigned int effective, const unsigned int fs)
48914+{
48915+ struct task_struct *task = current;
48916+ const struct cred *cred = current_cred();
48917+
48918+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
48919+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48920+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48921+ type, real, effective, fs, &task->signal->saved_ip);
48922+
48923+ return;
48924+}
48925+
48926+__u32
48927+gr_check_link(const struct dentry * new_dentry,
48928+ const struct dentry * parent_dentry,
48929+ const struct vfsmount * parent_mnt,
48930+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
48931+{
48932+ struct acl_object_label *obj;
48933+ __u32 oldmode, newmode;
48934+ __u32 needmode;
48935+
48936+ if (unlikely(!(gr_status & GR_READY)))
48937+ return (GR_CREATE | GR_LINK);
48938+
48939+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
48940+ oldmode = obj->mode;
48941+
48942+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48943+ oldmode |= (GR_CREATE | GR_LINK);
48944+
48945+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
48946+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
48947+ needmode |= GR_SETID | GR_AUDIT_SETID;
48948+
48949+ newmode =
48950+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
48951+ oldmode | needmode);
48952+
48953+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
48954+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
48955+ GR_INHERIT | GR_AUDIT_INHERIT);
48956+
48957+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
48958+ goto bad;
48959+
48960+ if ((oldmode & needmode) != needmode)
48961+ goto bad;
48962+
48963+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
48964+ if ((newmode & needmode) != needmode)
48965+ goto bad;
48966+
48967+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
48968+ return newmode;
48969+bad:
48970+ needmode = oldmode;
48971+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
48972+ needmode |= GR_SETID;
48973+
48974+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
48975+ gr_log_learn(old_dentry, old_mnt, needmode);
48976+ return (GR_CREATE | GR_LINK);
48977+ } else if (newmode & GR_SUPPRESS)
48978+ return GR_SUPPRESS;
48979+ else
48980+ return 0;
48981+}
48982+
48983+__u32
48984+gr_search_file(const struct dentry * dentry, const __u32 mode,
48985+ const struct vfsmount * mnt)
48986+{
48987+ __u32 retval = mode;
48988+ struct acl_subject_label *curracl;
48989+ struct acl_object_label *currobj;
48990+
48991+ if (unlikely(!(gr_status & GR_READY)))
48992+ return (mode & ~GR_AUDITS);
48993+
48994+ curracl = current->acl;
48995+
48996+ currobj = chk_obj_label(dentry, mnt, curracl);
48997+ retval = currobj->mode & mode;
48998+
48999+ /* if we're opening a specified transfer file for writing
49000+ (e.g. /dev/initctl), then transfer our role to init
49001+ */
49002+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
49003+ current->role->roletype & GR_ROLE_PERSIST)) {
49004+ struct task_struct *task = init_pid_ns.child_reaper;
49005+
49006+ if (task->role != current->role) {
49007+ task->acl_sp_role = 0;
49008+ task->acl_role_id = current->acl_role_id;
49009+ task->role = current->role;
49010+ rcu_read_lock();
49011+ read_lock(&grsec_exec_file_lock);
49012+ gr_apply_subject_to_task(task);
49013+ read_unlock(&grsec_exec_file_lock);
49014+ rcu_read_unlock();
49015+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
49016+ }
49017+ }
49018+
49019+ if (unlikely
49020+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
49021+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
49022+ __u32 new_mode = mode;
49023+
49024+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49025+
49026+ retval = new_mode;
49027+
49028+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
49029+ new_mode |= GR_INHERIT;
49030+
49031+ if (!(mode & GR_NOLEARN))
49032+ gr_log_learn(dentry, mnt, new_mode);
49033+ }
49034+
49035+ return retval;
49036+}
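gr_search_file() reduces an access decision to a bitwise AND of the requested mode against the object's mode, with the audit and suppress bits kept out of the comparison. A minimal sketch of that mask arithmetic, using made-up flag values rather than the real GR_* constants:

#include <stdio.h>

#define M_READ     0x01
#define M_WRITE    0x02
#define M_EXEC     0x04
#define M_AUDIT    0x10
#define M_SUPPRESS 0x20
#define M_AUDITS   (M_AUDIT | M_SUPPRESS)

int main(void)
{
	unsigned int granted = M_READ | M_EXEC;
	unsigned int wanted  = M_READ | M_WRITE | M_AUDIT;
	unsigned int allowed = granted & wanted;

	/* the request succeeds only if every non-audit bit asked for was granted */
	if (allowed == (wanted & ~M_AUDITS))
		printf("allowed\n");
	else
		printf("denied (missing 0x%x)\n", (wanted & ~M_AUDITS) & ~allowed);
	return 0;
}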
49037+
49038+__u32
49039+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
49040+ const struct vfsmount * mnt, const __u32 mode)
49041+{
49042+ struct name_entry *match;
49043+ struct acl_object_label *matchpo;
49044+ struct acl_subject_label *curracl;
49045+ char *path;
49046+ __u32 retval;
49047+
49048+ if (unlikely(!(gr_status & GR_READY)))
49049+ return (mode & ~GR_AUDITS);
49050+
49051+ preempt_disable();
49052+ path = gr_to_filename_rbac(new_dentry, mnt);
49053+ match = lookup_name_entry_create(path);
49054+
49055+ if (!match)
49056+ goto check_parent;
49057+
49058+ curracl = current->acl;
49059+
49060+ read_lock(&gr_inode_lock);
49061+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
49062+ read_unlock(&gr_inode_lock);
49063+
49064+ if (matchpo) {
49065+ if ((matchpo->mode & mode) !=
49066+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
49067+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49068+ __u32 new_mode = mode;
49069+
49070+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49071+
49072+ gr_log_learn(new_dentry, mnt, new_mode);
49073+
49074+ preempt_enable();
49075+ return new_mode;
49076+ }
49077+ preempt_enable();
49078+ return (matchpo->mode & mode);
49079+ }
49080+
49081+ check_parent:
49082+ curracl = current->acl;
49083+
49084+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
49085+ retval = matchpo->mode & mode;
49086+
49087+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
49088+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
49089+ __u32 new_mode = mode;
49090+
49091+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49092+
49093+ gr_log_learn(new_dentry, mnt, new_mode);
49094+ preempt_enable();
49095+ return new_mode;
49096+ }
49097+
49098+ preempt_enable();
49099+ return retval;
49100+}
49101+
49102+int
49103+gr_check_hidden_task(const struct task_struct *task)
49104+{
49105+ if (unlikely(!(gr_status & GR_READY)))
49106+ return 0;
49107+
49108+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49109+ return 1;
49110+
49111+ return 0;
49112+}
49113+
49114+int
49115+gr_check_protected_task(const struct task_struct *task)
49116+{
49117+ if (unlikely(!(gr_status & GR_READY) || !task))
49118+ return 0;
49119+
49120+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49121+ task->acl != current->acl)
49122+ return 1;
49123+
49124+ return 0;
49125+}
49126+
49127+int
49128+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49129+{
49130+ struct task_struct *p;
49131+ int ret = 0;
49132+
49133+ if (unlikely(!(gr_status & GR_READY) || !pid))
49134+ return ret;
49135+
49136+ read_lock(&tasklist_lock);
49137+ do_each_pid_task(pid, type, p) {
49138+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49139+ p->acl != current->acl) {
49140+ ret = 1;
49141+ goto out;
49142+ }
49143+ } while_each_pid_task(pid, type, p);
49144+out:
49145+ read_unlock(&tasklist_lock);
49146+
49147+ return ret;
49148+}
49149+
49150+void
49151+gr_copy_label(struct task_struct *tsk)
49152+{
49153+ tsk->signal->used_accept = 0;
49154+ tsk->acl_sp_role = 0;
49155+ tsk->acl_role_id = current->acl_role_id;
49156+ tsk->acl = current->acl;
49157+ tsk->role = current->role;
49158+ tsk->signal->curr_ip = current->signal->curr_ip;
49159+ tsk->signal->saved_ip = current->signal->saved_ip;
49160+ if (current->exec_file)
49161+ get_file(current->exec_file);
49162+ tsk->exec_file = current->exec_file;
49163+ tsk->is_writable = current->is_writable;
49164+ if (unlikely(current->signal->used_accept)) {
49165+ current->signal->curr_ip = 0;
49166+ current->signal->saved_ip = 0;
49167+ }
49168+
49169+ return;
49170+}
49171+
49172+static void
49173+gr_set_proc_res(struct task_struct *task)
49174+{
49175+ struct acl_subject_label *proc;
49176+ unsigned short i;
49177+
49178+ proc = task->acl;
49179+
49180+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49181+ return;
49182+
49183+ for (i = 0; i < RLIM_NLIMITS; i++) {
49184+ if (!(proc->resmask & (1 << i)))
49185+ continue;
49186+
49187+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49188+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49189+ }
49190+
49191+ return;
49192+}
49193+
49194+extern int __gr_process_user_ban(struct user_struct *user);
49195+
49196+int
49197+gr_check_user_change(int real, int effective, int fs)
49198+{
49199+ unsigned int i;
49200+ __u16 num;
49201+ uid_t *uidlist;
49202+ int curuid;
49203+ int realok = 0;
49204+ int effectiveok = 0;
49205+ int fsok = 0;
49206+
49207+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49208+ struct user_struct *user;
49209+
49210+ if (real == -1)
49211+ goto skipit;
49212+
49213+ user = find_user(real);
49214+ if (user == NULL)
49215+ goto skipit;
49216+
49217+ if (__gr_process_user_ban(user)) {
49218+ /* for find_user */
49219+ free_uid(user);
49220+ return 1;
49221+ }
49222+
49223+ /* for find_user */
49224+ free_uid(user);
49225+
49226+skipit:
49227+#endif
49228+
49229+ if (unlikely(!(gr_status & GR_READY)))
49230+ return 0;
49231+
49232+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49233+ gr_log_learn_id_change('u', real, effective, fs);
49234+
49235+ num = current->acl->user_trans_num;
49236+ uidlist = current->acl->user_transitions;
49237+
49238+ if (uidlist == NULL)
49239+ return 0;
49240+
49241+ if (real == -1)
49242+ realok = 1;
49243+ if (effective == -1)
49244+ effectiveok = 1;
49245+ if (fs == -1)
49246+ fsok = 1;
49247+
49248+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
49249+ for (i = 0; i < num; i++) {
49250+ curuid = (int)uidlist[i];
49251+ if (real == curuid)
49252+ realok = 1;
49253+ if (effective == curuid)
49254+ effectiveok = 1;
49255+ if (fs == curuid)
49256+ fsok = 1;
49257+ }
49258+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
49259+ for (i = 0; i < num; i++) {
49260+ curuid = (int)uidlist[i];
49261+ if (real == curuid)
49262+ break;
49263+ if (effective == curuid)
49264+ break;
49265+ if (fs == curuid)
49266+ break;
49267+ }
49268+ /* not in deny list */
49269+ if (i == num) {
49270+ realok = 1;
49271+ effectiveok = 1;
49272+ fsok = 1;
49273+ }
49274+ }
49275+
49276+ if (realok && effectiveok && fsok)
49277+ return 0;
49278+ else {
49279+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49280+ return 1;
49281+ }
49282+}
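The uid/gid transition checks implement either an allow-list (GR_ID_ALLOW: only listed ids may be switched to) or a deny-list (GR_ID_DENY: any id is fine unless listed). A compact standalone sketch of those two policies; id_permitted() and the enum are hypothetical:

#include <stdio.h>

enum trans_type { ID_ALLOW, ID_DENY };

static int id_permitted(int id, const int *list, unsigned int num, enum trans_type type)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (list[i] == id)
			break;

	if (type == ID_ALLOW)
		return i < num;		/* must be on the list */
	return i == num;		/* must NOT be on the list */
}

int main(void)
{
	int list[] = { 33, 48 };

	printf("%d\n", id_permitted(33, list, 2, ID_ALLOW));	/* 1 */
	printf("%d\n", id_permitted(99, list, 2, ID_ALLOW));	/* 0 */
	printf("%d\n", id_permitted(99, list, 2, ID_DENY));	/* 1 */
	return 0;
}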
49283+
49284+int
49285+gr_check_group_change(int real, int effective, int fs)
49286+{
49287+ unsigned int i;
49288+ __u16 num;
49289+ gid_t *gidlist;
49290+ int curgid;
49291+ int realok = 0;
49292+ int effectiveok = 0;
49293+ int fsok = 0;
49294+
49295+ if (unlikely(!(gr_status & GR_READY)))
49296+ return 0;
49297+
49298+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49299+ gr_log_learn_id_change('g', real, effective, fs);
49300+
49301+ num = current->acl->group_trans_num;
49302+ gidlist = current->acl->group_transitions;
49303+
49304+ if (gidlist == NULL)
49305+ return 0;
49306+
49307+ if (real == -1)
49308+ realok = 1;
49309+ if (effective == -1)
49310+ effectiveok = 1;
49311+ if (fs == -1)
49312+ fsok = 1;
49313+
49314+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
49315+ for (i = 0; i < num; i++) {
49316+ curgid = (int)gidlist[i];
49317+ if (real == curgid)
49318+ realok = 1;
49319+ if (effective == curgid)
49320+ effectiveok = 1;
49321+ if (fs == curgid)
49322+ fsok = 1;
49323+ }
49324+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
49325+ for (i = 0; i < num; i++) {
49326+ curgid = (int)gidlist[i];
49327+ if (real == curgid)
49328+ break;
49329+ if (effective == curgid)
49330+ break;
49331+ if (fs == curgid)
49332+ break;
49333+ }
49334+ /* not in deny list */
49335+ if (i == num) {
49336+ realok = 1;
49337+ effectiveok = 1;
49338+ fsok = 1;
49339+ }
49340+ }
49341+
49342+ if (realok && effectiveok && fsok)
49343+ return 0;
49344+ else {
49345+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49346+ return 1;
49347+ }
49348+}
49349+
49350+void
49351+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49352+{
49353+ struct acl_role_label *role = task->role;
49354+ struct acl_subject_label *subj = NULL;
49355+ struct acl_object_label *obj;
49356+ struct file *filp;
49357+
49358+ if (unlikely(!(gr_status & GR_READY)))
49359+ return;
49360+
49361+ filp = task->exec_file;
49362+
49363+ /* kernel process, we'll give them the kernel role */
49364+ if (unlikely(!filp)) {
49365+ task->role = kernel_role;
49366+ task->acl = kernel_role->root_label;
49367+ return;
49368+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49369+ role = lookup_acl_role_label(task, uid, gid);
49370+
49371+ /* perform subject lookup in possibly new role
49372+ we can use this result below in the case where role == task->role
49373+ */
49374+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49375+
49376+	/* if we changed uid/gid but ended up with the same role
49377+	   and are using inheritance, don't lose the inherited subject:
49378+	   if the current subject differs from what a normal lookup
49379+	   would return, we arrived at it via inheritance, so don't
49380+	   lose that subject
49381+	 */
49382+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49383+ (subj == task->acl)))
49384+ task->acl = subj;
49385+
49386+ task->role = role;
49387+
49388+ task->is_writable = 0;
49389+
49390+ /* ignore additional mmap checks for processes that are writable
49391+ by the default ACL */
49392+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49393+ if (unlikely(obj->mode & GR_WRITE))
49394+ task->is_writable = 1;
49395+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49396+ if (unlikely(obj->mode & GR_WRITE))
49397+ task->is_writable = 1;
49398+
49399+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49400+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49401+#endif
49402+
49403+ gr_set_proc_res(task);
49404+
49405+ return;
49406+}
49407+
49408+int
49409+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49410+ const int unsafe_share)
49411+{
49412+ struct task_struct *task = current;
49413+ struct acl_subject_label *newacl;
49414+ struct acl_object_label *obj;
49415+ __u32 retmode;
49416+
49417+ if (unlikely(!(gr_status & GR_READY)))
49418+ return 0;
49419+
49420+ newacl = chk_subj_label(dentry, mnt, task->role);
49421+
49422+ task_lock(task);
49423+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49424+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49425+ !(task->role->roletype & GR_ROLE_GOD) &&
49426+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49427+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49428+ task_unlock(task);
49429+ if (unsafe_share)
49430+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49431+ else
49432+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49433+ return -EACCES;
49434+ }
49435+ task_unlock(task);
49436+
49437+ obj = chk_obj_label(dentry, mnt, task->acl);
49438+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49439+
49440+ if (!(task->acl->mode & GR_INHERITLEARN) &&
49441+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49442+ if (obj->nested)
49443+ task->acl = obj->nested;
49444+ else
49445+ task->acl = newacl;
49446+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49447+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49448+
49449+ task->is_writable = 0;
49450+
49451+ /* ignore additional mmap checks for processes that are writable
49452+ by the default ACL */
49453+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
49454+ if (unlikely(obj->mode & GR_WRITE))
49455+ task->is_writable = 1;
49456+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
49457+ if (unlikely(obj->mode & GR_WRITE))
49458+ task->is_writable = 1;
49459+
49460+ gr_set_proc_res(task);
49461+
49462+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49463+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49464+#endif
49465+ return 0;
49466+}
49467+
49468+/* always called with valid inodev ptr */
49469+static void
49470+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49471+{
49472+ struct acl_object_label *matchpo;
49473+ struct acl_subject_label *matchps;
49474+ struct acl_subject_label *subj;
49475+ struct acl_role_label *role;
49476+ unsigned int x;
49477+
49478+ FOR_EACH_ROLE_START(role)
49479+ FOR_EACH_SUBJECT_START(role, subj, x)
49480+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49481+ matchpo->mode |= GR_DELETED;
49482+ FOR_EACH_SUBJECT_END(subj,x)
49483+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49484+ if (subj->inode == ino && subj->device == dev)
49485+ subj->mode |= GR_DELETED;
49486+ FOR_EACH_NESTED_SUBJECT_END(subj)
49487+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49488+ matchps->mode |= GR_DELETED;
49489+ FOR_EACH_ROLE_END(role)
49490+
49491+ inodev->nentry->deleted = 1;
49492+
49493+ return;
49494+}
49495+
49496+void
49497+gr_handle_delete(const ino_t ino, const dev_t dev)
49498+{
49499+ struct inodev_entry *inodev;
49500+
49501+ if (unlikely(!(gr_status & GR_READY)))
49502+ return;
49503+
49504+ write_lock(&gr_inode_lock);
49505+ inodev = lookup_inodev_entry(ino, dev);
49506+ if (inodev != NULL)
49507+ do_handle_delete(inodev, ino, dev);
49508+ write_unlock(&gr_inode_lock);
49509+
49510+ return;
49511+}
49512+
49513+static void
49514+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49515+ const ino_t newinode, const dev_t newdevice,
49516+ struct acl_subject_label *subj)
49517+{
49518+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49519+ struct acl_object_label *match;
49520+
49521+ match = subj->obj_hash[index];
49522+
49523+ while (match && (match->inode != oldinode ||
49524+ match->device != olddevice ||
49525+ !(match->mode & GR_DELETED)))
49526+ match = match->next;
49527+
49528+ if (match && (match->inode == oldinode)
49529+ && (match->device == olddevice)
49530+ && (match->mode & GR_DELETED)) {
49531+ if (match->prev == NULL) {
49532+ subj->obj_hash[index] = match->next;
49533+ if (match->next != NULL)
49534+ match->next->prev = NULL;
49535+ } else {
49536+ match->prev->next = match->next;
49537+ if (match->next != NULL)
49538+ match->next->prev = match->prev;
49539+ }
49540+ match->prev = NULL;
49541+ match->next = NULL;
49542+ match->inode = newinode;
49543+ match->device = newdevice;
49544+ match->mode &= ~GR_DELETED;
49545+
49546+ insert_acl_obj_label(match, subj);
49547+ }
49548+
49549+ return;
49550+}
49551+
49552+static void
49553+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49554+ const ino_t newinode, const dev_t newdevice,
49555+ struct acl_role_label *role)
49556+{
49557+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49558+ struct acl_subject_label *match;
49559+
49560+ match = role->subj_hash[index];
49561+
49562+ while (match && (match->inode != oldinode ||
49563+ match->device != olddevice ||
49564+ !(match->mode & GR_DELETED)))
49565+ match = match->next;
49566+
49567+ if (match && (match->inode == oldinode)
49568+ && (match->device == olddevice)
49569+ && (match->mode & GR_DELETED)) {
49570+ if (match->prev == NULL) {
49571+ role->subj_hash[index] = match->next;
49572+ if (match->next != NULL)
49573+ match->next->prev = NULL;
49574+ } else {
49575+ match->prev->next = match->next;
49576+ if (match->next != NULL)
49577+ match->next->prev = match->prev;
49578+ }
49579+ match->prev = NULL;
49580+ match->next = NULL;
49581+ match->inode = newinode;
49582+ match->device = newdevice;
49583+ match->mode &= ~GR_DELETED;
49584+
49585+ insert_acl_subj_label(match, role);
49586+ }
49587+
49588+ return;
49589+}
49590+
49591+static void
49592+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49593+ const ino_t newinode, const dev_t newdevice)
49594+{
49595+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49596+ struct inodev_entry *match;
49597+
49598+ match = inodev_set.i_hash[index];
49599+
49600+ while (match && (match->nentry->inode != oldinode ||
49601+ match->nentry->device != olddevice || !match->nentry->deleted))
49602+ match = match->next;
49603+
49604+ if (match && (match->nentry->inode == oldinode)
49605+ && (match->nentry->device == olddevice) &&
49606+ match->nentry->deleted) {
49607+ if (match->prev == NULL) {
49608+ inodev_set.i_hash[index] = match->next;
49609+ if (match->next != NULL)
49610+ match->next->prev = NULL;
49611+ } else {
49612+ match->prev->next = match->next;
49613+ if (match->next != NULL)
49614+ match->next->prev = match->prev;
49615+ }
49616+ match->prev = NULL;
49617+ match->next = NULL;
49618+ match->nentry->inode = newinode;
49619+ match->nentry->device = newdevice;
49620+ match->nentry->deleted = 0;
49621+
49622+ insert_inodev_entry(match);
49623+ }
49624+
49625+ return;
49626+}
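update_acl_obj_label(), update_acl_subj_label() and update_inodev_entry() above all share one pattern: unlink the matching node from its doubly linked hash chain, fixing up both neighbours, then re-insert it under the new inode/device key. A toy standalone version of that unlink-and-rekey step (struct node and rekey() are illustrative, not part of the patch):

#include <stdio.h>

struct node { int key; struct node *prev, *next; };

#define NBUCKETS 4
static struct node *bucket[NBUCKETS];

static void insert(struct node *n)
{
	struct node **curr = &bucket[n->key % NBUCKETS];

	n->prev = NULL;
	if (*curr)
		(*curr)->prev = n;
	n->next = *curr;
	*curr = n;
}

static void rekey(struct node *n, int newkey)
{
	/* unlink from the old chain, fixing up both neighbours */
	if (n->prev == NULL)
		bucket[n->key % NBUCKETS] = n->next;
	else
		n->prev->next = n->next;
	if (n->next)
		n->next->prev = n->prev;

	n->prev = n->next = NULL;
	n->key = newkey;
	insert(n);
}

int main(void)
{
	struct node a = { .key = 1 }, b = { .key = 5 };

	insert(&a);
	insert(&b);		/* bucket 1: b <-> a */
	rekey(&a, 2);		/* bucket 1: b, bucket 2: a */

	printf("%d %d\n", bucket[1]->key, bucket[2]->key);
	return 0;
}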
49627+
49628+static void
49629+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49630+ const struct vfsmount *mnt)
49631+{
49632+ struct acl_subject_label *subj;
49633+ struct acl_role_label *role;
49634+ unsigned int x;
49635+ ino_t inode = dentry->d_inode->i_ino;
49636+ dev_t dev = __get_dev(dentry);
49637+
49638+ FOR_EACH_ROLE_START(role)
49639+ update_acl_subj_label(matchn->inode, matchn->device,
49640+ inode, dev, role);
49641+
49642+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49643+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
49644+ subj->inode = inode;
49645+ subj->device = dev;
49646+ }
49647+ FOR_EACH_NESTED_SUBJECT_END(subj)
49648+ FOR_EACH_SUBJECT_START(role, subj, x)
49649+ update_acl_obj_label(matchn->inode, matchn->device,
49650+ inode, dev, subj);
49651+ FOR_EACH_SUBJECT_END(subj,x)
49652+ FOR_EACH_ROLE_END(role)
49653+
49654+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
49655+
49656+ return;
49657+}
49658+
49659+void
49660+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49661+{
49662+ struct name_entry *matchn;
49663+
49664+ if (unlikely(!(gr_status & GR_READY)))
49665+ return;
49666+
49667+ preempt_disable();
49668+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
49669+
49670+ if (unlikely((unsigned long)matchn)) {
49671+ write_lock(&gr_inode_lock);
49672+ do_handle_create(matchn, dentry, mnt);
49673+ write_unlock(&gr_inode_lock);
49674+ }
49675+ preempt_enable();
49676+
49677+ return;
49678+}
49679+
49680+void
49681+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49682+ struct dentry *old_dentry,
49683+ struct dentry *new_dentry,
49684+ struct vfsmount *mnt, const __u8 replace)
49685+{
49686+ struct name_entry *matchn;
49687+ struct inodev_entry *inodev;
49688+ ino_t oldinode = old_dentry->d_inode->i_ino;
49689+ dev_t olddev = __get_dev(old_dentry);
49690+
49691+	/* vfs_rename swaps the name and parent link for old_dentry and
49692+	   new_dentry.
49693+	   At this point, old_dentry has the new name, parent link, and inode
49694+	   for the renamed file.
49695+	   If a file is being replaced by the rename, new_dentry has the inode
49696+	   and name for the replaced file.
49697+	*/
49698+
49699+ if (unlikely(!(gr_status & GR_READY)))
49700+ return;
49701+
49702+ preempt_disable();
49703+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
49704+
49705+ /* we wouldn't have to check d_inode if it weren't for
49706+ NFS silly-renaming
49707+ */
49708+
49709+ write_lock(&gr_inode_lock);
49710+ if (unlikely(replace && new_dentry->d_inode)) {
49711+ ino_t newinode = new_dentry->d_inode->i_ino;
49712+ dev_t newdev = __get_dev(new_dentry);
49713+ inodev = lookup_inodev_entry(newinode, newdev);
49714+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
49715+ do_handle_delete(inodev, newinode, newdev);
49716+ }
49717+
49718+ inodev = lookup_inodev_entry(oldinode, olddev);
49719+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
49720+ do_handle_delete(inodev, oldinode, olddev);
49721+
49722+ if (unlikely((unsigned long)matchn))
49723+ do_handle_create(matchn, old_dentry, mnt);
49724+
49725+ write_unlock(&gr_inode_lock);
49726+ preempt_enable();
49727+
49728+ return;
49729+}
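
Editorial note: the comment above is the key to why this hook re-keys its tables by (inode, device) rather than by pathname — rename(2) moves a name, but the underlying inode keeps its identity. A standalone demonstration of that invariant (creates and removes two temporary files in the current directory):

    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        struct stat before, after;
        FILE *f = fopen("demo_old", "w");

        if (!f)
            return 1;
        fclose(f);

        if (stat("demo_old", &before) || rename("demo_old", "demo_new") ||
            stat("demo_new", &after))
            return 1;

        /* the inode number survives the rename; only the name changed */
        printf("same inode across rename: %s (ino %llu)\n",
               before.st_ino == after.st_ino ? "yes" : "no",
               (unsigned long long)after.st_ino);
        unlink("demo_new");
        return 0;
    }
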
49730+
49731+static int
49732+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
49733+ unsigned char **sum)
49734+{
49735+ struct acl_role_label *r;
49736+ struct role_allowed_ip *ipp;
49737+ struct role_transition *trans;
49738+ unsigned int i;
49739+ int found = 0;
49740+ u32 curr_ip = current->signal->curr_ip;
49741+
49742+ current->signal->saved_ip = curr_ip;
49743+
49744+ /* check transition table */
49745+
49746+ for (trans = current->role->transitions; trans; trans = trans->next) {
49747+ if (!strcmp(rolename, trans->rolename)) {
49748+ found = 1;
49749+ break;
49750+ }
49751+ }
49752+
49753+ if (!found)
49754+ return 0;
49755+
49756+ /* handle special roles that do not require authentication
49757+ and check ip */
49758+
49759+ FOR_EACH_ROLE_START(r)
49760+ if (!strcmp(rolename, r->rolename) &&
49761+ (r->roletype & GR_ROLE_SPECIAL)) {
49762+ found = 0;
49763+ if (r->allowed_ips != NULL) {
49764+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
49765+ if ((ntohl(curr_ip) & ipp->netmask) ==
49766+ (ntohl(ipp->addr) & ipp->netmask))
49767+ found = 1;
49768+ }
49769+ } else
49770+ found = 2;
49771+ if (!found)
49772+ return 0;
49773+
49774+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
49775+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
49776+ *salt = NULL;
49777+ *sum = NULL;
49778+ return 1;
49779+ }
49780+ }
49781+ FOR_EACH_ROLE_END(r)
49782+
49783+ for (i = 0; i < num_sprole_pws; i++) {
49784+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
49785+ *salt = acl_special_roles[i]->salt;
49786+ *sum = acl_special_roles[i]->sum;
49787+ return 1;
49788+ }
49789+ }
49790+
49791+ return 0;
49792+}
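
Editorial note: the allowed_ips walk in lookup_special_role_auth() accepts a client when its address falls inside any configured subnet — both the client address and the configured address are masked and compared. A standalone sketch of that check, assuming addresses and masks are given in dotted-quad form and converted to host order (helper names are illustrative):

    #include <stdio.h>
    #include <arpa/inet.h>

    /* 1 if the client falls in the same subnet as the allowed address */
    static int ip_allowed(const char *client, const char *allowed, const char *mask)
    {
        struct in_addr c, a, m;

        if (!inet_aton(client, &c) || !inet_aton(allowed, &a) || !inet_aton(mask, &m))
            return 0;

        /* same comparison as the patch: mask both sides, then compare */
        return (ntohl(c.s_addr) & ntohl(m.s_addr)) ==
               (ntohl(a.s_addr) & ntohl(m.s_addr));
    }

    int main(void)
    {
        printf("%d\n", ip_allowed("192.168.1.42", "192.168.1.0", "255.255.255.0")); /* 1 */
        printf("%d\n", ip_allowed("10.0.0.5",     "192.168.1.0", "255.255.255.0")); /* 0 */
        return 0;
    }
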
49793+
49794+static void
49795+assign_special_role(char *rolename)
49796+{
49797+ struct acl_object_label *obj;
49798+ struct acl_role_label *r;
49799+ struct acl_role_label *assigned = NULL;
49800+ struct task_struct *tsk;
49801+ struct file *filp;
49802+
49803+ FOR_EACH_ROLE_START(r)
49804+ if (!strcmp(rolename, r->rolename) &&
49805+ (r->roletype & GR_ROLE_SPECIAL)) {
49806+ assigned = r;
49807+ break;
49808+ }
49809+ FOR_EACH_ROLE_END(r)
49810+
49811+ if (!assigned)
49812+ return;
49813+
49814+ read_lock(&tasklist_lock);
49815+ read_lock(&grsec_exec_file_lock);
49816+
49817+ tsk = current->real_parent;
49818+ if (tsk == NULL)
49819+ goto out_unlock;
49820+
49821+ filp = tsk->exec_file;
49822+ if (filp == NULL)
49823+ goto out_unlock;
49824+
49825+ tsk->is_writable = 0;
49826+
49827+ tsk->acl_sp_role = 1;
49828+ tsk->acl_role_id = ++acl_sp_role_value;
49829+ tsk->role = assigned;
49830+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
49831+
49832+ /* ignore additional mmap checks for processes that are writable
49833+ by the default ACL */
49834+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49835+ if (unlikely(obj->mode & GR_WRITE))
49836+ tsk->is_writable = 1;
49837+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
49838+ if (unlikely(obj->mode & GR_WRITE))
49839+ tsk->is_writable = 1;
49840+
49841+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49842+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
49843+#endif
49844+
49845+out_unlock:
49846+ read_unlock(&grsec_exec_file_lock);
49847+ read_unlock(&tasklist_lock);
49848+ return;
49849+}
49850+
49851+int gr_check_secure_terminal(struct task_struct *task)
49852+{
49853+ struct task_struct *p, *p2, *p3;
49854+ struct files_struct *files;
49855+ struct fdtable *fdt;
49856+ struct file *our_file = NULL, *file;
49857+ int i;
49858+
49859+ if (task->signal->tty == NULL)
49860+ return 1;
49861+
49862+ files = get_files_struct(task);
49863+ if (files != NULL) {
49864+ rcu_read_lock();
49865+ fdt = files_fdtable(files);
49866+ for (i=0; i < fdt->max_fds; i++) {
49867+ file = fcheck_files(files, i);
49868+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
49869+ get_file(file);
49870+ our_file = file;
49871+ }
49872+ }
49873+ rcu_read_unlock();
49874+ put_files_struct(files);
49875+ }
49876+
49877+ if (our_file == NULL)
49878+ return 1;
49879+
49880+ read_lock(&tasklist_lock);
49881+ do_each_thread(p2, p) {
49882+ files = get_files_struct(p);
49883+ if (files == NULL ||
49884+ (p->signal && p->signal->tty == task->signal->tty)) {
49885+ if (files != NULL)
49886+ put_files_struct(files);
49887+ continue;
49888+ }
49889+ rcu_read_lock();
49890+ fdt = files_fdtable(files);
49891+ for (i=0; i < fdt->max_fds; i++) {
49892+ file = fcheck_files(files, i);
49893+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
49894+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
49895+ p3 = task;
49896+ while (p3->pid > 0) {
49897+ if (p3 == p)
49898+ break;
49899+ p3 = p3->real_parent;
49900+ }
49901+ if (p3 == p)
49902+ break;
49903+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
49904+ gr_handle_alertkill(p);
49905+ rcu_read_unlock();
49906+ put_files_struct(files);
49907+ read_unlock(&tasklist_lock);
49908+ fput(our_file);
49909+ return 0;
49910+ }
49911+ }
49912+ rcu_read_unlock();
49913+ put_files_struct(files);
49914+ } while_each_thread(p2, p);
49915+ read_unlock(&tasklist_lock);
49916+
49917+ fput(our_file);
49918+ return 1;
49919+}
49920+
49921+ssize_t
49922+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
49923+{
49924+ struct gr_arg_wrapper uwrap;
49925+ unsigned char *sprole_salt = NULL;
49926+ unsigned char *sprole_sum = NULL;
49927+ int error = sizeof (struct gr_arg_wrapper);
49928+ int error2 = 0;
49929+
49930+ mutex_lock(&gr_dev_mutex);
49931+
49932+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
49933+ error = -EPERM;
49934+ goto out;
49935+ }
49936+
49937+ if (count != sizeof (struct gr_arg_wrapper)) {
49938+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
49939+ error = -EINVAL;
49940+ goto out;
49941+ }
49942+
49943+
49944+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
49945+ gr_auth_expires = 0;
49946+ gr_auth_attempts = 0;
49947+ }
49948+
49949+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
49950+ error = -EFAULT;
49951+ goto out;
49952+ }
49953+
49954+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
49955+ error = -EINVAL;
49956+ goto out;
49957+ }
49958+
49959+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
49960+ error = -EFAULT;
49961+ goto out;
49962+ }
49963+
49964+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49965+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49966+ time_after(gr_auth_expires, get_seconds())) {
49967+ error = -EBUSY;
49968+ goto out;
49969+ }
49970+
49971+	/* if a non-root user is trying to do anything other than use a
49972+	   special role, do not attempt authentication and do not count it
49973+	   towards authentication locking
49974+	 */
49975+
49976+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
49977+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49978+ current_uid()) {
49979+ error = -EPERM;
49980+ goto out;
49981+ }
49982+
49983+ /* ensure pw and special role name are null terminated */
49984+
49985+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
49986+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
49987+
49988+	/* Okay.
49989+	 * We have enough of the argument structure (we have yet
49990+	 * to copy_from_user the tables themselves). Copy the tables
49991+	 * only if we need them, i.e. for loading operations. */
49992+
49993+ switch (gr_usermode->mode) {
49994+ case GR_STATUS:
49995+ if (gr_status & GR_READY) {
49996+ error = 1;
49997+ if (!gr_check_secure_terminal(current))
49998+ error = 3;
49999+ } else
50000+ error = 2;
50001+ goto out;
50002+ case GR_SHUTDOWN:
50003+ if ((gr_status & GR_READY)
50004+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50005+ pax_open_kernel();
50006+ gr_status &= ~GR_READY;
50007+ pax_close_kernel();
50008+
50009+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
50010+ free_variables();
50011+ memset(gr_usermode, 0, sizeof (struct gr_arg));
50012+ memset(gr_system_salt, 0, GR_SALT_LEN);
50013+ memset(gr_system_sum, 0, GR_SHA_LEN);
50014+ } else if (gr_status & GR_READY) {
50015+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
50016+ error = -EPERM;
50017+ } else {
50018+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
50019+ error = -EAGAIN;
50020+ }
50021+ break;
50022+ case GR_ENABLE:
50023+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
50024+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
50025+ else {
50026+ if (gr_status & GR_READY)
50027+ error = -EAGAIN;
50028+ else
50029+ error = error2;
50030+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
50031+ }
50032+ break;
50033+ case GR_RELOAD:
50034+ if (!(gr_status & GR_READY)) {
50035+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
50036+ error = -EAGAIN;
50037+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50038+ lock_kernel();
50039+
50040+ pax_open_kernel();
50041+ gr_status &= ~GR_READY;
50042+ pax_close_kernel();
50043+
50044+ free_variables();
50045+ if (!(error2 = gracl_init(gr_usermode))) {
50046+ unlock_kernel();
50047+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50048+ } else {
50049+ unlock_kernel();
50050+ error = error2;
50051+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50052+ }
50053+ } else {
50054+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50055+ error = -EPERM;
50056+ }
50057+ break;
50058+ case GR_SEGVMOD:
50059+ if (unlikely(!(gr_status & GR_READY))) {
50060+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50061+ error = -EAGAIN;
50062+ break;
50063+ }
50064+
50065+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50066+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50067+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50068+ struct acl_subject_label *segvacl;
50069+ segvacl =
50070+ lookup_acl_subj_label(gr_usermode->segv_inode,
50071+ gr_usermode->segv_device,
50072+ current->role);
50073+ if (segvacl) {
50074+ segvacl->crashes = 0;
50075+ segvacl->expires = 0;
50076+ }
50077+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50078+ gr_remove_uid(gr_usermode->segv_uid);
50079+ }
50080+ } else {
50081+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50082+ error = -EPERM;
50083+ }
50084+ break;
50085+ case GR_SPROLE:
50086+ case GR_SPROLEPAM:
50087+ if (unlikely(!(gr_status & GR_READY))) {
50088+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50089+ error = -EAGAIN;
50090+ break;
50091+ }
50092+
50093+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50094+ current->role->expires = 0;
50095+ current->role->auth_attempts = 0;
50096+ }
50097+
50098+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50099+ time_after(current->role->expires, get_seconds())) {
50100+ error = -EBUSY;
50101+ goto out;
50102+ }
50103+
50104+ if (lookup_special_role_auth
50105+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50106+ && ((!sprole_salt && !sprole_sum)
50107+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50108+ char *p = "";
50109+ assign_special_role(gr_usermode->sp_role);
50110+ read_lock(&tasklist_lock);
50111+ if (current->real_parent)
50112+ p = current->real_parent->role->rolename;
50113+ read_unlock(&tasklist_lock);
50114+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50115+ p, acl_sp_role_value);
50116+ } else {
50117+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50118+ error = -EPERM;
50119+ if(!(current->role->auth_attempts++))
50120+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50121+
50122+ goto out;
50123+ }
50124+ break;
50125+ case GR_UNSPROLE:
50126+ if (unlikely(!(gr_status & GR_READY))) {
50127+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50128+ error = -EAGAIN;
50129+ break;
50130+ }
50131+
50132+ if (current->role->roletype & GR_ROLE_SPECIAL) {
50133+ char *p = "";
50134+ int i = 0;
50135+
50136+ read_lock(&tasklist_lock);
50137+ if (current->real_parent) {
50138+ p = current->real_parent->role->rolename;
50139+ i = current->real_parent->acl_role_id;
50140+ }
50141+ read_unlock(&tasklist_lock);
50142+
50143+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50144+ gr_set_acls(1);
50145+ } else {
50146+ error = -EPERM;
50147+ goto out;
50148+ }
50149+ break;
50150+ default:
50151+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50152+ error = -EINVAL;
50153+ break;
50154+ }
50155+
50156+ if (error != -EPERM)
50157+ goto out;
50158+
50159+ if(!(gr_auth_attempts++))
50160+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50161+
50162+ out:
50163+ mutex_unlock(&gr_dev_mutex);
50164+ return error;
50165+}
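
Editorial note: write_grsec_handler() rate-limits failed password attempts with a simple counter-plus-deadline scheme — the first failure in a window arms a timeout, and further attempts are refused with -EBUSY until it expires. A minimal userspace model of that scheme, assuming illustrative limits (the real values come from CONFIG_GRKERNSEC_ACL_MAXTRIES and CONFIG_GRKERNSEC_ACL_TIMEOUT):

    #include <stdio.h>
    #include <time.h>

    #define MAXTRIES 3
    #define TIMEOUT  30 /* seconds; illustrative */

    static unsigned int attempts;
    static time_t expires;

    /* returns 0 if an attempt may proceed, -1 if locked out */
    static int auth_gate(time_t now)
    {
        /* deadline passed: reset the window, as the handler does */
        if (expires && now >= expires) {
            expires = 0;
            attempts = 0;
        }
        if (attempts >= MAXTRIES && now < expires)
            return -1;
        return 0;
    }

    /* record a failure: the first one in a window arms the deadline */
    static void auth_failed(time_t now)
    {
        if (!(attempts++))
            expires = now + TIMEOUT;
    }

    int main(void)
    {
        time_t now = time(NULL);
        int i;

        for (i = 0; i < 5; i++) {
            if (auth_gate(now))
                printf("attempt %d: locked out\n", i + 1);
            else {
                printf("attempt %d: allowed (and fails)\n", i + 1);
                auth_failed(now);
            }
        }
        return 0;
    }
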
50166+
50167+/* must be called with
50168+ rcu_read_lock();
50169+ read_lock(&tasklist_lock);
50170+ read_lock(&grsec_exec_file_lock);
50171+*/
50172+int gr_apply_subject_to_task(struct task_struct *task)
50173+{
50174+ struct acl_object_label *obj;
50175+ char *tmpname;
50176+ struct acl_subject_label *tmpsubj;
50177+ struct file *filp;
50178+ struct name_entry *nmatch;
50179+
50180+ filp = task->exec_file;
50181+ if (filp == NULL)
50182+ return 0;
50183+
50184+	/* the following applies the correct subject
50185+	   to binaries that were already running when
50186+	   the RBAC system was enabled and that have
50187+	   been replaced or deleted since they were executed
50188+	   -----
50189+	   when the RBAC system starts, the inode/dev
50190+	   from exec_file will be one the RBAC system
50191+	   is unaware of.  It only knows the inode/dev
50192+	   of the file currently on disk, or the absence
50193+	   of it.
50194+	*/
50195+ preempt_disable();
50196+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50197+
50198+ nmatch = lookup_name_entry(tmpname);
50199+ preempt_enable();
50200+ tmpsubj = NULL;
50201+ if (nmatch) {
50202+ if (nmatch->deleted)
50203+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50204+ else
50205+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50206+ if (tmpsubj != NULL)
50207+ task->acl = tmpsubj;
50208+ }
50209+ if (tmpsubj == NULL)
50210+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50211+ task->role);
50212+ if (task->acl) {
50213+ task->is_writable = 0;
50214+ /* ignore additional mmap checks for processes that are writable
50215+ by the default ACL */
50216+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50217+ if (unlikely(obj->mode & GR_WRITE))
50218+ task->is_writable = 1;
50219+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50220+ if (unlikely(obj->mode & GR_WRITE))
50221+ task->is_writable = 1;
50222+
50223+ gr_set_proc_res(task);
50224+
50225+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50226+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50227+#endif
50228+ } else {
50229+ return 1;
50230+ }
50231+
50232+ return 0;
50233+}
50234+
50235+int
50236+gr_set_acls(const int type)
50237+{
50238+ struct task_struct *task, *task2;
50239+ struct acl_role_label *role = current->role;
50240+ __u16 acl_role_id = current->acl_role_id;
50241+ const struct cred *cred;
50242+ int ret;
50243+
50244+ rcu_read_lock();
50245+ read_lock(&tasklist_lock);
50246+ read_lock(&grsec_exec_file_lock);
50247+ do_each_thread(task2, task) {
50248+		/* check to see if we're called from the exit handler;
50249+		   if so, only replace ACLs that have inherited the admin
50250+		   ACL */
50251+
50252+ if (type && (task->role != role ||
50253+ task->acl_role_id != acl_role_id))
50254+ continue;
50255+
50256+ task->acl_role_id = 0;
50257+ task->acl_sp_role = 0;
50258+
50259+ if (task->exec_file) {
50260+ cred = __task_cred(task);
50261+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50262+
50263+ ret = gr_apply_subject_to_task(task);
50264+ if (ret) {
50265+ read_unlock(&grsec_exec_file_lock);
50266+ read_unlock(&tasklist_lock);
50267+ rcu_read_unlock();
50268+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50269+ return ret;
50270+ }
50271+ } else {
50272+ // it's a kernel process
50273+ task->role = kernel_role;
50274+ task->acl = kernel_role->root_label;
50275+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50276+ task->acl->mode &= ~GR_PROCFIND;
50277+#endif
50278+ }
50279+ } while_each_thread(task2, task);
50280+ read_unlock(&grsec_exec_file_lock);
50281+ read_unlock(&tasklist_lock);
50282+ rcu_read_unlock();
50283+
50284+ return 0;
50285+}
50286+
50287+void
50288+gr_learn_resource(const struct task_struct *task,
50289+ const int res, const unsigned long wanted, const int gt)
50290+{
50291+ struct acl_subject_label *acl;
50292+ const struct cred *cred;
50293+
50294+ if (unlikely((gr_status & GR_READY) &&
50295+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50296+ goto skip_reslog;
50297+
50298+#ifdef CONFIG_GRKERNSEC_RESLOG
50299+ gr_log_resource(task, res, wanted, gt);
50300+#endif
50301+ skip_reslog:
50302+
50303+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50304+ return;
50305+
50306+ acl = task->acl;
50307+
50308+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50309+ !(acl->resmask & (1 << (unsigned short) res))))
50310+ return;
50311+
50312+ if (wanted >= acl->res[res].rlim_cur) {
50313+ unsigned long res_add;
50314+
50315+ res_add = wanted;
50316+ switch (res) {
50317+ case RLIMIT_CPU:
50318+ res_add += GR_RLIM_CPU_BUMP;
50319+ break;
50320+ case RLIMIT_FSIZE:
50321+ res_add += GR_RLIM_FSIZE_BUMP;
50322+ break;
50323+ case RLIMIT_DATA:
50324+ res_add += GR_RLIM_DATA_BUMP;
50325+ break;
50326+ case RLIMIT_STACK:
50327+ res_add += GR_RLIM_STACK_BUMP;
50328+ break;
50329+ case RLIMIT_CORE:
50330+ res_add += GR_RLIM_CORE_BUMP;
50331+ break;
50332+ case RLIMIT_RSS:
50333+ res_add += GR_RLIM_RSS_BUMP;
50334+ break;
50335+ case RLIMIT_NPROC:
50336+ res_add += GR_RLIM_NPROC_BUMP;
50337+ break;
50338+ case RLIMIT_NOFILE:
50339+ res_add += GR_RLIM_NOFILE_BUMP;
50340+ break;
50341+ case RLIMIT_MEMLOCK:
50342+ res_add += GR_RLIM_MEMLOCK_BUMP;
50343+ break;
50344+ case RLIMIT_AS:
50345+ res_add += GR_RLIM_AS_BUMP;
50346+ break;
50347+ case RLIMIT_LOCKS:
50348+ res_add += GR_RLIM_LOCKS_BUMP;
50349+ break;
50350+ case RLIMIT_SIGPENDING:
50351+ res_add += GR_RLIM_SIGPENDING_BUMP;
50352+ break;
50353+ case RLIMIT_MSGQUEUE:
50354+ res_add += GR_RLIM_MSGQUEUE_BUMP;
50355+ break;
50356+ case RLIMIT_NICE:
50357+ res_add += GR_RLIM_NICE_BUMP;
50358+ break;
50359+ case RLIMIT_RTPRIO:
50360+ res_add += GR_RLIM_RTPRIO_BUMP;
50361+ break;
50362+ case RLIMIT_RTTIME:
50363+ res_add += GR_RLIM_RTTIME_BUMP;
50364+ break;
50365+ }
50366+
50367+ acl->res[res].rlim_cur = res_add;
50368+
50369+ if (wanted > acl->res[res].rlim_max)
50370+ acl->res[res].rlim_max = res_add;
50371+
50372+ /* only log the subject filename, since resource logging is supported for
50373+ single-subject learning only */
50374+ rcu_read_lock();
50375+ cred = __task_cred(task);
50376+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50377+ task->role->roletype, cred->uid, cred->gid, acl->filename,
50378+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50379+ "", (unsigned long) res, &task->signal->saved_ip);
50380+ rcu_read_unlock();
50381+ }
50382+
50383+ return;
50384+}
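
Editorial note: in learning mode the handler above never records the exact value a process asked for; it rounds the request up by a per-resource bump so the generated policy has headroom. A compact sketch of that rounding, assuming a made-up bump value (the real ones are the GR_RLIM_*_BUMP constants):

    #include <stdio.h>

    struct limit {
        unsigned long cur;
        unsigned long max;
    };

    /* bump a learned limit the way gr_learn_resource() does: only grow it,
       and always leave some slack above the observed request */
    static void learn(struct limit *l, unsigned long wanted, unsigned long bump)
    {
        if (wanted >= l->cur) {
            unsigned long res_add = wanted + bump;

            l->cur = res_add;
            if (wanted > l->max)
                l->max = res_add;
        }
    }

    int main(void)
    {
        struct limit nofile = { 64, 64 };

        learn(&nofile, 200, 64);   /* hypothetical bump of 64 fds */
        printf("cur=%lu max=%lu\n", nofile.cur, nofile.max);
        return 0;
    }
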
50385+
50386+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50387+void
50388+pax_set_initial_flags(struct linux_binprm *bprm)
50389+{
50390+ struct task_struct *task = current;
50391+ struct acl_subject_label *proc;
50392+ unsigned long flags;
50393+
50394+ if (unlikely(!(gr_status & GR_READY)))
50395+ return;
50396+
50397+ flags = pax_get_flags(task);
50398+
50399+ proc = task->acl;
50400+
50401+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50402+ flags &= ~MF_PAX_PAGEEXEC;
50403+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50404+ flags &= ~MF_PAX_SEGMEXEC;
50405+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50406+ flags &= ~MF_PAX_RANDMMAP;
50407+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50408+ flags &= ~MF_PAX_EMUTRAMP;
50409+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50410+ flags &= ~MF_PAX_MPROTECT;
50411+
50412+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50413+ flags |= MF_PAX_PAGEEXEC;
50414+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50415+ flags |= MF_PAX_SEGMEXEC;
50416+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50417+ flags |= MF_PAX_RANDMMAP;
50418+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50419+ flags |= MF_PAX_EMUTRAMP;
50420+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50421+ flags |= MF_PAX_MPROTECT;
50422+
50423+ pax_set_flags(task, flags);
50424+
50425+ return;
50426+}
50427+#endif
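
Editorial note: pax_set_initial_flags() applies the subject's per-binary PaX policy as two passes over a flag word — explicit disables clear bits first, explicit enables set bits afterwards, and anything not mentioned keeps its inherited value. A small sketch of that clear-then-set pattern, with illustrative bit names standing in for the MF_PAX_* flags:

    #include <stdio.h>

    #define F_PAGEEXEC  0x1
    #define F_RANDMMAP  0x2
    #define F_MPROTECT  0x4

    static unsigned long apply_policy(unsigned long flags,
                                      unsigned long disable,
                                      unsigned long enable)
    {
        flags &= ~disable;   /* explicit disables clear inherited bits first */
        flags |= enable;     /* explicit enables are applied last */
        return flags;
    }

    int main(void)
    {
        unsigned long inherited = F_PAGEEXEC | F_MPROTECT;
        unsigned long flags = apply_policy(inherited, F_MPROTECT, F_RANDMMAP);

        printf("flags=0x%lx\n", flags);  /* PAGEEXEC | RANDMMAP */
        return 0;
    }
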
50428+
50429+#ifdef CONFIG_SYSCTL
50430+/* Eric Biederman likes breaking userland ABI and every inode-based security
50431+ system to save 35kb of memory */
50432+
50433+/* we modify the passed in filename, but adjust it back before returning */
50434+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50435+{
50436+ struct name_entry *nmatch;
50437+ char *p, *lastp = NULL;
50438+ struct acl_object_label *obj = NULL, *tmp;
50439+ struct acl_subject_label *tmpsubj;
50440+ char c = '\0';
50441+
50442+ read_lock(&gr_inode_lock);
50443+
50444+ p = name + len - 1;
50445+ do {
50446+ nmatch = lookup_name_entry(name);
50447+ if (lastp != NULL)
50448+ *lastp = c;
50449+
50450+ if (nmatch == NULL)
50451+ goto next_component;
50452+ tmpsubj = current->acl;
50453+ do {
50454+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50455+ if (obj != NULL) {
50456+ tmp = obj->globbed;
50457+ while (tmp) {
50458+ if (!glob_match(tmp->filename, name)) {
50459+ obj = tmp;
50460+ goto found_obj;
50461+ }
50462+ tmp = tmp->next;
50463+ }
50464+ goto found_obj;
50465+ }
50466+ } while ((tmpsubj = tmpsubj->parent_subject));
50467+next_component:
50468+ /* end case */
50469+ if (p == name)
50470+ break;
50471+
50472+ while (*p != '/')
50473+ p--;
50474+ if (p == name)
50475+ lastp = p + 1;
50476+ else {
50477+ lastp = p;
50478+ p--;
50479+ }
50480+ c = *lastp;
50481+ *lastp = '\0';
50482+ } while (1);
50483+found_obj:
50484+ read_unlock(&gr_inode_lock);
50485+ /* obj returned will always be non-null */
50486+ return obj;
50487+}
50488+
50489+/* returns 0 when allowing, non-zero on error;
50490+   op of 0 is used for readdir, so we don't log the names of hidden files
50491+*/
50492+__u32
50493+gr_handle_sysctl(const struct ctl_table *table, const int op)
50494+{
50495+ ctl_table *tmp;
50496+ const char *proc_sys = "/proc/sys";
50497+ char *path;
50498+ struct acl_object_label *obj;
50499+ unsigned short len = 0, pos = 0, depth = 0, i;
50500+ __u32 err = 0;
50501+ __u32 mode = 0;
50502+
50503+ if (unlikely(!(gr_status & GR_READY)))
50504+ return 0;
50505+
50506+	/* for now, ignore operations on non-sysctl entries if it's not a
50507+	   readdir */
50508+ if (table->child != NULL && op != 0)
50509+ return 0;
50510+
50511+ mode |= GR_FIND;
50512+ /* it's only a read if it's an entry, read on dirs is for readdir */
50513+ if (op & MAY_READ)
50514+ mode |= GR_READ;
50515+ if (op & MAY_WRITE)
50516+ mode |= GR_WRITE;
50517+
50518+ preempt_disable();
50519+
50520+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50521+
50522+	/* it's only a read/write if it's an actual entry, not a dir
50523+	   (dirs are opened for readdir)
50524+	*/
50525+
50526+ /* convert the requested sysctl entry into a pathname */
50527+
50528+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50529+ len += strlen(tmp->procname);
50530+ len++;
50531+ depth++;
50532+ }
50533+
50534+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50535+ /* deny */
50536+ goto out;
50537+ }
50538+
50539+ memset(path, 0, PAGE_SIZE);
50540+
50541+ memcpy(path, proc_sys, strlen(proc_sys));
50542+
50543+ pos += strlen(proc_sys);
50544+
50545+ for (; depth > 0; depth--) {
50546+ path[pos] = '/';
50547+ pos++;
50548+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50549+ if (depth == i) {
50550+ memcpy(path + pos, tmp->procname,
50551+ strlen(tmp->procname));
50552+ pos += strlen(tmp->procname);
50553+ }
50554+ i++;
50555+ }
50556+ }
50557+
50558+ obj = gr_lookup_by_name(path, pos);
50559+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50560+
50561+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50562+ ((err & mode) != mode))) {
50563+ __u32 new_mode = mode;
50564+
50565+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50566+
50567+ err = 0;
50568+ gr_log_learn_sysctl(path, new_mode);
50569+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50570+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50571+ err = -ENOENT;
50572+ } else if (!(err & GR_FIND)) {
50573+ err = -ENOENT;
50574+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50575+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50576+ path, (mode & GR_READ) ? " reading" : "",
50577+ (mode & GR_WRITE) ? " writing" : "");
50578+ err = -EACCES;
50579+ } else if ((err & mode) != mode) {
50580+ err = -EACCES;
50581+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50582+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50583+ path, (mode & GR_READ) ? " reading" : "",
50584+ (mode & GR_WRITE) ? " writing" : "");
50585+ err = 0;
50586+ } else
50587+ err = 0;
50588+
50589+ out:
50590+ preempt_enable();
50591+
50592+ return err;
50593+}
50594+#endif
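
Editorial note: gr_handle_sysctl() has to translate a ctl_table entry back into the /proc/sys pathname that the loaded policy was written against — it walks the parent chain once to measure the path, then walks it again per depth level to emit components in root-to-leaf order. A userspace model of that reconstruction, assuming a toy table type with only procname and parent:

    #include <stdio.h>
    #include <string.h>

    struct toy_table {
        const char *procname;
        struct toy_table *parent;
    };

    /* rebuild "/proc/sys/<...>" from a leaf entry, root components first */
    static void build_path(const struct toy_table *leaf, char *buf, size_t len)
    {
        const struct toy_table *tmp;
        unsigned int depth = 0, i;

        for (tmp = leaf; tmp; tmp = tmp->parent)
            depth++;

        snprintf(buf, len, "/proc/sys");
        for (; depth > 0; depth--) {
            strncat(buf, "/", len - strlen(buf) - 1);
            /* walking from the leaf, the component counted as "depth" from
               the root is reached on the matching pass */
            for (i = 1, tmp = leaf; tmp; tmp = tmp->parent, i++)
                if (i == depth)
                    strncat(buf, tmp->procname, len - strlen(buf) - 1);
        }
    }

    int main(void)
    {
        struct toy_table kernel = { "kernel", NULL };
        struct toy_table modprobe = { "modprobe", &kernel };
        char path[256];

        build_path(&modprobe, path, sizeof(path));
        printf("%s\n", path);  /* /proc/sys/kernel/modprobe */
        return 0;
    }
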
50595+
50596+int
50597+gr_handle_proc_ptrace(struct task_struct *task)
50598+{
50599+ struct file *filp;
50600+ struct task_struct *tmp = task;
50601+ struct task_struct *curtemp = current;
50602+ __u32 retmode;
50603+
50604+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50605+ if (unlikely(!(gr_status & GR_READY)))
50606+ return 0;
50607+#endif
50608+
50609+ read_lock(&tasklist_lock);
50610+ read_lock(&grsec_exec_file_lock);
50611+ filp = task->exec_file;
50612+
50613+ while (tmp->pid > 0) {
50614+ if (tmp == curtemp)
50615+ break;
50616+ tmp = tmp->real_parent;
50617+ }
50618+
50619+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50620+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50621+ read_unlock(&grsec_exec_file_lock);
50622+ read_unlock(&tasklist_lock);
50623+ return 1;
50624+ }
50625+
50626+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50627+ if (!(gr_status & GR_READY)) {
50628+ read_unlock(&grsec_exec_file_lock);
50629+ read_unlock(&tasklist_lock);
50630+ return 0;
50631+ }
50632+#endif
50633+
50634+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50635+ read_unlock(&grsec_exec_file_lock);
50636+ read_unlock(&tasklist_lock);
50637+
50638+ if (retmode & GR_NOPTRACE)
50639+ return 1;
50640+
50641+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
50642+ && (current->acl != task->acl || (current->acl != current->role->root_label
50643+ && current->pid != task->pid)))
50644+ return 1;
50645+
50646+ return 0;
50647+}
50648+
50649+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
50650+{
50651+ if (unlikely(!(gr_status & GR_READY)))
50652+ return;
50653+
50654+ if (!(current->role->roletype & GR_ROLE_GOD))
50655+ return;
50656+
50657+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
50658+ p->role->rolename, gr_task_roletype_to_char(p),
50659+ p->acl->filename);
50660+}
50661+
50662+int
50663+gr_handle_ptrace(struct task_struct *task, const long request)
50664+{
50665+ struct task_struct *tmp = task;
50666+ struct task_struct *curtemp = current;
50667+ __u32 retmode;
50668+
50669+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50670+ if (unlikely(!(gr_status & GR_READY)))
50671+ return 0;
50672+#endif
50673+
50674+ read_lock(&tasklist_lock);
50675+ while (tmp->pid > 0) {
50676+ if (tmp == curtemp)
50677+ break;
50678+ tmp = tmp->real_parent;
50679+ }
50680+
50681+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50682+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
50683+ read_unlock(&tasklist_lock);
50684+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50685+ return 1;
50686+ }
50687+ read_unlock(&tasklist_lock);
50688+
50689+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50690+ if (!(gr_status & GR_READY))
50691+ return 0;
50692+#endif
50693+
50694+ read_lock(&grsec_exec_file_lock);
50695+ if (unlikely(!task->exec_file)) {
50696+ read_unlock(&grsec_exec_file_lock);
50697+ return 0;
50698+ }
50699+
50700+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
50701+ read_unlock(&grsec_exec_file_lock);
50702+
50703+ if (retmode & GR_NOPTRACE) {
50704+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50705+ return 1;
50706+ }
50707+
50708+ if (retmode & GR_PTRACERD) {
50709+ switch (request) {
50710+ case PTRACE_POKETEXT:
50711+ case PTRACE_POKEDATA:
50712+ case PTRACE_POKEUSR:
50713+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
50714+ case PTRACE_SETREGS:
50715+ case PTRACE_SETFPREGS:
50716+#endif
50717+#ifdef CONFIG_X86
50718+ case PTRACE_SETFPXREGS:
50719+#endif
50720+#ifdef CONFIG_ALTIVEC
50721+ case PTRACE_SETVRREGS:
50722+#endif
50723+ return 1;
50724+ default:
50725+ return 0;
50726+ }
50727+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
50728+ !(current->role->roletype & GR_ROLE_GOD) &&
50729+ (current->acl != task->acl)) {
50730+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50731+ return 1;
50732+ }
50733+
50734+ return 0;
50735+}
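
Editorial note: both ptrace hooks above use the same ancestry walk — follow real_parent from the target until either the caller or the idle task (pid 0) is reached; only when the caller turns out to be an ancestor does the relaxed path apply. A userspace sketch of that walk over a toy process tree (types and names are illustrative):

    #include <stdio.h>

    struct toy_task {
        int pid;
        struct toy_task *real_parent;
    };

    /* 1 if "tracer" appears on the ancestor chain of "target" */
    static int is_ancestor(const struct toy_task *tracer, const struct toy_task *target)
    {
        const struct toy_task *tmp = target;

        while (tmp->pid > 0) {
            if (tmp == tracer)
                break;
            tmp = tmp->real_parent;
        }
        return tmp == tracer;
    }

    int main(void)
    {
        struct toy_task idle  = { 0, &idle };
        struct toy_task init_ = { 1, &idle };
        struct toy_task shell = { 100, &init_ };
        struct toy_task child = { 200, &shell };

        printf("%d %d\n", is_ancestor(&shell, &child), is_ancestor(&child, &shell));
        return 0;
    }
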
50736+
50737+static int is_writable_mmap(const struct file *filp)
50738+{
50739+ struct task_struct *task = current;
50740+ struct acl_object_label *obj, *obj2;
50741+
50742+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
50743+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
50744+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50745+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
50746+ task->role->root_label);
50747+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
50748+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
50749+ return 1;
50750+ }
50751+ }
50752+ return 0;
50753+}
50754+
50755+int
50756+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
50757+{
50758+ __u32 mode;
50759+
50760+ if (unlikely(!file || !(prot & PROT_EXEC)))
50761+ return 1;
50762+
50763+ if (is_writable_mmap(file))
50764+ return 0;
50765+
50766+ mode =
50767+ gr_search_file(file->f_path.dentry,
50768+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50769+ file->f_path.mnt);
50770+
50771+ if (!gr_tpe_allow(file))
50772+ return 0;
50773+
50774+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50775+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50776+ return 0;
50777+ } else if (unlikely(!(mode & GR_EXEC))) {
50778+ return 0;
50779+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50780+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50781+ return 1;
50782+ }
50783+
50784+ return 1;
50785+}
50786+
50787+int
50788+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50789+{
50790+ __u32 mode;
50791+
50792+ if (unlikely(!file || !(prot & PROT_EXEC)))
50793+ return 1;
50794+
50795+ if (is_writable_mmap(file))
50796+ return 0;
50797+
50798+ mode =
50799+ gr_search_file(file->f_path.dentry,
50800+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50801+ file->f_path.mnt);
50802+
50803+ if (!gr_tpe_allow(file))
50804+ return 0;
50805+
50806+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50807+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50808+ return 0;
50809+ } else if (unlikely(!(mode & GR_EXEC))) {
50810+ return 0;
50811+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50812+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50813+ return 1;
50814+ }
50815+
50816+ return 1;
50817+}
50818+
50819+void
50820+gr_acl_handle_psacct(struct task_struct *task, const long code)
50821+{
50822+ unsigned long runtime;
50823+ unsigned long cputime;
50824+ unsigned int wday, cday;
50825+ __u8 whr, chr;
50826+ __u8 wmin, cmin;
50827+ __u8 wsec, csec;
50828+ struct timespec timeval;
50829+
50830+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
50831+ !(task->acl->mode & GR_PROCACCT)))
50832+ return;
50833+
50834+ do_posix_clock_monotonic_gettime(&timeval);
50835+ runtime = timeval.tv_sec - task->start_time.tv_sec;
50836+ wday = runtime / (3600 * 24);
50837+ runtime -= wday * (3600 * 24);
50838+ whr = runtime / 3600;
50839+ runtime -= whr * 3600;
50840+ wmin = runtime / 60;
50841+ runtime -= wmin * 60;
50842+ wsec = runtime;
50843+
50844+ cputime = (task->utime + task->stime) / HZ;
50845+ cday = cputime / (3600 * 24);
50846+ cputime -= cday * (3600 * 24);
50847+ chr = cputime / 3600;
50848+ cputime -= chr * 3600;
50849+ cmin = cputime / 60;
50850+ cputime -= cmin * 60;
50851+ csec = cputime;
50852+
50853+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
50854+
50855+ return;
50856+}
50857+
50858+void gr_set_kernel_label(struct task_struct *task)
50859+{
50860+ if (gr_status & GR_READY) {
50861+ task->role = kernel_role;
50862+ task->acl = kernel_role->root_label;
50863+ }
50864+ return;
50865+}
50866+
50867+#ifdef CONFIG_TASKSTATS
50868+int gr_is_taskstats_denied(int pid)
50869+{
50870+ struct task_struct *task;
50871+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50872+ const struct cred *cred;
50873+#endif
50874+ int ret = 0;
50875+
50876+ /* restrict taskstats viewing to un-chrooted root users
50877+ who have the 'view' subject flag if the RBAC system is enabled
50878+ */
50879+
50880+ rcu_read_lock();
50881+ read_lock(&tasklist_lock);
50882+ task = find_task_by_vpid(pid);
50883+ if (task) {
50884+#ifdef CONFIG_GRKERNSEC_CHROOT
50885+ if (proc_is_chrooted(task))
50886+ ret = -EACCES;
50887+#endif
50888+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50889+ cred = __task_cred(task);
50890+#ifdef CONFIG_GRKERNSEC_PROC_USER
50891+ if (cred->uid != 0)
50892+ ret = -EACCES;
50893+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50894+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
50895+ ret = -EACCES;
50896+#endif
50897+#endif
50898+ if (gr_status & GR_READY) {
50899+ if (!(task->acl->mode & GR_VIEW))
50900+ ret = -EACCES;
50901+ }
50902+ } else
50903+ ret = -ENOENT;
50904+
50905+ read_unlock(&tasklist_lock);
50906+ rcu_read_unlock();
50907+
50908+ return ret;
50909+}
50910+#endif
50911+
50912+/* AUXV entries are filled via a descendant of search_binary_handler
50913+ after we've already applied the subject for the target
50914+*/
50915+int gr_acl_enable_at_secure(void)
50916+{
50917+ if (unlikely(!(gr_status & GR_READY)))
50918+ return 0;
50919+
50920+ if (current->acl->mode & GR_ATSECURE)
50921+ return 1;
50922+
50923+ return 0;
50924+}
50925+
50926+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
50927+{
50928+ struct task_struct *task = current;
50929+ struct dentry *dentry = file->f_path.dentry;
50930+ struct vfsmount *mnt = file->f_path.mnt;
50931+ struct acl_object_label *obj, *tmp;
50932+ struct acl_subject_label *subj;
50933+ unsigned int bufsize;
50934+ int is_not_root;
50935+ char *path;
50936+ dev_t dev = __get_dev(dentry);
50937+
50938+ if (unlikely(!(gr_status & GR_READY)))
50939+ return 1;
50940+
50941+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50942+ return 1;
50943+
50944+ /* ignore Eric Biederman */
50945+ if (IS_PRIVATE(dentry->d_inode))
50946+ return 1;
50947+
50948+ subj = task->acl;
50949+ do {
50950+ obj = lookup_acl_obj_label(ino, dev, subj);
50951+ if (obj != NULL)
50952+ return (obj->mode & GR_FIND) ? 1 : 0;
50953+ } while ((subj = subj->parent_subject));
50954+
50955+	/* this is purely an optimization, since we're looking for an object
50956+	   for the directory we're doing a readdir on.
50957+	   if it's possible for any globbed object to match the entry we're
50958+	   filling into the directory, then the object we find here will be
50959+	   an anchor point with attached globbed objects
50960+	*/
50961+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
50962+ if (obj->globbed == NULL)
50963+ return (obj->mode & GR_FIND) ? 1 : 0;
50964+
50965+ is_not_root = ((obj->filename[0] == '/') &&
50966+ (obj->filename[1] == '\0')) ? 0 : 1;
50967+ bufsize = PAGE_SIZE - namelen - is_not_root;
50968+
50969+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
50970+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
50971+ return 1;
50972+
50973+ preempt_disable();
50974+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50975+ bufsize);
50976+
50977+ bufsize = strlen(path);
50978+
50979+ /* if base is "/", don't append an additional slash */
50980+ if (is_not_root)
50981+ *(path + bufsize) = '/';
50982+ memcpy(path + bufsize + is_not_root, name, namelen);
50983+ *(path + bufsize + namelen + is_not_root) = '\0';
50984+
50985+ tmp = obj->globbed;
50986+ while (tmp) {
50987+ if (!glob_match(tmp->filename, path)) {
50988+ preempt_enable();
50989+ return (tmp->mode & GR_FIND) ? 1 : 0;
50990+ }
50991+ tmp = tmp->next;
50992+ }
50993+ preempt_enable();
50994+ return (obj->mode & GR_FIND) ? 1 : 0;
50995+}
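
Editorial note: for readdir, the hook above only falls back to building a full pathname when the directory's object has globbed children attached — it appends the candidate entry name to the directory's path and runs the result through the glob matcher. A userspace approximation using fnmatch(3) in place of the patch's internal glob_match():

    #include <stdio.h>
    #include <string.h>
    #include <fnmatch.h>

    /* decide whether a directory entry would be visible, given one glob rule */
    static int entry_visible(const char *dirpath, const char *name,
                             const char *glob, int glob_allows_find)
    {
        char path[4096];
        int is_not_root = strcmp(dirpath, "/") != 0;

        /* if the base is "/", don't append an additional slash */
        snprintf(path, sizeof(path), "%s%s%s",
                 dirpath, is_not_root ? "/" : "", name);

        if (fnmatch(glob, path, 0) == 0)
            return glob_allows_find;
        /* no glob matched: the patch falls back to the anchor object's own
           FIND bit; assume it allows the entry here */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", entry_visible("/home/user", ".ssh", "/home/user/.*", 0));
        printf("%d\n", entry_visible("/home/user", "notes.txt", "/home/user/.*", 0));
        return 0;
    }
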
50996+
50997+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
50998+EXPORT_SYMBOL(gr_acl_is_enabled);
50999+#endif
51000+EXPORT_SYMBOL(gr_learn_resource);
51001+EXPORT_SYMBOL(gr_set_kernel_label);
51002+#ifdef CONFIG_SECURITY
51003+EXPORT_SYMBOL(gr_check_user_change);
51004+EXPORT_SYMBOL(gr_check_group_change);
51005+#endif
51006+
51007diff -urNp linux-2.6.32.45/grsecurity/gracl_cap.c linux-2.6.32.45/grsecurity/gracl_cap.c
51008--- linux-2.6.32.45/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
51009+++ linux-2.6.32.45/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
51010@@ -0,0 +1,138 @@
51011+#include <linux/kernel.h>
51012+#include <linux/module.h>
51013+#include <linux/sched.h>
51014+#include <linux/gracl.h>
51015+#include <linux/grsecurity.h>
51016+#include <linux/grinternal.h>
51017+
51018+static const char *captab_log[] = {
51019+ "CAP_CHOWN",
51020+ "CAP_DAC_OVERRIDE",
51021+ "CAP_DAC_READ_SEARCH",
51022+ "CAP_FOWNER",
51023+ "CAP_FSETID",
51024+ "CAP_KILL",
51025+ "CAP_SETGID",
51026+ "CAP_SETUID",
51027+ "CAP_SETPCAP",
51028+ "CAP_LINUX_IMMUTABLE",
51029+ "CAP_NET_BIND_SERVICE",
51030+ "CAP_NET_BROADCAST",
51031+ "CAP_NET_ADMIN",
51032+ "CAP_NET_RAW",
51033+ "CAP_IPC_LOCK",
51034+ "CAP_IPC_OWNER",
51035+ "CAP_SYS_MODULE",
51036+ "CAP_SYS_RAWIO",
51037+ "CAP_SYS_CHROOT",
51038+ "CAP_SYS_PTRACE",
51039+ "CAP_SYS_PACCT",
51040+ "CAP_SYS_ADMIN",
51041+ "CAP_SYS_BOOT",
51042+ "CAP_SYS_NICE",
51043+ "CAP_SYS_RESOURCE",
51044+ "CAP_SYS_TIME",
51045+ "CAP_SYS_TTY_CONFIG",
51046+ "CAP_MKNOD",
51047+ "CAP_LEASE",
51048+ "CAP_AUDIT_WRITE",
51049+ "CAP_AUDIT_CONTROL",
51050+ "CAP_SETFCAP",
51051+ "CAP_MAC_OVERRIDE",
51052+ "CAP_MAC_ADMIN"
51053+};
51054+
51055+EXPORT_SYMBOL(gr_is_capable);
51056+EXPORT_SYMBOL(gr_is_capable_nolog);
51057+
51058+int
51059+gr_is_capable(const int cap)
51060+{
51061+ struct task_struct *task = current;
51062+ const struct cred *cred = current_cred();
51063+ struct acl_subject_label *curracl;
51064+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51065+ kernel_cap_t cap_audit = __cap_empty_set;
51066+
51067+ if (!gr_acl_is_enabled())
51068+ return 1;
51069+
51070+ curracl = task->acl;
51071+
51072+ cap_drop = curracl->cap_lower;
51073+ cap_mask = curracl->cap_mask;
51074+ cap_audit = curracl->cap_invert_audit;
51075+
51076+ while ((curracl = curracl->parent_subject)) {
51077+		/* if the cap isn't specified in the current computed mask but is specified in the
51078+		   current level subject, and is lowered in the current level subject, then add
51079+		   it to the set of dropped capabilities;
51080+		   otherwise, add the current level subject's mask to the current computed mask
51081+		   */
51082+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51083+ cap_raise(cap_mask, cap);
51084+ if (cap_raised(curracl->cap_lower, cap))
51085+ cap_raise(cap_drop, cap);
51086+ if (cap_raised(curracl->cap_invert_audit, cap))
51087+ cap_raise(cap_audit, cap);
51088+ }
51089+ }
51090+
51091+ if (!cap_raised(cap_drop, cap)) {
51092+ if (cap_raised(cap_audit, cap))
51093+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51094+ return 1;
51095+ }
51096+
51097+ curracl = task->acl;
51098+
51099+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51100+ && cap_raised(cred->cap_effective, cap)) {
51101+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51102+ task->role->roletype, cred->uid,
51103+ cred->gid, task->exec_file ?
51104+ gr_to_filename(task->exec_file->f_path.dentry,
51105+ task->exec_file->f_path.mnt) : curracl->filename,
51106+ curracl->filename, 0UL,
51107+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51108+ return 1;
51109+ }
51110+
51111+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51112+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51113+ return 0;
51114+}
51115+
51116+int
51117+gr_is_capable_nolog(const int cap)
51118+{
51119+ struct acl_subject_label *curracl;
51120+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51121+
51122+ if (!gr_acl_is_enabled())
51123+ return 1;
51124+
51125+ curracl = current->acl;
51126+
51127+ cap_drop = curracl->cap_lower;
51128+ cap_mask = curracl->cap_mask;
51129+
51130+ while ((curracl = curracl->parent_subject)) {
51131+		/* if the cap isn't specified in the current computed mask but is specified in the
51132+		   current level subject, and is lowered in the current level subject, then add
51133+		   it to the set of dropped capabilities;
51134+		   otherwise, add the current level subject's mask to the current computed mask
51135+		   */
51136+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51137+ cap_raise(cap_mask, cap);
51138+ if (cap_raised(curracl->cap_lower, cap))
51139+ cap_raise(cap_drop, cap);
51140+ }
51141+ }
51142+
51143+ if (!cap_raised(cap_drop, cap))
51144+ return 1;
51145+
51146+ return 0;
51147+}
51148+
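
Editorial note: gr_is_capable() and gr_is_capable_nolog() compute the effective capability policy by walking up the parent-subject chain — a parent's verdict on a capability only counts if no nearer subject already mentioned it in its mask. A bitmask model of that merge, assuming three illustrative capabilities:

    #include <stdio.h>

    struct subj {
        unsigned int mask;   /* caps this subject says anything about */
        unsigned int lower;  /* caps this subject drops */
        struct subj *parent;
    };

    /* 1 if cap survives the chain walk, 0 if some governing subject drops it */
    static int capable(const struct subj *s, int cap)
    {
        unsigned int bit = 1u << cap;
        unsigned int mask = s->mask, drop = s->lower;

        for (s = s->parent; s; s = s->parent) {
            /* a parent only governs caps the nearer subjects left unspecified */
            if (!(mask & bit) && (s->mask & bit)) {
                mask |= bit;
                if (s->lower & bit)
                    drop |= bit;
            }
        }
        return !(drop & bit);
    }

    int main(void)
    {
        /* bit 0 = CHOWN, bit 1 = NET_ADMIN, bit 2 = SYS_ADMIN (illustrative) */
        struct subj root  = { 0x7, 0x4, NULL };   /* governs all, drops SYS_ADMIN */
        struct subj child = { 0x1, 0x0, &root };  /* governs CHOWN only, keeps it */

        printf("CHOWN:%d NET_ADMIN:%d SYS_ADMIN:%d\n",
               capable(&child, 0), capable(&child, 1), capable(&child, 2));
        return 0;
    }
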
51149diff -urNp linux-2.6.32.45/grsecurity/gracl_fs.c linux-2.6.32.45/grsecurity/gracl_fs.c
51150--- linux-2.6.32.45/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51151+++ linux-2.6.32.45/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
51152@@ -0,0 +1,431 @@
51153+#include <linux/kernel.h>
51154+#include <linux/sched.h>
51155+#include <linux/types.h>
51156+#include <linux/fs.h>
51157+#include <linux/file.h>
51158+#include <linux/stat.h>
51159+#include <linux/grsecurity.h>
51160+#include <linux/grinternal.h>
51161+#include <linux/gracl.h>
51162+
51163+__u32
51164+gr_acl_handle_hidden_file(const struct dentry * dentry,
51165+ const struct vfsmount * mnt)
51166+{
51167+ __u32 mode;
51168+
51169+ if (unlikely(!dentry->d_inode))
51170+ return GR_FIND;
51171+
51172+ mode =
51173+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51174+
51175+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51176+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51177+ return mode;
51178+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51179+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51180+ return 0;
51181+ } else if (unlikely(!(mode & GR_FIND)))
51182+ return 0;
51183+
51184+ return GR_FIND;
51185+}
51186+
51187+__u32
51188+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51189+ const int fmode)
51190+{
51191+ __u32 reqmode = GR_FIND;
51192+ __u32 mode;
51193+
51194+ if (unlikely(!dentry->d_inode))
51195+ return reqmode;
51196+
51197+ if (unlikely(fmode & O_APPEND))
51198+ reqmode |= GR_APPEND;
51199+ else if (unlikely(fmode & FMODE_WRITE))
51200+ reqmode |= GR_WRITE;
51201+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51202+ reqmode |= GR_READ;
51203+ if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
51204+ reqmode &= ~GR_READ;
51205+ mode =
51206+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51207+ mnt);
51208+
51209+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51210+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51211+ reqmode & GR_READ ? " reading" : "",
51212+ reqmode & GR_WRITE ? " writing" : reqmode &
51213+ GR_APPEND ? " appending" : "");
51214+ return reqmode;
51215+ } else
51216+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51217+ {
51218+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51219+ reqmode & GR_READ ? " reading" : "",
51220+ reqmode & GR_WRITE ? " writing" : reqmode &
51221+ GR_APPEND ? " appending" : "");
51222+ return 0;
51223+ } else if (unlikely((mode & reqmode) != reqmode))
51224+ return 0;
51225+
51226+ return reqmode;
51227+}
51228+
51229+__u32
51230+gr_acl_handle_creat(const struct dentry * dentry,
51231+ const struct dentry * p_dentry,
51232+ const struct vfsmount * p_mnt, const int fmode,
51233+ const int imode)
51234+{
51235+ __u32 reqmode = GR_WRITE | GR_CREATE;
51236+ __u32 mode;
51237+
51238+ if (unlikely(fmode & O_APPEND))
51239+ reqmode |= GR_APPEND;
51240+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51241+ reqmode |= GR_READ;
51242+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
51243+ reqmode |= GR_SETID;
51244+
51245+ mode =
51246+ gr_check_create(dentry, p_dentry, p_mnt,
51247+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51248+
51249+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51250+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51251+ reqmode & GR_READ ? " reading" : "",
51252+ reqmode & GR_WRITE ? " writing" : reqmode &
51253+ GR_APPEND ? " appending" : "");
51254+ return reqmode;
51255+ } else
51256+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51257+ {
51258+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51259+ reqmode & GR_READ ? " reading" : "",
51260+ reqmode & GR_WRITE ? " writing" : reqmode &
51261+ GR_APPEND ? " appending" : "");
51262+ return 0;
51263+ } else if (unlikely((mode & reqmode) != reqmode))
51264+ return 0;
51265+
51266+ return reqmode;
51267+}
51268+
51269+__u32
51270+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51271+ const int fmode)
51272+{
51273+ __u32 mode, reqmode = GR_FIND;
51274+
51275+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51276+ reqmode |= GR_EXEC;
51277+ if (fmode & S_IWOTH)
51278+ reqmode |= GR_WRITE;
51279+ if (fmode & S_IROTH)
51280+ reqmode |= GR_READ;
51281+
51282+ mode =
51283+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51284+ mnt);
51285+
51286+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51287+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51288+ reqmode & GR_READ ? " reading" : "",
51289+ reqmode & GR_WRITE ? " writing" : "",
51290+ reqmode & GR_EXEC ? " executing" : "");
51291+ return reqmode;
51292+ } else
51293+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51294+ {
51295+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51296+ reqmode & GR_READ ? " reading" : "",
51297+ reqmode & GR_WRITE ? " writing" : "",
51298+ reqmode & GR_EXEC ? " executing" : "");
51299+ return 0;
51300+ } else if (unlikely((mode & reqmode) != reqmode))
51301+ return 0;
51302+
51303+ return reqmode;
51304+}
51305+
51306+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51307+{
51308+ __u32 mode;
51309+
51310+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51311+
51312+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51313+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51314+ return mode;
51315+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51316+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51317+ return 0;
51318+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51319+ return 0;
51320+
51321+ return (reqmode);
51322+}
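
Editorial note: generic_fs_handler() reduces the simpler filesystem hooks that follow to one decision over two bitmasks — if every requested bit is granted and an audit bit is set, log success; if some requested bit is missing and suppression is off, log the denial; otherwise allow or deny silently. A sketch of that three-way outcome, with illustrative bit values:

    #include <stdio.h>

    #define B_READ     0x01
    #define B_WRITE    0x02
    #define B_AUDIT    0x10  /* "log this even on success" */
    #define B_SUPPRESS 0x20  /* "do not log denials" */

    /* returns the granted reqmode bits on success, 0 on denial */
    static unsigned int decide(unsigned int granted, unsigned int reqmode)
    {
        if ((granted & reqmode) == reqmode && (granted & B_AUDIT)) {
            printf("allowed (audited)\n");
            return reqmode;
        }
        if ((granted & reqmode) != reqmode && !(granted & B_SUPPRESS)) {
            printf("denied (logged)\n");
            return 0;
        }
        if ((granted & reqmode) != reqmode)
            return 0;         /* denied silently */
        return reqmode;       /* allowed silently */
    }

    int main(void)
    {
        decide(B_READ | B_WRITE | B_AUDIT, B_WRITE);            /* allowed, audited */
        decide(B_READ, B_WRITE);                                /* denied, logged   */
        printf("%u\n", decide(B_READ | B_SUPPRESS, B_WRITE));   /* silent denial: 0 */
        return 0;
    }
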
51323+
51324+__u32
51325+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51326+{
51327+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51328+}
51329+
51330+__u32
51331+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51332+{
51333+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51334+}
51335+
51336+__u32
51337+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51338+{
51339+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51340+}
51341+
51342+__u32
51343+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51344+{
51345+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51346+}
51347+
51348+__u32
51349+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51350+ mode_t mode)
51351+{
51352+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51353+ return 1;
51354+
51355+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51356+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51357+ GR_FCHMOD_ACL_MSG);
51358+ } else {
51359+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51360+ }
51361+}
51362+
51363+__u32
51364+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51365+ mode_t mode)
51366+{
51367+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51368+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51369+ GR_CHMOD_ACL_MSG);
51370+ } else {
51371+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51372+ }
51373+}
51374+
51375+__u32
51376+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51377+{
51378+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51379+}
51380+
51381+__u32
51382+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51383+{
51384+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51385+}
51386+
51387+__u32
51388+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51389+{
51390+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51391+}
51392+
51393+__u32
51394+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51395+{
51396+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51397+ GR_UNIXCONNECT_ACL_MSG);
51398+}
51399+
51400+/* hardlinks require at minimum create permission;
51401+   any additional privilege required is based on the
51402+   privilege of the file being linked to
51403+*/
51404+__u32
51405+gr_acl_handle_link(const struct dentry * new_dentry,
51406+ const struct dentry * parent_dentry,
51407+ const struct vfsmount * parent_mnt,
51408+ const struct dentry * old_dentry,
51409+ const struct vfsmount * old_mnt, const char *to)
51410+{
51411+ __u32 mode;
51412+ __u32 needmode = GR_CREATE | GR_LINK;
51413+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51414+
51415+ mode =
51416+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51417+ old_mnt);
51418+
51419+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51420+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51421+ return mode;
51422+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51423+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51424+ return 0;
51425+ } else if (unlikely((mode & needmode) != needmode))
51426+ return 0;
51427+
51428+ return 1;
51429+}
51430+
51431+__u32
51432+gr_acl_handle_symlink(const struct dentry * new_dentry,
51433+ const struct dentry * parent_dentry,
51434+ const struct vfsmount * parent_mnt, const char *from)
51435+{
51436+ __u32 needmode = GR_WRITE | GR_CREATE;
51437+ __u32 mode;
51438+
51439+ mode =
51440+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
51441+ GR_CREATE | GR_AUDIT_CREATE |
51442+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51443+
51444+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51445+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51446+ return mode;
51447+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51448+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51449+ return 0;
51450+ } else if (unlikely((mode & needmode) != needmode))
51451+ return 0;
51452+
51453+ return (GR_WRITE | GR_CREATE);
51454+}
51455+
51456+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51457+{
51458+ __u32 mode;
51459+
51460+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51461+
51462+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51463+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51464+ return mode;
51465+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51466+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51467+ return 0;
51468+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51469+ return 0;
51470+
51471+ return (reqmode);
51472+}
51473+
51474+__u32
51475+gr_acl_handle_mknod(const struct dentry * new_dentry,
51476+ const struct dentry * parent_dentry,
51477+ const struct vfsmount * parent_mnt,
51478+ const int mode)
51479+{
51480+ __u32 reqmode = GR_WRITE | GR_CREATE;
51481+ if (unlikely(mode & (S_ISUID | S_ISGID)))
51482+ reqmode |= GR_SETID;
51483+
51484+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51485+ reqmode, GR_MKNOD_ACL_MSG);
51486+}
51487+
51488+__u32
51489+gr_acl_handle_mkdir(const struct dentry *new_dentry,
51490+ const struct dentry *parent_dentry,
51491+ const struct vfsmount *parent_mnt)
51492+{
51493+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51494+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51495+}
51496+
51497+#define RENAME_CHECK_SUCCESS(old, new) \
51498+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51499+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51500+
51501+int
51502+gr_acl_handle_rename(struct dentry *new_dentry,
51503+ struct dentry *parent_dentry,
51504+ const struct vfsmount *parent_mnt,
51505+ struct dentry *old_dentry,
51506+ struct inode *old_parent_inode,
51507+ struct vfsmount *old_mnt, const char *newname)
51508+{
51509+ __u32 comp1, comp2;
51510+ int error = 0;
51511+
51512+ if (unlikely(!gr_acl_is_enabled()))
51513+ return 0;
51514+
51515+ if (!new_dentry->d_inode) {
51516+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51517+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51518+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51519+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51520+ GR_DELETE | GR_AUDIT_DELETE |
51521+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51522+ GR_SUPPRESS, old_mnt);
51523+ } else {
51524+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51525+ GR_CREATE | GR_DELETE |
51526+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51527+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51528+ GR_SUPPRESS, parent_mnt);
51529+ comp2 =
51530+ gr_search_file(old_dentry,
51531+ GR_READ | GR_WRITE | GR_AUDIT_READ |
51532+ GR_DELETE | GR_AUDIT_DELETE |
51533+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51534+ }
51535+
51536+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51537+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51538+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51539+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51540+ && !(comp2 & GR_SUPPRESS)) {
51541+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51542+ error = -EACCES;
51543+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51544+ error = -EACCES;
51545+
51546+ return error;
51547+}
51548+
51549+void
51550+gr_acl_handle_exit(void)
51551+{
51552+ u16 id;
51553+ char *rolename;
51554+ struct file *exec_file;
51555+
51556+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51557+ !(current->role->roletype & GR_ROLE_PERSIST))) {
51558+ id = current->acl_role_id;
51559+ rolename = current->role->rolename;
51560+ gr_set_acls(1);
51561+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51562+ }
51563+
51564+ write_lock(&grsec_exec_file_lock);
51565+ exec_file = current->exec_file;
51566+ current->exec_file = NULL;
51567+ write_unlock(&grsec_exec_file_lock);
51568+
51569+ if (exec_file)
51570+ fput(exec_file);
51571+}
51572+
51573+int
51574+gr_acl_handle_procpidmem(const struct task_struct *task)
51575+{
51576+ if (unlikely(!gr_acl_is_enabled()))
51577+ return 0;
51578+
51579+ if (task != current && task->acl->mode & GR_PROTPROCFD)
51580+ return -EACCES;
51581+
51582+ return 0;
51583+}
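
The create, mknod, mkdir, link and symlink handlers above all follow one decision pattern: ask the policy engine for the required mode bits together with their audit and suppress counterparts, then pick between "grant and audit", "deny and log", and "deny silently". A minimal userspace sketch of that three-way decision follows; the GR_* values here are stand-in constants for illustration, not the definitions used elsewhere in this patch.

#include <stdio.h>

/* stand-in bit values; the real GR_* flags live in the grsecurity headers */
#define GR_READ          0x01
#define GR_WRITE         0x02
#define GR_CREATE        0x04
#define GR_AUDIT_WRITE   0x20
#define GR_AUDIT_CREATE  0x40
#define GR_SUPPRESS      0x80
#define GR_AUDITS        (GR_AUDIT_WRITE | GR_AUDIT_CREATE)

/* mirrors the shape of generic_fs_create_handler(): 'mode' is what the
   policy granted, 'reqmode' is what the operation needs */
static unsigned int decide(unsigned int mode, unsigned int reqmode)
{
	if ((mode & reqmode) == reqmode && (mode & GR_AUDITS)) {
		puts("grant, audit log");	/* the GR_DO_AUDIT branch */
		return mode;
	}
	if ((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)) {
		puts("deny, denial log");	/* the GR_DONT_AUDIT branch */
		return 0;
	}
	if ((mode & reqmode) != reqmode) {
		puts("deny silently (suppressed)");
		return 0;
	}
	puts("grant silently");
	return reqmode;
}

int main(void)
{
	decide(GR_WRITE | GR_CREATE | GR_AUDIT_CREATE, GR_WRITE | GR_CREATE);
	decide(GR_WRITE | GR_SUPPRESS, GR_WRITE | GR_CREATE);
	decide(GR_WRITE, GR_WRITE | GR_CREATE);
	return 0;
}
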
51584diff -urNp linux-2.6.32.45/grsecurity/gracl_ip.c linux-2.6.32.45/grsecurity/gracl_ip.c
51585--- linux-2.6.32.45/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51586+++ linux-2.6.32.45/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
51587@@ -0,0 +1,382 @@
51588+#include <linux/kernel.h>
51589+#include <asm/uaccess.h>
51590+#include <asm/errno.h>
51591+#include <net/sock.h>
51592+#include <linux/file.h>
51593+#include <linux/fs.h>
51594+#include <linux/net.h>
51595+#include <linux/in.h>
51596+#include <linux/skbuff.h>
51597+#include <linux/ip.h>
51598+#include <linux/udp.h>
51599+#include <linux/smp_lock.h>
51600+#include <linux/types.h>
51601+#include <linux/sched.h>
51602+#include <linux/netdevice.h>
51603+#include <linux/inetdevice.h>
51604+#include <linux/gracl.h>
51605+#include <linux/grsecurity.h>
51606+#include <linux/grinternal.h>
51607+
51608+#define GR_BIND 0x01
51609+#define GR_CONNECT 0x02
51610+#define GR_INVERT 0x04
51611+#define GR_BINDOVERRIDE 0x08
51612+#define GR_CONNECTOVERRIDE 0x10
51613+#define GR_SOCK_FAMILY 0x20
51614+
51615+static const char * gr_protocols[IPPROTO_MAX] = {
51616+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51617+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51618+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51619+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51620+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51621+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51622+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51623+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51624+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51625+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51626+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51627+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51628+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51629+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51630+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51631+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51632+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
51633+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51634+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51635+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51636+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51637+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51638+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51639+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51640+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51641+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51642+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51643+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51644+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51645+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51646+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51647+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51648+ };
51649+
51650+static const char * gr_socktypes[SOCK_MAX] = {
51651+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51652+ "unknown:7", "unknown:8", "unknown:9", "packet"
51653+ };
51654+
51655+static const char * gr_sockfamilies[AF_MAX+1] = {
51656+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51657+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51658+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51659+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
51660+ };
51661+
51662+const char *
51663+gr_proto_to_name(unsigned char proto)
51664+{
51665+ return gr_protocols[proto];
51666+}
51667+
51668+const char *
51669+gr_socktype_to_name(unsigned char type)
51670+{
51671+ return gr_socktypes[type];
51672+}
51673+
51674+const char *
51675+gr_sockfamily_to_name(unsigned char family)
51676+{
51677+ return gr_sockfamilies[family];
51678+}
51679+
51680+int
51681+gr_search_socket(const int domain, const int type, const int protocol)
51682+{
51683+ struct acl_subject_label *curr;
51684+ const struct cred *cred = current_cred();
51685+
51686+ if (unlikely(!gr_acl_is_enabled()))
51687+ goto exit;
51688+
51689+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
51690+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
51691+ goto exit; // let the kernel handle it
51692+
51693+ curr = current->acl;
51694+
51695+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
51696+ /* the family is allowed, if this is PF_INET allow it only if
51697+ the extra sock type/protocol checks pass */
51698+ if (domain == PF_INET)
51699+ goto inet_check;
51700+ goto exit;
51701+ } else {
51702+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51703+ __u32 fakeip = 0;
51704+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51705+ current->role->roletype, cred->uid,
51706+ cred->gid, current->exec_file ?
51707+ gr_to_filename(current->exec_file->f_path.dentry,
51708+ current->exec_file->f_path.mnt) :
51709+ curr->filename, curr->filename,
51710+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
51711+ &current->signal->saved_ip);
51712+ goto exit;
51713+ }
51714+ goto exit_fail;
51715+ }
51716+
51717+inet_check:
51718+ /* the rest of this checking is for IPv4 only */
51719+ if (!curr->ips)
51720+ goto exit;
51721+
51722+ if ((curr->ip_type & (1 << type)) &&
51723+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
51724+ goto exit;
51725+
51726+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51727+ /* we don't place acls on raw sockets, and sometimes
51728+ dgram/ip sockets are opened for ioctl and not
51729+ bind/connect, so we'll fake a bind learn log */
51730+ if (type == SOCK_RAW || type == SOCK_PACKET) {
51731+ __u32 fakeip = 0;
51732+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51733+ current->role->roletype, cred->uid,
51734+ cred->gid, current->exec_file ?
51735+ gr_to_filename(current->exec_file->f_path.dentry,
51736+ current->exec_file->f_path.mnt) :
51737+ curr->filename, curr->filename,
51738+ &fakeip, 0, type,
51739+ protocol, GR_CONNECT, &current->signal->saved_ip);
51740+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
51741+ __u32 fakeip = 0;
51742+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51743+ current->role->roletype, cred->uid,
51744+ cred->gid, current->exec_file ?
51745+ gr_to_filename(current->exec_file->f_path.dentry,
51746+ current->exec_file->f_path.mnt) :
51747+ curr->filename, curr->filename,
51748+ &fakeip, 0, type,
51749+ protocol, GR_BIND, &current->signal->saved_ip);
51750+ }
51751+ /* we'll log when they use connect or bind */
51752+ goto exit;
51753+ }
51754+
51755+exit_fail:
51756+ if (domain == PF_INET)
51757+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
51758+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
51759+ else
51760+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
51761+ gr_socktype_to_name(type), protocol);
51762+
51763+ return 0;
51764+exit:
51765+ return 1;
51766+}
51767+
51768+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
51769+{
51770+ if ((ip->mode & mode) &&
51771+ (ip_port >= ip->low) &&
51772+ (ip_port <= ip->high) &&
51773+ ((ntohl(ip_addr) & our_netmask) ==
51774+ (ntohl(our_addr) & our_netmask))
51775+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
51776+ && (ip->type & (1 << type))) {
51777+ if (ip->mode & GR_INVERT)
51778+ return 2; // specifically denied
51779+ else
51780+ return 1; // allowed
51781+ }
51782+
51783+ return 0; // not specifically allowed, may continue parsing
51784+}
51785+
51786+static int
51787+gr_search_connectbind(const int full_mode, struct sock *sk,
51788+ struct sockaddr_in *addr, const int type)
51789+{
51790+ char iface[IFNAMSIZ] = {0};
51791+ struct acl_subject_label *curr;
51792+ struct acl_ip_label *ip;
51793+ struct inet_sock *isk;
51794+ struct net_device *dev;
51795+ struct in_device *idev;
51796+ unsigned long i;
51797+ int ret;
51798+ int mode = full_mode & (GR_BIND | GR_CONNECT);
51799+ __u32 ip_addr = 0;
51800+ __u32 our_addr;
51801+ __u32 our_netmask;
51802+ char *p;
51803+ __u16 ip_port = 0;
51804+ const struct cred *cred = current_cred();
51805+
51806+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
51807+ return 0;
51808+
51809+ curr = current->acl;
51810+ isk = inet_sk(sk);
51811+
51812+ /* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
51813+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
51814+ addr->sin_addr.s_addr = curr->inaddr_any_override;
51815+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
51816+ struct sockaddr_in saddr;
51817+ int err;
51818+
51819+ saddr.sin_family = AF_INET;
51820+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
51821+ saddr.sin_port = isk->sport;
51822+
51823+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51824+ if (err)
51825+ return err;
51826+
51827+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51828+ if (err)
51829+ return err;
51830+ }
51831+
51832+ if (!curr->ips)
51833+ return 0;
51834+
51835+ ip_addr = addr->sin_addr.s_addr;
51836+ ip_port = ntohs(addr->sin_port);
51837+
51838+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51839+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51840+ current->role->roletype, cred->uid,
51841+ cred->gid, current->exec_file ?
51842+ gr_to_filename(current->exec_file->f_path.dentry,
51843+ current->exec_file->f_path.mnt) :
51844+ curr->filename, curr->filename,
51845+ &ip_addr, ip_port, type,
51846+ sk->sk_protocol, mode, &current->signal->saved_ip);
51847+ return 0;
51848+ }
51849+
51850+ for (i = 0; i < curr->ip_num; i++) {
51851+ ip = *(curr->ips + i);
51852+ if (ip->iface != NULL) {
51853+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
51854+ p = strchr(iface, ':');
51855+ if (p != NULL)
51856+ *p = '\0';
51857+ dev = dev_get_by_name(sock_net(sk), iface);
51858+ if (dev == NULL)
51859+ continue;
51860+ idev = in_dev_get(dev);
51861+ if (idev == NULL) {
51862+ dev_put(dev);
51863+ continue;
51864+ }
51865+ rcu_read_lock();
51866+ for_ifa(idev) {
51867+ if (!strcmp(ip->iface, ifa->ifa_label)) {
51868+ our_addr = ifa->ifa_address;
51869+ our_netmask = 0xffffffff;
51870+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51871+ if (ret == 1) {
51872+ rcu_read_unlock();
51873+ in_dev_put(idev);
51874+ dev_put(dev);
51875+ return 0;
51876+ } else if (ret == 2) {
51877+ rcu_read_unlock();
51878+ in_dev_put(idev);
51879+ dev_put(dev);
51880+ goto denied;
51881+ }
51882+ }
51883+ } endfor_ifa(idev);
51884+ rcu_read_unlock();
51885+ in_dev_put(idev);
51886+ dev_put(dev);
51887+ } else {
51888+ our_addr = ip->addr;
51889+ our_netmask = ip->netmask;
51890+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51891+ if (ret == 1)
51892+ return 0;
51893+ else if (ret == 2)
51894+ goto denied;
51895+ }
51896+ }
51897+
51898+denied:
51899+ if (mode == GR_BIND)
51900+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51901+ else if (mode == GR_CONNECT)
51902+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51903+
51904+ return -EACCES;
51905+}
51906+
51907+int
51908+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
51909+{
51910+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
51911+}
51912+
51913+int
51914+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
51915+{
51916+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
51917+}
51918+
51919+int gr_search_listen(struct socket *sock)
51920+{
51921+ struct sock *sk = sock->sk;
51922+ struct sockaddr_in addr;
51923+
51924+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51925+ addr.sin_port = inet_sk(sk)->sport;
51926+
51927+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51928+}
51929+
51930+int gr_search_accept(struct socket *sock)
51931+{
51932+ struct sock *sk = sock->sk;
51933+ struct sockaddr_in addr;
51934+
51935+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51936+ addr.sin_port = inet_sk(sk)->sport;
51937+
51938+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51939+}
51940+
51941+int
51942+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
51943+{
51944+ if (addr)
51945+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
51946+ else {
51947+ struct sockaddr_in sin;
51948+ const struct inet_sock *inet = inet_sk(sk);
51949+
51950+ sin.sin_addr.s_addr = inet->daddr;
51951+ sin.sin_port = inet->dport;
51952+
51953+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51954+ }
51955+}
51956+
51957+int
51958+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
51959+{
51960+ struct sockaddr_in sin;
51961+
51962+ if (unlikely(skb->len < sizeof (struct udphdr)))
51963+ return 0; // skip this packet
51964+
51965+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
51966+ sin.sin_port = udp_hdr(skb)->source;
51967+
51968+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51969+}
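
gracl_ip.c above relies on two small mechanisms: per-subject bitmaps indexed as word[n / 32], bit (n % 32), for the allowed socket families and protocols, and check_ip_policy(), which matches a port range plus a netmask-masked address. A self-contained userspace sketch of both follows; the addresses, mask and port range are made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* same 32-bit-word bitmap test used for sock_families[] and ip_proto[] above */
static int bitmap_test(const uint32_t *map, unsigned int nr)
{
	return (map[nr / 32] >> (nr % 32)) & 1;
}

/* simplified analogue of check_ip_policy(): port range plus masked address
   match; 1/2/0 mirror its "allowed", "specifically denied", "no match" */
static int policy_match(uint32_t addr, uint16_t port,
			uint32_t pol_addr, uint32_t pol_mask,
			uint16_t low, uint16_t high, int invert)
{
	if (port >= low && port <= high &&
	    (ntohl(addr) & pol_mask) == (ntohl(pol_addr) & pol_mask))
		return invert ? 2 : 1;
	return 0;
}

int main(void)
{
	uint32_t protos[8] = { 0 };

	protos[6 / 32] |= 1u << (6 % 32);		/* allow IPPROTO_TCP */
	printf("tcp allowed: %d\n", bitmap_test(protos, 6));

	uint32_t addr = inet_addr("192.168.1.42");
	uint32_t pol  = inet_addr("192.168.1.0");
	printf("match: %d\n",
	       policy_match(addr, 8080, pol, 0xffffff00u, 1024, 65535, 0));
	return 0;
}
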
51970diff -urNp linux-2.6.32.45/grsecurity/gracl_learn.c linux-2.6.32.45/grsecurity/gracl_learn.c
51971--- linux-2.6.32.45/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
51972+++ linux-2.6.32.45/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
51973@@ -0,0 +1,208 @@
51974+#include <linux/kernel.h>
51975+#include <linux/mm.h>
51976+#include <linux/sched.h>
51977+#include <linux/poll.h>
51978+#include <linux/smp_lock.h>
51979+#include <linux/string.h>
51980+#include <linux/file.h>
51981+#include <linux/types.h>
51982+#include <linux/vmalloc.h>
51983+#include <linux/grinternal.h>
51984+
51985+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
51986+ size_t count, loff_t *ppos);
51987+extern int gr_acl_is_enabled(void);
51988+
51989+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
51990+static int gr_learn_attached;
51991+
51992+/* use a 512k buffer */
51993+#define LEARN_BUFFER_SIZE (512 * 1024)
51994+
51995+static DEFINE_SPINLOCK(gr_learn_lock);
51996+static DEFINE_MUTEX(gr_learn_user_mutex);
51997+
51998+/* we need to maintain two buffers, so that the kernel context of grlearn
51999+   uses a mutex around the userspace copying, and the other kernel contexts
52000+ use a spinlock when copying into the buffer, since they cannot sleep
52001+*/
52002+static char *learn_buffer;
52003+static char *learn_buffer_user;
52004+static int learn_buffer_len;
52005+static int learn_buffer_user_len;
52006+
52007+static ssize_t
52008+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
52009+{
52010+ DECLARE_WAITQUEUE(wait, current);
52011+ ssize_t retval = 0;
52012+
52013+ add_wait_queue(&learn_wait, &wait);
52014+ set_current_state(TASK_INTERRUPTIBLE);
52015+ do {
52016+ mutex_lock(&gr_learn_user_mutex);
52017+ spin_lock(&gr_learn_lock);
52018+ if (learn_buffer_len)
52019+ break;
52020+ spin_unlock(&gr_learn_lock);
52021+ mutex_unlock(&gr_learn_user_mutex);
52022+ if (file->f_flags & O_NONBLOCK) {
52023+ retval = -EAGAIN;
52024+ goto out;
52025+ }
52026+ if (signal_pending(current)) {
52027+ retval = -ERESTARTSYS;
52028+ goto out;
52029+ }
52030+
52031+ schedule();
52032+ } while (1);
52033+
52034+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
52035+ learn_buffer_user_len = learn_buffer_len;
52036+ retval = learn_buffer_len;
52037+ learn_buffer_len = 0;
52038+
52039+ spin_unlock(&gr_learn_lock);
52040+
52041+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
52042+ retval = -EFAULT;
52043+
52044+ mutex_unlock(&gr_learn_user_mutex);
52045+out:
52046+ set_current_state(TASK_RUNNING);
52047+ remove_wait_queue(&learn_wait, &wait);
52048+ return retval;
52049+}
52050+
52051+static unsigned int
52052+poll_learn(struct file * file, poll_table * wait)
52053+{
52054+ poll_wait(file, &learn_wait, wait);
52055+
52056+ if (learn_buffer_len)
52057+ return (POLLIN | POLLRDNORM);
52058+
52059+ return 0;
52060+}
52061+
52062+void
52063+gr_clear_learn_entries(void)
52064+{
52065+ char *tmp;
52066+
52067+ mutex_lock(&gr_learn_user_mutex);
52068+ spin_lock(&gr_learn_lock);
52069+ tmp = learn_buffer;
52070+ learn_buffer = NULL;
52071+ spin_unlock(&gr_learn_lock);
52072+ if (tmp)
52073+ vfree(tmp);
52074+ if (learn_buffer_user != NULL) {
52075+ vfree(learn_buffer_user);
52076+ learn_buffer_user = NULL;
52077+ }
52078+ learn_buffer_len = 0;
52079+ mutex_unlock(&gr_learn_user_mutex);
52080+
52081+ return;
52082+}
52083+
52084+void
52085+gr_add_learn_entry(const char *fmt, ...)
52086+{
52087+ va_list args;
52088+ unsigned int len;
52089+
52090+ if (!gr_learn_attached)
52091+ return;
52092+
52093+ spin_lock(&gr_learn_lock);
52094+
52095+ /* leave a gap at the end so we know when it's "full" but don't have to
52096+ compute the exact length of the string we're trying to append
52097+ */
52098+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52099+ spin_unlock(&gr_learn_lock);
52100+ wake_up_interruptible(&learn_wait);
52101+ return;
52102+ }
52103+ if (learn_buffer == NULL) {
52104+ spin_unlock(&gr_learn_lock);
52105+ return;
52106+ }
52107+
52108+ va_start(args, fmt);
52109+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52110+ va_end(args);
52111+
52112+ learn_buffer_len += len + 1;
52113+
52114+ spin_unlock(&gr_learn_lock);
52115+ wake_up_interruptible(&learn_wait);
52116+
52117+ return;
52118+}
52119+
52120+static int
52121+open_learn(struct inode *inode, struct file *file)
52122+{
52123+ if (file->f_mode & FMODE_READ && gr_learn_attached)
52124+ return -EBUSY;
52125+ if (file->f_mode & FMODE_READ) {
52126+ int retval = 0;
52127+ mutex_lock(&gr_learn_user_mutex);
52128+ if (learn_buffer == NULL)
52129+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52130+ if (learn_buffer_user == NULL)
52131+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52132+ if (learn_buffer == NULL) {
52133+ retval = -ENOMEM;
52134+ goto out_error;
52135+ }
52136+ if (learn_buffer_user == NULL) {
52137+ retval = -ENOMEM;
52138+ goto out_error;
52139+ }
52140+ learn_buffer_len = 0;
52141+ learn_buffer_user_len = 0;
52142+ gr_learn_attached = 1;
52143+out_error:
52144+ mutex_unlock(&gr_learn_user_mutex);
52145+ return retval;
52146+ }
52147+ return 0;
52148+}
52149+
52150+static int
52151+close_learn(struct inode *inode, struct file *file)
52152+{
52153+ if (file->f_mode & FMODE_READ) {
52154+ char *tmp = NULL;
52155+ mutex_lock(&gr_learn_user_mutex);
52156+ spin_lock(&gr_learn_lock);
52157+ tmp = learn_buffer;
52158+ learn_buffer = NULL;
52159+ spin_unlock(&gr_learn_lock);
52160+ if (tmp)
52161+ vfree(tmp);
52162+ if (learn_buffer_user != NULL) {
52163+ vfree(learn_buffer_user);
52164+ learn_buffer_user = NULL;
52165+ }
52166+ learn_buffer_len = 0;
52167+ learn_buffer_user_len = 0;
52168+ gr_learn_attached = 0;
52169+ mutex_unlock(&gr_learn_user_mutex);
52170+ }
52171+
52172+ return 0;
52173+}
52174+
52175+const struct file_operations grsec_fops = {
52176+ .read = read_learn,
52177+ .write = write_grsec_handler,
52178+ .open = open_learn,
52179+ .release = close_learn,
52180+ .poll = poll_learn,
52181+};
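
The learn interface above keeps two buffers so that non-sleeping kernel paths only ever take a spinlock, while the reader does the slow copy to userspace outside of it. The pthread sketch below mirrors that handoff in userspace (build with cc -pthread); the buffer size and the sample message are illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define BUF_SZ 4096

static char shared_buf[BUF_SZ];
static size_t shared_len;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  nonempty = PTHREAD_COND_INITIALIZER;

static void add_entry(const char *msg)		/* cf. gr_add_learn_entry() */
{
	pthread_mutex_lock(&lock);
	size_t len = strlen(msg);
	if (shared_len + len + 1 <= BUF_SZ) {
		/* entries are NUL-separated, like the kernel buffer */
		memcpy(shared_buf + shared_len, msg, len + 1);
		shared_len += len + 1;
	}
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&nonempty);		/* cf. wake_up_interruptible() */
}

static void *reader(void *arg)			/* cf. read_learn() */
{
	char private_buf[BUF_SZ];

	(void)arg;
	pthread_mutex_lock(&lock);
	while (shared_len == 0)
		pthread_cond_wait(&nonempty, &lock);
	size_t len = shared_len;
	memcpy(private_buf, shared_buf, len);	/* hand off to the private buffer */
	shared_len = 0;
	pthread_mutex_unlock(&lock);
	fwrite(private_buf, 1, len, stdout);	/* slow part, lock not held */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	sleep(1);
	add_entry("role alice subject /usr/bin/foo\n");
	pthread_join(t, NULL);
	return 0;
}
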
52182diff -urNp linux-2.6.32.45/grsecurity/gracl_res.c linux-2.6.32.45/grsecurity/gracl_res.c
52183--- linux-2.6.32.45/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52184+++ linux-2.6.32.45/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
52185@@ -0,0 +1,67 @@
52186+#include <linux/kernel.h>
52187+#include <linux/sched.h>
52188+#include <linux/gracl.h>
52189+#include <linux/grinternal.h>
52190+
52191+static const char *restab_log[] = {
52192+ [RLIMIT_CPU] = "RLIMIT_CPU",
52193+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52194+ [RLIMIT_DATA] = "RLIMIT_DATA",
52195+ [RLIMIT_STACK] = "RLIMIT_STACK",
52196+ [RLIMIT_CORE] = "RLIMIT_CORE",
52197+ [RLIMIT_RSS] = "RLIMIT_RSS",
52198+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
52199+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52200+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52201+ [RLIMIT_AS] = "RLIMIT_AS",
52202+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52203+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52204+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52205+ [RLIMIT_NICE] = "RLIMIT_NICE",
52206+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52207+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52208+ [GR_CRASH_RES] = "RLIMIT_CRASH"
52209+};
52210+
52211+void
52212+gr_log_resource(const struct task_struct *task,
52213+ const int res, const unsigned long wanted, const int gt)
52214+{
52215+ const struct cred *cred;
52216+ unsigned long rlim;
52217+
52218+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
52219+ return;
52220+
52221+ // not yet supported resource
52222+ if (unlikely(!restab_log[res]))
52223+ return;
52224+
52225+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52226+ rlim = task->signal->rlim[res].rlim_max;
52227+ else
52228+ rlim = task->signal->rlim[res].rlim_cur;
52229+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52230+ return;
52231+
52232+ rcu_read_lock();
52233+ cred = __task_cred(task);
52234+
52235+ if (res == RLIMIT_NPROC &&
52236+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52237+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52238+ goto out_rcu_unlock;
52239+ else if (res == RLIMIT_MEMLOCK &&
52240+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52241+ goto out_rcu_unlock;
52242+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52243+ goto out_rcu_unlock;
52244+ rcu_read_unlock();
52245+
52246+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52247+
52248+ return;
52249+out_rcu_unlock:
52250+ rcu_read_unlock();
52251+ return;
52252+}
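
gr_log_resource() above only logs once a request no longer fits under the task's limit, treating RLIM_INFINITY as "never log" and honouring the caller's choice of strict or non-strict comparison via the gt flag. A small sketch of just that predicate; the sample values are made up.

#include <stdio.h>
#include <sys/resource.h>

/* the "is this worth logging?" test used by gr_log_resource() above:
   nothing is logged while the request still fits under the limit */
static int should_log(unsigned long wanted, rlim_t rlim, int gt)
{
	if (rlim == RLIM_INFINITY)
		return 0;
	if (gt ? (wanted <= rlim) : (wanted < rlim))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", should_log(100, RLIM_INFINITY, 0)); /* 0: unlimited */
	printf("%d\n", should_log(100, 100, 0));           /* 1: at the limit, '<' check */
	printf("%d\n", should_log(100, 100, 1));           /* 0: at the limit, '<=' check */
	return 0;
}
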
52253diff -urNp linux-2.6.32.45/grsecurity/gracl_segv.c linux-2.6.32.45/grsecurity/gracl_segv.c
52254--- linux-2.6.32.45/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52255+++ linux-2.6.32.45/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
52256@@ -0,0 +1,284 @@
52257+#include <linux/kernel.h>
52258+#include <linux/mm.h>
52259+#include <asm/uaccess.h>
52260+#include <asm/errno.h>
52261+#include <asm/mman.h>
52262+#include <net/sock.h>
52263+#include <linux/file.h>
52264+#include <linux/fs.h>
52265+#include <linux/net.h>
52266+#include <linux/in.h>
52267+#include <linux/smp_lock.h>
52268+#include <linux/slab.h>
52269+#include <linux/types.h>
52270+#include <linux/sched.h>
52271+#include <linux/timer.h>
52272+#include <linux/gracl.h>
52273+#include <linux/grsecurity.h>
52274+#include <linux/grinternal.h>
52275+
52276+static struct crash_uid *uid_set;
52277+static unsigned short uid_used;
52278+static DEFINE_SPINLOCK(gr_uid_lock);
52279+extern rwlock_t gr_inode_lock;
52280+extern struct acl_subject_label *
52281+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52282+ struct acl_role_label *role);
52283+extern int gr_fake_force_sig(int sig, struct task_struct *t);
52284+
52285+int
52286+gr_init_uidset(void)
52287+{
52288+ uid_set =
52289+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52290+ uid_used = 0;
52291+
52292+ return uid_set ? 1 : 0;
52293+}
52294+
52295+void
52296+gr_free_uidset(void)
52297+{
52298+ if (uid_set)
52299+ kfree(uid_set);
52300+
52301+ return;
52302+}
52303+
52304+int
52305+gr_find_uid(const uid_t uid)
52306+{
52307+ struct crash_uid *tmp = uid_set;
52308+ uid_t buid;
52309+ int low = 0, high = uid_used - 1, mid;
52310+
52311+ while (high >= low) {
52312+ mid = (low + high) >> 1;
52313+ buid = tmp[mid].uid;
52314+ if (buid == uid)
52315+ return mid;
52316+ if (buid > uid)
52317+ high = mid - 1;
52318+ if (buid < uid)
52319+ low = mid + 1;
52320+ }
52321+
52322+ return -1;
52323+}
52324+
52325+static __inline__ void
52326+gr_insertsort(void)
52327+{
52328+ unsigned short i, j;
52329+ struct crash_uid index;
52330+
52331+ for (i = 1; i < uid_used; i++) {
52332+ index = uid_set[i];
52333+ j = i;
52334+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52335+ uid_set[j] = uid_set[j - 1];
52336+ j--;
52337+ }
52338+ uid_set[j] = index;
52339+ }
52340+
52341+ return;
52342+}
52343+
52344+static __inline__ void
52345+gr_insert_uid(const uid_t uid, const unsigned long expires)
52346+{
52347+ int loc;
52348+
52349+ if (uid_used == GR_UIDTABLE_MAX)
52350+ return;
52351+
52352+ loc = gr_find_uid(uid);
52353+
52354+ if (loc >= 0) {
52355+ uid_set[loc].expires = expires;
52356+ return;
52357+ }
52358+
52359+ uid_set[uid_used].uid = uid;
52360+ uid_set[uid_used].expires = expires;
52361+ uid_used++;
52362+
52363+ gr_insertsort();
52364+
52365+ return;
52366+}
52367+
52368+void
52369+gr_remove_uid(const unsigned short loc)
52370+{
52371+ unsigned short i;
52372+
52373+ for (i = loc + 1; i < uid_used; i++)
52374+ uid_set[i - 1] = uid_set[i];
52375+
52376+ uid_used--;
52377+
52378+ return;
52379+}
52380+
52381+int
52382+gr_check_crash_uid(const uid_t uid)
52383+{
52384+ int loc;
52385+ int ret = 0;
52386+
52387+ if (unlikely(!gr_acl_is_enabled()))
52388+ return 0;
52389+
52390+ spin_lock(&gr_uid_lock);
52391+ loc = gr_find_uid(uid);
52392+
52393+ if (loc < 0)
52394+ goto out_unlock;
52395+
52396+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
52397+ gr_remove_uid(loc);
52398+ else
52399+ ret = 1;
52400+
52401+out_unlock:
52402+ spin_unlock(&gr_uid_lock);
52403+ return ret;
52404+}
52405+
52406+static __inline__ int
52407+proc_is_setxid(const struct cred *cred)
52408+{
52409+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
52410+ cred->uid != cred->fsuid)
52411+ return 1;
52412+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52413+ cred->gid != cred->fsgid)
52414+ return 1;
52415+
52416+ return 0;
52417+}
52418+
52419+void
52420+gr_handle_crash(struct task_struct *task, const int sig)
52421+{
52422+ struct acl_subject_label *curr;
52423+ struct acl_subject_label *curr2;
52424+ struct task_struct *tsk, *tsk2;
52425+ const struct cred *cred;
52426+ const struct cred *cred2;
52427+
52428+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52429+ return;
52430+
52431+ if (unlikely(!gr_acl_is_enabled()))
52432+ return;
52433+
52434+ curr = task->acl;
52435+
52436+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
52437+ return;
52438+
52439+ if (time_before_eq(curr->expires, get_seconds())) {
52440+ curr->expires = 0;
52441+ curr->crashes = 0;
52442+ }
52443+
52444+ curr->crashes++;
52445+
52446+ if (!curr->expires)
52447+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52448+
52449+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52450+ time_after(curr->expires, get_seconds())) {
52451+ rcu_read_lock();
52452+ cred = __task_cred(task);
52453+ if (cred->uid && proc_is_setxid(cred)) {
52454+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52455+ spin_lock(&gr_uid_lock);
52456+ gr_insert_uid(cred->uid, curr->expires);
52457+ spin_unlock(&gr_uid_lock);
52458+ curr->expires = 0;
52459+ curr->crashes = 0;
52460+ read_lock(&tasklist_lock);
52461+ do_each_thread(tsk2, tsk) {
52462+ cred2 = __task_cred(tsk);
52463+ if (tsk != task && cred2->uid == cred->uid)
52464+ gr_fake_force_sig(SIGKILL, tsk);
52465+ } while_each_thread(tsk2, tsk);
52466+ read_unlock(&tasklist_lock);
52467+ } else {
52468+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52469+ read_lock(&tasklist_lock);
52470+ do_each_thread(tsk2, tsk) {
52471+ if (likely(tsk != task)) {
52472+ curr2 = tsk->acl;
52473+
52474+ if (curr2->device == curr->device &&
52475+ curr2->inode == curr->inode)
52476+ gr_fake_force_sig(SIGKILL, tsk);
52477+ }
52478+ } while_each_thread(tsk2, tsk);
52479+ read_unlock(&tasklist_lock);
52480+ }
52481+ rcu_read_unlock();
52482+ }
52483+
52484+ return;
52485+}
52486+
52487+int
52488+gr_check_crash_exec(const struct file *filp)
52489+{
52490+ struct acl_subject_label *curr;
52491+
52492+ if (unlikely(!gr_acl_is_enabled()))
52493+ return 0;
52494+
52495+ read_lock(&gr_inode_lock);
52496+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52497+ filp->f_path.dentry->d_inode->i_sb->s_dev,
52498+ current->role);
52499+ read_unlock(&gr_inode_lock);
52500+
52501+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52502+ (!curr->crashes && !curr->expires))
52503+ return 0;
52504+
52505+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52506+ time_after(curr->expires, get_seconds()))
52507+ return 1;
52508+ else if (time_before_eq(curr->expires, get_seconds())) {
52509+ curr->crashes = 0;
52510+ curr->expires = 0;
52511+ }
52512+
52513+ return 0;
52514+}
52515+
52516+void
52517+gr_handle_alertkill(struct task_struct *task)
52518+{
52519+ struct acl_subject_label *curracl;
52520+ __u32 curr_ip;
52521+ struct task_struct *p, *p2;
52522+
52523+ if (unlikely(!gr_acl_is_enabled()))
52524+ return;
52525+
52526+ curracl = task->acl;
52527+ curr_ip = task->signal->curr_ip;
52528+
52529+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52530+ read_lock(&tasklist_lock);
52531+ do_each_thread(p2, p) {
52532+ if (p->signal->curr_ip == curr_ip)
52533+ gr_fake_force_sig(SIGKILL, p);
52534+ } while_each_thread(p2, p);
52535+ read_unlock(&tasklist_lock);
52536+ } else if (curracl->mode & GR_KILLPROC)
52537+ gr_fake_force_sig(SIGKILL, task);
52538+
52539+ return;
52540+}
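
The crash-uid table above is a fixed-size array kept sorted so lookups can use a binary search, with entries refreshed or aged out by their expiry time. A standalone userspace version of the same bookkeeping follows; the table size and ban window are made up.

#include <stdio.h>
#include <time.h>

#define MAX_UIDS 16

struct crash_uid { unsigned int uid; time_t expires; };

static struct crash_uid set[MAX_UIDS];
static int used;

static int find_uid(unsigned int uid)		/* cf. gr_find_uid() */
{
	int low = 0, high = used - 1;

	while (high >= low) {
		int mid = (low + high) / 2;
		if (set[mid].uid == uid)
			return mid;
		if (set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert_uid(unsigned int uid, time_t expires) /* cf. gr_insert_uid() */
{
	int loc = find_uid(uid);

	if (loc >= 0) {				/* already present: refresh expiry */
		set[loc].expires = expires;
		return;
	}
	if (used == MAX_UIDS)
		return;
	int j = used++;
	while (j > 0 && set[j - 1].uid > uid) {	/* insertion-sort shift */
		set[j] = set[j - 1];
		j--;
	}
	set[j].uid = uid;
	set[j].expires = expires;
}

static int uid_is_banned(unsigned int uid)	/* cf. gr_check_crash_uid() */
{
	int loc = find_uid(uid);
	return loc >= 0 && set[loc].expires > time(NULL);
}

int main(void)
{
	insert_uid(1000, time(NULL) + 30);
	insert_uid(500,  time(NULL) + 30);
	printf("uid 500 banned: %d, uid 42 banned: %d\n",
	       uid_is_banned(500), uid_is_banned(42));
	return 0;
}
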
52541diff -urNp linux-2.6.32.45/grsecurity/gracl_shm.c linux-2.6.32.45/grsecurity/gracl_shm.c
52542--- linux-2.6.32.45/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52543+++ linux-2.6.32.45/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
52544@@ -0,0 +1,40 @@
52545+#include <linux/kernel.h>
52546+#include <linux/mm.h>
52547+#include <linux/sched.h>
52548+#include <linux/file.h>
52549+#include <linux/ipc.h>
52550+#include <linux/gracl.h>
52551+#include <linux/grsecurity.h>
52552+#include <linux/grinternal.h>
52553+
52554+int
52555+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52556+ const time_t shm_createtime, const uid_t cuid, const int shmid)
52557+{
52558+ struct task_struct *task;
52559+
52560+ if (!gr_acl_is_enabled())
52561+ return 1;
52562+
52563+ rcu_read_lock();
52564+ read_lock(&tasklist_lock);
52565+
52566+ task = find_task_by_vpid(shm_cprid);
52567+
52568+ if (unlikely(!task))
52569+ task = find_task_by_vpid(shm_lapid);
52570+
52571+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52572+ (task->pid == shm_lapid)) &&
52573+ (task->acl->mode & GR_PROTSHM) &&
52574+ (task->acl != current->acl))) {
52575+ read_unlock(&tasklist_lock);
52576+ rcu_read_unlock();
52577+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52578+ return 0;
52579+ }
52580+ read_unlock(&tasklist_lock);
52581+ rcu_read_unlock();
52582+
52583+ return 1;
52584+}
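
gr_handle_shmat() above guards against PID reuse by comparing the start time of whatever task currently owns the creator's PID with the segment's creation time. The sketch below shows only that timestamp test; the surrounding GR_PROTSHM and ACL checks are omitted, and the sample times are invented.

#include <stdio.h>
#include <time.h>

/* a task that started after the segment was created cannot be the creator */
static int plausible_creator(time_t task_start, time_t shm_createtime)
{
	return task_start <= shm_createtime;
}

int main(void)
{
	time_t created = 1000;

	printf("%d\n", plausible_creator(900, created));  /* 1: could be the creator */
	printf("%d\n", plausible_creator(1100, created)); /* 0: pid was recycled */
	return 0;
}
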
52585diff -urNp linux-2.6.32.45/grsecurity/grsec_chdir.c linux-2.6.32.45/grsecurity/grsec_chdir.c
52586--- linux-2.6.32.45/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52587+++ linux-2.6.32.45/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
52588@@ -0,0 +1,19 @@
52589+#include <linux/kernel.h>
52590+#include <linux/sched.h>
52591+#include <linux/fs.h>
52592+#include <linux/file.h>
52593+#include <linux/grsecurity.h>
52594+#include <linux/grinternal.h>
52595+
52596+void
52597+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52598+{
52599+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52600+ if ((grsec_enable_chdir && grsec_enable_group &&
52601+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52602+ !grsec_enable_group)) {
52603+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52604+ }
52605+#endif
52606+ return;
52607+}
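
The audit gate in gr_log_chdir() above (and again in gr_handle_exec_args() further down) is written as two OR-ed branches; it reduces to a single expression, as the sketch shows. The parameter names are illustrative, not the patch's variables.

#include <stdio.h>

/* (enable && group && in_group) || (enable && !group)
   is equivalent to enable && (!group || in_group) */
static int should_audit(int enabled, int group_filter, int in_audit_group)
{
	return enabled && (!group_filter || in_audit_group);
}

int main(void)
{
	printf("%d %d %d\n",
	       should_audit(1, 0, 0),	/* 1: auditing on, no group filter */
	       should_audit(1, 1, 0),	/* 0: group filter on, not in group */
	       should_audit(0, 1, 1));	/* 0: auditing off */
	return 0;
}
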
52608diff -urNp linux-2.6.32.45/grsecurity/grsec_chroot.c linux-2.6.32.45/grsecurity/grsec_chroot.c
52609--- linux-2.6.32.45/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52610+++ linux-2.6.32.45/grsecurity/grsec_chroot.c 2011-07-18 17:14:10.000000000 -0400
52611@@ -0,0 +1,384 @@
52612+#include <linux/kernel.h>
52613+#include <linux/module.h>
52614+#include <linux/sched.h>
52615+#include <linux/file.h>
52616+#include <linux/fs.h>
52617+#include <linux/mount.h>
52618+#include <linux/types.h>
52619+#include <linux/pid_namespace.h>
52620+#include <linux/grsecurity.h>
52621+#include <linux/grinternal.h>
52622+
52623+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52624+{
52625+#ifdef CONFIG_GRKERNSEC
52626+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52627+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52628+ task->gr_is_chrooted = 1;
52629+ else
52630+ task->gr_is_chrooted = 0;
52631+
52632+ task->gr_chroot_dentry = path->dentry;
52633+#endif
52634+ return;
52635+}
52636+
52637+void gr_clear_chroot_entries(struct task_struct *task)
52638+{
52639+#ifdef CONFIG_GRKERNSEC
52640+ task->gr_is_chrooted = 0;
52641+ task->gr_chroot_dentry = NULL;
52642+#endif
52643+ return;
52644+}
52645+
52646+int
52647+gr_handle_chroot_unix(const pid_t pid)
52648+{
52649+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52650+ struct task_struct *p;
52651+
52652+ if (unlikely(!grsec_enable_chroot_unix))
52653+ return 1;
52654+
52655+ if (likely(!proc_is_chrooted(current)))
52656+ return 1;
52657+
52658+ rcu_read_lock();
52659+ read_lock(&tasklist_lock);
52660+
52661+ p = find_task_by_vpid_unrestricted(pid);
52662+ if (unlikely(p && !have_same_root(current, p))) {
52663+ read_unlock(&tasklist_lock);
52664+ rcu_read_unlock();
52665+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
52666+ return 0;
52667+ }
52668+ read_unlock(&tasklist_lock);
52669+ rcu_read_unlock();
52670+#endif
52671+ return 1;
52672+}
52673+
52674+int
52675+gr_handle_chroot_nice(void)
52676+{
52677+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52678+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
52679+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
52680+ return -EPERM;
52681+ }
52682+#endif
52683+ return 0;
52684+}
52685+
52686+int
52687+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
52688+{
52689+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52690+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
52691+ && proc_is_chrooted(current)) {
52692+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
52693+ return -EACCES;
52694+ }
52695+#endif
52696+ return 0;
52697+}
52698+
52699+int
52700+gr_handle_chroot_rawio(const struct inode *inode)
52701+{
52702+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52703+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
52704+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
52705+ return 1;
52706+#endif
52707+ return 0;
52708+}
52709+
52710+int
52711+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
52712+{
52713+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52714+ struct task_struct *p;
52715+ int ret = 0;
52716+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
52717+ return ret;
52718+
52719+ read_lock(&tasklist_lock);
52720+ do_each_pid_task(pid, type, p) {
52721+ if (!have_same_root(current, p)) {
52722+ ret = 1;
52723+ goto out;
52724+ }
52725+ } while_each_pid_task(pid, type, p);
52726+out:
52727+ read_unlock(&tasklist_lock);
52728+ return ret;
52729+#endif
52730+ return 0;
52731+}
52732+
52733+int
52734+gr_pid_is_chrooted(struct task_struct *p)
52735+{
52736+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52737+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
52738+ return 0;
52739+
52740+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
52741+ !have_same_root(current, p)) {
52742+ return 1;
52743+ }
52744+#endif
52745+ return 0;
52746+}
52747+
52748+EXPORT_SYMBOL(gr_pid_is_chrooted);
52749+
52750+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
52751+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
52752+{
52753+ struct dentry *dentry = (struct dentry *)u_dentry;
52754+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
52755+ struct dentry *realroot;
52756+ struct vfsmount *realrootmnt;
52757+ struct dentry *currentroot;
52758+ struct vfsmount *currentmnt;
52759+ struct task_struct *reaper = &init_task;
52760+ int ret = 1;
52761+
52762+ read_lock(&reaper->fs->lock);
52763+ realrootmnt = mntget(reaper->fs->root.mnt);
52764+ realroot = dget(reaper->fs->root.dentry);
52765+ read_unlock(&reaper->fs->lock);
52766+
52767+ read_lock(&current->fs->lock);
52768+ currentmnt = mntget(current->fs->root.mnt);
52769+ currentroot = dget(current->fs->root.dentry);
52770+ read_unlock(&current->fs->lock);
52771+
52772+ spin_lock(&dcache_lock);
52773+ for (;;) {
52774+ if (unlikely((dentry == realroot && mnt == realrootmnt)
52775+ || (dentry == currentroot && mnt == currentmnt)))
52776+ break;
52777+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
52778+ if (mnt->mnt_parent == mnt)
52779+ break;
52780+ dentry = mnt->mnt_mountpoint;
52781+ mnt = mnt->mnt_parent;
52782+ continue;
52783+ }
52784+ dentry = dentry->d_parent;
52785+ }
52786+ spin_unlock(&dcache_lock);
52787+
52788+ dput(currentroot);
52789+ mntput(currentmnt);
52790+
52791+ /* access is outside of chroot */
52792+ if (dentry == realroot && mnt == realrootmnt)
52793+ ret = 0;
52794+
52795+ dput(realroot);
52796+ mntput(realrootmnt);
52797+ return ret;
52798+}
52799+#endif
52800+
52801+int
52802+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
52803+{
52804+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52805+ if (!grsec_enable_chroot_fchdir)
52806+ return 1;
52807+
52808+ if (!proc_is_chrooted(current))
52809+ return 1;
52810+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
52811+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
52812+ return 0;
52813+ }
52814+#endif
52815+ return 1;
52816+}
52817+
52818+int
52819+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52820+ const time_t shm_createtime)
52821+{
52822+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52823+ struct task_struct *p;
52824+ time_t starttime;
52825+
52826+ if (unlikely(!grsec_enable_chroot_shmat))
52827+ return 1;
52828+
52829+ if (likely(!proc_is_chrooted(current)))
52830+ return 1;
52831+
52832+ rcu_read_lock();
52833+ read_lock(&tasklist_lock);
52834+
52835+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
52836+ starttime = p->start_time.tv_sec;
52837+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
52838+ if (have_same_root(current, p)) {
52839+ goto allow;
52840+ } else {
52841+ read_unlock(&tasklist_lock);
52842+ rcu_read_unlock();
52843+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52844+ return 0;
52845+ }
52846+ }
52847+ /* creator exited or pid was reused; fall through to next check */
52848+ }
52849+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
52850+ if (unlikely(!have_same_root(current, p))) {
52851+ read_unlock(&tasklist_lock);
52852+ rcu_read_unlock();
52853+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52854+ return 0;
52855+ }
52856+ }
52857+
52858+allow:
52859+ read_unlock(&tasklist_lock);
52860+ rcu_read_unlock();
52861+#endif
52862+ return 1;
52863+}
52864+
52865+void
52866+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
52867+{
52868+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52869+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
52870+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
52871+#endif
52872+ return;
52873+}
52874+
52875+int
52876+gr_handle_chroot_mknod(const struct dentry *dentry,
52877+ const struct vfsmount *mnt, const int mode)
52878+{
52879+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52880+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
52881+ proc_is_chrooted(current)) {
52882+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
52883+ return -EPERM;
52884+ }
52885+#endif
52886+ return 0;
52887+}
52888+
52889+int
52890+gr_handle_chroot_mount(const struct dentry *dentry,
52891+ const struct vfsmount *mnt, const char *dev_name)
52892+{
52893+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52894+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
52895+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
52896+ return -EPERM;
52897+ }
52898+#endif
52899+ return 0;
52900+}
52901+
52902+int
52903+gr_handle_chroot_pivot(void)
52904+{
52905+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52906+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
52907+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
52908+ return -EPERM;
52909+ }
52910+#endif
52911+ return 0;
52912+}
52913+
52914+int
52915+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
52916+{
52917+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52918+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
52919+ !gr_is_outside_chroot(dentry, mnt)) {
52920+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
52921+ return -EPERM;
52922+ }
52923+#endif
52924+ return 0;
52925+}
52926+
52927+int
52928+gr_handle_chroot_caps(struct path *path)
52929+{
52930+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52931+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
52932+ (init_task.fs->root.dentry != path->dentry) &&
52933+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
52934+
52935+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52936+ const struct cred *old = current_cred();
52937+ struct cred *new = prepare_creds();
52938+ if (new == NULL)
52939+ return 1;
52940+
52941+ new->cap_permitted = cap_drop(old->cap_permitted,
52942+ chroot_caps);
52943+ new->cap_inheritable = cap_drop(old->cap_inheritable,
52944+ chroot_caps);
52945+ new->cap_effective = cap_drop(old->cap_effective,
52946+ chroot_caps);
52947+
52948+ commit_creds(new);
52949+
52950+ return 0;
52951+ }
52952+#endif
52953+ return 0;
52954+}
52955+
52956+int
52957+gr_handle_chroot_sysctl(const int op)
52958+{
52959+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52960+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
52961+ && (op & MAY_WRITE))
52962+ return -EACCES;
52963+#endif
52964+ return 0;
52965+}
52966+
52967+void
52968+gr_handle_chroot_chdir(struct path *path)
52969+{
52970+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52971+ if (grsec_enable_chroot_chdir)
52972+ set_fs_pwd(current->fs, path);
52973+#endif
52974+ return;
52975+}
52976+
52977+int
52978+gr_handle_chroot_chmod(const struct dentry *dentry,
52979+ const struct vfsmount *mnt, const int mode)
52980+{
52981+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52982+ /* allow chmod +s on directories, but not on files */
52983+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
52984+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
52985+ proc_is_chrooted(current)) {
52986+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
52987+ return -EPERM;
52988+ }
52989+#endif
52990+ return 0;
52991+}
52992+
52993+#ifdef CONFIG_SECURITY
52994+EXPORT_SYMBOL(gr_handle_chroot_caps);
52995+#endif
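
gr_is_outside_chroot() above walks parent pointers from the accessed dentry/mount pair until it reaches either the init task's root or the caller's root; whichever comes first tells it whether the path escapes the jail. A toy version of that walk over a hand-built parent-pointer tree follows; the node names and the simplified return convention are illustrative only.

#include <stdio.h>
#include <stddef.h>

/* 'node' stands in for the dentry/vfsmount pair the kernel code walks */
struct node { const char *name; struct node *parent; };

static int escapes_chroot(struct node *n, struct node *real_root,
			  struct node *chroot_root)
{
	for (;;) {
		if (n == real_root)
			return n != chroot_root; /* hit "/" without passing the chroot */
		if (n == chroot_root)
			return 0;		 /* stayed inside the chroot */
		n = n->parent;
	}
}

int main(void)
{
	struct node root    = { "/", NULL };
	struct node chroot  = { "/srv/jail", &root };
	struct node inside  = { "/srv/jail/etc", &chroot };
	struct node outside = { "/home", &root };

	root.parent = &root;			/* "/" is its own parent, like IS_ROOT() */

	printf("inside:  %d\n", escapes_chroot(&inside, &root, &chroot));  /* 0 */
	printf("outside: %d\n", escapes_chroot(&outside, &root, &chroot)); /* 1 */
	return 0;
}
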
52996diff -urNp linux-2.6.32.45/grsecurity/grsec_disabled.c linux-2.6.32.45/grsecurity/grsec_disabled.c
52997--- linux-2.6.32.45/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
52998+++ linux-2.6.32.45/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
52999@@ -0,0 +1,447 @@
53000+#include <linux/kernel.h>
53001+#include <linux/module.h>
53002+#include <linux/sched.h>
53003+#include <linux/file.h>
53004+#include <linux/fs.h>
53005+#include <linux/kdev_t.h>
53006+#include <linux/net.h>
53007+#include <linux/in.h>
53008+#include <linux/ip.h>
53009+#include <linux/skbuff.h>
53010+#include <linux/sysctl.h>
53011+
53012+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
53013+void
53014+pax_set_initial_flags(struct linux_binprm *bprm)
53015+{
53016+ return;
53017+}
53018+#endif
53019+
53020+#ifdef CONFIG_SYSCTL
53021+__u32
53022+gr_handle_sysctl(const struct ctl_table * table, const int op)
53023+{
53024+ return 0;
53025+}
53026+#endif
53027+
53028+#ifdef CONFIG_TASKSTATS
53029+int gr_is_taskstats_denied(int pid)
53030+{
53031+ return 0;
53032+}
53033+#endif
53034+
53035+int
53036+gr_acl_is_enabled(void)
53037+{
53038+ return 0;
53039+}
53040+
53041+int
53042+gr_handle_rawio(const struct inode *inode)
53043+{
53044+ return 0;
53045+}
53046+
53047+void
53048+gr_acl_handle_psacct(struct task_struct *task, const long code)
53049+{
53050+ return;
53051+}
53052+
53053+int
53054+gr_handle_ptrace(struct task_struct *task, const long request)
53055+{
53056+ return 0;
53057+}
53058+
53059+int
53060+gr_handle_proc_ptrace(struct task_struct *task)
53061+{
53062+ return 0;
53063+}
53064+
53065+void
53066+gr_learn_resource(const struct task_struct *task,
53067+ const int res, const unsigned long wanted, const int gt)
53068+{
53069+ return;
53070+}
53071+
53072+int
53073+gr_set_acls(const int type)
53074+{
53075+ return 0;
53076+}
53077+
53078+int
53079+gr_check_hidden_task(const struct task_struct *tsk)
53080+{
53081+ return 0;
53082+}
53083+
53084+int
53085+gr_check_protected_task(const struct task_struct *task)
53086+{
53087+ return 0;
53088+}
53089+
53090+int
53091+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53092+{
53093+ return 0;
53094+}
53095+
53096+void
53097+gr_copy_label(struct task_struct *tsk)
53098+{
53099+ return;
53100+}
53101+
53102+void
53103+gr_set_pax_flags(struct task_struct *task)
53104+{
53105+ return;
53106+}
53107+
53108+int
53109+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53110+ const int unsafe_share)
53111+{
53112+ return 0;
53113+}
53114+
53115+void
53116+gr_handle_delete(const ino_t ino, const dev_t dev)
53117+{
53118+ return;
53119+}
53120+
53121+void
53122+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53123+{
53124+ return;
53125+}
53126+
53127+void
53128+gr_handle_crash(struct task_struct *task, const int sig)
53129+{
53130+ return;
53131+}
53132+
53133+int
53134+gr_check_crash_exec(const struct file *filp)
53135+{
53136+ return 0;
53137+}
53138+
53139+int
53140+gr_check_crash_uid(const uid_t uid)
53141+{
53142+ return 0;
53143+}
53144+
53145+void
53146+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53147+ struct dentry *old_dentry,
53148+ struct dentry *new_dentry,
53149+ struct vfsmount *mnt, const __u8 replace)
53150+{
53151+ return;
53152+}
53153+
53154+int
53155+gr_search_socket(const int family, const int type, const int protocol)
53156+{
53157+ return 1;
53158+}
53159+
53160+int
53161+gr_search_connectbind(const int mode, const struct socket *sock,
53162+ const struct sockaddr_in *addr)
53163+{
53164+ return 0;
53165+}
53166+
53167+int
53168+gr_is_capable(const int cap)
53169+{
53170+ return 1;
53171+}
53172+
53173+int
53174+gr_is_capable_nolog(const int cap)
53175+{
53176+ return 1;
53177+}
53178+
53179+void
53180+gr_handle_alertkill(struct task_struct *task)
53181+{
53182+ return;
53183+}
53184+
53185+__u32
53186+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53187+{
53188+ return 1;
53189+}
53190+
53191+__u32
53192+gr_acl_handle_hidden_file(const struct dentry * dentry,
53193+ const struct vfsmount * mnt)
53194+{
53195+ return 1;
53196+}
53197+
53198+__u32
53199+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53200+ const int fmode)
53201+{
53202+ return 1;
53203+}
53204+
53205+__u32
53206+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53207+{
53208+ return 1;
53209+}
53210+
53211+__u32
53212+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53213+{
53214+ return 1;
53215+}
53216+
53217+int
53218+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53219+ unsigned int *vm_flags)
53220+{
53221+ return 1;
53222+}
53223+
53224+__u32
53225+gr_acl_handle_truncate(const struct dentry * dentry,
53226+ const struct vfsmount * mnt)
53227+{
53228+ return 1;
53229+}
53230+
53231+__u32
53232+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53233+{
53234+ return 1;
53235+}
53236+
53237+__u32
53238+gr_acl_handle_access(const struct dentry * dentry,
53239+ const struct vfsmount * mnt, const int fmode)
53240+{
53241+ return 1;
53242+}
53243+
53244+__u32
53245+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53246+ mode_t mode)
53247+{
53248+ return 1;
53249+}
53250+
53251+__u32
53252+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53253+ mode_t mode)
53254+{
53255+ return 1;
53256+}
53257+
53258+__u32
53259+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53260+{
53261+ return 1;
53262+}
53263+
53264+__u32
53265+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53266+{
53267+ return 1;
53268+}
53269+
53270+void
53271+grsecurity_init(void)
53272+{
53273+ return;
53274+}
53275+
53276+__u32
53277+gr_acl_handle_mknod(const struct dentry * new_dentry,
53278+ const struct dentry * parent_dentry,
53279+ const struct vfsmount * parent_mnt,
53280+ const int mode)
53281+{
53282+ return 1;
53283+}
53284+
53285+__u32
53286+gr_acl_handle_mkdir(const struct dentry * new_dentry,
53287+ const struct dentry * parent_dentry,
53288+ const struct vfsmount * parent_mnt)
53289+{
53290+ return 1;
53291+}
53292+
53293+__u32
53294+gr_acl_handle_symlink(const struct dentry * new_dentry,
53295+ const struct dentry * parent_dentry,
53296+ const struct vfsmount * parent_mnt, const char *from)
53297+{
53298+ return 1;
53299+}
53300+
53301+__u32
53302+gr_acl_handle_link(const struct dentry * new_dentry,
53303+ const struct dentry * parent_dentry,
53304+ const struct vfsmount * parent_mnt,
53305+ const struct dentry * old_dentry,
53306+ const struct vfsmount * old_mnt, const char *to)
53307+{
53308+ return 1;
53309+}
53310+
53311+int
53312+gr_acl_handle_rename(const struct dentry *new_dentry,
53313+ const struct dentry *parent_dentry,
53314+ const struct vfsmount *parent_mnt,
53315+ const struct dentry *old_dentry,
53316+ const struct inode *old_parent_inode,
53317+ const struct vfsmount *old_mnt, const char *newname)
53318+{
53319+ return 0;
53320+}
53321+
53322+int
53323+gr_acl_handle_filldir(const struct file *file, const char *name,
53324+ const int namelen, const ino_t ino)
53325+{
53326+ return 1;
53327+}
53328+
53329+int
53330+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53331+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53332+{
53333+ return 1;
53334+}
53335+
53336+int
53337+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53338+{
53339+ return 0;
53340+}
53341+
53342+int
53343+gr_search_accept(const struct socket *sock)
53344+{
53345+ return 0;
53346+}
53347+
53348+int
53349+gr_search_listen(const struct socket *sock)
53350+{
53351+ return 0;
53352+}
53353+
53354+int
53355+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53356+{
53357+ return 0;
53358+}
53359+
53360+__u32
53361+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53362+{
53363+ return 1;
53364+}
53365+
53366+__u32
53367+gr_acl_handle_creat(const struct dentry * dentry,
53368+ const struct dentry * p_dentry,
53369+ const struct vfsmount * p_mnt, const int fmode,
53370+ const int imode)
53371+{
53372+ return 1;
53373+}
53374+
53375+void
53376+gr_acl_handle_exit(void)
53377+{
53378+ return;
53379+}
53380+
53381+int
53382+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53383+{
53384+ return 1;
53385+}
53386+
53387+void
53388+gr_set_role_label(const uid_t uid, const gid_t gid)
53389+{
53390+ return;
53391+}
53392+
53393+int
53394+gr_acl_handle_procpidmem(const struct task_struct *task)
53395+{
53396+ return 0;
53397+}
53398+
53399+int
53400+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53401+{
53402+ return 0;
53403+}
53404+
53405+int
53406+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53407+{
53408+ return 0;
53409+}
53410+
53411+void
53412+gr_set_kernel_label(struct task_struct *task)
53413+{
53414+ return;
53415+}
53416+
53417+int
53418+gr_check_user_change(int real, int effective, int fs)
53419+{
53420+ return 0;
53421+}
53422+
53423+int
53424+gr_check_group_change(int real, int effective, int fs)
53425+{
53426+ return 0;
53427+}
53428+
53429+int gr_acl_enable_at_secure(void)
53430+{
53431+ return 0;
53432+}
53433+
53434+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53435+{
53436+ return dentry->d_inode->i_sb->s_dev;
53437+}
53438+
53439+EXPORT_SYMBOL(gr_is_capable);
53440+EXPORT_SYMBOL(gr_is_capable_nolog);
53441+EXPORT_SYMBOL(gr_learn_resource);
53442+EXPORT_SYMBOL(gr_set_kernel_label);
53443+#ifdef CONFIG_SECURITY
53444+EXPORT_SYMBOL(gr_check_user_change);
53445+EXPORT_SYMBOL(gr_check_group_change);
53446+#endif
53447diff -urNp linux-2.6.32.45/grsecurity/grsec_exec.c linux-2.6.32.45/grsecurity/grsec_exec.c
53448--- linux-2.6.32.45/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53449+++ linux-2.6.32.45/grsecurity/grsec_exec.c 2011-08-11 19:57:19.000000000 -0400
53450@@ -0,0 +1,132 @@
53451+#include <linux/kernel.h>
53452+#include <linux/sched.h>
53453+#include <linux/file.h>
53454+#include <linux/binfmts.h>
53455+#include <linux/smp_lock.h>
53456+#include <linux/fs.h>
53457+#include <linux/types.h>
53458+#include <linux/grdefs.h>
53459+#include <linux/grinternal.h>
53460+#include <linux/capability.h>
53461+#include <linux/compat.h>
53462+
53463+#include <asm/uaccess.h>
53464+
53465+#ifdef CONFIG_GRKERNSEC_EXECLOG
53466+static char gr_exec_arg_buf[132];
53467+static DEFINE_MUTEX(gr_exec_arg_mutex);
53468+#endif
53469+
53470+void
53471+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
53472+{
53473+#ifdef CONFIG_GRKERNSEC_EXECLOG
53474+ char *grarg = gr_exec_arg_buf;
53475+ unsigned int i, x, execlen = 0;
53476+ char c;
53477+
53478+ if (!((grsec_enable_execlog && grsec_enable_group &&
53479+ in_group_p(grsec_audit_gid))
53480+ || (grsec_enable_execlog && !grsec_enable_group)))
53481+ return;
53482+
53483+ mutex_lock(&gr_exec_arg_mutex);
53484+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53485+
53486+ if (unlikely(argv == NULL))
53487+ goto log;
53488+
53489+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53490+ const char __user *p;
53491+ unsigned int len;
53492+
53493+ if (copy_from_user(&p, argv + i, sizeof(p)))
53494+ goto log;
53495+ if (!p)
53496+ goto log;
53497+ len = strnlen_user(p, 128 - execlen);
53498+ if (len > 128 - execlen)
53499+ len = 128 - execlen;
53500+ else if (len > 0)
53501+ len--;
53502+ if (copy_from_user(grarg + execlen, p, len))
53503+ goto log;
53504+
53505+ /* rewrite unprintable characters */
53506+ for (x = 0; x < len; x++) {
53507+ c = *(grarg + execlen + x);
53508+ if (c < 32 || c > 126)
53509+ *(grarg + execlen + x) = ' ';
53510+ }
53511+
53512+ execlen += len;
53513+ *(grarg + execlen) = ' ';
53514+ *(grarg + execlen + 1) = '\0';
53515+ execlen++;
53516+ }
53517+
53518+ log:
53519+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53520+ bprm->file->f_path.mnt, grarg);
53521+ mutex_unlock(&gr_exec_arg_mutex);
53522+#endif
53523+ return;
53524+}
53525+
53526+#ifdef CONFIG_COMPAT
53527+void
53528+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
53529+{
53530+#ifdef CONFIG_GRKERNSEC_EXECLOG
53531+ char *grarg = gr_exec_arg_buf;
53532+ unsigned int i, x, execlen = 0;
53533+ char c;
53534+
53535+ if (!((grsec_enable_execlog && grsec_enable_group &&
53536+ in_group_p(grsec_audit_gid))
53537+ || (grsec_enable_execlog && !grsec_enable_group)))
53538+ return;
53539+
53540+ mutex_lock(&gr_exec_arg_mutex);
53541+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53542+
53543+ if (unlikely(argv == NULL))
53544+ goto log;
53545+
53546+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53547+ compat_uptr_t p;
53548+ unsigned int len;
53549+
53550+ if (get_user(p, argv + i))
53551+ goto log;
53552+ len = strnlen_user(compat_ptr(p), 128 - execlen);
53553+ if (len > 128 - execlen)
53554+ len = 128 - execlen;
53555+ else if (len > 0)
53556+ len--;
53557+ else
53558+ goto log;
53559+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
53560+ goto log;
53561+
53562+ /* rewrite unprintable characters */
53563+ for (x = 0; x < len; x++) {
53564+ c = *(grarg + execlen + x);
53565+ if (c < 32 || c > 126)
53566+ *(grarg + execlen + x) = ' ';
53567+ }
53568+
53569+ execlen += len;
53570+ *(grarg + execlen) = ' ';
53571+ *(grarg + execlen + 1) = '\0';
53572+ execlen++;
53573+ }
53574+
53575+ log:
53576+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53577+ bprm->file->f_path.mnt, grarg);
53578+ mutex_unlock(&gr_exec_arg_mutex);
53579+#endif
53580+ return;
53581+}
53582+#endif
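
The EXECLOG path above copies up to 128 bytes of argv into a fixed 132-byte buffer and rewrites unprintable bytes as spaces before handing the string to gr_log_fs_str(). A minimal user-space sketch of that sanitisation step (buffer sizes follow the code above; function and variable names are illustrative, not part of the patch):

#include <stdio.h>
#include <string.h>

/* Append 'arg' to 'buf' the way the EXECLOG path does: cap the total at
 * 128 bytes, replace non-printable bytes with spaces, and separate the
 * arguments with a single space.  Returns the new used length. */
static unsigned int append_sanitized(char *buf, unsigned int used, const char *arg)
{
	unsigned int len = strlen(arg);
	unsigned int i;

	if (len > 128 - used)
		len = 128 - used;

	memcpy(buf + used, arg, len);
	for (i = 0; i < len; i++)
		if (buf[used + i] < 32 || buf[used + i] > 126)
			buf[used + i] = ' ';

	used += len;
	buf[used] = ' ';
	buf[used + 1] = '\0';
	return used + 1;
}

int main(int argc, char **argv)
{
	char buf[132] = { 0 };	/* same size as gr_exec_arg_buf above */
	unsigned int used = 0;
	int i;

	for (i = 1; i < argc && used < 128; i++)
		used = append_sanitized(buf, used, argv[i]);
	printf("exec args: %s\n", buf);
	return 0;
}
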
53583diff -urNp linux-2.6.32.45/grsecurity/grsec_fifo.c linux-2.6.32.45/grsecurity/grsec_fifo.c
53584--- linux-2.6.32.45/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53585+++ linux-2.6.32.45/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
53586@@ -0,0 +1,24 @@
53587+#include <linux/kernel.h>
53588+#include <linux/sched.h>
53589+#include <linux/fs.h>
53590+#include <linux/file.h>
53591+#include <linux/grinternal.h>
53592+
53593+int
53594+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53595+ const struct dentry *dir, const int flag, const int acc_mode)
53596+{
53597+#ifdef CONFIG_GRKERNSEC_FIFO
53598+ const struct cred *cred = current_cred();
53599+
53600+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53601+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53602+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53603+ (cred->fsuid != dentry->d_inode->i_uid)) {
53604+ if (!inode_permission(dentry->d_inode, acc_mode))
53605+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53606+ return -EACCES;
53607+ }
53608+#endif
53609+ return 0;
53610+}
53611diff -urNp linux-2.6.32.45/grsecurity/grsec_fork.c linux-2.6.32.45/grsecurity/grsec_fork.c
53612--- linux-2.6.32.45/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53613+++ linux-2.6.32.45/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
53614@@ -0,0 +1,23 @@
53615+#include <linux/kernel.h>
53616+#include <linux/sched.h>
53617+#include <linux/grsecurity.h>
53618+#include <linux/grinternal.h>
53619+#include <linux/errno.h>
53620+
53621+void
53622+gr_log_forkfail(const int retval)
53623+{
53624+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53625+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53626+ switch (retval) {
53627+ case -EAGAIN:
53628+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53629+ break;
53630+ case -ENOMEM:
53631+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53632+ break;
53633+ }
53634+ }
53635+#endif
53636+ return;
53637+}
53638diff -urNp linux-2.6.32.45/grsecurity/grsec_init.c linux-2.6.32.45/grsecurity/grsec_init.c
53639--- linux-2.6.32.45/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53640+++ linux-2.6.32.45/grsecurity/grsec_init.c 2011-08-11 19:57:42.000000000 -0400
53641@@ -0,0 +1,270 @@
53642+#include <linux/kernel.h>
53643+#include <linux/sched.h>
53644+#include <linux/mm.h>
53645+#include <linux/smp_lock.h>
53646+#include <linux/gracl.h>
53647+#include <linux/slab.h>
53648+#include <linux/vmalloc.h>
53649+#include <linux/percpu.h>
53650+#include <linux/module.h>
53651+
53652+int grsec_enable_brute;
53653+int grsec_enable_link;
53654+int grsec_enable_dmesg;
53655+int grsec_enable_harden_ptrace;
53656+int grsec_enable_fifo;
53657+int grsec_enable_execlog;
53658+int grsec_enable_signal;
53659+int grsec_enable_forkfail;
53660+int grsec_enable_audit_ptrace;
53661+int grsec_enable_time;
53662+int grsec_enable_audit_textrel;
53663+int grsec_enable_group;
53664+int grsec_audit_gid;
53665+int grsec_enable_chdir;
53666+int grsec_enable_mount;
53667+int grsec_enable_rofs;
53668+int grsec_enable_chroot_findtask;
53669+int grsec_enable_chroot_mount;
53670+int grsec_enable_chroot_shmat;
53671+int grsec_enable_chroot_fchdir;
53672+int grsec_enable_chroot_double;
53673+int grsec_enable_chroot_pivot;
53674+int grsec_enable_chroot_chdir;
53675+int grsec_enable_chroot_chmod;
53676+int grsec_enable_chroot_mknod;
53677+int grsec_enable_chroot_nice;
53678+int grsec_enable_chroot_execlog;
53679+int grsec_enable_chroot_caps;
53680+int grsec_enable_chroot_sysctl;
53681+int grsec_enable_chroot_unix;
53682+int grsec_enable_tpe;
53683+int grsec_tpe_gid;
53684+int grsec_enable_blackhole;
53685+#ifdef CONFIG_IPV6_MODULE
53686+EXPORT_SYMBOL(grsec_enable_blackhole);
53687+#endif
53688+int grsec_lastack_retries;
53689+int grsec_enable_tpe_all;
53690+int grsec_enable_tpe_invert;
53691+int grsec_enable_socket_all;
53692+int grsec_socket_all_gid;
53693+int grsec_enable_socket_client;
53694+int grsec_socket_client_gid;
53695+int grsec_enable_socket_server;
53696+int grsec_socket_server_gid;
53697+int grsec_resource_logging;
53698+int grsec_disable_privio;
53699+int grsec_enable_log_rwxmaps;
53700+int grsec_lock;
53701+
53702+DEFINE_SPINLOCK(grsec_alert_lock);
53703+unsigned long grsec_alert_wtime = 0;
53704+unsigned long grsec_alert_fyet = 0;
53705+
53706+DEFINE_SPINLOCK(grsec_audit_lock);
53707+
53708+DEFINE_RWLOCK(grsec_exec_file_lock);
53709+
53710+char *gr_shared_page[4];
53711+
53712+char *gr_alert_log_fmt;
53713+char *gr_audit_log_fmt;
53714+char *gr_alert_log_buf;
53715+char *gr_audit_log_buf;
53716+
53717+extern struct gr_arg *gr_usermode;
53718+extern unsigned char *gr_system_salt;
53719+extern unsigned char *gr_system_sum;
53720+
53721+void __init
53722+grsecurity_init(void)
53723+{
53724+ int j;
53725+ /* create the per-cpu shared pages */
53726+
53727+#ifdef CONFIG_X86
53728+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
53729+#endif
53730+
53731+ for (j = 0; j < 4; j++) {
53732+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
53733+ if (gr_shared_page[j] == NULL) {
53734+ panic("Unable to allocate grsecurity shared page");
53735+ return;
53736+ }
53737+ }
53738+
53739+ /* allocate log buffers */
53740+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
53741+ if (!gr_alert_log_fmt) {
53742+ panic("Unable to allocate grsecurity alert log format buffer");
53743+ return;
53744+ }
53745+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
53746+ if (!gr_audit_log_fmt) {
53747+ panic("Unable to allocate grsecurity audit log format buffer");
53748+ return;
53749+ }
53750+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53751+ if (!gr_alert_log_buf) {
53752+ panic("Unable to allocate grsecurity alert log buffer");
53753+ return;
53754+ }
53755+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53756+ if (!gr_audit_log_buf) {
53757+ panic("Unable to allocate grsecurity audit log buffer");
53758+ return;
53759+ }
53760+
53761+ /* allocate memory for authentication structure */
53762+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
53763+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
53764+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
53765+
53766+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
53767+ panic("Unable to allocate grsecurity authentication structure");
53768+ return;
53769+ }
53770+
53771+
53772+#ifdef CONFIG_GRKERNSEC_IO
53773+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
53774+ grsec_disable_privio = 1;
53775+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53776+ grsec_disable_privio = 1;
53777+#else
53778+ grsec_disable_privio = 0;
53779+#endif
53780+#endif
53781+
53782+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53783+ /* for backward compatibility, tpe_invert always defaults to on if
53784+ enabled in the kernel
53785+ */
53786+ grsec_enable_tpe_invert = 1;
53787+#endif
53788+
53789+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53790+#ifndef CONFIG_GRKERNSEC_SYSCTL
53791+ grsec_lock = 1;
53792+#endif
53793+
53794+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53795+ grsec_enable_audit_textrel = 1;
53796+#endif
53797+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53798+ grsec_enable_log_rwxmaps = 1;
53799+#endif
53800+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53801+ grsec_enable_group = 1;
53802+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
53803+#endif
53804+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53805+ grsec_enable_chdir = 1;
53806+#endif
53807+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53808+ grsec_enable_harden_ptrace = 1;
53809+#endif
53810+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53811+ grsec_enable_mount = 1;
53812+#endif
53813+#ifdef CONFIG_GRKERNSEC_LINK
53814+ grsec_enable_link = 1;
53815+#endif
53816+#ifdef CONFIG_GRKERNSEC_BRUTE
53817+ grsec_enable_brute = 1;
53818+#endif
53819+#ifdef CONFIG_GRKERNSEC_DMESG
53820+ grsec_enable_dmesg = 1;
53821+#endif
53822+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53823+ grsec_enable_blackhole = 1;
53824+ grsec_lastack_retries = 4;
53825+#endif
53826+#ifdef CONFIG_GRKERNSEC_FIFO
53827+ grsec_enable_fifo = 1;
53828+#endif
53829+#ifdef CONFIG_GRKERNSEC_EXECLOG
53830+ grsec_enable_execlog = 1;
53831+#endif
53832+#ifdef CONFIG_GRKERNSEC_SIGNAL
53833+ grsec_enable_signal = 1;
53834+#endif
53835+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53836+ grsec_enable_forkfail = 1;
53837+#endif
53838+#ifdef CONFIG_GRKERNSEC_TIME
53839+ grsec_enable_time = 1;
53840+#endif
53841+#ifdef CONFIG_GRKERNSEC_RESLOG
53842+ grsec_resource_logging = 1;
53843+#endif
53844+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53845+ grsec_enable_chroot_findtask = 1;
53846+#endif
53847+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53848+ grsec_enable_chroot_unix = 1;
53849+#endif
53850+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53851+ grsec_enable_chroot_mount = 1;
53852+#endif
53853+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53854+ grsec_enable_chroot_fchdir = 1;
53855+#endif
53856+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53857+ grsec_enable_chroot_shmat = 1;
53858+#endif
53859+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53860+ grsec_enable_audit_ptrace = 1;
53861+#endif
53862+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53863+ grsec_enable_chroot_double = 1;
53864+#endif
53865+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53866+ grsec_enable_chroot_pivot = 1;
53867+#endif
53868+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53869+ grsec_enable_chroot_chdir = 1;
53870+#endif
53871+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53872+ grsec_enable_chroot_chmod = 1;
53873+#endif
53874+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53875+ grsec_enable_chroot_mknod = 1;
53876+#endif
53877+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53878+ grsec_enable_chroot_nice = 1;
53879+#endif
53880+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53881+ grsec_enable_chroot_execlog = 1;
53882+#endif
53883+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53884+ grsec_enable_chroot_caps = 1;
53885+#endif
53886+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53887+ grsec_enable_chroot_sysctl = 1;
53888+#endif
53889+#ifdef CONFIG_GRKERNSEC_TPE
53890+ grsec_enable_tpe = 1;
53891+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
53892+#ifdef CONFIG_GRKERNSEC_TPE_ALL
53893+ grsec_enable_tpe_all = 1;
53894+#endif
53895+#endif
53896+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53897+ grsec_enable_socket_all = 1;
53898+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
53899+#endif
53900+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53901+ grsec_enable_socket_client = 1;
53902+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
53903+#endif
53904+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53905+ grsec_enable_socket_server = 1;
53906+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
53907+#endif
53908+#endif
53909+
53910+ return;
53911+}
53912diff -urNp linux-2.6.32.45/grsecurity/grsec_link.c linux-2.6.32.45/grsecurity/grsec_link.c
53913--- linux-2.6.32.45/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
53914+++ linux-2.6.32.45/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
53915@@ -0,0 +1,43 @@
53916+#include <linux/kernel.h>
53917+#include <linux/sched.h>
53918+#include <linux/fs.h>
53919+#include <linux/file.h>
53920+#include <linux/grinternal.h>
53921+
53922+int
53923+gr_handle_follow_link(const struct inode *parent,
53924+ const struct inode *inode,
53925+ const struct dentry *dentry, const struct vfsmount *mnt)
53926+{
53927+#ifdef CONFIG_GRKERNSEC_LINK
53928+ const struct cred *cred = current_cred();
53929+
53930+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
53931+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
53932+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
53933+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
53934+ return -EACCES;
53935+ }
53936+#endif
53937+ return 0;
53938+}
53939+
53940+int
53941+gr_handle_hardlink(const struct dentry *dentry,
53942+ const struct vfsmount *mnt,
53943+ struct inode *inode, const int mode, const char *to)
53944+{
53945+#ifdef CONFIG_GRKERNSEC_LINK
53946+ const struct cred *cred = current_cred();
53947+
53948+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
53949+ (!S_ISREG(mode) || (mode & S_ISUID) ||
53950+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
53951+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
53952+ !capable(CAP_FOWNER) && cred->uid) {
53953+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
53954+ return -EPERM;
53955+ }
53956+#endif
53957+ return 0;
53958+}
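
gr_handle_hardlink() above refuses a hardlink when an unprivileged caller who does not own the inode targets anything other than a plain, non-setuid, non-setgid-executable regular file it could already read and write. A compact restatement of that predicate as a stand-alone function (uid 0 and CAP_FOWNER are collapsed into a single "privileged" flag here, which is a simplification):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* 1 = refuse the hardlink, 0 = allow.  "privileged" stands in for both
 * uid 0 and CAP_FOWNER (an assumption of this sketch); "caller_can_rw"
 * stands in for a successful inode_permission(MAY_READ | MAY_WRITE). */
static int deny_hardlink(uid_t fsuid, uid_t inode_uid, mode_t mode,
			 int caller_can_rw, int privileged)
{
	return fsuid != inode_uid &&
	       (!S_ISREG(mode) || (mode & S_ISUID) ||
		((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
		!caller_can_rw) &&
	       !privileged;
}

int main(void)
{
	/* root-owned 0600 file, unreadable to the caller: denied */
	printf("link to root-owned 0600 file: %s\n",
	       deny_hardlink(1000, 0, S_IFREG | 0600, 0, 0) ? "denied" : "allowed");
	/* the caller's own readable/writable file: allowed */
	printf("link to own file:             %s\n",
	       deny_hardlink(1000, 1000, S_IFREG | 0644, 1, 0) ? "denied" : "allowed");
	return 0;
}
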
53959diff -urNp linux-2.6.32.45/grsecurity/grsec_log.c linux-2.6.32.45/grsecurity/grsec_log.c
53960--- linux-2.6.32.45/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
53961+++ linux-2.6.32.45/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
53962@@ -0,0 +1,310 @@
53963+#include <linux/kernel.h>
53964+#include <linux/sched.h>
53965+#include <linux/file.h>
53966+#include <linux/tty.h>
53967+#include <linux/fs.h>
53968+#include <linux/grinternal.h>
53969+
53970+#ifdef CONFIG_TREE_PREEMPT_RCU
53971+#define DISABLE_PREEMPT() preempt_disable()
53972+#define ENABLE_PREEMPT() preempt_enable()
53973+#else
53974+#define DISABLE_PREEMPT()
53975+#define ENABLE_PREEMPT()
53976+#endif
53977+
53978+#define BEGIN_LOCKS(x) \
53979+ DISABLE_PREEMPT(); \
53980+ rcu_read_lock(); \
53981+ read_lock(&tasklist_lock); \
53982+ read_lock(&grsec_exec_file_lock); \
53983+ if (x != GR_DO_AUDIT) \
53984+ spin_lock(&grsec_alert_lock); \
53985+ else \
53986+ spin_lock(&grsec_audit_lock)
53987+
53988+#define END_LOCKS(x) \
53989+ if (x != GR_DO_AUDIT) \
53990+ spin_unlock(&grsec_alert_lock); \
53991+ else \
53992+ spin_unlock(&grsec_audit_lock); \
53993+ read_unlock(&grsec_exec_file_lock); \
53994+ read_unlock(&tasklist_lock); \
53995+ rcu_read_unlock(); \
53996+ ENABLE_PREEMPT(); \
53997+ if (x == GR_DONT_AUDIT) \
53998+ gr_handle_alertkill(current)
53999+
54000+enum {
54001+ FLOODING,
54002+ NO_FLOODING
54003+};
54004+
54005+extern char *gr_alert_log_fmt;
54006+extern char *gr_audit_log_fmt;
54007+extern char *gr_alert_log_buf;
54008+extern char *gr_audit_log_buf;
54009+
54010+static int gr_log_start(int audit)
54011+{
54012+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
54013+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
54014+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54015+
54016+ if (audit == GR_DO_AUDIT)
54017+ goto set_fmt;
54018+
54019+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
54020+ grsec_alert_wtime = jiffies;
54021+ grsec_alert_fyet = 0;
54022+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
54023+ grsec_alert_fyet++;
54024+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
54025+ grsec_alert_wtime = jiffies;
54026+ grsec_alert_fyet++;
54027+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
54028+ return FLOODING;
54029+ } else return FLOODING;
54030+
54031+set_fmt:
54032+ memset(buf, 0, PAGE_SIZE);
54033+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
54034+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
54035+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54036+ } else if (current->signal->curr_ip) {
54037+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
54038+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
54039+ } else if (gr_acl_is_enabled()) {
54040+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
54041+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54042+ } else {
54043+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
54044+ strcpy(buf, fmt);
54045+ }
54046+
54047+ return NO_FLOODING;
54048+}
54049+
54050+static void gr_log_middle(int audit, const char *msg, va_list ap)
54051+ __attribute__ ((format (printf, 2, 0)));
54052+
54053+static void gr_log_middle(int audit, const char *msg, va_list ap)
54054+{
54055+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54056+ unsigned int len = strlen(buf);
54057+
54058+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54059+
54060+ return;
54061+}
54062+
54063+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54064+ __attribute__ ((format (printf, 2, 3)));
54065+
54066+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54067+{
54068+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54069+ unsigned int len = strlen(buf);
54070+ va_list ap;
54071+
54072+ va_start(ap, msg);
54073+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54074+ va_end(ap);
54075+
54076+ return;
54077+}
54078+
54079+static void gr_log_end(int audit)
54080+{
54081+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54082+ unsigned int len = strlen(buf);
54083+
54084+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
54085+ printk("%s\n", buf);
54086+
54087+ return;
54088+}
54089+
54090+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
54091+{
54092+ int logtype;
54093+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
54094+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
54095+ void *voidptr = NULL;
54096+ int num1 = 0, num2 = 0;
54097+ unsigned long ulong1 = 0, ulong2 = 0;
54098+ struct dentry *dentry = NULL;
54099+ struct vfsmount *mnt = NULL;
54100+ struct file *file = NULL;
54101+ struct task_struct *task = NULL;
54102+ const struct cred *cred, *pcred;
54103+ va_list ap;
54104+
54105+ BEGIN_LOCKS(audit);
54106+ logtype = gr_log_start(audit);
54107+ if (logtype == FLOODING) {
54108+ END_LOCKS(audit);
54109+ return;
54110+ }
54111+ va_start(ap, argtypes);
54112+ switch (argtypes) {
54113+ case GR_TTYSNIFF:
54114+ task = va_arg(ap, struct task_struct *);
54115+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54116+ break;
54117+ case GR_SYSCTL_HIDDEN:
54118+ str1 = va_arg(ap, char *);
54119+ gr_log_middle_varargs(audit, msg, result, str1);
54120+ break;
54121+ case GR_RBAC:
54122+ dentry = va_arg(ap, struct dentry *);
54123+ mnt = va_arg(ap, struct vfsmount *);
54124+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54125+ break;
54126+ case GR_RBAC_STR:
54127+ dentry = va_arg(ap, struct dentry *);
54128+ mnt = va_arg(ap, struct vfsmount *);
54129+ str1 = va_arg(ap, char *);
54130+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54131+ break;
54132+ case GR_STR_RBAC:
54133+ str1 = va_arg(ap, char *);
54134+ dentry = va_arg(ap, struct dentry *);
54135+ mnt = va_arg(ap, struct vfsmount *);
54136+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54137+ break;
54138+ case GR_RBAC_MODE2:
54139+ dentry = va_arg(ap, struct dentry *);
54140+ mnt = va_arg(ap, struct vfsmount *);
54141+ str1 = va_arg(ap, char *);
54142+ str2 = va_arg(ap, char *);
54143+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54144+ break;
54145+ case GR_RBAC_MODE3:
54146+ dentry = va_arg(ap, struct dentry *);
54147+ mnt = va_arg(ap, struct vfsmount *);
54148+ str1 = va_arg(ap, char *);
54149+ str2 = va_arg(ap, char *);
54150+ str3 = va_arg(ap, char *);
54151+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54152+ break;
54153+ case GR_FILENAME:
54154+ dentry = va_arg(ap, struct dentry *);
54155+ mnt = va_arg(ap, struct vfsmount *);
54156+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54157+ break;
54158+ case GR_STR_FILENAME:
54159+ str1 = va_arg(ap, char *);
54160+ dentry = va_arg(ap, struct dentry *);
54161+ mnt = va_arg(ap, struct vfsmount *);
54162+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54163+ break;
54164+ case GR_FILENAME_STR:
54165+ dentry = va_arg(ap, struct dentry *);
54166+ mnt = va_arg(ap, struct vfsmount *);
54167+ str1 = va_arg(ap, char *);
54168+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54169+ break;
54170+ case GR_FILENAME_TWO_INT:
54171+ dentry = va_arg(ap, struct dentry *);
54172+ mnt = va_arg(ap, struct vfsmount *);
54173+ num1 = va_arg(ap, int);
54174+ num2 = va_arg(ap, int);
54175+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54176+ break;
54177+ case GR_FILENAME_TWO_INT_STR:
54178+ dentry = va_arg(ap, struct dentry *);
54179+ mnt = va_arg(ap, struct vfsmount *);
54180+ num1 = va_arg(ap, int);
54181+ num2 = va_arg(ap, int);
54182+ str1 = va_arg(ap, char *);
54183+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54184+ break;
54185+ case GR_TEXTREL:
54186+ file = va_arg(ap, struct file *);
54187+ ulong1 = va_arg(ap, unsigned long);
54188+ ulong2 = va_arg(ap, unsigned long);
54189+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54190+ break;
54191+ case GR_PTRACE:
54192+ task = va_arg(ap, struct task_struct *);
54193+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54194+ break;
54195+ case GR_RESOURCE:
54196+ task = va_arg(ap, struct task_struct *);
54197+ cred = __task_cred(task);
54198+ pcred = __task_cred(task->real_parent);
54199+ ulong1 = va_arg(ap, unsigned long);
54200+ str1 = va_arg(ap, char *);
54201+ ulong2 = va_arg(ap, unsigned long);
54202+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54203+ break;
54204+ case GR_CAP:
54205+ task = va_arg(ap, struct task_struct *);
54206+ cred = __task_cred(task);
54207+ pcred = __task_cred(task->real_parent);
54208+ str1 = va_arg(ap, char *);
54209+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54210+ break;
54211+ case GR_SIG:
54212+ str1 = va_arg(ap, char *);
54213+ voidptr = va_arg(ap, void *);
54214+ gr_log_middle_varargs(audit, msg, str1, voidptr);
54215+ break;
54216+ case GR_SIG2:
54217+ task = va_arg(ap, struct task_struct *);
54218+ cred = __task_cred(task);
54219+ pcred = __task_cred(task->real_parent);
54220+ num1 = va_arg(ap, int);
54221+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54222+ break;
54223+ case GR_CRASH1:
54224+ task = va_arg(ap, struct task_struct *);
54225+ cred = __task_cred(task);
54226+ pcred = __task_cred(task->real_parent);
54227+ ulong1 = va_arg(ap, unsigned long);
54228+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54229+ break;
54230+ case GR_CRASH2:
54231+ task = va_arg(ap, struct task_struct *);
54232+ cred = __task_cred(task);
54233+ pcred = __task_cred(task->real_parent);
54234+ ulong1 = va_arg(ap, unsigned long);
54235+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54236+ break;
54237+ case GR_RWXMAP:
54238+ file = va_arg(ap, struct file *);
54239+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54240+ break;
54241+ case GR_PSACCT:
54242+ {
54243+ unsigned int wday, cday;
54244+ __u8 whr, chr;
54245+ __u8 wmin, cmin;
54246+ __u8 wsec, csec;
54247+ char cur_tty[64] = { 0 };
54248+ char parent_tty[64] = { 0 };
54249+
54250+ task = va_arg(ap, struct task_struct *);
54251+ wday = va_arg(ap, unsigned int);
54252+ cday = va_arg(ap, unsigned int);
54253+ whr = va_arg(ap, int);
54254+ chr = va_arg(ap, int);
54255+ wmin = va_arg(ap, int);
54256+ cmin = va_arg(ap, int);
54257+ wsec = va_arg(ap, int);
54258+ csec = va_arg(ap, int);
54259+ ulong1 = va_arg(ap, unsigned long);
54260+ cred = __task_cred(task);
54261+ pcred = __task_cred(task->real_parent);
54262+
54263+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54264+ }
54265+ break;
54266+ default:
54267+ gr_log_middle(audit, msg, ap);
54268+ }
54269+ va_end(ap);
54270+ gr_log_end(audit);
54271+ END_LOCKS(audit);
54272+}
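
gr_log_start() above implements the alert flood control: each CONFIG_GRKERNSEC_FLOODTIME window admits roughly CONFIG_GRKERNSEC_FLOODBURST alerts, after which further alerts are suppressed (FLOODING) until a full window has passed. A simplified stand-alone sketch of that window/burst decision, using assumed values of 10 seconds and 6 alerts rather than the real Kconfig settings:

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10	/* seconds per window (assumed value)  */
#define FLOODBURST 6	/* alerts allowed per window (assumed) */

static time_t window_start;
static unsigned int alerts_in_window;

/* Returns 1 if the alert may be emitted, 0 if it is suppressed,
 * mirroring the NO_FLOODING/FLOODING decision in gr_log_start(). */
static int alert_allowed(time_t now)
{
	if (!window_start || now - window_start > FLOODTIME) {
		window_start = now;	/* a new window opens */
		alerts_in_window = 0;
		return 1;
	}
	if (alerts_in_window < FLOODBURST) {
		alerts_in_window++;	/* still within the burst budget */
		return 1;
	}
	return 0;			/* flooding: drop this alert */
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		printf("alert %d: %s\n", i,
		       alert_allowed(time(NULL)) ? "logged" : "suppressed");
	return 0;
}
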
54273diff -urNp linux-2.6.32.45/grsecurity/grsec_mem.c linux-2.6.32.45/grsecurity/grsec_mem.c
54274--- linux-2.6.32.45/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54275+++ linux-2.6.32.45/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
54276@@ -0,0 +1,33 @@
54277+#include <linux/kernel.h>
54278+#include <linux/sched.h>
54279+#include <linux/mm.h>
54280+#include <linux/mman.h>
54281+#include <linux/grinternal.h>
54282+
54283+void
54284+gr_handle_ioperm(void)
54285+{
54286+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54287+ return;
54288+}
54289+
54290+void
54291+gr_handle_iopl(void)
54292+{
54293+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54294+ return;
54295+}
54296+
54297+void
54298+gr_handle_mem_readwrite(u64 from, u64 to)
54299+{
54300+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54301+ return;
54302+}
54303+
54304+void
54305+gr_handle_vm86(void)
54306+{
54307+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54308+ return;
54309+}
54310diff -urNp linux-2.6.32.45/grsecurity/grsec_mount.c linux-2.6.32.45/grsecurity/grsec_mount.c
54311--- linux-2.6.32.45/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54312+++ linux-2.6.32.45/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
54313@@ -0,0 +1,62 @@
54314+#include <linux/kernel.h>
54315+#include <linux/sched.h>
54316+#include <linux/mount.h>
54317+#include <linux/grsecurity.h>
54318+#include <linux/grinternal.h>
54319+
54320+void
54321+gr_log_remount(const char *devname, const int retval)
54322+{
54323+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54324+ if (grsec_enable_mount && (retval >= 0))
54325+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54326+#endif
54327+ return;
54328+}
54329+
54330+void
54331+gr_log_unmount(const char *devname, const int retval)
54332+{
54333+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54334+ if (grsec_enable_mount && (retval >= 0))
54335+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54336+#endif
54337+ return;
54338+}
54339+
54340+void
54341+gr_log_mount(const char *from, const char *to, const int retval)
54342+{
54343+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54344+ if (grsec_enable_mount && (retval >= 0))
54345+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54346+#endif
54347+ return;
54348+}
54349+
54350+int
54351+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54352+{
54353+#ifdef CONFIG_GRKERNSEC_ROFS
54354+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54355+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54356+ return -EPERM;
54357+ } else
54358+ return 0;
54359+#endif
54360+ return 0;
54361+}
54362+
54363+int
54364+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54365+{
54366+#ifdef CONFIG_GRKERNSEC_ROFS
54367+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54368+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54369+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54370+ return -EPERM;
54371+ } else
54372+ return 0;
54373+#endif
54374+ return 0;
54375+}
54376diff -urNp linux-2.6.32.45/grsecurity/grsec_pax.c linux-2.6.32.45/grsecurity/grsec_pax.c
54377--- linux-2.6.32.45/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54378+++ linux-2.6.32.45/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
54379@@ -0,0 +1,36 @@
54380+#include <linux/kernel.h>
54381+#include <linux/sched.h>
54382+#include <linux/mm.h>
54383+#include <linux/file.h>
54384+#include <linux/grinternal.h>
54385+#include <linux/grsecurity.h>
54386+
54387+void
54388+gr_log_textrel(struct vm_area_struct * vma)
54389+{
54390+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54391+ if (grsec_enable_audit_textrel)
54392+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54393+#endif
54394+ return;
54395+}
54396+
54397+void
54398+gr_log_rwxmmap(struct file *file)
54399+{
54400+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54401+ if (grsec_enable_log_rwxmaps)
54402+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54403+#endif
54404+ return;
54405+}
54406+
54407+void
54408+gr_log_rwxmprotect(struct file *file)
54409+{
54410+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54411+ if (grsec_enable_log_rwxmaps)
54412+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54413+#endif
54414+ return;
54415+}
54416diff -urNp linux-2.6.32.45/grsecurity/grsec_ptrace.c linux-2.6.32.45/grsecurity/grsec_ptrace.c
54417--- linux-2.6.32.45/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54418+++ linux-2.6.32.45/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
54419@@ -0,0 +1,14 @@
54420+#include <linux/kernel.h>
54421+#include <linux/sched.h>
54422+#include <linux/grinternal.h>
54423+#include <linux/grsecurity.h>
54424+
54425+void
54426+gr_audit_ptrace(struct task_struct *task)
54427+{
54428+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54429+ if (grsec_enable_audit_ptrace)
54430+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54431+#endif
54432+ return;
54433+}
54434diff -urNp linux-2.6.32.45/grsecurity/grsec_sig.c linux-2.6.32.45/grsecurity/grsec_sig.c
54435--- linux-2.6.32.45/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54436+++ linux-2.6.32.45/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
54437@@ -0,0 +1,205 @@
54438+#include <linux/kernel.h>
54439+#include <linux/sched.h>
54440+#include <linux/delay.h>
54441+#include <linux/grsecurity.h>
54442+#include <linux/grinternal.h>
54443+#include <linux/hardirq.h>
54444+
54445+char *signames[] = {
54446+ [SIGSEGV] = "Segmentation fault",
54447+ [SIGILL] = "Illegal instruction",
54448+ [SIGABRT] = "Abort",
54449+ [SIGBUS] = "Invalid alignment/Bus error"
54450+};
54451+
54452+void
54453+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54454+{
54455+#ifdef CONFIG_GRKERNSEC_SIGNAL
54456+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54457+ (sig == SIGABRT) || (sig == SIGBUS))) {
54458+ if (t->pid == current->pid) {
54459+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54460+ } else {
54461+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54462+ }
54463+ }
54464+#endif
54465+ return;
54466+}
54467+
54468+int
54469+gr_handle_signal(const struct task_struct *p, const int sig)
54470+{
54471+#ifdef CONFIG_GRKERNSEC
54472+ if (current->pid > 1 && gr_check_protected_task(p)) {
54473+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54474+ return -EPERM;
54475+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54476+ return -EPERM;
54477+ }
54478+#endif
54479+ return 0;
54480+}
54481+
54482+#ifdef CONFIG_GRKERNSEC
54483+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54484+
54485+int gr_fake_force_sig(int sig, struct task_struct *t)
54486+{
54487+ unsigned long int flags;
54488+ int ret, blocked, ignored;
54489+ struct k_sigaction *action;
54490+
54491+ spin_lock_irqsave(&t->sighand->siglock, flags);
54492+ action = &t->sighand->action[sig-1];
54493+ ignored = action->sa.sa_handler == SIG_IGN;
54494+ blocked = sigismember(&t->blocked, sig);
54495+ if (blocked || ignored) {
54496+ action->sa.sa_handler = SIG_DFL;
54497+ if (blocked) {
54498+ sigdelset(&t->blocked, sig);
54499+ recalc_sigpending_and_wake(t);
54500+ }
54501+ }
54502+ if (action->sa.sa_handler == SIG_DFL)
54503+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
54504+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54505+
54506+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
54507+
54508+ return ret;
54509+}
54510+#endif
54511+
54512+#ifdef CONFIG_GRKERNSEC_BRUTE
54513+#define GR_USER_BAN_TIME (15 * 60)
54514+
54515+static int __get_dumpable(unsigned long mm_flags)
54516+{
54517+ int ret;
54518+
54519+ ret = mm_flags & MMF_DUMPABLE_MASK;
54520+ return (ret >= 2) ? 2 : ret;
54521+}
54522+#endif
54523+
54524+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54525+{
54526+#ifdef CONFIG_GRKERNSEC_BRUTE
54527+ uid_t uid = 0;
54528+
54529+ if (!grsec_enable_brute)
54530+ return;
54531+
54532+ rcu_read_lock();
54533+ read_lock(&tasklist_lock);
54534+ read_lock(&grsec_exec_file_lock);
54535+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54536+ p->real_parent->brute = 1;
54537+ else {
54538+ const struct cred *cred = __task_cred(p), *cred2;
54539+ struct task_struct *tsk, *tsk2;
54540+
54541+ if (!__get_dumpable(mm_flags) && cred->uid) {
54542+ struct user_struct *user;
54543+
54544+ uid = cred->uid;
54545+
54546+			/* the reference taken here is put at exec time, once the ban has expired */
54547+ user = find_user(uid);
54548+ if (user == NULL)
54549+ goto unlock;
54550+ user->banned = 1;
54551+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54552+ if (user->ban_expires == ~0UL)
54553+ user->ban_expires--;
54554+
54555+ do_each_thread(tsk2, tsk) {
54556+ cred2 = __task_cred(tsk);
54557+ if (tsk != p && cred2->uid == uid)
54558+ gr_fake_force_sig(SIGKILL, tsk);
54559+ } while_each_thread(tsk2, tsk);
54560+ }
54561+ }
54562+unlock:
54563+ read_unlock(&grsec_exec_file_lock);
54564+ read_unlock(&tasklist_lock);
54565+ rcu_read_unlock();
54566+
54567+ if (uid)
54568+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54569+#endif
54570+ return;
54571+}
54572+
54573+void gr_handle_brute_check(void)
54574+{
54575+#ifdef CONFIG_GRKERNSEC_BRUTE
54576+ if (current->brute)
54577+ msleep(30 * 1000);
54578+#endif
54579+ return;
54580+}
54581+
54582+void gr_handle_kernel_exploit(void)
54583+{
54584+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54585+ const struct cred *cred;
54586+ struct task_struct *tsk, *tsk2;
54587+ struct user_struct *user;
54588+ uid_t uid;
54589+
54590+ if (in_irq() || in_serving_softirq() || in_nmi())
54591+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54592+
54593+ uid = current_uid();
54594+
54595+ if (uid == 0)
54596+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
54597+ else {
54598+ /* kill all the processes of this user, hold a reference
54599+ to their creds struct, and prevent them from creating
54600+ another process until system reset
54601+ */
54602+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54603+ /* we intentionally leak this ref */
54604+ user = get_uid(current->cred->user);
54605+ if (user) {
54606+ user->banned = 1;
54607+ user->ban_expires = ~0UL;
54608+ }
54609+
54610+ read_lock(&tasklist_lock);
54611+ do_each_thread(tsk2, tsk) {
54612+ cred = __task_cred(tsk);
54613+ if (cred->uid == uid)
54614+ gr_fake_force_sig(SIGKILL, tsk);
54615+ } while_each_thread(tsk2, tsk);
54616+ read_unlock(&tasklist_lock);
54617+ }
54618+#endif
54619+}
54620+
54621+int __gr_process_user_ban(struct user_struct *user)
54622+{
54623+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54624+ if (unlikely(user->banned)) {
54625+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54626+ user->banned = 0;
54627+ user->ban_expires = 0;
54628+ free_uid(user);
54629+ } else
54630+ return -EPERM;
54631+ }
54632+#endif
54633+ return 0;
54634+}
54635+
54636+int gr_process_user_ban(void)
54637+{
54638+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54639+ return __gr_process_user_ban(current->cred->user);
54640+#endif
54641+ return 0;
54642+}
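
__gr_process_user_ban() above treats ban_expires == ~0UL as a permanent ban (the kernel-exploit lockout case) and clears any other ban once get_seconds() reaches the expiry time. A user-space sketch of that check, with a hypothetical ban_state struct standing in for user_struct and a plain comparison standing in for time_after_eq():

#include <stdio.h>
#include <time.h>

#define BAN_NEVER_EXPIRES (~0UL)	/* mirrors the ~0UL sentinel above */

struct ban_state {			/* hypothetical stand-in for user_struct */
	int banned;
	unsigned long ban_expires;	/* seconds, or BAN_NEVER_EXPIRES */
};

/* Returns -1 (EPERM-like) while the ban is active, 0 otherwise;
 * an expired ban is cleared on the way through. */
static int process_ban(struct ban_state *b, unsigned long now)
{
	if (b->banned) {
		if (b->ban_expires != BAN_NEVER_EXPIRES && now >= b->ban_expires) {
			b->banned = 0;
			b->ban_expires = 0;
		} else {
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	unsigned long now = (unsigned long)time(NULL);
	struct ban_state b = { 1, now + 15 * 60 };	/* a 15-minute ban */

	printf("now:            %d\n", process_ban(&b, now));
	printf("16 min later:   %d\n", process_ban(&b, now + 16 * 60));
	return 0;
}
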
54643diff -urNp linux-2.6.32.45/grsecurity/grsec_sock.c linux-2.6.32.45/grsecurity/grsec_sock.c
54644--- linux-2.6.32.45/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54645+++ linux-2.6.32.45/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
54646@@ -0,0 +1,275 @@
54647+#include <linux/kernel.h>
54648+#include <linux/module.h>
54649+#include <linux/sched.h>
54650+#include <linux/file.h>
54651+#include <linux/net.h>
54652+#include <linux/in.h>
54653+#include <linux/ip.h>
54654+#include <net/sock.h>
54655+#include <net/inet_sock.h>
54656+#include <linux/grsecurity.h>
54657+#include <linux/grinternal.h>
54658+#include <linux/gracl.h>
54659+
54660+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
54661+EXPORT_SYMBOL(gr_cap_rtnetlink);
54662+
54663+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54664+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54665+
54666+EXPORT_SYMBOL(gr_search_udp_recvmsg);
54667+EXPORT_SYMBOL(gr_search_udp_sendmsg);
54668+
54669+#ifdef CONFIG_UNIX_MODULE
54670+EXPORT_SYMBOL(gr_acl_handle_unix);
54671+EXPORT_SYMBOL(gr_acl_handle_mknod);
54672+EXPORT_SYMBOL(gr_handle_chroot_unix);
54673+EXPORT_SYMBOL(gr_handle_create);
54674+#endif
54675+
54676+#ifdef CONFIG_GRKERNSEC
54677+#define gr_conn_table_size 32749
54678+struct conn_table_entry {
54679+ struct conn_table_entry *next;
54680+ struct signal_struct *sig;
54681+};
54682+
54683+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
54684+DEFINE_SPINLOCK(gr_conn_table_lock);
54685+
54686+extern const char * gr_socktype_to_name(unsigned char type);
54687+extern const char * gr_proto_to_name(unsigned char proto);
54688+extern const char * gr_sockfamily_to_name(unsigned char family);
54689+
54690+static __inline__ int
54691+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
54692+{
54693+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
54694+}
54695+
54696+static __inline__ int
54697+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
54698+ __u16 sport, __u16 dport)
54699+{
54700+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
54701+ sig->gr_sport == sport && sig->gr_dport == dport))
54702+ return 1;
54703+ else
54704+ return 0;
54705+}
54706+
54707+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
54708+{
54709+ struct conn_table_entry **match;
54710+ unsigned int index;
54711+
54712+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54713+ sig->gr_sport, sig->gr_dport,
54714+ gr_conn_table_size);
54715+
54716+ newent->sig = sig;
54717+
54718+ match = &gr_conn_table[index];
54719+ newent->next = *match;
54720+ *match = newent;
54721+
54722+ return;
54723+}
54724+
54725+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
54726+{
54727+ struct conn_table_entry *match, *last = NULL;
54728+ unsigned int index;
54729+
54730+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54731+ sig->gr_sport, sig->gr_dport,
54732+ gr_conn_table_size);
54733+
54734+ match = gr_conn_table[index];
54735+ while (match && !conn_match(match->sig,
54736+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
54737+ sig->gr_dport)) {
54738+ last = match;
54739+ match = match->next;
54740+ }
54741+
54742+ if (match) {
54743+ if (last)
54744+ last->next = match->next;
54745+ else
54746+ gr_conn_table[index] = NULL;
54747+ kfree(match);
54748+ }
54749+
54750+ return;
54751+}
54752+
54753+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
54754+ __u16 sport, __u16 dport)
54755+{
54756+ struct conn_table_entry *match;
54757+ unsigned int index;
54758+
54759+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
54760+
54761+ match = gr_conn_table[index];
54762+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
54763+ match = match->next;
54764+
54765+ if (match)
54766+ return match->sig;
54767+ else
54768+ return NULL;
54769+}
54770+
54771+#endif
54772+
54773+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
54774+{
54775+#ifdef CONFIG_GRKERNSEC
54776+ struct signal_struct *sig = task->signal;
54777+ struct conn_table_entry *newent;
54778+
54779+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
54780+ if (newent == NULL)
54781+ return;
54782+ /* no bh lock needed since we are called with bh disabled */
54783+ spin_lock(&gr_conn_table_lock);
54784+ gr_del_task_from_ip_table_nolock(sig);
54785+ sig->gr_saddr = inet->rcv_saddr;
54786+ sig->gr_daddr = inet->daddr;
54787+ sig->gr_sport = inet->sport;
54788+ sig->gr_dport = inet->dport;
54789+ gr_add_to_task_ip_table_nolock(sig, newent);
54790+ spin_unlock(&gr_conn_table_lock);
54791+#endif
54792+ return;
54793+}
54794+
54795+void gr_del_task_from_ip_table(struct task_struct *task)
54796+{
54797+#ifdef CONFIG_GRKERNSEC
54798+ spin_lock_bh(&gr_conn_table_lock);
54799+ gr_del_task_from_ip_table_nolock(task->signal);
54800+ spin_unlock_bh(&gr_conn_table_lock);
54801+#endif
54802+ return;
54803+}
54804+
54805+void
54806+gr_attach_curr_ip(const struct sock *sk)
54807+{
54808+#ifdef CONFIG_GRKERNSEC
54809+ struct signal_struct *p, *set;
54810+ const struct inet_sock *inet = inet_sk(sk);
54811+
54812+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
54813+ return;
54814+
54815+ set = current->signal;
54816+
54817+ spin_lock_bh(&gr_conn_table_lock);
54818+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
54819+ inet->dport, inet->sport);
54820+ if (unlikely(p != NULL)) {
54821+ set->curr_ip = p->curr_ip;
54822+ set->used_accept = 1;
54823+ gr_del_task_from_ip_table_nolock(p);
54824+ spin_unlock_bh(&gr_conn_table_lock);
54825+ return;
54826+ }
54827+ spin_unlock_bh(&gr_conn_table_lock);
54828+
54829+ set->curr_ip = inet->daddr;
54830+ set->used_accept = 1;
54831+#endif
54832+ return;
54833+}
54834+
54835+int
54836+gr_handle_sock_all(const int family, const int type, const int protocol)
54837+{
54838+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54839+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
54840+ (family != AF_UNIX)) {
54841+ if (family == AF_INET)
54842+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
54843+ else
54844+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
54845+ return -EACCES;
54846+ }
54847+#endif
54848+ return 0;
54849+}
54850+
54851+int
54852+gr_handle_sock_server(const struct sockaddr *sck)
54853+{
54854+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54855+ if (grsec_enable_socket_server &&
54856+ in_group_p(grsec_socket_server_gid) &&
54857+ sck && (sck->sa_family != AF_UNIX) &&
54858+ (sck->sa_family != AF_LOCAL)) {
54859+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54860+ return -EACCES;
54861+ }
54862+#endif
54863+ return 0;
54864+}
54865+
54866+int
54867+gr_handle_sock_server_other(const struct sock *sck)
54868+{
54869+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54870+ if (grsec_enable_socket_server &&
54871+ in_group_p(grsec_socket_server_gid) &&
54872+ sck && (sck->sk_family != AF_UNIX) &&
54873+ (sck->sk_family != AF_LOCAL)) {
54874+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54875+ return -EACCES;
54876+ }
54877+#endif
54878+ return 0;
54879+}
54880+
54881+int
54882+gr_handle_sock_client(const struct sockaddr *sck)
54883+{
54884+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54885+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
54886+ sck && (sck->sa_family != AF_UNIX) &&
54887+ (sck->sa_family != AF_LOCAL)) {
54888+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
54889+ return -EACCES;
54890+ }
54891+#endif
54892+ return 0;
54893+}
54894+
54895+kernel_cap_t
54896+gr_cap_rtnetlink(struct sock *sock)
54897+{
54898+#ifdef CONFIG_GRKERNSEC
54899+ if (!gr_acl_is_enabled())
54900+ return current_cap();
54901+ else if (sock->sk_protocol == NETLINK_ISCSI &&
54902+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
54903+ gr_is_capable(CAP_SYS_ADMIN))
54904+ return current_cap();
54905+ else if (sock->sk_protocol == NETLINK_AUDIT &&
54906+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
54907+ gr_is_capable(CAP_AUDIT_WRITE) &&
54908+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
54909+ gr_is_capable(CAP_AUDIT_CONTROL))
54910+ return current_cap();
54911+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
54912+ ((sock->sk_protocol == NETLINK_ROUTE) ?
54913+ gr_is_capable_nolog(CAP_NET_ADMIN) :
54914+ gr_is_capable(CAP_NET_ADMIN)))
54915+ return current_cap();
54916+ else
54917+ return __cap_empty_set;
54918+#else
54919+ return current_cap();
54920+#endif
54921+}
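
The connection table above hashes a (saddr, daddr, sport, dport) 4-tuple into one of 32749 chained buckets so that, on accept(), gr_attach_curr_ip() can locate the signal_struct that registered the matching connection and propagate its curr_ip. A stand-alone sketch of the same hash and chained lookup (fixed-width types and a pid stand in for the kernel structures; names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 32749	/* same prime bucket count as gr_conn_table_size */

struct conn_entry {
	struct conn_entry *next;
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	int owner_pid;		/* stand-in for the signal_struct pointer */
};

static struct conn_entry *table[TABLE_SIZE];

/* Same mixing as conn_hash() in the patch. */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void conn_add(uint32_t saddr, uint32_t daddr,
		     uint16_t sport, uint16_t dport, int pid)
{
	unsigned int i = conn_hash(saddr, daddr, sport, dport);
	struct conn_entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->saddr = saddr; e->daddr = daddr;
	e->sport = sport; e->dport = dport;
	e->owner_pid = pid;
	e->next = table[i];	/* push onto the bucket's chain */
	table[i] = e;
}

static int conn_lookup(uint32_t saddr, uint32_t daddr,
		       uint16_t sport, uint16_t dport)
{
	struct conn_entry *e = table[conn_hash(saddr, daddr, sport, dport)];

	for (; e; e = e->next)
		if (e->saddr == saddr && e->daddr == daddr &&
		    e->sport == sport && e->dport == dport)
			return e->owner_pid;
	return -1;
}

int main(void)
{
	conn_add(0x0a000001, 0x0a000002, 12345, 80, 4242);
	printf("owner pid: %d\n", conn_lookup(0x0a000001, 0x0a000002, 12345, 80));
	return 0;
}
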
54922diff -urNp linux-2.6.32.45/grsecurity/grsec_sysctl.c linux-2.6.32.45/grsecurity/grsec_sysctl.c
54923--- linux-2.6.32.45/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
54924+++ linux-2.6.32.45/grsecurity/grsec_sysctl.c 2011-08-11 19:57:54.000000000 -0400
54925@@ -0,0 +1,479 @@
54926+#include <linux/kernel.h>
54927+#include <linux/sched.h>
54928+#include <linux/sysctl.h>
54929+#include <linux/grsecurity.h>
54930+#include <linux/grinternal.h>
54931+
54932+int
54933+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
54934+{
54935+#ifdef CONFIG_GRKERNSEC_SYSCTL
54936+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
54937+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
54938+ return -EACCES;
54939+ }
54940+#endif
54941+ return 0;
54942+}
54943+
54944+#ifdef CONFIG_GRKERNSEC_ROFS
54945+static int __maybe_unused one = 1;
54946+#endif
54947+
54948+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
54949+ctl_table grsecurity_table[] = {
54950+#ifdef CONFIG_GRKERNSEC_SYSCTL
54951+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
54952+#ifdef CONFIG_GRKERNSEC_IO
54953+ {
54954+ .ctl_name = CTL_UNNUMBERED,
54955+ .procname = "disable_priv_io",
54956+ .data = &grsec_disable_privio,
54957+ .maxlen = sizeof(int),
54958+ .mode = 0600,
54959+ .proc_handler = &proc_dointvec,
54960+ },
54961+#endif
54962+#endif
54963+#ifdef CONFIG_GRKERNSEC_LINK
54964+ {
54965+ .ctl_name = CTL_UNNUMBERED,
54966+ .procname = "linking_restrictions",
54967+ .data = &grsec_enable_link,
54968+ .maxlen = sizeof(int),
54969+ .mode = 0600,
54970+ .proc_handler = &proc_dointvec,
54971+ },
54972+#endif
54973+#ifdef CONFIG_GRKERNSEC_BRUTE
54974+ {
54975+ .ctl_name = CTL_UNNUMBERED,
54976+ .procname = "deter_bruteforce",
54977+ .data = &grsec_enable_brute,
54978+ .maxlen = sizeof(int),
54979+ .mode = 0600,
54980+ .proc_handler = &proc_dointvec,
54981+ },
54982+#endif
54983+#ifdef CONFIG_GRKERNSEC_FIFO
54984+ {
54985+ .ctl_name = CTL_UNNUMBERED,
54986+ .procname = "fifo_restrictions",
54987+ .data = &grsec_enable_fifo,
54988+ .maxlen = sizeof(int),
54989+ .mode = 0600,
54990+ .proc_handler = &proc_dointvec,
54991+ },
54992+#endif
54993+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54994+ {
54995+ .ctl_name = CTL_UNNUMBERED,
54996+ .procname = "ip_blackhole",
54997+ .data = &grsec_enable_blackhole,
54998+ .maxlen = sizeof(int),
54999+ .mode = 0600,
55000+ .proc_handler = &proc_dointvec,
55001+ },
55002+ {
55003+ .ctl_name = CTL_UNNUMBERED,
55004+ .procname = "lastack_retries",
55005+ .data = &grsec_lastack_retries,
55006+ .maxlen = sizeof(int),
55007+ .mode = 0600,
55008+ .proc_handler = &proc_dointvec,
55009+ },
55010+#endif
55011+#ifdef CONFIG_GRKERNSEC_EXECLOG
55012+ {
55013+ .ctl_name = CTL_UNNUMBERED,
55014+ .procname = "exec_logging",
55015+ .data = &grsec_enable_execlog,
55016+ .maxlen = sizeof(int),
55017+ .mode = 0600,
55018+ .proc_handler = &proc_dointvec,
55019+ },
55020+#endif
55021+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55022+ {
55023+ .ctl_name = CTL_UNNUMBERED,
55024+ .procname = "rwxmap_logging",
55025+ .data = &grsec_enable_log_rwxmaps,
55026+ .maxlen = sizeof(int),
55027+ .mode = 0600,
55028+ .proc_handler = &proc_dointvec,
55029+ },
55030+#endif
55031+#ifdef CONFIG_GRKERNSEC_SIGNAL
55032+ {
55033+ .ctl_name = CTL_UNNUMBERED,
55034+ .procname = "signal_logging",
55035+ .data = &grsec_enable_signal,
55036+ .maxlen = sizeof(int),
55037+ .mode = 0600,
55038+ .proc_handler = &proc_dointvec,
55039+ },
55040+#endif
55041+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55042+ {
55043+ .ctl_name = CTL_UNNUMBERED,
55044+ .procname = "forkfail_logging",
55045+ .data = &grsec_enable_forkfail,
55046+ .maxlen = sizeof(int),
55047+ .mode = 0600,
55048+ .proc_handler = &proc_dointvec,
55049+ },
55050+#endif
55051+#ifdef CONFIG_GRKERNSEC_TIME
55052+ {
55053+ .ctl_name = CTL_UNNUMBERED,
55054+ .procname = "timechange_logging",
55055+ .data = &grsec_enable_time,
55056+ .maxlen = sizeof(int),
55057+ .mode = 0600,
55058+ .proc_handler = &proc_dointvec,
55059+ },
55060+#endif
55061+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55062+ {
55063+ .ctl_name = CTL_UNNUMBERED,
55064+ .procname = "chroot_deny_shmat",
55065+ .data = &grsec_enable_chroot_shmat,
55066+ .maxlen = sizeof(int),
55067+ .mode = 0600,
55068+ .proc_handler = &proc_dointvec,
55069+ },
55070+#endif
55071+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55072+ {
55073+ .ctl_name = CTL_UNNUMBERED,
55074+ .procname = "chroot_deny_unix",
55075+ .data = &grsec_enable_chroot_unix,
55076+ .maxlen = sizeof(int),
55077+ .mode = 0600,
55078+ .proc_handler = &proc_dointvec,
55079+ },
55080+#endif
55081+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55082+ {
55083+ .ctl_name = CTL_UNNUMBERED,
55084+ .procname = "chroot_deny_mount",
55085+ .data = &grsec_enable_chroot_mount,
55086+ .maxlen = sizeof(int),
55087+ .mode = 0600,
55088+ .proc_handler = &proc_dointvec,
55089+ },
55090+#endif
55091+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55092+ {
55093+ .ctl_name = CTL_UNNUMBERED,
55094+ .procname = "chroot_deny_fchdir",
55095+ .data = &grsec_enable_chroot_fchdir,
55096+ .maxlen = sizeof(int),
55097+ .mode = 0600,
55098+ .proc_handler = &proc_dointvec,
55099+ },
55100+#endif
55101+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55102+ {
55103+ .ctl_name = CTL_UNNUMBERED,
55104+ .procname = "chroot_deny_chroot",
55105+ .data = &grsec_enable_chroot_double,
55106+ .maxlen = sizeof(int),
55107+ .mode = 0600,
55108+ .proc_handler = &proc_dointvec,
55109+ },
55110+#endif
55111+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55112+ {
55113+ .ctl_name = CTL_UNNUMBERED,
55114+ .procname = "chroot_deny_pivot",
55115+ .data = &grsec_enable_chroot_pivot,
55116+ .maxlen = sizeof(int),
55117+ .mode = 0600,
55118+ .proc_handler = &proc_dointvec,
55119+ },
55120+#endif
55121+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55122+ {
55123+ .ctl_name = CTL_UNNUMBERED,
55124+ .procname = "chroot_enforce_chdir",
55125+ .data = &grsec_enable_chroot_chdir,
55126+ .maxlen = sizeof(int),
55127+ .mode = 0600,
55128+ .proc_handler = &proc_dointvec,
55129+ },
55130+#endif
55131+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55132+ {
55133+ .ctl_name = CTL_UNNUMBERED,
55134+ .procname = "chroot_deny_chmod",
55135+ .data = &grsec_enable_chroot_chmod,
55136+ .maxlen = sizeof(int),
55137+ .mode = 0600,
55138+ .proc_handler = &proc_dointvec,
55139+ },
55140+#endif
55141+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55142+ {
55143+ .ctl_name = CTL_UNNUMBERED,
55144+ .procname = "chroot_deny_mknod",
55145+ .data = &grsec_enable_chroot_mknod,
55146+ .maxlen = sizeof(int),
55147+ .mode = 0600,
55148+ .proc_handler = &proc_dointvec,
55149+ },
55150+#endif
55151+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55152+ {
55153+ .ctl_name = CTL_UNNUMBERED,
55154+ .procname = "chroot_restrict_nice",
55155+ .data = &grsec_enable_chroot_nice,
55156+ .maxlen = sizeof(int),
55157+ .mode = 0600,
55158+ .proc_handler = &proc_dointvec,
55159+ },
55160+#endif
55161+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55162+ {
55163+ .ctl_name = CTL_UNNUMBERED,
55164+ .procname = "chroot_execlog",
55165+ .data = &grsec_enable_chroot_execlog,
55166+ .maxlen = sizeof(int),
55167+ .mode = 0600,
55168+ .proc_handler = &proc_dointvec,
55169+ },
55170+#endif
55171+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55172+ {
55173+ .ctl_name = CTL_UNNUMBERED,
55174+ .procname = "chroot_caps",
55175+ .data = &grsec_enable_chroot_caps,
55176+ .maxlen = sizeof(int),
55177+ .mode = 0600,
55178+ .proc_handler = &proc_dointvec,
55179+ },
55180+#endif
55181+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55182+ {
55183+ .ctl_name = CTL_UNNUMBERED,
55184+ .procname = "chroot_deny_sysctl",
55185+ .data = &grsec_enable_chroot_sysctl,
55186+ .maxlen = sizeof(int),
55187+ .mode = 0600,
55188+ .proc_handler = &proc_dointvec,
55189+ },
55190+#endif
55191+#ifdef CONFIG_GRKERNSEC_TPE
55192+ {
55193+ .ctl_name = CTL_UNNUMBERED,
55194+ .procname = "tpe",
55195+ .data = &grsec_enable_tpe,
55196+ .maxlen = sizeof(int),
55197+ .mode = 0600,
55198+ .proc_handler = &proc_dointvec,
55199+ },
55200+ {
55201+ .ctl_name = CTL_UNNUMBERED,
55202+ .procname = "tpe_gid",
55203+ .data = &grsec_tpe_gid,
55204+ .maxlen = sizeof(int),
55205+ .mode = 0600,
55206+ .proc_handler = &proc_dointvec,
55207+ },
55208+#endif
55209+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55210+ {
55211+ .ctl_name = CTL_UNNUMBERED,
55212+ .procname = "tpe_invert",
55213+ .data = &grsec_enable_tpe_invert,
55214+ .maxlen = sizeof(int),
55215+ .mode = 0600,
55216+ .proc_handler = &proc_dointvec,
55217+ },
55218+#endif
55219+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55220+ {
55221+ .ctl_name = CTL_UNNUMBERED,
55222+ .procname = "tpe_restrict_all",
55223+ .data = &grsec_enable_tpe_all,
55224+ .maxlen = sizeof(int),
55225+ .mode = 0600,
55226+ .proc_handler = &proc_dointvec,
55227+ },
55228+#endif
55229+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55230+ {
55231+ .ctl_name = CTL_UNNUMBERED,
55232+ .procname = "socket_all",
55233+ .data = &grsec_enable_socket_all,
55234+ .maxlen = sizeof(int),
55235+ .mode = 0600,
55236+ .proc_handler = &proc_dointvec,
55237+ },
55238+ {
55239+ .ctl_name = CTL_UNNUMBERED,
55240+ .procname = "socket_all_gid",
55241+ .data = &grsec_socket_all_gid,
55242+ .maxlen = sizeof(int),
55243+ .mode = 0600,
55244+ .proc_handler = &proc_dointvec,
55245+ },
55246+#endif
55247+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55248+ {
55249+ .ctl_name = CTL_UNNUMBERED,
55250+ .procname = "socket_client",
55251+ .data = &grsec_enable_socket_client,
55252+ .maxlen = sizeof(int),
55253+ .mode = 0600,
55254+ .proc_handler = &proc_dointvec,
55255+ },
55256+ {
55257+ .ctl_name = CTL_UNNUMBERED,
55258+ .procname = "socket_client_gid",
55259+ .data = &grsec_socket_client_gid,
55260+ .maxlen = sizeof(int),
55261+ .mode = 0600,
55262+ .proc_handler = &proc_dointvec,
55263+ },
55264+#endif
55265+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55266+ {
55267+ .ctl_name = CTL_UNNUMBERED,
55268+ .procname = "socket_server",
55269+ .data = &grsec_enable_socket_server,
55270+ .maxlen = sizeof(int),
55271+ .mode = 0600,
55272+ .proc_handler = &proc_dointvec,
55273+ },
55274+ {
55275+ .ctl_name = CTL_UNNUMBERED,
55276+ .procname = "socket_server_gid",
55277+ .data = &grsec_socket_server_gid,
55278+ .maxlen = sizeof(int),
55279+ .mode = 0600,
55280+ .proc_handler = &proc_dointvec,
55281+ },
55282+#endif
55283+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55284+ {
55285+ .ctl_name = CTL_UNNUMBERED,
55286+ .procname = "audit_group",
55287+ .data = &grsec_enable_group,
55288+ .maxlen = sizeof(int),
55289+ .mode = 0600,
55290+ .proc_handler = &proc_dointvec,
55291+ },
55292+ {
55293+ .ctl_name = CTL_UNNUMBERED,
55294+ .procname = "audit_gid",
55295+ .data = &grsec_audit_gid,
55296+ .maxlen = sizeof(int),
55297+ .mode = 0600,
55298+ .proc_handler = &proc_dointvec,
55299+ },
55300+#endif
55301+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55302+ {
55303+ .ctl_name = CTL_UNNUMBERED,
55304+ .procname = "audit_chdir",
55305+ .data = &grsec_enable_chdir,
55306+ .maxlen = sizeof(int),
55307+ .mode = 0600,
55308+ .proc_handler = &proc_dointvec,
55309+ },
55310+#endif
55311+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55312+ {
55313+ .ctl_name = CTL_UNNUMBERED,
55314+ .procname = "audit_mount",
55315+ .data = &grsec_enable_mount,
55316+ .maxlen = sizeof(int),
55317+ .mode = 0600,
55318+ .proc_handler = &proc_dointvec,
55319+ },
55320+#endif
55321+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55322+ {
55323+ .ctl_name = CTL_UNNUMBERED,
55324+ .procname = "audit_textrel",
55325+ .data = &grsec_enable_audit_textrel,
55326+ .maxlen = sizeof(int),
55327+ .mode = 0600,
55328+ .proc_handler = &proc_dointvec,
55329+ },
55330+#endif
55331+#ifdef CONFIG_GRKERNSEC_DMESG
55332+ {
55333+ .ctl_name = CTL_UNNUMBERED,
55334+ .procname = "dmesg",
55335+ .data = &grsec_enable_dmesg,
55336+ .maxlen = sizeof(int),
55337+ .mode = 0600,
55338+ .proc_handler = &proc_dointvec,
55339+ },
55340+#endif
55341+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55342+ {
55343+ .ctl_name = CTL_UNNUMBERED,
55344+ .procname = "chroot_findtask",
55345+ .data = &grsec_enable_chroot_findtask,
55346+ .maxlen = sizeof(int),
55347+ .mode = 0600,
55348+ .proc_handler = &proc_dointvec,
55349+ },
55350+#endif
55351+#ifdef CONFIG_GRKERNSEC_RESLOG
55352+ {
55353+ .ctl_name = CTL_UNNUMBERED,
55354+ .procname = "resource_logging",
55355+ .data = &grsec_resource_logging,
55356+ .maxlen = sizeof(int),
55357+ .mode = 0600,
55358+ .proc_handler = &proc_dointvec,
55359+ },
55360+#endif
55361+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55362+ {
55363+ .ctl_name = CTL_UNNUMBERED,
55364+ .procname = "audit_ptrace",
55365+ .data = &grsec_enable_audit_ptrace,
55366+ .maxlen = sizeof(int),
55367+ .mode = 0600,
55368+ .proc_handler = &proc_dointvec,
55369+ },
55370+#endif
55371+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55372+ {
55373+ .ctl_name = CTL_UNNUMBERED,
55374+ .procname = "harden_ptrace",
55375+ .data = &grsec_enable_harden_ptrace,
55376+ .maxlen = sizeof(int),
55377+ .mode = 0600,
55378+ .proc_handler = &proc_dointvec,
55379+ },
55380+#endif
55381+ {
55382+ .ctl_name = CTL_UNNUMBERED,
55383+ .procname = "grsec_lock",
55384+ .data = &grsec_lock,
55385+ .maxlen = sizeof(int),
55386+ .mode = 0600,
55387+ .proc_handler = &proc_dointvec,
55388+ },
55389+#endif
55390+#ifdef CONFIG_GRKERNSEC_ROFS
55391+ {
55392+ .ctl_name = CTL_UNNUMBERED,
55393+ .procname = "romount_protect",
55394+ .data = &grsec_enable_rofs,
55395+ .maxlen = sizeof(int),
55396+ .mode = 0600,
55397+ .proc_handler = &proc_dointvec_minmax,
55398+ .extra1 = &one,
55399+ .extra2 = &one,
55400+ },
55401+#endif
55402+ { .ctl_name = 0 }
55403+};
55404+#endif
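
The table above follows the 2.6.32-era ctl_table conventions used throughout this patch: CTL_UNNUMBERED entries, root-only 0600 mode, proc_dointvec handlers, and a zeroed sentinel at the end. A minimal sketch of the same pattern in a standalone module is shown here for illustration; the names demo, demo_toggle and demo_value are hypothetical and the sketch is not part of the patch.

#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/errno.h>

static int demo_value;
static struct ctl_table_header *demo_header;

/* Leaf entry: an int exposed read/write to root only, like the entries above. */
static struct ctl_table demo_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "demo_toggle",
		.data		= &demo_value,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

/* Directory entry: creates /proc/sys/demo/ holding the leaf table. */
static struct ctl_table demo_dir[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "demo",
		.mode		= 0500,
		.child		= demo_table,
	},
	{ .ctl_name = 0 }
};

static int __init demo_init(void)
{
	demo_header = register_sysctl_table(demo_dir);
	return demo_header ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	unregister_sysctl_table(demo_header);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
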
55405diff -urNp linux-2.6.32.45/grsecurity/grsec_time.c linux-2.6.32.45/grsecurity/grsec_time.c
55406--- linux-2.6.32.45/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55407+++ linux-2.6.32.45/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
55408@@ -0,0 +1,16 @@
55409+#include <linux/kernel.h>
55410+#include <linux/sched.h>
55411+#include <linux/grinternal.h>
55412+#include <linux/module.h>
55413+
55414+void
55415+gr_log_timechange(void)
55416+{
55417+#ifdef CONFIG_GRKERNSEC_TIME
55418+ if (grsec_enable_time)
55419+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55420+#endif
55421+ return;
55422+}
55423+
55424+EXPORT_SYMBOL(gr_log_timechange);
55425diff -urNp linux-2.6.32.45/grsecurity/grsec_tpe.c linux-2.6.32.45/grsecurity/grsec_tpe.c
55426--- linux-2.6.32.45/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55427+++ linux-2.6.32.45/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
55428@@ -0,0 +1,39 @@
55429+#include <linux/kernel.h>
55430+#include <linux/sched.h>
55431+#include <linux/file.h>
55432+#include <linux/fs.h>
55433+#include <linux/grinternal.h>
55434+
55435+extern int gr_acl_tpe_check(void);
55436+
55437+int
55438+gr_tpe_allow(const struct file *file)
55439+{
55440+#ifdef CONFIG_GRKERNSEC
55441+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55442+ const struct cred *cred = current_cred();
55443+
55444+ if (cred->uid && ((grsec_enable_tpe &&
55445+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55446+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55447+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55448+#else
55449+ in_group_p(grsec_tpe_gid)
55450+#endif
55451+ ) || gr_acl_tpe_check()) &&
55452+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55453+ (inode->i_mode & S_IWOTH))))) {
55454+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55455+ return 0;
55456+ }
55457+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55458+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55459+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55460+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55461+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55462+ return 0;
55463+ }
55464+#endif
55465+#endif
55466+ return 1;
55467+}
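
For untrusted users, gr_tpe_allow() above treats a binary as executable only when the directory containing it is owned by root and is neither group- nor world-writable (the TPE_ALL branch additionally allows non-writable directories owned by the user). The following userspace sketch applies the same directory-trust test with stat(2) and dirname(3); it is purely illustrative and not part of the patch.

#include <stdio.h>
#include <libgen.h>
#include <sys/stat.h>

/* Return 1 if the parent directory of "path" would be considered trusted:
   owned by root and not writable by group or other. */
static int tpe_dir_trusted(const char *path)
{
	char buf[4096];
	struct stat st;

	snprintf(buf, sizeof(buf), "%s", path);	/* dirname() modifies its argument */
	if (stat(dirname(buf), &st) != 0)
		return 0;
	return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
}

int main(int argc, char **argv)
{
	int i;

	for (i = 1; i < argc; i++)
		printf("%s: parent directory is %s\n", argv[i],
		       tpe_dir_trusted(argv[i]) ? "trusted" : "untrusted");
	return 0;
}
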
55468diff -urNp linux-2.6.32.45/grsecurity/grsum.c linux-2.6.32.45/grsecurity/grsum.c
55469--- linux-2.6.32.45/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55470+++ linux-2.6.32.45/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
55471@@ -0,0 +1,61 @@
55472+#include <linux/err.h>
55473+#include <linux/kernel.h>
55474+#include <linux/sched.h>
55475+#include <linux/mm.h>
55476+#include <linux/scatterlist.h>
55477+#include <linux/crypto.h>
55478+#include <linux/gracl.h>
55479+
55480+
55481+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55482+#error "crypto and sha256 must be built into the kernel"
55483+#endif
55484+
55485+int
55486+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55487+{
55488+ char *p;
55489+ struct crypto_hash *tfm;
55490+ struct hash_desc desc;
55491+ struct scatterlist sg;
55492+ unsigned char temp_sum[GR_SHA_LEN];
55493+ volatile int retval = 0;
55494+ volatile int dummy = 0;
55495+ unsigned int i;
55496+
55497+ sg_init_table(&sg, 1);
55498+
55499+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55500+ if (IS_ERR(tfm)) {
55501+ /* should never happen, since sha256 should be built in */
55502+ return 1;
55503+ }
55504+
55505+ desc.tfm = tfm;
55506+ desc.flags = 0;
55507+
55508+ crypto_hash_init(&desc);
55509+
55510+ p = salt;
55511+ sg_set_buf(&sg, p, GR_SALT_LEN);
55512+ crypto_hash_update(&desc, &sg, sg.length);
55513+
55514+ p = entry->pw;
55515+ sg_set_buf(&sg, p, strlen(p));
55516+
55517+ crypto_hash_update(&desc, &sg, sg.length);
55518+
55519+ crypto_hash_final(&desc, temp_sum);
55520+
55521+ memset(entry->pw, 0, GR_PW_LEN);
55522+
55523+ for (i = 0; i < GR_SHA_LEN; i++)
55524+ if (sum[i] != temp_sum[i])
55525+ retval = 1;
55526+ else
55527+ dummy = 1; // waste a cycle
55528+
55529+ crypto_free_hash(tfm);
55530+
55531+ return retval;
55532+}
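
chkpw() above hashes the salt followed by the supplied password with in-kernel SHA-256 and then compares every byte of the digest without bailing out on the first mismatch. A userspace sketch of the same scheme is given below for illustration; it substitutes OpenSSL's SHA256_* routines for the kernel crypto API, and the DEMO_* lengths stand in for GR_SALT_LEN and GR_SHA_LEN. It is not part of the patch. Build with: cc sketch.c -lcrypto

#include <string.h>
#include <openssl/sha.h>

#define DEMO_SALT_LEN 16	/* stand-in for GR_SALT_LEN */
#define DEMO_SHA_LEN  32	/* SHA-256 digest size, as GR_SHA_LEN */

/* Return 0 if the password matches the stored digest, 1 otherwise. */
int demo_chkpw(const char *pw,
	       const unsigned char salt[DEMO_SALT_LEN],
	       const unsigned char sum[DEMO_SHA_LEN])
{
	SHA256_CTX ctx;
	unsigned char temp_sum[DEMO_SHA_LEN];
	unsigned char diff = 0;
	unsigned int i;

	SHA256_Init(&ctx);
	SHA256_Update(&ctx, salt, DEMO_SALT_LEN);
	SHA256_Update(&ctx, pw, strlen(pw));
	SHA256_Final(temp_sum, &ctx);

	/* Accumulate differences over all bytes so the comparison time does not
	   depend on where the first mismatch occurs, mirroring the retval/dummy
	   loop in chkpw() above. */
	for (i = 0; i < DEMO_SHA_LEN; i++)
		diff |= sum[i] ^ temp_sum[i];

	return diff != 0;
}
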
55533diff -urNp linux-2.6.32.45/grsecurity/Kconfig linux-2.6.32.45/grsecurity/Kconfig
55534--- linux-2.6.32.45/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55535+++ linux-2.6.32.45/grsecurity/Kconfig 2011-08-11 19:58:19.000000000 -0400
55536@@ -0,0 +1,1035 @@
55537+#
55538+# grsecurity configuration
55539+#
55540+
55541+menu "Grsecurity"
55542+
55543+config GRKERNSEC
55544+ bool "Grsecurity"
55545+ select CRYPTO
55546+ select CRYPTO_SHA256
55547+ help
55548+ If you say Y here, you will be able to configure many features
55549+ that will enhance the security of your system. It is highly
55550+ recommended that you say Y here and read through the help
55551+ for each option so that you fully understand the features and
55552+ can evaluate their usefulness for your machine.
55553+
55554+choice
55555+ prompt "Security Level"
55556+ depends on GRKERNSEC
55557+ default GRKERNSEC_CUSTOM
55558+
55559+config GRKERNSEC_LOW
55560+ bool "Low"
55561+ select GRKERNSEC_LINK
55562+ select GRKERNSEC_FIFO
55563+ select GRKERNSEC_RANDNET
55564+ select GRKERNSEC_DMESG
55565+ select GRKERNSEC_CHROOT
55566+ select GRKERNSEC_CHROOT_CHDIR
55567+
55568+ help
55569+ If you choose this option, several of the grsecurity options will
55570+ be enabled that will give you greater protection against a number
55571+ of attacks, while assuring that none of your software will have any
55572+ conflicts with the additional security measures. If you run a lot
55573+ of unusual software, or you are having problems with the higher
55574+ security levels, you should say Y here. With this option, the
55575+ following features are enabled:
55576+
55577+ - Linking restrictions
55578+ - FIFO restrictions
55579+ - Restricted dmesg
55580+ - Enforced chdir("/") on chroot
55581+ - Runtime module disabling
55582+
55583+config GRKERNSEC_MEDIUM
55584+ bool "Medium"
55585+ select PAX
55586+ select PAX_EI_PAX
55587+ select PAX_PT_PAX_FLAGS
55588+ select PAX_HAVE_ACL_FLAGS
55589+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55590+ select GRKERNSEC_CHROOT
55591+ select GRKERNSEC_CHROOT_SYSCTL
55592+ select GRKERNSEC_LINK
55593+ select GRKERNSEC_FIFO
55594+ select GRKERNSEC_DMESG
55595+ select GRKERNSEC_RANDNET
55596+ select GRKERNSEC_FORKFAIL
55597+ select GRKERNSEC_TIME
55598+ select GRKERNSEC_SIGNAL
55599+ select GRKERNSEC_CHROOT
55600+ select GRKERNSEC_CHROOT_UNIX
55601+ select GRKERNSEC_CHROOT_MOUNT
55602+ select GRKERNSEC_CHROOT_PIVOT
55603+ select GRKERNSEC_CHROOT_DOUBLE
55604+ select GRKERNSEC_CHROOT_CHDIR
55605+ select GRKERNSEC_CHROOT_MKNOD
55606+ select GRKERNSEC_PROC
55607+ select GRKERNSEC_PROC_USERGROUP
55608+ select PAX_RANDUSTACK
55609+ select PAX_ASLR
55610+ select PAX_RANDMMAP
55611+ select PAX_REFCOUNT if (X86 || SPARC64)
55612+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55613+
55614+ help
55615+ If you say Y here, several features in addition to those included
55616+ in the low additional security level will be enabled. These
55617+ features provide even more security to your system, though in rare
55618+ cases they may be incompatible with very old or poorly written
55619+ software. If you enable this option, make sure that your auth
55620+ service (identd) is running as gid 1001. With this option,
55621+ the following features (in addition to those provided in the
55622+ low additional security level) will be enabled:
55623+
55624+ - Failed fork logging
55625+ - Time change logging
55626+ - Signal logging
55627+ - Deny mounts in chroot
55628+ - Deny double chrooting
55629+ - Deny sysctl writes in chroot
55630+ - Deny mknod in chroot
55631+ - Deny access to abstract AF_UNIX sockets out of chroot
55632+ - Deny pivot_root in chroot
55633+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
55634+ - /proc restrictions with special GID set to 10 (usually wheel)
55635+ - Address Space Layout Randomization (ASLR)
55636+ - Prevent exploitation of most refcount overflows
55637+ - Bounds checking of copying between the kernel and userland
55638+
55639+config GRKERNSEC_HIGH
55640+ bool "High"
55641+ select GRKERNSEC_LINK
55642+ select GRKERNSEC_FIFO
55643+ select GRKERNSEC_DMESG
55644+ select GRKERNSEC_FORKFAIL
55645+ select GRKERNSEC_TIME
55646+ select GRKERNSEC_SIGNAL
55647+ select GRKERNSEC_CHROOT
55648+ select GRKERNSEC_CHROOT_SHMAT
55649+ select GRKERNSEC_CHROOT_UNIX
55650+ select GRKERNSEC_CHROOT_MOUNT
55651+ select GRKERNSEC_CHROOT_FCHDIR
55652+ select GRKERNSEC_CHROOT_PIVOT
55653+ select GRKERNSEC_CHROOT_DOUBLE
55654+ select GRKERNSEC_CHROOT_CHDIR
55655+ select GRKERNSEC_CHROOT_MKNOD
55656+ select GRKERNSEC_CHROOT_CAPS
55657+ select GRKERNSEC_CHROOT_SYSCTL
55658+ select GRKERNSEC_CHROOT_FINDTASK
55659+ select GRKERNSEC_SYSFS_RESTRICT
55660+ select GRKERNSEC_PROC
55661+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55662+ select GRKERNSEC_HIDESYM
55663+ select GRKERNSEC_BRUTE
55664+ select GRKERNSEC_PROC_USERGROUP
55665+ select GRKERNSEC_KMEM
55666+ select GRKERNSEC_RESLOG
55667+ select GRKERNSEC_RANDNET
55668+ select GRKERNSEC_PROC_ADD
55669+ select GRKERNSEC_CHROOT_CHMOD
55670+ select GRKERNSEC_CHROOT_NICE
55671+ select GRKERNSEC_AUDIT_MOUNT
55672+ select GRKERNSEC_MODHARDEN if (MODULES)
55673+ select GRKERNSEC_HARDEN_PTRACE
55674+ select GRKERNSEC_VM86 if (X86_32)
55675+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55676+ select PAX
55677+ select PAX_RANDUSTACK
55678+ select PAX_ASLR
55679+ select PAX_RANDMMAP
55680+ select PAX_NOEXEC
55681+ select PAX_MPROTECT
55682+ select PAX_EI_PAX
55683+ select PAX_PT_PAX_FLAGS
55684+ select PAX_HAVE_ACL_FLAGS
55685+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55686+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
55687+ select PAX_RANDKSTACK if (X86_TSC && X86)
55688+ select PAX_SEGMEXEC if (X86_32)
55689+ select PAX_PAGEEXEC
55690+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55691+ select PAX_EMUTRAMP if (PARISC)
55692+ select PAX_EMUSIGRT if (PARISC)
55693+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55694+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55695+ select PAX_REFCOUNT if (X86 || SPARC64)
55696+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55697+ help
55698+ If you say Y here, many of the features of grsecurity will be
55699+ enabled, which will protect you against many kinds of attacks
55700+ against your system. The heightened security comes at a cost
55701+ of an increased chance of incompatibilities with rare software
55702+ on your machine. Since this security level enables PaX, you should
55703+ view <http://pax.grsecurity.net> and read about the PaX
55704+ project. While you are there, download chpax and run it on
55705+ binaries that cause problems with PaX. Also remember that
55706+ since the /proc restrictions are enabled, you must run your
55707+ identd as gid 1001. This security level enables the following
55708+ features in addition to those listed in the low and medium
55709+ security levels:
55710+
55711+ - Additional /proc restrictions
55712+ - Chmod restrictions in chroot
55713+ - No signals, ptrace, or viewing of processes outside of chroot
55714+ - Capability restrictions in chroot
55715+ - Deny fchdir out of chroot
55716+ - Priority restrictions in chroot
55717+ - Segmentation-based implementation of PaX
55718+ - Mprotect restrictions
55719+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55720+ - Kernel stack randomization
55721+ - Mount/unmount/remount logging
55722+ - Kernel symbol hiding
55723+ - Prevention of memory exhaustion-based exploits
55724+ - Hardening of module auto-loading
55725+ - Ptrace restrictions
55726+ - Restricted vm86 mode
55727+ - Restricted sysfs/debugfs
55728+ - Active kernel exploit response
55729+
55730+config GRKERNSEC_CUSTOM
55731+ bool "Custom"
55732+ help
55733+ If you say Y here, you will be able to configure every grsecurity
55734+ option, which allows you to enable many more features that aren't
55735+ covered in the basic security levels. These additional features
55736+ include TPE, socket restrictions, and the sysctl system for
55737+ grsecurity. It is advised that you read through the help for
55738+ each option to determine its usefulness in your situation.
55739+
55740+endchoice
55741+
55742+menu "Address Space Protection"
55743+depends on GRKERNSEC
55744+
55745+config GRKERNSEC_KMEM
55746+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
55747+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55748+ help
55749+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55750+ be written to via mmap or otherwise to modify the running kernel.
55751+ /dev/port will also not be allowed to be opened. If you have module
55752+ support disabled, enabling this will close up four ways that are
55753+ currently used to insert malicious code into the running kernel.
55754+ Even with all these features enabled, we still highly recommend that
55755+ you use the RBAC system, as it is still possible for an attacker to
55756+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55757+ If you are not using XFree86, you may be able to stop this additional
55758+ case by enabling the 'Disable privileged I/O' option. Though nothing
55759+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55760+ but only to video memory, which is the only writing we allow in this
55761+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55762+ not be allowed to mprotect it with PROT_WRITE later.
55763+ It is highly recommended that you say Y here if you meet all the
55764+ conditions above.
55765+
55766+config GRKERNSEC_VM86
55767+ bool "Restrict VM86 mode"
55768+ depends on X86_32
55769+
55770+ help
55771+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55772+ make use of a special execution mode on 32bit x86 processors called
55773+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55774+ video cards and will still work with this option enabled. The purpose
55775+ of the option is to prevent exploitation of emulation errors in
55776+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
55777+ Nearly all users should be able to enable this option.
55778+
55779+config GRKERNSEC_IO
55780+ bool "Disable privileged I/O"
55781+ depends on X86
55782+ select RTC_CLASS
55783+ select RTC_INTF_DEV
55784+ select RTC_DRV_CMOS
55785+
55786+ help
55787+ If you say Y here, all ioperm and iopl calls will return an error.
55788+ Ioperm and iopl can be used to modify the running kernel.
55789+ Unfortunately, some programs need this access to operate properly,
55790+ the most notable of which are XFree86 and hwclock. hwclock can be
55791+ remedied by having RTC support in the kernel, so real-time
55792+ clock support is enabled if this option is enabled, to ensure
55793+ that hwclock operates correctly. XFree86 still will not
55794+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55795+ IF YOU USE XFree86. If you use XFree86 and you still want to
55796+ protect your kernel against modification, use the RBAC system.
55797+
55798+config GRKERNSEC_PROC_MEMMAP
55799+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55800+ default y if (PAX_NOEXEC || PAX_ASLR)
55801+ depends on PAX_NOEXEC || PAX_ASLR
55802+ help
55803+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55804+ give no information about the addresses of its mappings if
55805+ PaX features that rely on random addresses are enabled on the task.
55806+ If you use PaX it is greatly recommended that you say Y here as it
55807+ closes up a hole that makes the full ASLR useless for suid
55808+ binaries.
55809+
55810+config GRKERNSEC_BRUTE
55811+ bool "Deter exploit bruteforcing"
55812+ help
55813+ If you say Y here, attempts to bruteforce exploits against forking
55814+ daemons such as apache or sshd, as well as against suid/sgid binaries
55815+ will be deterred. When a child of a forking daemon is killed by PaX
55816+ or crashes due to an illegal instruction or other suspicious signal,
55817+ the parent process will be delayed 30 seconds upon every subsequent
55818+ fork until the administrator is able to assess the situation and
55819+ restart the daemon.
55820+ In the suid/sgid case, the attempt is logged, the user has all their
55821+ processes terminated, and they are prevented from executing any further
55822+ processes for 15 minutes.
55823+ It is recommended that you also enable signal logging in the auditing
55824+ section so that logs are generated when a process triggers a suspicious
55825+ signal.
55826+ If the sysctl option is enabled, a sysctl option with name
55827+ "deter_bruteforce" is created.
55828+
55829+config GRKERNSEC_MODHARDEN
55830+ bool "Harden module auto-loading"
55831+ depends on MODULES
55832+ help
55833+ If you say Y here, module auto-loading in response to use of some
55834+ feature implemented by an unloaded module will be restricted to
55835+ root users. Enabling this option helps defend against attacks
55836+ by unprivileged users who abuse the auto-loading behavior to
55837+ cause a vulnerable module to load that is then exploited.
55838+
55839+ If this option prevents a legitimate use of auto-loading for a
55840+ non-root user, the administrator can execute modprobe manually
55841+ with the exact name of the module mentioned in the alert log.
55842+ Alternatively, the administrator can add the module to the list
55843+ of modules loaded at boot by modifying init scripts.
55844+
55845+ Modification of init scripts will most likely be needed on
55846+ Ubuntu servers with encrypted home directory support enabled,
55847+ as the first non-root user logging in will cause the ecb(aes),
55848+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55849+
55850+config GRKERNSEC_HIDESYM
55851+ bool "Hide kernel symbols"
55852+ help
55853+ If you say Y here, getting information on loaded modules, and
55854+ displaying all kernel symbols through a syscall will be restricted
55855+ to users with CAP_SYS_MODULE. For software compatibility reasons,
55856+ /proc/kallsyms will be restricted to the root user. The RBAC
55857+ system can hide that entry even from root.
55858+
55859+ This option also prevents leaking of kernel addresses through
55860+ several /proc entries.
55861+
55862+ Note that this option is only effective provided the following
55863+ conditions are met:
55864+ 1) The kernel using grsecurity is not precompiled by some distribution
55865+ 2) You have also enabled GRKERNSEC_DMESG
55866+ 3) You are using the RBAC system and hiding other files such as your
55867+ kernel image and System.map. Alternatively, enabling this option
55868+ causes the permissions on /boot, /lib/modules, and the kernel
55869+ source directory to change at compile time to prevent
55870+ reading by non-root users.
55871+ If the above conditions are met, this option will aid in providing a
55872+ useful protection against local kernel exploitation of overflows
55873+ and arbitrary read/write vulnerabilities.
55874+
55875+config GRKERNSEC_KERN_LOCKOUT
55876+ bool "Active kernel exploit response"
55877+ depends on X86 || ARM || PPC || SPARC
55878+ help
55879+ If you say Y here, when a PaX alert is triggered due to suspicious
55880+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55881+ or an OOPs occurs due to bad memory accesses, instead of just
55882+ terminating the offending process (and potentially allowing
55883+ a subsequent exploit from the same user), we will take one of two
55884+ actions:
55885+ If the user was root, we will panic the system
55886+ If the user was non-root, we will log the attempt, terminate
55887+ all processes owned by the user, then prevent them from creating
55888+ any new processes until the system is restarted
55889+ This deters repeated kernel exploitation/bruteforcing attempts
55890+ and is useful for later forensics.
55891+
55892+endmenu
55893+menu "Role Based Access Control Options"
55894+depends on GRKERNSEC
55895+
55896+config GRKERNSEC_RBAC_DEBUG
55897+ bool
55898+
55899+config GRKERNSEC_NO_RBAC
55900+ bool "Disable RBAC system"
55901+ help
55902+ If you say Y here, the /dev/grsec device will be removed from the kernel,
55903+ preventing the RBAC system from being enabled. You should only say Y
55904+ here if you have no intention of using the RBAC system, so as to prevent
55905+ an attacker with root access from misusing the RBAC system to hide files
55906+ and processes when loadable module support and /dev/[k]mem have been
55907+ locked down.
55908+
55909+config GRKERNSEC_ACL_HIDEKERN
55910+ bool "Hide kernel processes"
55911+ help
55912+ If you say Y here, all kernel threads will be hidden to all
55913+ processes but those whose subject has the "view hidden processes"
55914+ flag.
55915+
55916+config GRKERNSEC_ACL_MAXTRIES
55917+ int "Maximum tries before password lockout"
55918+ default 3
55919+ help
55920+ This option enforces the maximum number of times a user can attempt
55921+ to authorize themselves with the grsecurity RBAC system before being
55922+ denied the ability to attempt authorization again for a specified time.
55923+ The lower the number, the harder it will be to brute-force a password.
55924+
55925+config GRKERNSEC_ACL_TIMEOUT
55926+ int "Time to wait after max password tries, in seconds"
55927+ default 30
55928+ help
55929+ This option specifies the time the user must wait after attempting to
55930+ authorize to the RBAC system with the maximum number of invalid
55931+ passwords. The higher the number, the harder it will be to brute-force
55932+ a password.
55933+
55934+endmenu
55935+menu "Filesystem Protections"
55936+depends on GRKERNSEC
55937+
55938+config GRKERNSEC_PROC
55939+ bool "Proc restrictions"
55940+ help
55941+ If you say Y here, the permissions of the /proc filesystem
55942+ will be altered to enhance system security and privacy. You MUST
55943+ choose either a user only restriction or a user and group restriction.
55944+ Depending upon the option you choose, you can either restrict users to
55945+ see only the processes they themselves run, or choose a group that can
55946+ view all processes and files normally restricted to root if you choose
55947+ the "restrict to user only" option. NOTE: If you're running identd as
55948+ a non-root user, you will have to run it as the group you specify here.
55949+
55950+config GRKERNSEC_PROC_USER
55951+ bool "Restrict /proc to user only"
55952+ depends on GRKERNSEC_PROC
55953+ help
55954+ If you say Y here, non-root users will only be able to view their own
55955+ processes, and will be restricted from viewing network-related
55956+ information and from viewing kernel symbol and module information.
55957+
55958+config GRKERNSEC_PROC_USERGROUP
55959+ bool "Allow special group"
55960+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55961+ help
55962+ If you say Y here, you will be able to select a group that will be
55963+ able to view all processes and network-related information. If you've
55964+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55965+ remain hidden. This option is useful if you want to run identd as
55966+ a non-root user.
55967+
55968+config GRKERNSEC_PROC_GID
55969+ int "GID for special group"
55970+ depends on GRKERNSEC_PROC_USERGROUP
55971+ default 1001
55972+
55973+config GRKERNSEC_PROC_ADD
55974+ bool "Additional restrictions"
55975+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55976+ help
55977+ If you say Y here, additional restrictions will be placed on
55978+ /proc that keep normal users from viewing device information and
55979+ slabinfo information that could be useful for exploits.
55980+
55981+config GRKERNSEC_LINK
55982+ bool "Linking restrictions"
55983+ help
55984+ If you say Y here, /tmp race exploits will be prevented, since users
55985+ will no longer be able to follow symlinks owned by other users in
55986+ world-writable +t directories (e.g. /tmp), unless the owner of the
55987+ symlink is the owner of the directory. Users will also not be
55988+ able to hardlink to files they do not own. If the sysctl option is
55989+ enabled, a sysctl option with name "linking_restrictions" is created.
55990+
55991+config GRKERNSEC_FIFO
55992+ bool "FIFO restrictions"
55993+ help
55994+ If you say Y here, users will not be able to write to FIFOs they don't
55995+ own in world-writable +t directories (e.g. /tmp), unless the owner of
55996+ the FIFO is also the owner of the directory it's held in. If the sysctl
55997+ option is enabled, a sysctl option with name "fifo_restrictions" is
55998+ created.
55999+
56000+config GRKERNSEC_SYSFS_RESTRICT
56001+ bool "Sysfs/debugfs restriction"
56002+ depends on SYSFS
56003+ help
56004+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56005+ any filesystem normally mounted under it (e.g. debugfs) will only
56006+ be accessible by root. These filesystems generally provide access
56007+ to hardware and debug information that isn't appropriate for unprivileged
56008+ users of the system. Sysfs and debugfs have also become a large source
56009+ of new vulnerabilities, ranging from infoleaks to local compromise.
56010+ There has been very little oversight with an eye toward security involved
56011+ in adding new exporters of information to these filesystems, so their
56012+ use is discouraged.
56013+ This option is equivalent to a chmod 0700 of the mount paths.
56014+
56015+config GRKERNSEC_ROFS
56016+ bool "Runtime read-only mount protection"
56017+ help
56018+ If you say Y here, a sysctl option with name "romount_protect" will
56019+ be created. By setting this option to 1 at runtime, filesystems
56020+ will be protected in the following ways:
56021+ * No new writable mounts will be allowed
56022+ * Existing read-only mounts won't be able to be remounted read/write
56023+ * Write operations will be denied on all block devices
56024+ This option acts independently of grsec_lock: once it is set to 1,
56025+ it cannot be turned off. Therefore, please be mindful of the resulting
56026+ behavior if this option is enabled in an init script on a read-only
56027+ filesystem. This feature is mainly intended for secure embedded systems.
56028+
56029+config GRKERNSEC_CHROOT
56030+ bool "Chroot jail restrictions"
56031+ help
56032+ If you say Y here, you will be able to choose several options that will
56033+ make breaking out of a chrooted jail much more difficult. If you
56034+ encounter no software incompatibilities with the following options, it
56035+ is recommended that you enable each one.
56036+
56037+config GRKERNSEC_CHROOT_MOUNT
56038+ bool "Deny mounts"
56039+ depends on GRKERNSEC_CHROOT
56040+ help
56041+ If you say Y here, processes inside a chroot will not be able to
56042+ mount or remount filesystems. If the sysctl option is enabled, a
56043+ sysctl option with name "chroot_deny_mount" is created.
56044+
56045+config GRKERNSEC_CHROOT_DOUBLE
56046+ bool "Deny double-chroots"
56047+ depends on GRKERNSEC_CHROOT
56048+ help
56049+ If you say Y here, processes inside a chroot will not be able to chroot
56050+ again outside the chroot. This is a widely used method of breaking
56051+ out of a chroot jail and should not be allowed. If the sysctl
56052+ option is enabled, a sysctl option with name
56053+ "chroot_deny_chroot" is created.
56054+
56055+config GRKERNSEC_CHROOT_PIVOT
56056+ bool "Deny pivot_root in chroot"
56057+ depends on GRKERNSEC_CHROOT
56058+ help
56059+ If you say Y here, processes inside a chroot will not be able to use
56060+ a function called pivot_root() that was introduced in Linux 2.3.41. It
56061+ works similarly to chroot in that it changes the root filesystem. This
56062+ function could be misused in a chrooted process to attempt to break out
56063+ of the chroot, and therefore should not be allowed. If the sysctl
56064+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
56065+ created.
56066+
56067+config GRKERNSEC_CHROOT_CHDIR
56068+ bool "Enforce chdir(\"/\") on all chroots"
56069+ depends on GRKERNSEC_CHROOT
56070+ help
56071+ If you say Y here, the current working directory of all newly-chrooted
56072+ applications will be set to the root directory of the chroot.
56073+ The man page on chroot(2) states:
56074+ Note that this call does not change the current working
56075+ directory, so that `.' can be outside the tree rooted at
56076+ `/'. In particular, the super-user can escape from a
56077+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56078+
56079+ It is recommended that you say Y here, since it's not known to break
56080+ any software. If the sysctl option is enabled, a sysctl option with
56081+ name "chroot_enforce_chdir" is created.
56082+
56083+config GRKERNSEC_CHROOT_CHMOD
56084+ bool "Deny (f)chmod +s"
56085+ depends on GRKERNSEC_CHROOT
56086+ help
56087+ If you say Y here, processes inside a chroot will not be able to chmod
56088+ or fchmod files to make them have suid or sgid bits. This protects
56089+ against another published method of breaking a chroot. If the sysctl
56090+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
56091+ created.
56092+
56093+config GRKERNSEC_CHROOT_FCHDIR
56094+ bool "Deny fchdir out of chroot"
56095+ depends on GRKERNSEC_CHROOT
56096+ help
56097+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
56098+ to a file descriptor of the chrooting process that points to a directory
56099+ outside the filesystem will be stopped. If the sysctl option
56100+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56101+
56102+config GRKERNSEC_CHROOT_MKNOD
56103+ bool "Deny mknod"
56104+ depends on GRKERNSEC_CHROOT
56105+ help
56106+ If you say Y here, processes inside a chroot will not be allowed to
56107+ mknod. The problem with using mknod inside a chroot is that it
56108+ would allow an attacker to create a device entry that is the same
56109+ as one on the physical root of your system, which could be anything
56110+ from the console device to a device for your hard drive (which
56111+ they could then use to wipe the drive or steal data). It is recommended
56112+ that you say Y here, unless you run into software incompatibilities.
56113+ If the sysctl option is enabled, a sysctl option with name
56114+ "chroot_deny_mknod" is created.
56115+
56116+config GRKERNSEC_CHROOT_SHMAT
56117+ bool "Deny shmat() out of chroot"
56118+ depends on GRKERNSEC_CHROOT
56119+ help
56120+ If you say Y here, processes inside a chroot will not be able to attach
56121+ to shared memory segments that were created outside of the chroot jail.
56122+ It is recommended that you say Y here. If the sysctl option is enabled,
56123+ a sysctl option with name "chroot_deny_shmat" is created.
56124+
56125+config GRKERNSEC_CHROOT_UNIX
56126+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
56127+ depends on GRKERNSEC_CHROOT
56128+ help
56129+ If you say Y here, processes inside a chroot will not be able to
56130+ connect to abstract (meaning not belonging to a filesystem) Unix
56131+ domain sockets that were bound outside of a chroot. It is recommended
56132+ that you say Y here. If the sysctl option is enabled, a sysctl option
56133+ with name "chroot_deny_unix" is created.
56134+
56135+config GRKERNSEC_CHROOT_FINDTASK
56136+ bool "Protect outside processes"
56137+ depends on GRKERNSEC_CHROOT
56138+ help
56139+ If you say Y here, processes inside a chroot will not be able to
56140+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56141+ getsid, or view any process outside of the chroot. If the sysctl
56142+ option is enabled, a sysctl option with name "chroot_findtask" is
56143+ created.
56144+
56145+config GRKERNSEC_CHROOT_NICE
56146+ bool "Restrict priority changes"
56147+ depends on GRKERNSEC_CHROOT
56148+ help
56149+ If you say Y here, processes inside a chroot will not be able to raise
56150+ the priority of processes in the chroot, or alter the priority of
56151+ processes outside the chroot. This provides more security than simply
56152+ removing CAP_SYS_NICE from the process' capability set. If the
56153+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56154+ is created.
56155+
56156+config GRKERNSEC_CHROOT_SYSCTL
56157+ bool "Deny sysctl writes"
56158+ depends on GRKERNSEC_CHROOT
56159+ help
56160+ If you say Y here, an attacker in a chroot will not be able to
56161+ write to sysctl entries, either by sysctl(2) or through a /proc
56162+ interface. It is strongly recommended that you say Y here. If the
56163+ sysctl option is enabled, a sysctl option with name
56164+ "chroot_deny_sysctl" is created.
56165+
56166+config GRKERNSEC_CHROOT_CAPS
56167+ bool "Capability restrictions"
56168+ depends on GRKERNSEC_CHROOT
56169+ help
56170+ If you say Y here, the capabilities on all root processes within a
56171+ chroot jail will be lowered to stop module insertion, raw i/o,
56172+ system and net admin tasks, rebooting the system, modifying immutable
56173+ files, modifying IPC owned by another, and changing the system time.
56174+ This is left an option because it can break some apps. Disable this
56175+ if your chrooted apps are having problems performing those kinds of
56176+ tasks. If the sysctl option is enabled, a sysctl option with
56177+ name "chroot_caps" is created.
56178+
56179+endmenu
56180+menu "Kernel Auditing"
56181+depends on GRKERNSEC
56182+
56183+config GRKERNSEC_AUDIT_GROUP
56184+ bool "Single group for auditing"
56185+ help
56186+ If you say Y here, the exec, chdir, and (un)mount logging features
56187+ will only operate on a group you specify. This option is recommended
56188+ if you only want to watch certain users instead of having a large
56189+ amount of logs from the entire system. If the sysctl option is enabled,
56190+ a sysctl option with name "audit_group" is created.
56191+
56192+config GRKERNSEC_AUDIT_GID
56193+ int "GID for auditing"
56194+ depends on GRKERNSEC_AUDIT_GROUP
56195+ default 1007
56196+
56197+config GRKERNSEC_EXECLOG
56198+ bool "Exec logging"
56199+ help
56200+ If you say Y here, all execve() calls will be logged (since the
56201+ other exec*() calls are frontends to execve(), all execution
56202+ will be logged). Useful for shell-servers that like to keep track
56203+ of their users. If the sysctl option is enabled, a sysctl option with
56204+ name "exec_logging" is created.
56205+ WARNING: This option when enabled will produce a LOT of logs, especially
56206+ on an active system.
56207+
56208+config GRKERNSEC_RESLOG
56209+ bool "Resource logging"
56210+ help
56211+ If you say Y here, all attempts to overstep resource limits will
56212+ be logged with the resource name, the requested size, and the current
56213+ limit. It is highly recommended that you say Y here. If the sysctl
56214+ option is enabled, a sysctl option with name "resource_logging" is
56215+ created. If the RBAC system is enabled, the sysctl value is ignored.
56216+
56217+config GRKERNSEC_CHROOT_EXECLOG
56218+ bool "Log execs within chroot"
56219+ help
56220+ If you say Y here, all executions inside a chroot jail will be logged
56221+ to syslog. This can cause a large amount of logs if certain
56222+ applications (e.g. djb's daemontools) are installed on the system, and
56223+ is therefore left as an option. If the sysctl option is enabled, a
56224+ sysctl option with name "chroot_execlog" is created.
56225+
56226+config GRKERNSEC_AUDIT_PTRACE
56227+ bool "Ptrace logging"
56228+ help
56229+ If you say Y here, all attempts to attach to a process via ptrace
56230+ will be logged. If the sysctl option is enabled, a sysctl option
56231+ with name "audit_ptrace" is created.
56232+
56233+config GRKERNSEC_AUDIT_CHDIR
56234+ bool "Chdir logging"
56235+ help
56236+ If you say Y here, all chdir() calls will be logged. If the sysctl
56237+ option is enabled, a sysctl option with name "audit_chdir" is created.
56238+
56239+config GRKERNSEC_AUDIT_MOUNT
56240+ bool "(Un)Mount logging"
56241+ help
56242+ If you say Y here, all mounts and unmounts will be logged. If the
56243+ sysctl option is enabled, a sysctl option with name "audit_mount" is
56244+ created.
56245+
56246+config GRKERNSEC_SIGNAL
56247+ bool "Signal logging"
56248+ help
56249+ If you say Y here, certain important signals will be logged, such as
56250+ SIGSEGV, which will as a result inform you when an error in a program
56251+ occurred, which in some cases could mean a possible exploit attempt.
56252+ If the sysctl option is enabled, a sysctl option with name
56253+ "signal_logging" is created.
56254+
56255+config GRKERNSEC_FORKFAIL
56256+ bool "Fork failure logging"
56257+ help
56258+ If you say Y here, all failed fork() attempts will be logged.
56259+ This could suggest a fork bomb, or someone attempting to overstep
56260+ their process limit. If the sysctl option is enabled, a sysctl option
56261+ with name "forkfail_logging" is created.
56262+
56263+config GRKERNSEC_TIME
56264+ bool "Time change logging"
56265+ help
56266+ If you say Y here, any changes of the system clock will be logged.
56267+ If the sysctl option is enabled, a sysctl option with name
56268+ "timechange_logging" is created.
56269+
56270+config GRKERNSEC_PROC_IPADDR
56271+ bool "/proc/<pid>/ipaddr support"
56272+ help
56273+ If you say Y here, a new entry will be added to each /proc/<pid>
56274+ directory that contains the IP address of the person using the task.
56275+ The IP is carried across local TCP and AF_UNIX stream sockets.
56276+ This information can be useful for IDS/IPSes to perform remote response
56277+ to a local attack. The entry is readable by only the owner of the
56278+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56279+ the RBAC system), and thus does not create privacy concerns.
56280+
56281+config GRKERNSEC_RWXMAP_LOG
56282+ bool 'Denied RWX mmap/mprotect logging'
56283+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56284+ help
56285+ If you say Y here, calls to mmap() and mprotect() with explicit
56286+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56287+ denied by the PAX_MPROTECT feature. If the sysctl option is
56288+ enabled, a sysctl option with name "rwxmap_logging" is created.
56289+
56290+config GRKERNSEC_AUDIT_TEXTREL
56291+ bool 'ELF text relocations logging (READ HELP)'
56292+ depends on PAX_MPROTECT
56293+ help
56294+ If you say Y here, text relocations will be logged with the filename
56295+ of the offending library or binary. The purpose of the feature is
56296+ to help Linux distribution developers get rid of libraries and
56297+ binaries that need text relocations which hinder the future progress
56298+ of PaX. Only Linux distribution developers should say Y here, and
56299+ never on a production machine, as this option creates an information
56300+ leak that could aid an attacker in defeating the randomization of
56301+ a single memory region. If the sysctl option is enabled, a sysctl
56302+ option with name "audit_textrel" is created.
56303+
56304+endmenu
56305+
56306+menu "Executable Protections"
56307+depends on GRKERNSEC
56308+
56309+config GRKERNSEC_DMESG
56310+ bool "Dmesg(8) restriction"
56311+ help
56312+ If you say Y here, non-root users will not be able to use dmesg(8)
56313+ to view up to the last 4kb of messages in the kernel's log buffer.
56314+ The kernel's log buffer often contains kernel addresses and other
56315+ identifying information useful to an attacker in fingerprinting a
56316+ system for a targeted exploit.
56317+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56318+ created.
56319+
56320+config GRKERNSEC_HARDEN_PTRACE
56321+ bool "Deter ptrace-based process snooping"
56322+ help
56323+ If you say Y here, TTY sniffers and other malicious monitoring
56324+ programs implemented through ptrace will be defeated. If you
56325+ have been using the RBAC system, this option has already been
56326+ enabled for several years for all users, with the ability to make
56327+ fine-grained exceptions.
56328+
56329+ This option only affects the ability of non-root users to ptrace
56330+ processes that are not a descendant of the ptracing process.
56331+ This means that strace ./binary and gdb ./binary will still work,
56332+ but attaching to arbitrary processes will not. If the sysctl
56333+ option is enabled, a sysctl option with name "harden_ptrace" is
56334+ created.
56335+
56336+config GRKERNSEC_TPE
56337+ bool "Trusted Path Execution (TPE)"
56338+ help
56339+ If you say Y here, you will be able to choose a gid to add to the
56340+ supplementary groups of users you want to mark as "untrusted."
56341+ These users will not be able to execute any files that are not in
56342+ root-owned directories writable only by root. If the sysctl option
56343+ is enabled, a sysctl option with name "tpe" is created.
56344+
56345+config GRKERNSEC_TPE_ALL
56346+ bool "Partially restrict all non-root users"
56347+ depends on GRKERNSEC_TPE
56348+ help
56349+ If you say Y here, all non-root users will be covered under
56350+ a weaker TPE restriction. This is separate from, and in addition to,
56351+ the main TPE options that you have selected elsewhere. Thus, if a
56352+ "trusted" GID is chosen, this restriction applies even to that GID.
56353+ Under this restriction, all non-root users will only be allowed to
56354+ execute files in directories they own that are not group or
56355+ world-writable, or in directories owned by root and writable only by
56356+ root. If the sysctl option is enabled, a sysctl option with name
56357+ "tpe_restrict_all" is created.
56358+
56359+config GRKERNSEC_TPE_INVERT
56360+ bool "Invert GID option"
56361+ depends on GRKERNSEC_TPE
56362+ help
56363+ If you say Y here, the group you specify in the TPE configuration will
56364+ decide what group TPE restrictions will be *disabled* for. This
56365+ option is useful if you want TPE restrictions to be applied to most
56366+ users on the system. If the sysctl option is enabled, a sysctl option
56367+ with name "tpe_invert" is created. Unlike other sysctl options, this
56368+ entry will default to on for backward-compatibility.
56369+
56370+config GRKERNSEC_TPE_GID
56371+ int "GID for untrusted users"
56372+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56373+ default 1005
56374+ help
56375+ Setting this GID determines what group TPE restrictions will be
56376+ *enabled* for. If the sysctl option is enabled, a sysctl option
56377+ with name "tpe_gid" is created.
56378+
56379+config GRKERNSEC_TPE_GID
56380+ int "GID for trusted users"
56381+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56382+ default 1005
56383+ help
56384+ Setting this GID determines what group TPE restrictions will be
56385+ *disabled* for. If the sysctl option is enabled, a sysctl option
56386+ with name "tpe_gid" is created.
56387+
56388+endmenu
56389+menu "Network Protections"
56390+depends on GRKERNSEC
56391+
56392+config GRKERNSEC_RANDNET
56393+ bool "Larger entropy pools"
56394+ help
56395+ If you say Y here, the entropy pools used for many features of Linux
56396+ and grsecurity will be doubled in size. Since several grsecurity
56397+ features use additional randomness, it is recommended that you say Y
56398+ here. Saying Y here has a similar effect to modifying
56399+ /proc/sys/kernel/random/poolsize.
56400+
56401+config GRKERNSEC_BLACKHOLE
56402+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56403+ help
56404+ If you say Y here, neither TCP resets nor ICMP
56405+ destination-unreachable packets will be sent in response to packets
56406+ sent to ports for which no associated listening process exists.
56407+ This feature supports both IPv4 and IPv6 and exempts the
56408+ loopback interface from blackholing. Enabling this feature
56409+ makes a host more resilient to DoS attacks and reduces network
56410+ visibility against scanners.
56411+
56412+ The blackhole feature as-implemented is equivalent to the FreeBSD
56413+ blackhole feature, as it prevents RST responses to all packets, not
56414+ just SYNs. Under most application behavior this causes no
56415+ problems, but applications (like haproxy) may not close certain
56416+ connections in a way that cleanly terminates them on the remote
56417+ end, leaving the remote host in LAST_ACK state. Because of this
56418+ side-effect and to prevent intentional LAST_ACK DoSes, this
56419+ feature also adds automatic mitigation against such attacks.
56420+ The mitigation drastically reduces the amount of time a socket
56421+ can spend in LAST_ACK state. If you're using haproxy and not
56422+ all servers it connects to have this option enabled, consider
56423+ disabling this feature on the haproxy host.
56424+
56425+ If the sysctl option is enabled, two sysctl options with names
56426+ "ip_blackhole" and "lastack_retries" will be created.
56427+ While "ip_blackhole" takes the standard zero/non-zero on/off
56428+ toggle, "lastack_retries" uses the same kinds of values as
56429+ "tcp_retries1" and "tcp_retries2". The default value of 4
56430+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56431+ state.
56432+
56433+config GRKERNSEC_SOCKET
56434+ bool "Socket restrictions"
56435+ help
56436+ If you say Y here, you will be able to choose from several options.
56437+ If you assign a GID on your system and add it to the supplementary
56438+ groups of users you want to restrict socket access to, this patch
56439+ will perform up to three things, based on the option(s) you choose.
56440+
56441+config GRKERNSEC_SOCKET_ALL
56442+ bool "Deny any sockets to group"
56443+ depends on GRKERNSEC_SOCKET
56444+ help
56445+ If you say Y here, you will be able to choose a GID whose users will
56446+ be unable to connect to other hosts from your machine or run server
56447+ applications from your machine. If the sysctl option is enabled, a
56448+ sysctl option with name "socket_all" is created.
56449+
56450+config GRKERNSEC_SOCKET_ALL_GID
56451+ int "GID to deny all sockets for"
56452+ depends on GRKERNSEC_SOCKET_ALL
56453+ default 1004
56454+ help
56455+ Here you can choose the GID to disable socket access for. Remember to
56456+ add the users you want socket access disabled for to the GID
56457+ specified here. If the sysctl option is enabled, a sysctl option
56458+ with name "socket_all_gid" is created.
56459+
56460+config GRKERNSEC_SOCKET_CLIENT
56461+ bool "Deny client sockets to group"
56462+ depends on GRKERNSEC_SOCKET
56463+ help
56464+ If you say Y here, you will be able to choose a GID whose users will
56465+ be unable to connect to other hosts from your machine, but will be
56466+ able to run servers. If this option is enabled, all users in the group
56467+ you specify will have to use passive mode when initiating ftp transfers
56468+ from the shell on your machine. If the sysctl option is enabled, a
56469+ sysctl option with name "socket_client" is created.
56470+
56471+config GRKERNSEC_SOCKET_CLIENT_GID
56472+ int "GID to deny client sockets for"
56473+ depends on GRKERNSEC_SOCKET_CLIENT
56474+ default 1003
56475+ help
56476+ Here you can choose the GID to disable client socket access for.
56477+ Remember to add the users you want client socket access disabled for to
56478+ the GID specified here. If the sysctl option is enabled, a sysctl
56479+ option with name "socket_client_gid" is created.
56480+
56481+config GRKERNSEC_SOCKET_SERVER
56482+ bool "Deny server sockets to group"
56483+ depends on GRKERNSEC_SOCKET
56484+ help
56485+ If you say Y here, you will be able to choose a GID whose users will
56486+ be unable to run server applications from your machine. If the sysctl
56487+ option is enabled, a sysctl option with name "socket_server" is created.
56488+
56489+config GRKERNSEC_SOCKET_SERVER_GID
56490+ int "GID to deny server sockets for"
56491+ depends on GRKERNSEC_SOCKET_SERVER
56492+ default 1002
56493+ help
56494+ Here you can choose the GID to disable server socket access for.
56495+ Remember to add the users you want server socket access disabled for to
56496+ the GID specified here. If the sysctl option is enabled, a sysctl
56497+ option with name "socket_server_gid" is created.
56498+
56499+endmenu
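In essence, the three socket restrictions above come down to a supplementary-group membership test at socket-creation time. The following is a minimal sketch of that idea, not the patch's actual gracl_ip.c/grsec_sock.c code; the function name is illustrative and the GID would come from the *_GID options above:

	#include <linux/cred.h>		/* in_group_p() */
	#include <linux/errno.h>

	/* Deny the operation if the caller is a member of the configured
	 * deny group; a hedged approximation of the check these options
	 * configure. */
	static int example_socket_denied(gid_t deny_gid)
	{
		if (in_group_p(deny_gid))
			return -EACCES;
		return 0;
	}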
56500+menu "Sysctl support"
56501+depends on GRKERNSEC && SYSCTL
56502+
56503+config GRKERNSEC_SYSCTL
56504+ bool "Sysctl support"
56505+ help
56506+ If you say Y here, you will be able to change the options that
56507+ grsecurity runs with at bootup, without having to recompile your
56508+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56509+ to enable (1) or disable (0) various features. All the sysctl entries
56510+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56511+ All features enabled in the kernel configuration are disabled at boot
56512+ if you do not say Y to the "Turn on features by default" option.
56513+ All options should be set at startup, and the grsec_lock entry should
56514+ be set to a non-zero value after all the options are set.
56515+ *THIS IS EXTREMELY IMPORTANT*
56516+
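A minimal userspace illustration in C of the startup sequence described above: set the desired entries first, then write a non-zero value to grsec_lock last. Entry names other than grsec_lock are examples only:

	#include <stdio.h>

	static int grsec_write(const char *entry, int value)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", entry);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%d\n", value);
		return fclose(f);
	}

	int main(void)
	{
		grsec_write("exec_logging", 1);		/* example feature toggle */
		return grsec_write("grsec_lock", 1);	/* lock; must come last */
	}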
56517+config GRKERNSEC_SYSCTL_DISTRO
56518+ bool "Extra sysctl support for distro makers (READ HELP)"
56519+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56520+ help
56521+ If you say Y here, additional sysctl options will be created
56522+ for features that affect processes running as root. Therefore,
56523+ it is critical when using this option that the grsec_lock entry be
56524+	  enabled after boot. Only distributions that ship prebuilt kernel
56525+	  packages with this option enabled and that can ensure grsec_lock is
56526+	  enabled after boot should use this option.
56527+ *Failure to set grsec_lock after boot makes all grsec features
56528+ this option covers useless*
56529+
56530+ Currently this option creates the following sysctl entries:
56531+ "Disable Privileged I/O": "disable_priv_io"
56532+
56533+config GRKERNSEC_SYSCTL_ON
56534+ bool "Turn on features by default"
56535+ depends on GRKERNSEC_SYSCTL
56536+ help
56537+	  If you say Y here, the features enabled in the kernel configuration
56538+	  will be turned on at boot time rather than starting disabled. It is
56539+	  recommended you say Y here unless
56540+ there is some reason you would want all sysctl-tunable features to
56541+ be disabled by default. As mentioned elsewhere, it is important
56542+ to enable the grsec_lock entry once you have finished modifying
56543+ the sysctl entries.
56544+
56545+endmenu
56546+menu "Logging Options"
56547+depends on GRKERNSEC
56548+
56549+config GRKERNSEC_FLOODTIME
56550+ int "Seconds in between log messages (minimum)"
56551+ default 10
56552+ help
56553+	  This option allows you to enforce a minimum number of seconds between
56554+	  grsecurity log messages. The default should be suitable for most
56555+	  people; if you choose to change it, choose a value small enough
56556+	  to keep the logs informative, but large enough to
56557+	  prevent flooding.
56558+
56559+config GRKERNSEC_FLOODBURST
56560+ int "Number of messages in a burst (maximum)"
56561+ default 4
56562+ help
56563+ This option allows you to choose the maximum number of messages allowed
56564+ within the flood time interval you chose in a separate option. The
56565+	  default should be suitable for most people; however, if you find that
56566+ many of your logs are being interpreted as flooding, you may want to
56567+ raise this value.
56568+
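Together, the two options above implement a simple windowed rate limit: at most GRKERNSEC_FLOODBURST messages per GRKERNSEC_FLOODTIME seconds (4 per 10 s by default). Here is a hedged sketch of that logic, not the patch's actual grsec_log.c code:

	/* Returns 1 if a message may be logged now, 0 if it is suppressed
	 * as flooding.  "now" is a timestamp in seconds. */
	static int example_log_allowed(unsigned long now,
				       unsigned long *window_start,
				       unsigned int *count,
				       unsigned int floodtime,
				       unsigned int floodburst)
	{
		if (now - *window_start >= floodtime) {
			*window_start = now;	/* start a new window */
			*count = 0;
		}
		if (*count >= floodburst)
			return 0;
		(*count)++;
		return 1;
	}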
56569+endmenu
56570+
56571+endmenu
56572diff -urNp linux-2.6.32.45/grsecurity/Makefile linux-2.6.32.45/grsecurity/Makefile
56573--- linux-2.6.32.45/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56574+++ linux-2.6.32.45/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
56575@@ -0,0 +1,33 @@
56576+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56577+# during 2001-2009 it was completely redesigned by Brad Spengler
56578+# into an RBAC system
56579+#
56580+# All code in this directory and various hooks inserted throughout the kernel
56581+# are copyright Brad Spengler - Open Source Security, Inc., and released
56582+# under the GPL v2 or higher
56583+
56584+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56585+ grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
56586+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56587+
56588+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56589+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56590+ gracl_learn.o grsec_log.o
56591+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56592+
56593+ifdef CONFIG_NET
56594+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56595+endif
56596+
56597+ifndef CONFIG_GRKERNSEC
56598+obj-y += grsec_disabled.o
56599+endif
56600+
56601+ifdef CONFIG_GRKERNSEC_HIDESYM
56602+extra-y := grsec_hidesym.o
56603+$(obj)/grsec_hidesym.o:
56604+ @-chmod -f 500 /boot
56605+ @-chmod -f 500 /lib/modules
56606+ @-chmod -f 700 .
56607+ @echo ' grsec: protected kernel image paths'
56608+endif
56609diff -urNp linux-2.6.32.45/include/acpi/acpi_bus.h linux-2.6.32.45/include/acpi/acpi_bus.h
56610--- linux-2.6.32.45/include/acpi/acpi_bus.h 2011-03-27 14:31:47.000000000 -0400
56611+++ linux-2.6.32.45/include/acpi/acpi_bus.h 2011-08-05 20:33:55.000000000 -0400
56612@@ -107,7 +107,7 @@ struct acpi_device_ops {
56613 acpi_op_bind bind;
56614 acpi_op_unbind unbind;
56615 acpi_op_notify notify;
56616-};
56617+} __no_const;
56618
56619 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56620
56621diff -urNp linux-2.6.32.45/include/acpi/acpi_drivers.h linux-2.6.32.45/include/acpi/acpi_drivers.h
56622--- linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
56623+++ linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
56624@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
56625 Dock Station
56626 -------------------------------------------------------------------------- */
56627 struct acpi_dock_ops {
56628- acpi_notify_handler handler;
56629- acpi_notify_handler uevent;
56630+ const acpi_notify_handler handler;
56631+ const acpi_notify_handler uevent;
56632 };
56633
56634 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
56635@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
56636 extern int register_dock_notifier(struct notifier_block *nb);
56637 extern void unregister_dock_notifier(struct notifier_block *nb);
56638 extern int register_hotplug_dock_device(acpi_handle handle,
56639- struct acpi_dock_ops *ops,
56640+ const struct acpi_dock_ops *ops,
56641 void *context);
56642 extern void unregister_hotplug_dock_device(acpi_handle handle);
56643 #else
56644@@ -144,7 +144,7 @@ static inline void unregister_dock_notif
56645 {
56646 }
56647 static inline int register_hotplug_dock_device(acpi_handle handle,
56648- struct acpi_dock_ops *ops,
56649+ const struct acpi_dock_ops *ops,
56650 void *context)
56651 {
56652 return -ENODEV;
56653diff -urNp linux-2.6.32.45/include/asm-generic/atomic-long.h linux-2.6.32.45/include/asm-generic/atomic-long.h
56654--- linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
56655+++ linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
56656@@ -22,6 +22,12 @@
56657
56658 typedef atomic64_t atomic_long_t;
56659
56660+#ifdef CONFIG_PAX_REFCOUNT
56661+typedef atomic64_unchecked_t atomic_long_unchecked_t;
56662+#else
56663+typedef atomic64_t atomic_long_unchecked_t;
56664+#endif
56665+
56666 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56667
56668 static inline long atomic_long_read(atomic_long_t *l)
56669@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56670 return (long)atomic64_read(v);
56671 }
56672
56673+#ifdef CONFIG_PAX_REFCOUNT
56674+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56675+{
56676+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56677+
56678+ return (long)atomic64_read_unchecked(v);
56679+}
56680+#endif
56681+
56682 static inline void atomic_long_set(atomic_long_t *l, long i)
56683 {
56684 atomic64_t *v = (atomic64_t *)l;
56685@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56686 atomic64_set(v, i);
56687 }
56688
56689+#ifdef CONFIG_PAX_REFCOUNT
56690+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56691+{
56692+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56693+
56694+ atomic64_set_unchecked(v, i);
56695+}
56696+#endif
56697+
56698 static inline void atomic_long_inc(atomic_long_t *l)
56699 {
56700 atomic64_t *v = (atomic64_t *)l;
56701@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56702 atomic64_inc(v);
56703 }
56704
56705+#ifdef CONFIG_PAX_REFCOUNT
56706+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56707+{
56708+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56709+
56710+ atomic64_inc_unchecked(v);
56711+}
56712+#endif
56713+
56714 static inline void atomic_long_dec(atomic_long_t *l)
56715 {
56716 atomic64_t *v = (atomic64_t *)l;
56717@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56718 atomic64_dec(v);
56719 }
56720
56721+#ifdef CONFIG_PAX_REFCOUNT
56722+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56723+{
56724+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56725+
56726+ atomic64_dec_unchecked(v);
56727+}
56728+#endif
56729+
56730 static inline void atomic_long_add(long i, atomic_long_t *l)
56731 {
56732 atomic64_t *v = (atomic64_t *)l;
56733@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56734 atomic64_add(i, v);
56735 }
56736
56737+#ifdef CONFIG_PAX_REFCOUNT
56738+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56739+{
56740+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56741+
56742+ atomic64_add_unchecked(i, v);
56743+}
56744+#endif
56745+
56746 static inline void atomic_long_sub(long i, atomic_long_t *l)
56747 {
56748 atomic64_t *v = (atomic64_t *)l;
56749@@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
56750 return (long)atomic64_inc_return(v);
56751 }
56752
56753+#ifdef CONFIG_PAX_REFCOUNT
56754+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56755+{
56756+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56757+
56758+ return (long)atomic64_inc_return_unchecked(v);
56759+}
56760+#endif
56761+
56762 static inline long atomic_long_dec_return(atomic_long_t *l)
56763 {
56764 atomic64_t *v = (atomic64_t *)l;
56765@@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
56766
56767 typedef atomic_t atomic_long_t;
56768
56769+#ifdef CONFIG_PAX_REFCOUNT
56770+typedef atomic_unchecked_t atomic_long_unchecked_t;
56771+#else
56772+typedef atomic_t atomic_long_unchecked_t;
56773+#endif
56774+
56775 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56776 static inline long atomic_long_read(atomic_long_t *l)
56777 {
56778@@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
56779 return (long)atomic_read(v);
56780 }
56781
56782+#ifdef CONFIG_PAX_REFCOUNT
56783+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56784+{
56785+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56786+
56787+ return (long)atomic_read_unchecked(v);
56788+}
56789+#endif
56790+
56791 static inline void atomic_long_set(atomic_long_t *l, long i)
56792 {
56793 atomic_t *v = (atomic_t *)l;
56794@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
56795 atomic_set(v, i);
56796 }
56797
56798+#ifdef CONFIG_PAX_REFCOUNT
56799+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56800+{
56801+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56802+
56803+ atomic_set_unchecked(v, i);
56804+}
56805+#endif
56806+
56807 static inline void atomic_long_inc(atomic_long_t *l)
56808 {
56809 atomic_t *v = (atomic_t *)l;
56810@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
56811 atomic_inc(v);
56812 }
56813
56814+#ifdef CONFIG_PAX_REFCOUNT
56815+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56816+{
56817+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56818+
56819+ atomic_inc_unchecked(v);
56820+}
56821+#endif
56822+
56823 static inline void atomic_long_dec(atomic_long_t *l)
56824 {
56825 atomic_t *v = (atomic_t *)l;
56826@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
56827 atomic_dec(v);
56828 }
56829
56830+#ifdef CONFIG_PAX_REFCOUNT
56831+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56832+{
56833+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56834+
56835+ atomic_dec_unchecked(v);
56836+}
56837+#endif
56838+
56839 static inline void atomic_long_add(long i, atomic_long_t *l)
56840 {
56841 atomic_t *v = (atomic_t *)l;
56842@@ -176,6 +278,15 @@ static inline void atomic_long_add(long
56843 atomic_add(i, v);
56844 }
56845
56846+#ifdef CONFIG_PAX_REFCOUNT
56847+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56848+{
56849+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56850+
56851+ atomic_add_unchecked(i, v);
56852+}
56853+#endif
56854+
56855 static inline void atomic_long_sub(long i, atomic_long_t *l)
56856 {
56857 atomic_t *v = (atomic_t *)l;
56858@@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
56859 return (long)atomic_inc_return(v);
56860 }
56861
56862+#ifdef CONFIG_PAX_REFCOUNT
56863+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56864+{
56865+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56866+
56867+ return (long)atomic_inc_return_unchecked(v);
56868+}
56869+#endif
56870+
56871 static inline long atomic_long_dec_return(atomic_long_t *l)
56872 {
56873 atomic_t *v = (atomic_t *)l;
56874@@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
56875
56876 #endif /* BITS_PER_LONG == 64 */
56877
56878+#ifdef CONFIG_PAX_REFCOUNT
56879+static inline void pax_refcount_needs_these_functions(void)
56880+{
56881+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
56882+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56883+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56884+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56885+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56886+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56887+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56888+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56889+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56890+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56891+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56892+
56893+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56894+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56895+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56896+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56897+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56898+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56899+}
56900+#else
56901+#define atomic_read_unchecked(v) atomic_read(v)
56902+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56903+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56904+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56905+#define atomic_inc_unchecked(v) atomic_inc(v)
56906+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56907+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56908+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56909+#define atomic_dec_unchecked(v) atomic_dec(v)
56910+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56911+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56912+
56913+#define atomic_long_read_unchecked(v) atomic_long_read(v)
56914+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56915+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56916+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56917+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56918+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56919+#endif
56920+
56921 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
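The _unchecked variants defined above exist so that counters which may legitimately wrap, such as statistics or debug IDs, can opt out of PAX_REFCOUNT's overflow detection while reference counts keep it. A minimal usage sketch follows; the counter and function names are illustrative only:

	#include <asm/atomic.h>	/* pulls in asm-generic/atomic-long.h on most arches */

	static atomic_long_unchecked_t example_stat_counter;

	static inline void example_count_event(void)
	{
		/* wrap-around of a pure statistics counter is harmless,
		 * so the unchecked variant is used */
		atomic_long_inc_unchecked(&example_stat_counter);
	}

	static inline long example_read_events(void)
	{
		return atomic_long_read_unchecked(&example_stat_counter);
	}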
56922diff -urNp linux-2.6.32.45/include/asm-generic/cache.h linux-2.6.32.45/include/asm-generic/cache.h
56923--- linux-2.6.32.45/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
56924+++ linux-2.6.32.45/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
56925@@ -6,7 +6,7 @@
56926 * cache lines need to provide their own cache.h.
56927 */
56928
56929-#define L1_CACHE_SHIFT 5
56930-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
56931+#define L1_CACHE_SHIFT 5UL
56932+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
56933
56934 #endif /* __ASM_GENERIC_CACHE_H */
56935diff -urNp linux-2.6.32.45/include/asm-generic/dma-mapping-common.h linux-2.6.32.45/include/asm-generic/dma-mapping-common.h
56936--- linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
56937+++ linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
56938@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
56939 enum dma_data_direction dir,
56940 struct dma_attrs *attrs)
56941 {
56942- struct dma_map_ops *ops = get_dma_ops(dev);
56943+ const struct dma_map_ops *ops = get_dma_ops(dev);
56944 dma_addr_t addr;
56945
56946 kmemcheck_mark_initialized(ptr, size);
56947@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
56948 enum dma_data_direction dir,
56949 struct dma_attrs *attrs)
56950 {
56951- struct dma_map_ops *ops = get_dma_ops(dev);
56952+ const struct dma_map_ops *ops = get_dma_ops(dev);
56953
56954 BUG_ON(!valid_dma_direction(dir));
56955 if (ops->unmap_page)
56956@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
56957 int nents, enum dma_data_direction dir,
56958 struct dma_attrs *attrs)
56959 {
56960- struct dma_map_ops *ops = get_dma_ops(dev);
56961+ const struct dma_map_ops *ops = get_dma_ops(dev);
56962 int i, ents;
56963 struct scatterlist *s;
56964
56965@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
56966 int nents, enum dma_data_direction dir,
56967 struct dma_attrs *attrs)
56968 {
56969- struct dma_map_ops *ops = get_dma_ops(dev);
56970+ const struct dma_map_ops *ops = get_dma_ops(dev);
56971
56972 BUG_ON(!valid_dma_direction(dir));
56973 debug_dma_unmap_sg(dev, sg, nents, dir);
56974@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
56975 size_t offset, size_t size,
56976 enum dma_data_direction dir)
56977 {
56978- struct dma_map_ops *ops = get_dma_ops(dev);
56979+ const struct dma_map_ops *ops = get_dma_ops(dev);
56980 dma_addr_t addr;
56981
56982 kmemcheck_mark_initialized(page_address(page) + offset, size);
56983@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
56984 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
56985 size_t size, enum dma_data_direction dir)
56986 {
56987- struct dma_map_ops *ops = get_dma_ops(dev);
56988+ const struct dma_map_ops *ops = get_dma_ops(dev);
56989
56990 BUG_ON(!valid_dma_direction(dir));
56991 if (ops->unmap_page)
56992@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
56993 size_t size,
56994 enum dma_data_direction dir)
56995 {
56996- struct dma_map_ops *ops = get_dma_ops(dev);
56997+ const struct dma_map_ops *ops = get_dma_ops(dev);
56998
56999 BUG_ON(!valid_dma_direction(dir));
57000 if (ops->sync_single_for_cpu)
57001@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
57002 dma_addr_t addr, size_t size,
57003 enum dma_data_direction dir)
57004 {
57005- struct dma_map_ops *ops = get_dma_ops(dev);
57006+ const struct dma_map_ops *ops = get_dma_ops(dev);
57007
57008 BUG_ON(!valid_dma_direction(dir));
57009 if (ops->sync_single_for_device)
57010@@ -123,7 +123,7 @@ static inline void dma_sync_single_range
57011 size_t size,
57012 enum dma_data_direction dir)
57013 {
57014- struct dma_map_ops *ops = get_dma_ops(dev);
57015+ const struct dma_map_ops *ops = get_dma_ops(dev);
57016
57017 BUG_ON(!valid_dma_direction(dir));
57018 if (ops->sync_single_range_for_cpu) {
57019@@ -140,7 +140,7 @@ static inline void dma_sync_single_range
57020 size_t size,
57021 enum dma_data_direction dir)
57022 {
57023- struct dma_map_ops *ops = get_dma_ops(dev);
57024+ const struct dma_map_ops *ops = get_dma_ops(dev);
57025
57026 BUG_ON(!valid_dma_direction(dir));
57027 if (ops->sync_single_range_for_device) {
57028@@ -155,7 +155,7 @@ static inline void
57029 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
57030 int nelems, enum dma_data_direction dir)
57031 {
57032- struct dma_map_ops *ops = get_dma_ops(dev);
57033+ const struct dma_map_ops *ops = get_dma_ops(dev);
57034
57035 BUG_ON(!valid_dma_direction(dir));
57036 if (ops->sync_sg_for_cpu)
57037@@ -167,7 +167,7 @@ static inline void
57038 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
57039 int nelems, enum dma_data_direction dir)
57040 {
57041- struct dma_map_ops *ops = get_dma_ops(dev);
57042+ const struct dma_map_ops *ops = get_dma_ops(dev);
57043
57044 BUG_ON(!valid_dma_direction(dir));
57045 if (ops->sync_sg_for_device)
57046diff -urNp linux-2.6.32.45/include/asm-generic/futex.h linux-2.6.32.45/include/asm-generic/futex.h
57047--- linux-2.6.32.45/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
57048+++ linux-2.6.32.45/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
57049@@ -6,7 +6,7 @@
57050 #include <asm/errno.h>
57051
57052 static inline int
57053-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
57054+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
57055 {
57056 int op = (encoded_op >> 28) & 7;
57057 int cmp = (encoded_op >> 24) & 15;
57058@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
57059 }
57060
57061 static inline int
57062-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
57063+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
57064 {
57065 return -ENOSYS;
57066 }
57067diff -urNp linux-2.6.32.45/include/asm-generic/int-l64.h linux-2.6.32.45/include/asm-generic/int-l64.h
57068--- linux-2.6.32.45/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
57069+++ linux-2.6.32.45/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
57070@@ -46,6 +46,8 @@ typedef unsigned int u32;
57071 typedef signed long s64;
57072 typedef unsigned long u64;
57073
57074+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57075+
57076 #define S8_C(x) x
57077 #define U8_C(x) x ## U
57078 #define S16_C(x) x
57079diff -urNp linux-2.6.32.45/include/asm-generic/int-ll64.h linux-2.6.32.45/include/asm-generic/int-ll64.h
57080--- linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
57081+++ linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
57082@@ -51,6 +51,8 @@ typedef unsigned int u32;
57083 typedef signed long long s64;
57084 typedef unsigned long long u64;
57085
57086+typedef unsigned long long intoverflow_t;
57087+
57088 #define S8_C(x) x
57089 #define U8_C(x) x ## U
57090 #define S16_C(x) x
57091diff -urNp linux-2.6.32.45/include/asm-generic/kmap_types.h linux-2.6.32.45/include/asm-generic/kmap_types.h
57092--- linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
57093+++ linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
57094@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
57095 KMAP_D(16) KM_IRQ_PTE,
57096 KMAP_D(17) KM_NMI,
57097 KMAP_D(18) KM_NMI_PTE,
57098-KMAP_D(19) KM_TYPE_NR
57099+KMAP_D(19) KM_CLEARPAGE,
57100+KMAP_D(20) KM_TYPE_NR
57101 };
57102
57103 #undef KMAP_D
57104diff -urNp linux-2.6.32.45/include/asm-generic/pgtable.h linux-2.6.32.45/include/asm-generic/pgtable.h
57105--- linux-2.6.32.45/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
57106+++ linux-2.6.32.45/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
57107@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
57108 unsigned long size);
57109 #endif
57110
57111+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57112+static inline unsigned long pax_open_kernel(void) { return 0; }
57113+#endif
57114+
57115+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57116+static inline unsigned long pax_close_kernel(void) { return 0; }
57117+#endif
57118+
57119 #endif /* !__ASSEMBLY__ */
57120
57121 #endif /* _ASM_GENERIC_PGTABLE_H */
57122diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h
57123--- linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
57124+++ linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
57125@@ -1,14 +1,19 @@
57126 #ifndef _PGTABLE_NOPMD_H
57127 #define _PGTABLE_NOPMD_H
57128
57129-#ifndef __ASSEMBLY__
57130-
57131 #include <asm-generic/pgtable-nopud.h>
57132
57133-struct mm_struct;
57134-
57135 #define __PAGETABLE_PMD_FOLDED
57136
57137+#define PMD_SHIFT PUD_SHIFT
57138+#define PTRS_PER_PMD 1
57139+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57140+#define PMD_MASK (~(PMD_SIZE-1))
57141+
57142+#ifndef __ASSEMBLY__
57143+
57144+struct mm_struct;
57145+
57146 /*
57147 * Having the pmd type consist of a pud gets the size right, and allows
57148 * us to conceptually access the pud entry that this pmd is folded into
57149@@ -16,11 +21,6 @@ struct mm_struct;
57150 */
57151 typedef struct { pud_t pud; } pmd_t;
57152
57153-#define PMD_SHIFT PUD_SHIFT
57154-#define PTRS_PER_PMD 1
57155-#define PMD_SIZE (1UL << PMD_SHIFT)
57156-#define PMD_MASK (~(PMD_SIZE-1))
57157-
57158 /*
57159 * The "pud_xxx()" functions here are trivial for a folded two-level
57160 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57161diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopud.h linux-2.6.32.45/include/asm-generic/pgtable-nopud.h
57162--- linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
57163+++ linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
57164@@ -1,10 +1,15 @@
57165 #ifndef _PGTABLE_NOPUD_H
57166 #define _PGTABLE_NOPUD_H
57167
57168-#ifndef __ASSEMBLY__
57169-
57170 #define __PAGETABLE_PUD_FOLDED
57171
57172+#define PUD_SHIFT PGDIR_SHIFT
57173+#define PTRS_PER_PUD 1
57174+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57175+#define PUD_MASK (~(PUD_SIZE-1))
57176+
57177+#ifndef __ASSEMBLY__
57178+
57179 /*
57180 * Having the pud type consist of a pgd gets the size right, and allows
57181 * us to conceptually access the pgd entry that this pud is folded into
57182@@ -12,11 +17,6 @@
57183 */
57184 typedef struct { pgd_t pgd; } pud_t;
57185
57186-#define PUD_SHIFT PGDIR_SHIFT
57187-#define PTRS_PER_PUD 1
57188-#define PUD_SIZE (1UL << PUD_SHIFT)
57189-#define PUD_MASK (~(PUD_SIZE-1))
57190-
57191 /*
57192 * The "pgd_xxx()" functions here are trivial for a folded two-level
57193 * setup: the pud is never bad, and a pud always exists (as it's folded
57194diff -urNp linux-2.6.32.45/include/asm-generic/vmlinux.lds.h linux-2.6.32.45/include/asm-generic/vmlinux.lds.h
57195--- linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
57196+++ linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
57197@@ -199,6 +199,7 @@
57198 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57199 VMLINUX_SYMBOL(__start_rodata) = .; \
57200 *(.rodata) *(.rodata.*) \
57201+ *(.data.read_only) \
57202 *(__vermagic) /* Kernel version magic */ \
57203 *(__markers_strings) /* Markers: strings */ \
57204 *(__tracepoints_strings)/* Tracepoints: strings */ \
57205@@ -656,22 +657,24 @@
57206 * section in the linker script will go there too. @phdr should have
57207 * a leading colon.
57208 *
57209- * Note that this macros defines __per_cpu_load as an absolute symbol.
57210+ * Note that this macros defines per_cpu_load as an absolute symbol.
57211 * If there is no need to put the percpu section at a predetermined
57212 * address, use PERCPU().
57213 */
57214 #define PERCPU_VADDR(vaddr, phdr) \
57215- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57216- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57217+ per_cpu_load = .; \
57218+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57219 - LOAD_OFFSET) { \
57220+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57221 VMLINUX_SYMBOL(__per_cpu_start) = .; \
57222 *(.data.percpu.first) \
57223- *(.data.percpu.page_aligned) \
57224 *(.data.percpu) \
57225+ . = ALIGN(PAGE_SIZE); \
57226+ *(.data.percpu.page_aligned) \
57227 *(.data.percpu.shared_aligned) \
57228 VMLINUX_SYMBOL(__per_cpu_end) = .; \
57229 } phdr \
57230- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
57231+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
57232
57233 /**
57234 * PERCPU - define output section for percpu area, simple version
57235diff -urNp linux-2.6.32.45/include/drm/drm_crtc_helper.h linux-2.6.32.45/include/drm/drm_crtc_helper.h
57236--- linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-03-27 14:31:47.000000000 -0400
57237+++ linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-08-05 20:33:55.000000000 -0400
57238@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
57239
57240 /* reload the current crtc LUT */
57241 void (*load_lut)(struct drm_crtc *crtc);
57242-};
57243+} __no_const;
57244
57245 struct drm_encoder_helper_funcs {
57246 void (*dpms)(struct drm_encoder *encoder, int mode);
57247@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
57248 struct drm_connector *connector);
57249 /* disable encoder when not in use - more explicit than dpms off */
57250 void (*disable)(struct drm_encoder *encoder);
57251-};
57252+} __no_const;
57253
57254 struct drm_connector_helper_funcs {
57255 int (*get_modes)(struct drm_connector *connector);
57256diff -urNp linux-2.6.32.45/include/drm/drmP.h linux-2.6.32.45/include/drm/drmP.h
57257--- linux-2.6.32.45/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
57258+++ linux-2.6.32.45/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
57259@@ -71,6 +71,7 @@
57260 #include <linux/workqueue.h>
57261 #include <linux/poll.h>
57262 #include <asm/pgalloc.h>
57263+#include <asm/local.h>
57264 #include "drm.h"
57265
57266 #include <linux/idr.h>
57267@@ -814,7 +815,7 @@ struct drm_driver {
57268 void (*vgaarb_irq)(struct drm_device *dev, bool state);
57269
57270 /* Driver private ops for this object */
57271- struct vm_operations_struct *gem_vm_ops;
57272+ const struct vm_operations_struct *gem_vm_ops;
57273
57274 int major;
57275 int minor;
57276@@ -917,7 +918,7 @@ struct drm_device {
57277
57278 /** \name Usage Counters */
57279 /*@{ */
57280- int open_count; /**< Outstanding files open */
57281+ local_t open_count; /**< Outstanding files open */
57282 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57283 atomic_t vma_count; /**< Outstanding vma areas open */
57284 int buf_use; /**< Buffers in use -- cannot alloc */
57285@@ -928,7 +929,7 @@ struct drm_device {
57286 /*@{ */
57287 unsigned long counters;
57288 enum drm_stat_type types[15];
57289- atomic_t counts[15];
57290+ atomic_unchecked_t counts[15];
57291 /*@} */
57292
57293 struct list_head filelist;
57294@@ -1016,7 +1017,7 @@ struct drm_device {
57295 struct pci_controller *hose;
57296 #endif
57297 struct drm_sg_mem *sg; /**< Scatter gather memory */
57298- unsigned int num_crtcs; /**< Number of CRTCs on this device */
57299+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
57300 void *dev_private; /**< device private data */
57301 void *mm_private;
57302 struct address_space *dev_mapping;
57303@@ -1042,11 +1043,11 @@ struct drm_device {
57304 spinlock_t object_name_lock;
57305 struct idr object_name_idr;
57306 atomic_t object_count;
57307- atomic_t object_memory;
57308+ atomic_unchecked_t object_memory;
57309 atomic_t pin_count;
57310- atomic_t pin_memory;
57311+ atomic_unchecked_t pin_memory;
57312 atomic_t gtt_count;
57313- atomic_t gtt_memory;
57314+ atomic_unchecked_t gtt_memory;
57315 uint32_t gtt_total;
57316 uint32_t invalidate_domains; /* domains pending invalidation */
57317 uint32_t flush_domains; /* domains pending flush */
57318diff -urNp linux-2.6.32.45/include/drm/ttm/ttm_memory.h linux-2.6.32.45/include/drm/ttm/ttm_memory.h
57319--- linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-03-27 14:31:47.000000000 -0400
57320+++ linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-08-05 20:33:55.000000000 -0400
57321@@ -47,7 +47,7 @@
57322
57323 struct ttm_mem_shrink {
57324 int (*do_shrink) (struct ttm_mem_shrink *);
57325-};
57326+} __no_const;
57327
57328 /**
57329 * struct ttm_mem_global - Global memory accounting structure.
57330diff -urNp linux-2.6.32.45/include/linux/a.out.h linux-2.6.32.45/include/linux/a.out.h
57331--- linux-2.6.32.45/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
57332+++ linux-2.6.32.45/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
57333@@ -39,6 +39,14 @@ enum machine_type {
57334 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57335 };
57336
57337+/* Constants for the N_FLAGS field */
57338+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57339+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57340+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57341+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57342+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57343+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57344+
57345 #if !defined (N_MAGIC)
57346 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57347 #endif
57348diff -urNp linux-2.6.32.45/include/linux/atmdev.h linux-2.6.32.45/include/linux/atmdev.h
57349--- linux-2.6.32.45/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
57350+++ linux-2.6.32.45/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
57351@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57352 #endif
57353
57354 struct k_atm_aal_stats {
57355-#define __HANDLE_ITEM(i) atomic_t i
57356+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57357 __AAL_STAT_ITEMS
57358 #undef __HANDLE_ITEM
57359 };
57360diff -urNp linux-2.6.32.45/include/linux/backlight.h linux-2.6.32.45/include/linux/backlight.h
57361--- linux-2.6.32.45/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
57362+++ linux-2.6.32.45/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
57363@@ -36,18 +36,18 @@ struct backlight_device;
57364 struct fb_info;
57365
57366 struct backlight_ops {
57367- unsigned int options;
57368+ const unsigned int options;
57369
57370 #define BL_CORE_SUSPENDRESUME (1 << 0)
57371
57372 /* Notify the backlight driver some property has changed */
57373- int (*update_status)(struct backlight_device *);
57374+ int (* const update_status)(struct backlight_device *);
57375 /* Return the current backlight brightness (accounting for power,
57376 fb_blank etc.) */
57377- int (*get_brightness)(struct backlight_device *);
57378+ int (* const get_brightness)(struct backlight_device *);
57379 /* Check if given framebuffer device is the one bound to this backlight;
57380 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
57381- int (*check_fb)(struct fb_info *);
57382+ int (* const check_fb)(struct fb_info *);
57383 };
57384
57385 /* This structure defines all the properties of a backlight */
57386@@ -86,7 +86,7 @@ struct backlight_device {
57387 registered this device has been unloaded, and if class_get_devdata()
57388 points to something in the body of that driver, it is also invalid. */
57389 struct mutex ops_lock;
57390- struct backlight_ops *ops;
57391+ const struct backlight_ops *ops;
57392
57393 /* The framebuffer notifier block */
57394 struct notifier_block fb_notif;
57395@@ -103,7 +103,7 @@ static inline void backlight_update_stat
57396 }
57397
57398 extern struct backlight_device *backlight_device_register(const char *name,
57399- struct device *dev, void *devdata, struct backlight_ops *ops);
57400+ struct device *dev, void *devdata, const struct backlight_ops *ops);
57401 extern void backlight_device_unregister(struct backlight_device *bd);
57402 extern void backlight_force_update(struct backlight_device *bd,
57403 enum backlight_update_reason reason);
57404diff -urNp linux-2.6.32.45/include/linux/binfmts.h linux-2.6.32.45/include/linux/binfmts.h
57405--- linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
57406+++ linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
57407@@ -83,6 +83,7 @@ struct linux_binfmt {
57408 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57409 int (*load_shlib)(struct file *);
57410 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
57411+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57412 unsigned long min_coredump; /* minimal dump size */
57413 int hasvdso;
57414 };
57415diff -urNp linux-2.6.32.45/include/linux/blkdev.h linux-2.6.32.45/include/linux/blkdev.h
57416--- linux-2.6.32.45/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
57417+++ linux-2.6.32.45/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
57418@@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
57419 #endif /* CONFIG_BLK_DEV_INTEGRITY */
57420
57421 struct block_device_operations {
57422- int (*open) (struct block_device *, fmode_t);
57423- int (*release) (struct gendisk *, fmode_t);
57424- int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57425- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57426- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57427- int (*direct_access) (struct block_device *, sector_t,
57428+ int (* const open) (struct block_device *, fmode_t);
57429+ int (* const release) (struct gendisk *, fmode_t);
57430+ int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57431+ int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57432+ int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57433+ int (* const direct_access) (struct block_device *, sector_t,
57434 void **, unsigned long *);
57435- int (*media_changed) (struct gendisk *);
57436- unsigned long long (*set_capacity) (struct gendisk *,
57437+ int (* const media_changed) (struct gendisk *);
57438+ unsigned long long (* const set_capacity) (struct gendisk *,
57439 unsigned long long);
57440- int (*revalidate_disk) (struct gendisk *);
57441- int (*getgeo)(struct block_device *, struct hd_geometry *);
57442- struct module *owner;
57443+ int (* const revalidate_disk) (struct gendisk *);
57444+ int (*const getgeo)(struct block_device *, struct hd_geometry *);
57445+ struct module * const owner;
57446 };
57447
57448 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57449diff -urNp linux-2.6.32.45/include/linux/blktrace_api.h linux-2.6.32.45/include/linux/blktrace_api.h
57450--- linux-2.6.32.45/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
57451+++ linux-2.6.32.45/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
57452@@ -160,7 +160,7 @@ struct blk_trace {
57453 struct dentry *dir;
57454 struct dentry *dropped_file;
57455 struct dentry *msg_file;
57456- atomic_t dropped;
57457+ atomic_unchecked_t dropped;
57458 };
57459
57460 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57461diff -urNp linux-2.6.32.45/include/linux/byteorder/little_endian.h linux-2.6.32.45/include/linux/byteorder/little_endian.h
57462--- linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
57463+++ linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
57464@@ -42,51 +42,51 @@
57465
57466 static inline __le64 __cpu_to_le64p(const __u64 *p)
57467 {
57468- return (__force __le64)*p;
57469+ return (__force const __le64)*p;
57470 }
57471 static inline __u64 __le64_to_cpup(const __le64 *p)
57472 {
57473- return (__force __u64)*p;
57474+ return (__force const __u64)*p;
57475 }
57476 static inline __le32 __cpu_to_le32p(const __u32 *p)
57477 {
57478- return (__force __le32)*p;
57479+ return (__force const __le32)*p;
57480 }
57481 static inline __u32 __le32_to_cpup(const __le32 *p)
57482 {
57483- return (__force __u32)*p;
57484+ return (__force const __u32)*p;
57485 }
57486 static inline __le16 __cpu_to_le16p(const __u16 *p)
57487 {
57488- return (__force __le16)*p;
57489+ return (__force const __le16)*p;
57490 }
57491 static inline __u16 __le16_to_cpup(const __le16 *p)
57492 {
57493- return (__force __u16)*p;
57494+ return (__force const __u16)*p;
57495 }
57496 static inline __be64 __cpu_to_be64p(const __u64 *p)
57497 {
57498- return (__force __be64)__swab64p(p);
57499+ return (__force const __be64)__swab64p(p);
57500 }
57501 static inline __u64 __be64_to_cpup(const __be64 *p)
57502 {
57503- return __swab64p((__u64 *)p);
57504+ return __swab64p((const __u64 *)p);
57505 }
57506 static inline __be32 __cpu_to_be32p(const __u32 *p)
57507 {
57508- return (__force __be32)__swab32p(p);
57509+ return (__force const __be32)__swab32p(p);
57510 }
57511 static inline __u32 __be32_to_cpup(const __be32 *p)
57512 {
57513- return __swab32p((__u32 *)p);
57514+ return __swab32p((const __u32 *)p);
57515 }
57516 static inline __be16 __cpu_to_be16p(const __u16 *p)
57517 {
57518- return (__force __be16)__swab16p(p);
57519+ return (__force const __be16)__swab16p(p);
57520 }
57521 static inline __u16 __be16_to_cpup(const __be16 *p)
57522 {
57523- return __swab16p((__u16 *)p);
57524+ return __swab16p((const __u16 *)p);
57525 }
57526 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57527 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57528diff -urNp linux-2.6.32.45/include/linux/cache.h linux-2.6.32.45/include/linux/cache.h
57529--- linux-2.6.32.45/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
57530+++ linux-2.6.32.45/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
57531@@ -16,6 +16,10 @@
57532 #define __read_mostly
57533 #endif
57534
57535+#ifndef __read_only
57536+#define __read_only __read_mostly
57537+#endif
57538+
57539 #ifndef ____cacheline_aligned
57540 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57541 #endif
57542diff -urNp linux-2.6.32.45/include/linux/capability.h linux-2.6.32.45/include/linux/capability.h
57543--- linux-2.6.32.45/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
57544+++ linux-2.6.32.45/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
57545@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
57546 (security_real_capable_noaudit((t), (cap)) == 0)
57547
57548 extern int capable(int cap);
57549+int capable_nolog(int cap);
57550
57551 /* audit system wants to get cap info from files as well */
57552 struct dentry;
57553diff -urNp linux-2.6.32.45/include/linux/compiler-gcc4.h linux-2.6.32.45/include/linux/compiler-gcc4.h
57554--- linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
57555+++ linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-08-05 20:33:55.000000000 -0400
57556@@ -36,4 +36,13 @@
57557 the kernel context */
57558 #define __cold __attribute__((__cold__))
57559
57560+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57561+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57562+#define __bos0(ptr) __bos((ptr), 0)
57563+#define __bos1(ptr) __bos((ptr), 1)
57564+
57565+#if __GNUC_MINOR__ >= 5
57566+#define __no_const __attribute__((no_const))
57567+#endif
57568+
57569 #endif
57570diff -urNp linux-2.6.32.45/include/linux/compiler.h linux-2.6.32.45/include/linux/compiler.h
57571--- linux-2.6.32.45/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
57572+++ linux-2.6.32.45/include/linux/compiler.h 2011-08-05 20:33:55.000000000 -0400
57573@@ -247,6 +247,10 @@ void ftrace_likely_update(struct ftrace_
57574 # define __attribute_const__ /* unimplemented */
57575 #endif
57576
57577+#ifndef __no_const
57578+# define __no_const
57579+#endif
57580+
57581 /*
57582 * Tell gcc if a function is cold. The compiler will assume any path
57583 * directly leading to the call is unlikely.
57584@@ -256,6 +260,22 @@ void ftrace_likely_update(struct ftrace_
57585 #define __cold
57586 #endif
57587
57588+#ifndef __alloc_size
57589+#define __alloc_size(...)
57590+#endif
57591+
57592+#ifndef __bos
57593+#define __bos(ptr, arg)
57594+#endif
57595+
57596+#ifndef __bos0
57597+#define __bos0(ptr)
57598+#endif
57599+
57600+#ifndef __bos1
57601+#define __bos1(ptr)
57602+#endif
57603+
57604 /* Simple shorthand for a section definition */
57605 #ifndef __section
57606 # define __section(S) __attribute__ ((__section__(#S)))
57607@@ -278,6 +298,7 @@ void ftrace_likely_update(struct ftrace_
57608 * use is to mediate communication between process-level code and irq/NMI
57609 * handlers, all running on the same CPU.
57610 */
57611-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57612+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57613+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57614
57615 #endif /* __LINUX_COMPILER_H */
57616diff -urNp linux-2.6.32.45/include/linux/crypto.h linux-2.6.32.45/include/linux/crypto.h
57617--- linux-2.6.32.45/include/linux/crypto.h 2011-03-27 14:31:47.000000000 -0400
57618+++ linux-2.6.32.45/include/linux/crypto.h 2011-08-05 20:33:55.000000000 -0400
57619@@ -394,7 +394,7 @@ struct cipher_tfm {
57620 const u8 *key, unsigned int keylen);
57621 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57622 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57623-};
57624+} __no_const;
57625
57626 struct hash_tfm {
57627 int (*init)(struct hash_desc *desc);
57628@@ -415,13 +415,13 @@ struct compress_tfm {
57629 int (*cot_decompress)(struct crypto_tfm *tfm,
57630 const u8 *src, unsigned int slen,
57631 u8 *dst, unsigned int *dlen);
57632-};
57633+} __no_const;
57634
57635 struct rng_tfm {
57636 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57637 unsigned int dlen);
57638 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57639-};
57640+} __no_const;
57641
57642 #define crt_ablkcipher crt_u.ablkcipher
57643 #define crt_aead crt_u.aead
57644diff -urNp linux-2.6.32.45/include/linux/dcache.h linux-2.6.32.45/include/linux/dcache.h
57645--- linux-2.6.32.45/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
57646+++ linux-2.6.32.45/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
57647@@ -119,6 +119,8 @@ struct dentry {
57648 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
57649 };
57650
57651+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
57652+
57653 /*
57654 * dentry->d_lock spinlock nesting subclasses:
57655 *
57656diff -urNp linux-2.6.32.45/include/linux/decompress/mm.h linux-2.6.32.45/include/linux/decompress/mm.h
57657--- linux-2.6.32.45/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
57658+++ linux-2.6.32.45/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
57659@@ -78,7 +78,7 @@ static void free(void *where)
57660 * warnings when not needed (indeed large_malloc / large_free are not
57661 * needed by inflate */
57662
57663-#define malloc(a) kmalloc(a, GFP_KERNEL)
57664+#define malloc(a) kmalloc((a), GFP_KERNEL)
57665 #define free(a) kfree(a)
57666
57667 #define large_malloc(a) vmalloc(a)
57668diff -urNp linux-2.6.32.45/include/linux/dma-mapping.h linux-2.6.32.45/include/linux/dma-mapping.h
57669--- linux-2.6.32.45/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
57670+++ linux-2.6.32.45/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
57671@@ -16,50 +16,50 @@ enum dma_data_direction {
57672 };
57673
57674 struct dma_map_ops {
57675- void* (*alloc_coherent)(struct device *dev, size_t size,
57676+ void* (* const alloc_coherent)(struct device *dev, size_t size,
57677 dma_addr_t *dma_handle, gfp_t gfp);
57678- void (*free_coherent)(struct device *dev, size_t size,
57679+ void (* const free_coherent)(struct device *dev, size_t size,
57680 void *vaddr, dma_addr_t dma_handle);
57681- dma_addr_t (*map_page)(struct device *dev, struct page *page,
57682+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
57683 unsigned long offset, size_t size,
57684 enum dma_data_direction dir,
57685 struct dma_attrs *attrs);
57686- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
57687+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
57688 size_t size, enum dma_data_direction dir,
57689 struct dma_attrs *attrs);
57690- int (*map_sg)(struct device *dev, struct scatterlist *sg,
57691+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
57692 int nents, enum dma_data_direction dir,
57693 struct dma_attrs *attrs);
57694- void (*unmap_sg)(struct device *dev,
57695+ void (* const unmap_sg)(struct device *dev,
57696 struct scatterlist *sg, int nents,
57697 enum dma_data_direction dir,
57698 struct dma_attrs *attrs);
57699- void (*sync_single_for_cpu)(struct device *dev,
57700+ void (* const sync_single_for_cpu)(struct device *dev,
57701 dma_addr_t dma_handle, size_t size,
57702 enum dma_data_direction dir);
57703- void (*sync_single_for_device)(struct device *dev,
57704+ void (* const sync_single_for_device)(struct device *dev,
57705 dma_addr_t dma_handle, size_t size,
57706 enum dma_data_direction dir);
57707- void (*sync_single_range_for_cpu)(struct device *dev,
57708+ void (* const sync_single_range_for_cpu)(struct device *dev,
57709 dma_addr_t dma_handle,
57710 unsigned long offset,
57711 size_t size,
57712 enum dma_data_direction dir);
57713- void (*sync_single_range_for_device)(struct device *dev,
57714+ void (* const sync_single_range_for_device)(struct device *dev,
57715 dma_addr_t dma_handle,
57716 unsigned long offset,
57717 size_t size,
57718 enum dma_data_direction dir);
57719- void (*sync_sg_for_cpu)(struct device *dev,
57720+ void (* const sync_sg_for_cpu)(struct device *dev,
57721 struct scatterlist *sg, int nents,
57722 enum dma_data_direction dir);
57723- void (*sync_sg_for_device)(struct device *dev,
57724+ void (* const sync_sg_for_device)(struct device *dev,
57725 struct scatterlist *sg, int nents,
57726 enum dma_data_direction dir);
57727- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
57728- int (*dma_supported)(struct device *dev, u64 mask);
57729+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
57730+ int (* const dma_supported)(struct device *dev, u64 mask);
57731 int (*set_dma_mask)(struct device *dev, u64 mask);
57732- int is_phys;
57733+ const int is_phys;
57734 };
57735
57736 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57737diff -urNp linux-2.6.32.45/include/linux/dst.h linux-2.6.32.45/include/linux/dst.h
57738--- linux-2.6.32.45/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
57739+++ linux-2.6.32.45/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
57740@@ -380,7 +380,7 @@ struct dst_node
57741 struct thread_pool *pool;
57742
57743 /* Transaction IDs live here */
57744- atomic_long_t gen;
57745+ atomic_long_unchecked_t gen;
57746
57747 /*
57748 * How frequently and how many times transaction
57749diff -urNp linux-2.6.32.45/include/linux/elf.h linux-2.6.32.45/include/linux/elf.h
57750--- linux-2.6.32.45/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
57751+++ linux-2.6.32.45/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
57752@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57753 #define PT_GNU_EH_FRAME 0x6474e550
57754
57755 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57756+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57757+
57758+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57759+
57760+/* Constants for the e_flags field */
57761+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57762+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57763+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57764+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57765+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57766+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57767
57768 /* These constants define the different elf file types */
57769 #define ET_NONE 0
57770@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
57771 #define DT_DEBUG 21
57772 #define DT_TEXTREL 22
57773 #define DT_JMPREL 23
57774+#define DT_FLAGS 30
57775+ #define DF_TEXTREL 0x00000004
57776 #define DT_ENCODING 32
57777 #define OLD_DT_LOOS 0x60000000
57778 #define DT_LOOS 0x6000000d
57779@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
57780 #define PF_W 0x2
57781 #define PF_X 0x1
57782
57783+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57784+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57785+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57786+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57787+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57788+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57789+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57790+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57791+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57792+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57793+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57794+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57795+
57796 typedef struct elf32_phdr{
57797 Elf32_Word p_type;
57798 Elf32_Off p_offset;
57799@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
57800 #define EI_OSABI 7
57801 #define EI_PAD 8
57802
57803+#define EI_PAX 14
57804+
57805 #define ELFMAG0 0x7f /* EI_MAG */
57806 #define ELFMAG1 'E'
57807 #define ELFMAG2 'L'
57808@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
57809 #define elf_phdr elf32_phdr
57810 #define elf_note elf32_note
57811 #define elf_addr_t Elf32_Off
57812+#define elf_dyn Elf32_Dyn
57813
57814 #else
57815
57816@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
57817 #define elf_phdr elf64_phdr
57818 #define elf_note elf64_note
57819 #define elf_addr_t Elf64_Off
57820+#define elf_dyn Elf64_Dyn
57821
57822 #endif
57823
57824diff -urNp linux-2.6.32.45/include/linux/fscache-cache.h linux-2.6.32.45/include/linux/fscache-cache.h
57825--- linux-2.6.32.45/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
57826+++ linux-2.6.32.45/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
57827@@ -116,7 +116,7 @@ struct fscache_operation {
57828 #endif
57829 };
57830
57831-extern atomic_t fscache_op_debug_id;
57832+extern atomic_unchecked_t fscache_op_debug_id;
57833 extern const struct slow_work_ops fscache_op_slow_work_ops;
57834
57835 extern void fscache_enqueue_operation(struct fscache_operation *);
57836@@ -134,7 +134,7 @@ static inline void fscache_operation_ini
57837 fscache_operation_release_t release)
57838 {
57839 atomic_set(&op->usage, 1);
57840- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57841+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57842 op->release = release;
57843 INIT_LIST_HEAD(&op->pend_link);
57844 fscache_set_op_state(op, "Init");
57845diff -urNp linux-2.6.32.45/include/linux/fs.h linux-2.6.32.45/include/linux/fs.h
57846--- linux-2.6.32.45/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
57847+++ linux-2.6.32.45/include/linux/fs.h 2011-08-05 20:33:55.000000000 -0400
57848@@ -90,6 +90,11 @@ struct inodes_stat_t {
57849 /* Expect random access pattern */
57850 #define FMODE_RANDOM ((__force fmode_t)4096)
57851
57852+/* Hack for grsec so as not to require read permission simply to execute
57853+ * a binary
57854+ */
57855+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
57856+
57857 /*
57858 * The below are the various read and write types that we support. Some of
57859 * them include behavioral modifiers that send information down to the
57860@@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
57861 unsigned long, unsigned long);
57862
57863 struct address_space_operations {
57864- int (*writepage)(struct page *page, struct writeback_control *wbc);
57865- int (*readpage)(struct file *, struct page *);
57866- void (*sync_page)(struct page *);
57867+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
57868+ int (* const readpage)(struct file *, struct page *);
57869+ void (* const sync_page)(struct page *);
57870
57871 /* Write back some dirty pages from this mapping. */
57872- int (*writepages)(struct address_space *, struct writeback_control *);
57873+ int (* const writepages)(struct address_space *, struct writeback_control *);
57874
57875 /* Set a page dirty. Return true if this dirtied it */
57876- int (*set_page_dirty)(struct page *page);
57877+ int (* const set_page_dirty)(struct page *page);
57878
57879- int (*readpages)(struct file *filp, struct address_space *mapping,
57880+ int (* const readpages)(struct file *filp, struct address_space *mapping,
57881 struct list_head *pages, unsigned nr_pages);
57882
57883- int (*write_begin)(struct file *, struct address_space *mapping,
57884+ int (* const write_begin)(struct file *, struct address_space *mapping,
57885 loff_t pos, unsigned len, unsigned flags,
57886 struct page **pagep, void **fsdata);
57887- int (*write_end)(struct file *, struct address_space *mapping,
57888+ int (* const write_end)(struct file *, struct address_space *mapping,
57889 loff_t pos, unsigned len, unsigned copied,
57890 struct page *page, void *fsdata);
57891
57892 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
57893- sector_t (*bmap)(struct address_space *, sector_t);
57894- void (*invalidatepage) (struct page *, unsigned long);
57895- int (*releasepage) (struct page *, gfp_t);
57896- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
57897+ sector_t (* const bmap)(struct address_space *, sector_t);
57898+ void (* const invalidatepage) (struct page *, unsigned long);
57899+ int (* const releasepage) (struct page *, gfp_t);
57900+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
57901 loff_t offset, unsigned long nr_segs);
57902- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
57903+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
57904 void **, unsigned long *);
57905 /* migrate the contents of a page to the specified target */
57906- int (*migratepage) (struct address_space *,
57907+ int (* const migratepage) (struct address_space *,
57908 struct page *, struct page *);
57909- int (*launder_page) (struct page *);
57910- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
57911+ int (* const launder_page) (struct page *);
57912+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
57913 unsigned long);
57914- int (*error_remove_page)(struct address_space *, struct page *);
57915+ int (* const error_remove_page)(struct address_space *, struct page *);
57916 };
57917
57918 /*
57919@@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
57920 typedef struct files_struct *fl_owner_t;
57921
57922 struct file_lock_operations {
57923- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57924- void (*fl_release_private)(struct file_lock *);
57925+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57926+ void (* const fl_release_private)(struct file_lock *);
57927 };
57928
57929 struct lock_manager_operations {
57930- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
57931- void (*fl_notify)(struct file_lock *); /* unblock callback */
57932- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
57933- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57934- void (*fl_release_private)(struct file_lock *);
57935- void (*fl_break)(struct file_lock *);
57936- int (*fl_mylease)(struct file_lock *, struct file_lock *);
57937- int (*fl_change)(struct file_lock **, int);
57938+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
57939+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
57940+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
57941+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57942+ void (* const fl_release_private)(struct file_lock *);
57943+ void (* const fl_break)(struct file_lock *);
57944+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
57945+ int (* const fl_change)(struct file_lock **, int);
57946 };
57947
57948 struct lock_manager {
57949@@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
57950 unsigned int fi_flags; /* Flags as passed from user */
57951 unsigned int fi_extents_mapped; /* Number of mapped extents */
57952 unsigned int fi_extents_max; /* Size of fiemap_extent array */
57953- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
57954+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
57955 * array */
57956 };
57957 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
57958@@ -1486,7 +1491,7 @@ struct block_device_operations;
57959 * can be called without the big kernel lock held in all filesystems.
57960 */
57961 struct file_operations {
57962- struct module *owner;
57963+ struct module * const owner;
57964 loff_t (*llseek) (struct file *, loff_t, int);
57965 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
57966 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
57967@@ -1559,30 +1564,30 @@ extern ssize_t vfs_writev(struct file *,
57968 unsigned long, loff_t *);
57969
57970 struct super_operations {
57971- struct inode *(*alloc_inode)(struct super_block *sb);
57972- void (*destroy_inode)(struct inode *);
57973+ struct inode *(* const alloc_inode)(struct super_block *sb);
57974+ void (* const destroy_inode)(struct inode *);
57975
57976- void (*dirty_inode) (struct inode *);
57977- int (*write_inode) (struct inode *, int);
57978- void (*drop_inode) (struct inode *);
57979- void (*delete_inode) (struct inode *);
57980- void (*put_super) (struct super_block *);
57981- void (*write_super) (struct super_block *);
57982- int (*sync_fs)(struct super_block *sb, int wait);
57983- int (*freeze_fs) (struct super_block *);
57984- int (*unfreeze_fs) (struct super_block *);
57985- int (*statfs) (struct dentry *, struct kstatfs *);
57986- int (*remount_fs) (struct super_block *, int *, char *);
57987- void (*clear_inode) (struct inode *);
57988- void (*umount_begin) (struct super_block *);
57989+ void (* const dirty_inode) (struct inode *);
57990+ int (* const write_inode) (struct inode *, int);
57991+ void (* const drop_inode) (struct inode *);
57992+ void (* const delete_inode) (struct inode *);
57993+ void (* const put_super) (struct super_block *);
57994+ void (* const write_super) (struct super_block *);
57995+ int (* const sync_fs)(struct super_block *sb, int wait);
57996+ int (* const freeze_fs) (struct super_block *);
57997+ int (* const unfreeze_fs) (struct super_block *);
57998+ int (* const statfs) (struct dentry *, struct kstatfs *);
57999+ int (* const remount_fs) (struct super_block *, int *, char *);
58000+ void (* const clear_inode) (struct inode *);
58001+ void (* const umount_begin) (struct super_block *);
58002
58003- int (*show_options)(struct seq_file *, struct vfsmount *);
58004- int (*show_stats)(struct seq_file *, struct vfsmount *);
58005+ int (* const show_options)(struct seq_file *, struct vfsmount *);
58006+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
58007 #ifdef CONFIG_QUOTA
58008- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
58009- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58010+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
58011+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58012 #endif
58013- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58014+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58015 };
58016
58017 /*
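The fs.h hunks above belong to the patch's broader constification of function-pointer tables: making each member a const pointer means an instance can only be populated by a static initializer, so a run-time write primitive cannot retarget the hooks. A small standalone C sketch of the idiom; the struct and function names here are invented for illustration.

#include <stdio.h>

/* same idiom as the patched address_space_operations / super_operations:  */
/* each member is a *const* function pointer, so it can only be set by a   */
/* static initializer and any later assignment is rejected at compile time */
struct demo_ops {
	int (* const read)(int unit);
	int (* const write)(int unit, int val);
};

static int demo_read(int unit)           { printf("read %d\n", unit); return 0; }
static int demo_write(int unit, int val) { printf("write %d=%d\n", unit, val); return 0; }

static const struct demo_ops ops = {
	.read  = demo_read,
	.write = demo_write,
};

int main(void)
{
	ops.read(1);
	ops.write(1, 42);
	/* ops.read = demo_write;  -- would not compile: the pointer is const */
	return 0;
}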
58018diff -urNp linux-2.6.32.45/include/linux/fs_struct.h linux-2.6.32.45/include/linux/fs_struct.h
58019--- linux-2.6.32.45/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
58020+++ linux-2.6.32.45/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
58021@@ -4,7 +4,7 @@
58022 #include <linux/path.h>
58023
58024 struct fs_struct {
58025- int users;
58026+ atomic_t users;
58027 rwlock_t lock;
58028 int umask;
58029 int in_exec;
58030diff -urNp linux-2.6.32.45/include/linux/ftrace_event.h linux-2.6.32.45/include/linux/ftrace_event.h
58031--- linux-2.6.32.45/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
58032+++ linux-2.6.32.45/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
58033@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
58034 int filter_type);
58035 extern int trace_define_common_fields(struct ftrace_event_call *call);
58036
58037-#define is_signed_type(type) (((type)(-1)) < 0)
58038+#define is_signed_type(type) (((type)(-1)) < (type)1)
58039
58040 int trace_set_clr_event(const char *system, const char *event, int set);
58041
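The is_signed_type() rewrite gives the same answer for the integer types it is used on, signedness being whether (type)(-1) compares below (type)1, while avoiding the "comparison of unsigned expression < 0 is always false" warning that some compiler configurations emit for the original literal "< 0" form. A quick standalone check of the rewritten macro:

#include <assert.h>
#include <stdbool.h>

/* rewritten macro from the hunk above */
#define is_signed_type(type) (((type)(-1)) < (type)1)

int main(void)
{
	/* signed types: (type)(-1) is negative, hence smaller than (type)1 */
	assert(is_signed_type(int));
	assert(is_signed_type(long));
	/* unsigned types: (type)(-1) wraps to the maximum value */
	assert(!is_signed_type(unsigned int));
	assert(!is_signed_type(unsigned long));
	assert(!is_signed_type(bool));
	return 0;
}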
58042diff -urNp linux-2.6.32.45/include/linux/genhd.h linux-2.6.32.45/include/linux/genhd.h
58043--- linux-2.6.32.45/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
58044+++ linux-2.6.32.45/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
58045@@ -161,7 +161,7 @@ struct gendisk {
58046
58047 struct timer_rand_state *random;
58048
58049- atomic_t sync_io; /* RAID */
58050+ atomic_unchecked_t sync_io; /* RAID */
58051 struct work_struct async_notify;
58052 #ifdef CONFIG_BLK_DEV_INTEGRITY
58053 struct blk_integrity *integrity;
58054diff -urNp linux-2.6.32.45/include/linux/gracl.h linux-2.6.32.45/include/linux/gracl.h
58055--- linux-2.6.32.45/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
58056+++ linux-2.6.32.45/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
58057@@ -0,0 +1,317 @@
58058+#ifndef GR_ACL_H
58059+#define GR_ACL_H
58060+
58061+#include <linux/grdefs.h>
58062+#include <linux/resource.h>
58063+#include <linux/capability.h>
58064+#include <linux/dcache.h>
58065+#include <asm/resource.h>
58066+
58067+/* Major status information */
58068+
58069+#define GR_VERSION "grsecurity 2.2.2"
58070+#define GRSECURITY_VERSION 0x2202
58071+
58072+enum {
58073+ GR_SHUTDOWN = 0,
58074+ GR_ENABLE = 1,
58075+ GR_SPROLE = 2,
58076+ GR_RELOAD = 3,
58077+ GR_SEGVMOD = 4,
58078+ GR_STATUS = 5,
58079+ GR_UNSPROLE = 6,
58080+ GR_PASSSET = 7,
58081+ GR_SPROLEPAM = 8,
58082+};
58083+
58084+/* Password setup definitions
58085+ * kernel/grhash.c */
58086+enum {
58087+ GR_PW_LEN = 128,
58088+ GR_SALT_LEN = 16,
58089+ GR_SHA_LEN = 32,
58090+};
58091+
58092+enum {
58093+ GR_SPROLE_LEN = 64,
58094+};
58095+
58096+enum {
58097+ GR_NO_GLOB = 0,
58098+ GR_REG_GLOB,
58099+ GR_CREATE_GLOB
58100+};
58101+
58102+#define GR_NLIMITS 32
58103+
58104+/* Begin Data Structures */
58105+
58106+struct sprole_pw {
58107+ unsigned char *rolename;
58108+ unsigned char salt[GR_SALT_LEN];
58109+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58110+};
58111+
58112+struct name_entry {
58113+ __u32 key;
58114+ ino_t inode;
58115+ dev_t device;
58116+ char *name;
58117+ __u16 len;
58118+ __u8 deleted;
58119+ struct name_entry *prev;
58120+ struct name_entry *next;
58121+};
58122+
58123+struct inodev_entry {
58124+ struct name_entry *nentry;
58125+ struct inodev_entry *prev;
58126+ struct inodev_entry *next;
58127+};
58128+
58129+struct acl_role_db {
58130+ struct acl_role_label **r_hash;
58131+ __u32 r_size;
58132+};
58133+
58134+struct inodev_db {
58135+ struct inodev_entry **i_hash;
58136+ __u32 i_size;
58137+};
58138+
58139+struct name_db {
58140+ struct name_entry **n_hash;
58141+ __u32 n_size;
58142+};
58143+
58144+struct crash_uid {
58145+ uid_t uid;
58146+ unsigned long expires;
58147+};
58148+
58149+struct gr_hash_struct {
58150+ void **table;
58151+ void **nametable;
58152+ void *first;
58153+ __u32 table_size;
58154+ __u32 used_size;
58155+ int type;
58156+};
58157+
58158+/* Userspace Grsecurity ACL data structures */
58159+
58160+struct acl_subject_label {
58161+ char *filename;
58162+ ino_t inode;
58163+ dev_t device;
58164+ __u32 mode;
58165+ kernel_cap_t cap_mask;
58166+ kernel_cap_t cap_lower;
58167+ kernel_cap_t cap_invert_audit;
58168+
58169+ struct rlimit res[GR_NLIMITS];
58170+ __u32 resmask;
58171+
58172+ __u8 user_trans_type;
58173+ __u8 group_trans_type;
58174+ uid_t *user_transitions;
58175+ gid_t *group_transitions;
58176+ __u16 user_trans_num;
58177+ __u16 group_trans_num;
58178+
58179+ __u32 sock_families[2];
58180+ __u32 ip_proto[8];
58181+ __u32 ip_type;
58182+ struct acl_ip_label **ips;
58183+ __u32 ip_num;
58184+ __u32 inaddr_any_override;
58185+
58186+ __u32 crashes;
58187+ unsigned long expires;
58188+
58189+ struct acl_subject_label *parent_subject;
58190+ struct gr_hash_struct *hash;
58191+ struct acl_subject_label *prev;
58192+ struct acl_subject_label *next;
58193+
58194+ struct acl_object_label **obj_hash;
58195+ __u32 obj_hash_size;
58196+ __u16 pax_flags;
58197+};
58198+
58199+struct role_allowed_ip {
58200+ __u32 addr;
58201+ __u32 netmask;
58202+
58203+ struct role_allowed_ip *prev;
58204+ struct role_allowed_ip *next;
58205+};
58206+
58207+struct role_transition {
58208+ char *rolename;
58209+
58210+ struct role_transition *prev;
58211+ struct role_transition *next;
58212+};
58213+
58214+struct acl_role_label {
58215+ char *rolename;
58216+ uid_t uidgid;
58217+ __u16 roletype;
58218+
58219+ __u16 auth_attempts;
58220+ unsigned long expires;
58221+
58222+ struct acl_subject_label *root_label;
58223+ struct gr_hash_struct *hash;
58224+
58225+ struct acl_role_label *prev;
58226+ struct acl_role_label *next;
58227+
58228+ struct role_transition *transitions;
58229+ struct role_allowed_ip *allowed_ips;
58230+ uid_t *domain_children;
58231+ __u16 domain_child_num;
58232+
58233+ struct acl_subject_label **subj_hash;
58234+ __u32 subj_hash_size;
58235+};
58236+
58237+struct user_acl_role_db {
58238+ struct acl_role_label **r_table;
58239+ __u32 num_pointers; /* Number of allocations to track */
58240+ __u32 num_roles; /* Number of roles */
58241+ __u32 num_domain_children; /* Number of domain children */
58242+ __u32 num_subjects; /* Number of subjects */
58243+ __u32 num_objects; /* Number of objects */
58244+};
58245+
58246+struct acl_object_label {
58247+ char *filename;
58248+ ino_t inode;
58249+ dev_t device;
58250+ __u32 mode;
58251+
58252+ struct acl_subject_label *nested;
58253+ struct acl_object_label *globbed;
58254+
58255+ /* next two structures not used */
58256+
58257+ struct acl_object_label *prev;
58258+ struct acl_object_label *next;
58259+};
58260+
58261+struct acl_ip_label {
58262+ char *iface;
58263+ __u32 addr;
58264+ __u32 netmask;
58265+ __u16 low, high;
58266+ __u8 mode;
58267+ __u32 type;
58268+ __u32 proto[8];
58269+
58270+ /* next two structures not used */
58271+
58272+ struct acl_ip_label *prev;
58273+ struct acl_ip_label *next;
58274+};
58275+
58276+struct gr_arg {
58277+ struct user_acl_role_db role_db;
58278+ unsigned char pw[GR_PW_LEN];
58279+ unsigned char salt[GR_SALT_LEN];
58280+ unsigned char sum[GR_SHA_LEN];
58281+ unsigned char sp_role[GR_SPROLE_LEN];
58282+ struct sprole_pw *sprole_pws;
58283+ dev_t segv_device;
58284+ ino_t segv_inode;
58285+ uid_t segv_uid;
58286+ __u16 num_sprole_pws;
58287+ __u16 mode;
58288+};
58289+
58290+struct gr_arg_wrapper {
58291+ struct gr_arg *arg;
58292+ __u32 version;
58293+ __u32 size;
58294+};
58295+
58296+struct subject_map {
58297+ struct acl_subject_label *user;
58298+ struct acl_subject_label *kernel;
58299+ struct subject_map *prev;
58300+ struct subject_map *next;
58301+};
58302+
58303+struct acl_subj_map_db {
58304+ struct subject_map **s_hash;
58305+ __u32 s_size;
58306+};
58307+
58308+/* End Data Structures Section */
58309+
58310+/* Hash functions generated by empirical testing by Brad Spengler
58311+ Makes good use of the low bits of the inode. Generally 0-1 times
58312+ in loop for successful match. 0-3 for unsuccessful match.
58313+ Shift/add algorithm with modulus of table size and an XOR*/
58314+
58315+static __inline__ unsigned int
58316+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58317+{
58318+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58319+}
58320+
58321+static __inline__ unsigned int
58322+shash(const struct acl_subject_label *userp, const unsigned int sz)
58323+{
58324+ return ((const unsigned long)userp % sz);
58325+}
58326+
58327+static __inline__ unsigned int
58328+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58329+{
58330+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58331+}
58332+
58333+static __inline__ unsigned int
58334+nhash(const char *name, const __u16 len, const unsigned int sz)
58335+{
58336+ return full_name_hash((const unsigned char *)name, len) % sz;
58337+}
58338+
58339+#define FOR_EACH_ROLE_START(role) \
58340+ role = role_list; \
58341+ while (role) {
58342+
58343+#define FOR_EACH_ROLE_END(role) \
58344+ role = role->prev; \
58345+ }
58346+
58347+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58348+ subj = NULL; \
58349+ iter = 0; \
58350+ while (iter < role->subj_hash_size) { \
58351+ if (subj == NULL) \
58352+ subj = role->subj_hash[iter]; \
58353+ if (subj == NULL) { \
58354+ iter++; \
58355+ continue; \
58356+ }
58357+
58358+#define FOR_EACH_SUBJECT_END(subj,iter) \
58359+ subj = subj->next; \
58360+ if (subj == NULL) \
58361+ iter++; \
58362+ }
58363+
58364+
58365+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58366+ subj = role->hash->first; \
58367+ while (subj != NULL) {
58368+
58369+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58370+ subj = subj->next; \
58371+ }
58372+
58373+#endif
58374+
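The RBAC lookup tables declared in gracl.h are open hash tables indexed by the inline helpers above, which the comment notes were tuned empirically to make good use of the low inode bits. A standalone sketch that exercises fhash()/nhash() against a small table size, showing how (inode, device) pairs and names map to buckets; the kernel's full_name_hash() is replaced by a trivial stand-in and the sample values are invented.

#include <stddef.h>
#include <stdio.h>
#include <sys/types.h>

/* fhash() copied from the hunk above (storage-class adapted for userspace) */
static inline unsigned int
fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
{
	return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

/* stand-in for the kernel's full_name_hash(); only for this sketch */
static unsigned long name_hash(const char *name, unsigned int len)
{
	unsigned long hash = 0;

	while (len--)
		hash = hash * 31 + (unsigned char)*name++;
	return hash;
}

static inline unsigned int
nhash(const char *name, const unsigned short len, const unsigned int sz)
{
	return name_hash(name, len) % sz;
}

int main(void)
{
	const unsigned int sz = 32;           /* pretend object/subject table size */
	ino_t inodes[] = { 2, 1337, 524289 };
	dev_t dev = 0x801;
	size_t i;

	for (i = 0; i < sizeof(inodes) / sizeof(inodes[0]); i++)
		printf("fhash(ino=%lu, dev=%#lx) -> bucket %u\n",
		       (unsigned long)inodes[i], (unsigned long)dev,
		       fhash(inodes[i], dev, sz));

	printf("nhash(\"/bin/sh\") -> bucket %u\n", nhash("/bin/sh", 7, sz));
	return 0;
}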
58375diff -urNp linux-2.6.32.45/include/linux/gralloc.h linux-2.6.32.45/include/linux/gralloc.h
58376--- linux-2.6.32.45/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58377+++ linux-2.6.32.45/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
58378@@ -0,0 +1,9 @@
58379+#ifndef __GRALLOC_H
58380+#define __GRALLOC_H
58381+
58382+void acl_free_all(void);
58383+int acl_alloc_stack_init(unsigned long size);
58384+void *acl_alloc(unsigned long len);
58385+void *acl_alloc_num(unsigned long num, unsigned long len);
58386+
58387+#endif
58388diff -urNp linux-2.6.32.45/include/linux/grdefs.h linux-2.6.32.45/include/linux/grdefs.h
58389--- linux-2.6.32.45/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58390+++ linux-2.6.32.45/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
58391@@ -0,0 +1,140 @@
58392+#ifndef GRDEFS_H
58393+#define GRDEFS_H
58394+
58395+/* Begin grsecurity status declarations */
58396+
58397+enum {
58398+ GR_READY = 0x01,
58399+ GR_STATUS_INIT = 0x00 // disabled state
58400+};
58401+
58402+/* Begin ACL declarations */
58403+
58404+/* Role flags */
58405+
58406+enum {
58407+ GR_ROLE_USER = 0x0001,
58408+ GR_ROLE_GROUP = 0x0002,
58409+ GR_ROLE_DEFAULT = 0x0004,
58410+ GR_ROLE_SPECIAL = 0x0008,
58411+ GR_ROLE_AUTH = 0x0010,
58412+ GR_ROLE_NOPW = 0x0020,
58413+ GR_ROLE_GOD = 0x0040,
58414+ GR_ROLE_LEARN = 0x0080,
58415+ GR_ROLE_TPE = 0x0100,
58416+ GR_ROLE_DOMAIN = 0x0200,
58417+ GR_ROLE_PAM = 0x0400,
58418+ GR_ROLE_PERSIST = 0x800
58419+};
58420+
58421+/* ACL Subject and Object mode flags */
58422+enum {
58423+ GR_DELETED = 0x80000000
58424+};
58425+
58426+/* ACL Object-only mode flags */
58427+enum {
58428+ GR_READ = 0x00000001,
58429+ GR_APPEND = 0x00000002,
58430+ GR_WRITE = 0x00000004,
58431+ GR_EXEC = 0x00000008,
58432+ GR_FIND = 0x00000010,
58433+ GR_INHERIT = 0x00000020,
58434+ GR_SETID = 0x00000040,
58435+ GR_CREATE = 0x00000080,
58436+ GR_DELETE = 0x00000100,
58437+ GR_LINK = 0x00000200,
58438+ GR_AUDIT_READ = 0x00000400,
58439+ GR_AUDIT_APPEND = 0x00000800,
58440+ GR_AUDIT_WRITE = 0x00001000,
58441+ GR_AUDIT_EXEC = 0x00002000,
58442+ GR_AUDIT_FIND = 0x00004000,
58443+ GR_AUDIT_INHERIT= 0x00008000,
58444+ GR_AUDIT_SETID = 0x00010000,
58445+ GR_AUDIT_CREATE = 0x00020000,
58446+ GR_AUDIT_DELETE = 0x00040000,
58447+ GR_AUDIT_LINK = 0x00080000,
58448+ GR_PTRACERD = 0x00100000,
58449+ GR_NOPTRACE = 0x00200000,
58450+ GR_SUPPRESS = 0x00400000,
58451+ GR_NOLEARN = 0x00800000,
58452+ GR_INIT_TRANSFER= 0x01000000
58453+};
58454+
58455+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58456+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58457+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58458+
58459+/* ACL subject-only mode flags */
58460+enum {
58461+ GR_KILL = 0x00000001,
58462+ GR_VIEW = 0x00000002,
58463+ GR_PROTECTED = 0x00000004,
58464+ GR_LEARN = 0x00000008,
58465+ GR_OVERRIDE = 0x00000010,
58466+ /* just a placeholder, this mode is only used in userspace */
58467+ GR_DUMMY = 0x00000020,
58468+ GR_PROTSHM = 0x00000040,
58469+ GR_KILLPROC = 0x00000080,
58470+ GR_KILLIPPROC = 0x00000100,
58471+ /* just a placeholder, this mode is only used in userspace */
58472+ GR_NOTROJAN = 0x00000200,
58473+ GR_PROTPROCFD = 0x00000400,
58474+ GR_PROCACCT = 0x00000800,
58475+ GR_RELAXPTRACE = 0x00001000,
58476+ GR_NESTED = 0x00002000,
58477+ GR_INHERITLEARN = 0x00004000,
58478+ GR_PROCFIND = 0x00008000,
58479+ GR_POVERRIDE = 0x00010000,
58480+ GR_KERNELAUTH = 0x00020000,
58481+ GR_ATSECURE = 0x00040000,
58482+ GR_SHMEXEC = 0x00080000
58483+};
58484+
58485+enum {
58486+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58487+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58488+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58489+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58490+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58491+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58492+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58493+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58494+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58495+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58496+};
58497+
58498+enum {
58499+ GR_ID_USER = 0x01,
58500+ GR_ID_GROUP = 0x02,
58501+};
58502+
58503+enum {
58504+ GR_ID_ALLOW = 0x01,
58505+ GR_ID_DENY = 0x02,
58506+};
58507+
58508+#define GR_CRASH_RES 31
58509+#define GR_UIDTABLE_MAX 500
58510+
58511+/* begin resource learning section */
58512+enum {
58513+ GR_RLIM_CPU_BUMP = 60,
58514+ GR_RLIM_FSIZE_BUMP = 50000,
58515+ GR_RLIM_DATA_BUMP = 10000,
58516+ GR_RLIM_STACK_BUMP = 1000,
58517+ GR_RLIM_CORE_BUMP = 10000,
58518+ GR_RLIM_RSS_BUMP = 500000,
58519+ GR_RLIM_NPROC_BUMP = 1,
58520+ GR_RLIM_NOFILE_BUMP = 5,
58521+ GR_RLIM_MEMLOCK_BUMP = 50000,
58522+ GR_RLIM_AS_BUMP = 500000,
58523+ GR_RLIM_LOCKS_BUMP = 2,
58524+ GR_RLIM_SIGPENDING_BUMP = 5,
58525+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58526+ GR_RLIM_NICE_BUMP = 1,
58527+ GR_RLIM_RTPRIO_BUMP = 1,
58528+ GR_RLIM_RTTIME_BUMP = 1000000
58529+};
58530+
58531+#endif
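The object-mode bits defined in grdefs.h above are what the RBAC policy tools encode per file object and what the kernel tests on access; turning a mask back into letters is a convenient way to inspect a policy or a log entry. A small sketch over a subset of the flag values from the hunk above; the letter mapping is invented here and is not gradm's exact output format.

#include <stddef.h>
#include <stdio.h>

/* object-only mode bits from the hunk above (subset) */
#define GR_READ    0x00000001u
#define GR_APPEND  0x00000002u
#define GR_WRITE   0x00000004u
#define GR_EXEC    0x00000008u
#define GR_FIND    0x00000010u
#define GR_CREATE  0x00000080u
#define GR_DELETE  0x00000100u
#define GR_LINK    0x00000200u

/* illustrative letter mapping only */
static void decode_obj_mode(unsigned int mode, char *buf)
{
	static const struct { unsigned int bit; char c; } tab[] = {
		{ GR_READ, 'r' }, { GR_APPEND, 'a' }, { GR_WRITE, 'w' },
		{ GR_EXEC, 'x' }, { GR_FIND, 'f' }, { GR_CREATE, 'c' },
		{ GR_DELETE, 'd' }, { GR_LINK, 'l' },
	};
	size_t i;

	for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++)
		*buf++ = (mode & tab[i].bit) ? tab[i].c : '-';
	*buf = '\0';
}

int main(void)
{
	char buf[16];

	decode_obj_mode(GR_READ | GR_EXEC | GR_FIND, buf);
	printf("%s\n", buf);    /* prints r--xf--- */
	return 0;
}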
58532diff -urNp linux-2.6.32.45/include/linux/grinternal.h linux-2.6.32.45/include/linux/grinternal.h
58533--- linux-2.6.32.45/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58534+++ linux-2.6.32.45/include/linux/grinternal.h 2011-08-11 19:58:37.000000000 -0400
58535@@ -0,0 +1,217 @@
58536+#ifndef __GRINTERNAL_H
58537+#define __GRINTERNAL_H
58538+
58539+#ifdef CONFIG_GRKERNSEC
58540+
58541+#include <linux/fs.h>
58542+#include <linux/mnt_namespace.h>
58543+#include <linux/nsproxy.h>
58544+#include <linux/gracl.h>
58545+#include <linux/grdefs.h>
58546+#include <linux/grmsg.h>
58547+
58548+void gr_add_learn_entry(const char *fmt, ...)
58549+ __attribute__ ((format (printf, 1, 2)));
58550+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58551+ const struct vfsmount *mnt);
58552+__u32 gr_check_create(const struct dentry *new_dentry,
58553+ const struct dentry *parent,
58554+ const struct vfsmount *mnt, const __u32 mode);
58555+int gr_check_protected_task(const struct task_struct *task);
58556+__u32 to_gr_audit(const __u32 reqmode);
58557+int gr_set_acls(const int type);
58558+int gr_apply_subject_to_task(struct task_struct *task);
58559+int gr_acl_is_enabled(void);
58560+char gr_roletype_to_char(void);
58561+
58562+void gr_handle_alertkill(struct task_struct *task);
58563+char *gr_to_filename(const struct dentry *dentry,
58564+ const struct vfsmount *mnt);
58565+char *gr_to_filename1(const struct dentry *dentry,
58566+ const struct vfsmount *mnt);
58567+char *gr_to_filename2(const struct dentry *dentry,
58568+ const struct vfsmount *mnt);
58569+char *gr_to_filename3(const struct dentry *dentry,
58570+ const struct vfsmount *mnt);
58571+
58572+extern int grsec_enable_harden_ptrace;
58573+extern int grsec_enable_link;
58574+extern int grsec_enable_fifo;
58575+extern int grsec_enable_shm;
58576+extern int grsec_enable_execlog;
58577+extern int grsec_enable_signal;
58578+extern int grsec_enable_audit_ptrace;
58579+extern int grsec_enable_forkfail;
58580+extern int grsec_enable_time;
58581+extern int grsec_enable_rofs;
58582+extern int grsec_enable_chroot_shmat;
58583+extern int grsec_enable_chroot_mount;
58584+extern int grsec_enable_chroot_double;
58585+extern int grsec_enable_chroot_pivot;
58586+extern int grsec_enable_chroot_chdir;
58587+extern int grsec_enable_chroot_chmod;
58588+extern int grsec_enable_chroot_mknod;
58589+extern int grsec_enable_chroot_fchdir;
58590+extern int grsec_enable_chroot_nice;
58591+extern int grsec_enable_chroot_execlog;
58592+extern int grsec_enable_chroot_caps;
58593+extern int grsec_enable_chroot_sysctl;
58594+extern int grsec_enable_chroot_unix;
58595+extern int grsec_enable_tpe;
58596+extern int grsec_tpe_gid;
58597+extern int grsec_enable_tpe_all;
58598+extern int grsec_enable_tpe_invert;
58599+extern int grsec_enable_socket_all;
58600+extern int grsec_socket_all_gid;
58601+extern int grsec_enable_socket_client;
58602+extern int grsec_socket_client_gid;
58603+extern int grsec_enable_socket_server;
58604+extern int grsec_socket_server_gid;
58605+extern int grsec_audit_gid;
58606+extern int grsec_enable_group;
58607+extern int grsec_enable_audit_textrel;
58608+extern int grsec_enable_log_rwxmaps;
58609+extern int grsec_enable_mount;
58610+extern int grsec_enable_chdir;
58611+extern int grsec_resource_logging;
58612+extern int grsec_enable_blackhole;
58613+extern int grsec_lastack_retries;
58614+extern int grsec_enable_brute;
58615+extern int grsec_lock;
58616+
58617+extern spinlock_t grsec_alert_lock;
58618+extern unsigned long grsec_alert_wtime;
58619+extern unsigned long grsec_alert_fyet;
58620+
58621+extern spinlock_t grsec_audit_lock;
58622+
58623+extern rwlock_t grsec_exec_file_lock;
58624+
58625+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58626+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58627+ (tsk)->exec_file->f_vfsmnt) : "/")
58628+
58629+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58630+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58631+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58632+
58633+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58634+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58635+ (tsk)->exec_file->f_vfsmnt) : "/")
58636+
58637+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58638+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58639+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58640+
58641+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58642+
58643+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58644+
58645+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58646+ (task)->pid, (cred)->uid, \
58647+ (cred)->euid, (cred)->gid, (cred)->egid, \
58648+ gr_parent_task_fullpath(task), \
58649+ (task)->real_parent->comm, (task)->real_parent->pid, \
58650+ (pcred)->uid, (pcred)->euid, \
58651+ (pcred)->gid, (pcred)->egid
58652+
58653+#define GR_CHROOT_CAPS {{ \
58654+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58655+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58656+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58657+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58658+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58659+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
58660+
58661+#define security_learn(normal_msg,args...) \
58662+({ \
58663+ read_lock(&grsec_exec_file_lock); \
58664+ gr_add_learn_entry(normal_msg "\n", ## args); \
58665+ read_unlock(&grsec_exec_file_lock); \
58666+})
58667+
58668+enum {
58669+ GR_DO_AUDIT,
58670+ GR_DONT_AUDIT,
58671+ GR_DONT_AUDIT_GOOD
58672+};
58673+
58674+enum {
58675+ GR_TTYSNIFF,
58676+ GR_RBAC,
58677+ GR_RBAC_STR,
58678+ GR_STR_RBAC,
58679+ GR_RBAC_MODE2,
58680+ GR_RBAC_MODE3,
58681+ GR_FILENAME,
58682+ GR_SYSCTL_HIDDEN,
58683+ GR_NOARGS,
58684+ GR_ONE_INT,
58685+ GR_ONE_INT_TWO_STR,
58686+ GR_ONE_STR,
58687+ GR_STR_INT,
58688+ GR_TWO_STR_INT,
58689+ GR_TWO_INT,
58690+ GR_TWO_U64,
58691+ GR_THREE_INT,
58692+ GR_FIVE_INT_TWO_STR,
58693+ GR_TWO_STR,
58694+ GR_THREE_STR,
58695+ GR_FOUR_STR,
58696+ GR_STR_FILENAME,
58697+ GR_FILENAME_STR,
58698+ GR_FILENAME_TWO_INT,
58699+ GR_FILENAME_TWO_INT_STR,
58700+ GR_TEXTREL,
58701+ GR_PTRACE,
58702+ GR_RESOURCE,
58703+ GR_CAP,
58704+ GR_SIG,
58705+ GR_SIG2,
58706+ GR_CRASH1,
58707+ GR_CRASH2,
58708+ GR_PSACCT,
58709+ GR_RWXMAP
58710+};
58711+
58712+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58713+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58714+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58715+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58716+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58717+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58718+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58719+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58720+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58721+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58722+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58723+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58724+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58725+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58726+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58727+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58728+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58729+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58730+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58731+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58732+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58733+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58734+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58735+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58736+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58737+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58738+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58739+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58740+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58741+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58742+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58743+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58744+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58745+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58746+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58747+
58748+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58749+
58750+#endif
58751+
58752+#endif
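All of the gr_log_* wrappers defined above funnel into one variadic dispatcher, gr_log_varargs(), whose argtypes enum (GR_ONE_INT, GR_TWO_STR, ...) tells it how to pull values off the va_list before formatting the alert. A stripped-down userspace sketch of that dispatch pattern; the enum values, names, and output format here are invented and much simpler than the kernel's.

#include <stdarg.h>
#include <stdio.h>

/* a few argument layouts, mirroring the style of the enum in the hunk above */
enum { DEMO_NOARGS, DEMO_ONE_INT, DEMO_TWO_STR, DEMO_STR_INT };

/* dispatcher: argtypes tells us how to consume the va_list */
static void demo_log_varargs(const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	switch (argtypes) {
	case DEMO_NOARGS:
		printf("%s\n", msg);
		break;
	case DEMO_ONE_INT:
		printf("%s %d\n", msg, va_arg(ap, int));
		break;
	case DEMO_TWO_STR: {
		const char *a = va_arg(ap, const char *);
		const char *b = va_arg(ap, const char *);

		printf("%s %s %s\n", msg, a, b);
		break;
	}
	case DEMO_STR_INT: {
		const char *s = va_arg(ap, const char *);

		printf("%s %s %d\n", msg, s, va_arg(ap, int));
		break;
	}
	}
	va_end(ap);
}

/* thin wrappers in the same spirit as gr_log_int() / gr_log_str_str() */
#define demo_log_int(msg, num)        demo_log_varargs(msg, DEMO_ONE_INT, num)
#define demo_log_str_str(msg, s1, s2) demo_log_varargs(msg, DEMO_TWO_STR, s1, s2)

int main(void)
{
	demo_log_int("failed fork with errno", 11);
	demo_log_str_str("mount of", "/dev/sda1", "/mnt");
	return 0;
}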
58753diff -urNp linux-2.6.32.45/include/linux/grmsg.h linux-2.6.32.45/include/linux/grmsg.h
58754--- linux-2.6.32.45/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58755+++ linux-2.6.32.45/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
58756@@ -0,0 +1,108 @@
58757+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58758+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58759+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58760+#define GR_STOPMOD_MSG "denied modification of module state by "
58761+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58762+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58763+#define GR_IOPERM_MSG "denied use of ioperm() by "
58764+#define GR_IOPL_MSG "denied use of iopl() by "
58765+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58766+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58767+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58768+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58769+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58770+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58771+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58772+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58773+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58774+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58775+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58776+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58777+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58778+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58779+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58780+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58781+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58782+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58783+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58784+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58785+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58786+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58787+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58788+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58789+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58790+#define GR_NPROC_MSG "denied overstep of process limit by "
58791+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58792+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58793+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58794+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58795+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58796+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58797+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58798+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58799+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58800+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58801+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58802+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58803+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58804+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58805+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58806+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58807+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58808+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58809+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58810+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58811+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58812+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58813+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58814+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58815+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58816+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58817+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58818+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58819+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58820+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58821+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58822+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58823+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58824+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58825+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58826+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58827+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58828+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58829+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58830+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58831+#define GR_NICE_CHROOT_MSG "denied priority change by "
58832+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58833+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58834+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58835+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58836+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58837+#define GR_TIME_MSG "time set by "
58838+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58839+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58840+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58841+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58842+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58843+#define GR_BIND_MSG "denied bind() by "
58844+#define GR_CONNECT_MSG "denied connect() by "
58845+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58846+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58847+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58848+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58849+#define GR_CAP_ACL_MSG "use of %s denied for "
58850+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58851+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58852+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58853+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58854+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58855+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58856+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58857+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58858+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58859+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58860+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58861+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58862+#define GR_VM86_MSG "denied use of vm86 by "
58863+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58864+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
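Most of the GR_*_MSG strings above deliberately end in "by " and rely on C string-literal concatenation: the logging code appends DEFAULTSECMSG (filled in via DEFAULTSECARGS from grinternal.h) so that every alert carries the full task and parent identity. A tiny sketch of that concatenation idea with simplified stand-in formats; the real kernel strings also use printk extensions such as %pI4 that plain printf does not understand.

#include <stdio.h>

/* simplified stand-ins for DEFAULTSECMSG and one GR_*_MSG prefix */
#define DEMO_SECMSG      "%s[%d] uid:%u"
#define DEMO_SIGNAL_MSG  "signal %d sent to " DEMO_SECMSG " by "

int main(void)
{
	/* the prefix and the identity template fuse into one format string */
	printf(DEMO_SIGNAL_MSG DEMO_SECMSG "\n",
	       9, "sshd", 1234, 0u,          /* target task  */
	       "bash", 999, 1000u);          /* sending task */
	return 0;
}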
58865diff -urNp linux-2.6.32.45/include/linux/grsecurity.h linux-2.6.32.45/include/linux/grsecurity.h
58866--- linux-2.6.32.45/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58867+++ linux-2.6.32.45/include/linux/grsecurity.h 2011-08-11 19:58:57.000000000 -0400
58868@@ -0,0 +1,217 @@
58869+#ifndef GR_SECURITY_H
58870+#define GR_SECURITY_H
58871+#include <linux/fs.h>
58872+#include <linux/fs_struct.h>
58873+#include <linux/binfmts.h>
58874+#include <linux/gracl.h>
58875+#include <linux/compat.h>
58876+
58877+/* notify of brain-dead configs */
58878+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58879+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58880+#endif
58881+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58882+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58883+#endif
58884+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58885+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58886+#endif
58887+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58888+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58889+#endif
58890+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58891+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58892+#endif
58893+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58894+#error "CONFIG_PAX enabled, but no PaX options are enabled."
58895+#endif
58896+
58897+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58898+void gr_handle_brute_check(void);
58899+void gr_handle_kernel_exploit(void);
58900+int gr_process_user_ban(void);
58901+
58902+char gr_roletype_to_char(void);
58903+
58904+int gr_acl_enable_at_secure(void);
58905+
58906+int gr_check_user_change(int real, int effective, int fs);
58907+int gr_check_group_change(int real, int effective, int fs);
58908+
58909+void gr_del_task_from_ip_table(struct task_struct *p);
58910+
58911+int gr_pid_is_chrooted(struct task_struct *p);
58912+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58913+int gr_handle_chroot_nice(void);
58914+int gr_handle_chroot_sysctl(const int op);
58915+int gr_handle_chroot_setpriority(struct task_struct *p,
58916+ const int niceval);
58917+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58918+int gr_handle_chroot_chroot(const struct dentry *dentry,
58919+ const struct vfsmount *mnt);
58920+int gr_handle_chroot_caps(struct path *path);
58921+void gr_handle_chroot_chdir(struct path *path);
58922+int gr_handle_chroot_chmod(const struct dentry *dentry,
58923+ const struct vfsmount *mnt, const int mode);
58924+int gr_handle_chroot_mknod(const struct dentry *dentry,
58925+ const struct vfsmount *mnt, const int mode);
58926+int gr_handle_chroot_mount(const struct dentry *dentry,
58927+ const struct vfsmount *mnt,
58928+ const char *dev_name);
58929+int gr_handle_chroot_pivot(void);
58930+int gr_handle_chroot_unix(const pid_t pid);
58931+
58932+int gr_handle_rawio(const struct inode *inode);
58933+
58934+void gr_handle_ioperm(void);
58935+void gr_handle_iopl(void);
58936+
58937+int gr_tpe_allow(const struct file *file);
58938+
58939+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58940+void gr_clear_chroot_entries(struct task_struct *task);
58941+
58942+void gr_log_forkfail(const int retval);
58943+void gr_log_timechange(void);
58944+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58945+void gr_log_chdir(const struct dentry *dentry,
58946+ const struct vfsmount *mnt);
58947+void gr_log_chroot_exec(const struct dentry *dentry,
58948+ const struct vfsmount *mnt);
58949+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
58950+#ifdef CONFIG_COMPAT
58951+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
58952+#endif
58953+void gr_log_remount(const char *devname, const int retval);
58954+void gr_log_unmount(const char *devname, const int retval);
58955+void gr_log_mount(const char *from, const char *to, const int retval);
58956+void gr_log_textrel(struct vm_area_struct *vma);
58957+void gr_log_rwxmmap(struct file *file);
58958+void gr_log_rwxmprotect(struct file *file);
58959+
58960+int gr_handle_follow_link(const struct inode *parent,
58961+ const struct inode *inode,
58962+ const struct dentry *dentry,
58963+ const struct vfsmount *mnt);
58964+int gr_handle_fifo(const struct dentry *dentry,
58965+ const struct vfsmount *mnt,
58966+ const struct dentry *dir, const int flag,
58967+ const int acc_mode);
58968+int gr_handle_hardlink(const struct dentry *dentry,
58969+ const struct vfsmount *mnt,
58970+ struct inode *inode,
58971+ const int mode, const char *to);
58972+
58973+int gr_is_capable(const int cap);
58974+int gr_is_capable_nolog(const int cap);
58975+void gr_learn_resource(const struct task_struct *task, const int limit,
58976+ const unsigned long wanted, const int gt);
58977+void gr_copy_label(struct task_struct *tsk);
58978+void gr_handle_crash(struct task_struct *task, const int sig);
58979+int gr_handle_signal(const struct task_struct *p, const int sig);
58980+int gr_check_crash_uid(const uid_t uid);
58981+int gr_check_protected_task(const struct task_struct *task);
58982+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58983+int gr_acl_handle_mmap(const struct file *file,
58984+ const unsigned long prot);
58985+int gr_acl_handle_mprotect(const struct file *file,
58986+ const unsigned long prot);
58987+int gr_check_hidden_task(const struct task_struct *tsk);
58988+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
58989+ const struct vfsmount *mnt);
58990+__u32 gr_acl_handle_utime(const struct dentry *dentry,
58991+ const struct vfsmount *mnt);
58992+__u32 gr_acl_handle_access(const struct dentry *dentry,
58993+ const struct vfsmount *mnt, const int fmode);
58994+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
58995+ const struct vfsmount *mnt, mode_t mode);
58996+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
58997+ const struct vfsmount *mnt, mode_t mode);
58998+__u32 gr_acl_handle_chown(const struct dentry *dentry,
58999+ const struct vfsmount *mnt);
59000+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59001+ const struct vfsmount *mnt);
59002+int gr_handle_ptrace(struct task_struct *task, const long request);
59003+int gr_handle_proc_ptrace(struct task_struct *task);
59004+__u32 gr_acl_handle_execve(const struct dentry *dentry,
59005+ const struct vfsmount *mnt);
59006+int gr_check_crash_exec(const struct file *filp);
59007+int gr_acl_is_enabled(void);
59008+void gr_set_kernel_label(struct task_struct *task);
59009+void gr_set_role_label(struct task_struct *task, const uid_t uid,
59010+ const gid_t gid);
59011+int gr_set_proc_label(const struct dentry *dentry,
59012+ const struct vfsmount *mnt,
59013+ const int unsafe_share);
59014+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59015+ const struct vfsmount *mnt);
59016+__u32 gr_acl_handle_open(const struct dentry *dentry,
59017+ const struct vfsmount *mnt, const int fmode);
59018+__u32 gr_acl_handle_creat(const struct dentry *dentry,
59019+ const struct dentry *p_dentry,
59020+ const struct vfsmount *p_mnt, const int fmode,
59021+ const int imode);
59022+void gr_handle_create(const struct dentry *dentry,
59023+ const struct vfsmount *mnt);
59024+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59025+ const struct dentry *parent_dentry,
59026+ const struct vfsmount *parent_mnt,
59027+ const int mode);
59028+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59029+ const struct dentry *parent_dentry,
59030+ const struct vfsmount *parent_mnt);
59031+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59032+ const struct vfsmount *mnt);
59033+void gr_handle_delete(const ino_t ino, const dev_t dev);
59034+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59035+ const struct vfsmount *mnt);
59036+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59037+ const struct dentry *parent_dentry,
59038+ const struct vfsmount *parent_mnt,
59039+ const char *from);
59040+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59041+ const struct dentry *parent_dentry,
59042+ const struct vfsmount *parent_mnt,
59043+ const struct dentry *old_dentry,
59044+ const struct vfsmount *old_mnt, const char *to);
59045+int gr_acl_handle_rename(struct dentry *new_dentry,
59046+ struct dentry *parent_dentry,
59047+ const struct vfsmount *parent_mnt,
59048+ struct dentry *old_dentry,
59049+ struct inode *old_parent_inode,
59050+ struct vfsmount *old_mnt, const char *newname);
59051+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59052+ struct dentry *old_dentry,
59053+ struct dentry *new_dentry,
59054+ struct vfsmount *mnt, const __u8 replace);
59055+__u32 gr_check_link(const struct dentry *new_dentry,
59056+ const struct dentry *parent_dentry,
59057+ const struct vfsmount *parent_mnt,
59058+ const struct dentry *old_dentry,
59059+ const struct vfsmount *old_mnt);
59060+int gr_acl_handle_filldir(const struct file *file, const char *name,
59061+ const unsigned int namelen, const ino_t ino);
59062+
59063+__u32 gr_acl_handle_unix(const struct dentry *dentry,
59064+ const struct vfsmount *mnt);
59065+void gr_acl_handle_exit(void);
59066+void gr_acl_handle_psacct(struct task_struct *task, const long code);
59067+int gr_acl_handle_procpidmem(const struct task_struct *task);
59068+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59069+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59070+void gr_audit_ptrace(struct task_struct *task);
59071+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59072+
59073+#ifdef CONFIG_GRKERNSEC
59074+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59075+void gr_handle_vm86(void);
59076+void gr_handle_mem_readwrite(u64 from, u64 to);
59077+
59078+extern int grsec_enable_dmesg;
59079+extern int grsec_disable_privio;
59080+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59081+extern int grsec_enable_chroot_findtask;
59082+#endif
59083+#endif
59084+
59085+#endif
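The #error block at the top of grsecurity.h above is a compile-time sanity net: contradictory or incomplete Kconfig combinations stop the build before any code is generated. The same preprocessor idiom works in any C project; a minimal sketch with invented feature macros follows.

/* build with e.g. -DFEATURE_FAST -DFEATURE_SMALL to see the check fire */
#include <stdio.h>

#if defined(FEATURE_FAST) && defined(FEATURE_SMALL)
#error "FEATURE_FAST and FEATURE_SMALL cannot both be enabled."
#endif

#if defined(FEATURE_CRYPTO) && !defined(FEATURE_RNG)
#error "FEATURE_CRYPTO enabled, but FEATURE_RNG is disabled."
#endif

int main(void)
{
	puts("configuration is consistent");
	return 0;
}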
59086diff -urNp linux-2.6.32.45/include/linux/hdpu_features.h linux-2.6.32.45/include/linux/hdpu_features.h
59087--- linux-2.6.32.45/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
59088+++ linux-2.6.32.45/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
59089@@ -3,7 +3,7 @@
59090 struct cpustate_t {
59091 spinlock_t lock;
59092 int excl;
59093- int open_count;
59094+ atomic_t open_count;
59095 unsigned char cached_val;
59096 int inited;
59097 unsigned long *set_addr;
59098diff -urNp linux-2.6.32.45/include/linux/highmem.h linux-2.6.32.45/include/linux/highmem.h
59099--- linux-2.6.32.45/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
59100+++ linux-2.6.32.45/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
59101@@ -137,6 +137,18 @@ static inline void clear_highpage(struct
59102 kunmap_atomic(kaddr, KM_USER0);
59103 }
59104
59105+static inline void sanitize_highpage(struct page *page)
59106+{
59107+ void *kaddr;
59108+ unsigned long flags;
59109+
59110+ local_irq_save(flags);
59111+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
59112+ clear_page(kaddr);
59113+ kunmap_atomic(kaddr, KM_CLEARPAGE);
59114+ local_irq_restore(flags);
59115+}
59116+
59117 static inline void zero_user_segments(struct page *page,
59118 unsigned start1, unsigned end1,
59119 unsigned start2, unsigned end2)
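sanitize_highpage() added above maps a (possibly high-memory) page with interrupts disabled and zeroes it; elsewhere in the patch it appears to back the page-sanitization support that scrubs page contents at free time so stale data cannot be disclosed later. The kernel mapping machinery has no userspace equivalent, but the scrub-on-free intent itself is easy to sketch; all names below are invented.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096

/* write through a volatile pointer so the scrub cannot be optimized away */
static void wipe(void *buf, size_t len)
{
	volatile unsigned char *p = buf;

	while (len--)
		*p++ = 0;
}

/* illustrative scrub-on-free, same intent as clearing a page before it is
 * handed back to the allocator */
static void sanitize_free(void *buf, size_t len)
{
	wipe(buf, len);
	free(buf);
}

int main(void)
{
	char *page = malloc(DEMO_PAGE_SIZE);

	if (!page)
		return 1;
	strcpy(page, "stale secret data");
	printf("in use: %s\n", page);
	sanitize_free(page, DEMO_PAGE_SIZE);
	return 0;
}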
59120diff -urNp linux-2.6.32.45/include/linux/i2o.h linux-2.6.32.45/include/linux/i2o.h
59121--- linux-2.6.32.45/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
59122+++ linux-2.6.32.45/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
59123@@ -564,7 +564,7 @@ struct i2o_controller {
59124 struct i2o_device *exec; /* Executive */
59125 #if BITS_PER_LONG == 64
59126 spinlock_t context_list_lock; /* lock for context_list */
59127- atomic_t context_list_counter; /* needed for unique contexts */
59128+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59129 struct list_head context_list; /* list of context id's
59130 and pointers */
59131 #endif
59132diff -urNp linux-2.6.32.45/include/linux/init_task.h linux-2.6.32.45/include/linux/init_task.h
59133--- linux-2.6.32.45/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
59134+++ linux-2.6.32.45/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
59135@@ -83,6 +83,12 @@ extern struct group_info init_groups;
59136 #define INIT_IDS
59137 #endif
59138
59139+#ifdef CONFIG_X86
59140+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59141+#else
59142+#define INIT_TASK_THREAD_INFO
59143+#endif
59144+
59145 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
59146 /*
59147 * Because of the reduced scope of CAP_SETPCAP when filesystem
59148@@ -156,6 +162,7 @@ extern struct cred init_cred;
59149 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
59150 .comm = "swapper", \
59151 .thread = INIT_THREAD, \
59152+ INIT_TASK_THREAD_INFO \
59153 .fs = &init_fs, \
59154 .files = &init_files, \
59155 .signal = &init_signals, \
59156diff -urNp linux-2.6.32.45/include/linux/intel-iommu.h linux-2.6.32.45/include/linux/intel-iommu.h
59157--- linux-2.6.32.45/include/linux/intel-iommu.h 2011-03-27 14:31:47.000000000 -0400
59158+++ linux-2.6.32.45/include/linux/intel-iommu.h 2011-08-05 20:33:55.000000000 -0400
59159@@ -296,7 +296,7 @@ struct iommu_flush {
59160 u8 fm, u64 type);
59161 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59162 unsigned int size_order, u64 type);
59163-};
59164+} __no_const;
59165
59166 enum {
59167 SR_DMAR_FECTL_REG,
59168diff -urNp linux-2.6.32.45/include/linux/interrupt.h linux-2.6.32.45/include/linux/interrupt.h
59169--- linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
59170+++ linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
59171@@ -363,7 +363,7 @@ enum
59172 /* map softirq index to softirq name. update 'softirq_to_name' in
59173 * kernel/softirq.c when adding a new softirq.
59174 */
59175-extern char *softirq_to_name[NR_SOFTIRQS];
59176+extern const char * const softirq_to_name[NR_SOFTIRQS];
59177
59178 /* softirq mask and active fields moved to irq_cpustat_t in
59179 * asm/hardirq.h to get better cache usage. KAO
59180@@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
59181
59182 struct softirq_action
59183 {
59184- void (*action)(struct softirq_action *);
59185+ void (*action)(void);
59186 };
59187
59188 asmlinkage void do_softirq(void);
59189 asmlinkage void __do_softirq(void);
59190-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59191+extern void open_softirq(int nr, void (*action)(void));
59192 extern void softirq_init(void);
59193 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
59194 extern void raise_softirq_irqoff(unsigned int nr);
59195diff -urNp linux-2.6.32.45/include/linux/irq.h linux-2.6.32.45/include/linux/irq.h
59196--- linux-2.6.32.45/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
59197+++ linux-2.6.32.45/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
59198@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
59199 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
59200 bool boot)
59201 {
59202+#ifdef CONFIG_CPUMASK_OFFSTACK
59203 gfp_t gfp = GFP_ATOMIC;
59204
59205 if (boot)
59206 gfp = GFP_NOWAIT;
59207
59208-#ifdef CONFIG_CPUMASK_OFFSTACK
59209 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
59210 return false;
59211
59212diff -urNp linux-2.6.32.45/include/linux/kallsyms.h linux-2.6.32.45/include/linux/kallsyms.h
59213--- linux-2.6.32.45/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
59214+++ linux-2.6.32.45/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
59215@@ -15,7 +15,8 @@
59216
59217 struct module;
59218
59219-#ifdef CONFIG_KALLSYMS
59220+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59221+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59222 /* Lookup the address for a symbol. Returns 0 if not found. */
59223 unsigned long kallsyms_lookup_name(const char *name);
59224
59225@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
59226 /* Stupid that this does nothing, but I didn't create this mess. */
59227 #define __print_symbol(fmt, addr)
59228 #endif /*CONFIG_KALLSYMS*/
59229+#else /* when included by kallsyms.c, vsnprintf.c, or
59230+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59231+extern void __print_symbol(const char *fmt, unsigned long address);
59232+extern int sprint_symbol(char *buffer, unsigned long address);
59233+const char *kallsyms_lookup(unsigned long addr,
59234+ unsigned long *symbolsize,
59235+ unsigned long *offset,
59236+ char **modname, char *namebuf);
59237+#endif
59238
59239 /* This macro allows us to keep printk typechecking */
59240 static void __check_printsym_format(const char *fmt, ...)
59241diff -urNp linux-2.6.32.45/include/linux/kgdb.h linux-2.6.32.45/include/linux/kgdb.h
59242--- linux-2.6.32.45/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
59243+++ linux-2.6.32.45/include/linux/kgdb.h 2011-08-05 20:33:55.000000000 -0400
59244@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
59245
59246 extern int kgdb_connected;
59247
59248-extern atomic_t kgdb_setting_breakpoint;
59249-extern atomic_t kgdb_cpu_doing_single_step;
59250+extern atomic_unchecked_t kgdb_setting_breakpoint;
59251+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59252
59253 extern struct task_struct *kgdb_usethread;
59254 extern struct task_struct *kgdb_contthread;
59255@@ -226,8 +226,8 @@ extern int kgdb_arch_remove_breakpoint(u
59256 * hardware debug registers.
59257 */
59258 struct kgdb_arch {
59259- unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59260- unsigned long flags;
59261+ const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59262+ const unsigned long flags;
59263
59264 int (*set_breakpoint)(unsigned long, char *);
59265 int (*remove_breakpoint)(unsigned long, char *);
59266@@ -251,20 +251,20 @@ struct kgdb_arch {
59267 */
59268 struct kgdb_io {
59269 const char *name;
59270- int (*read_char) (void);
59271- void (*write_char) (u8);
59272- void (*flush) (void);
59273- int (*init) (void);
59274- void (*pre_exception) (void);
59275- void (*post_exception) (void);
59276+ int (* const read_char) (void);
59277+ void (* const write_char) (u8);
59278+ void (* const flush) (void);
59279+ int (* const init) (void);
59280+ void (* const pre_exception) (void);
59281+ void (* const post_exception) (void);
59282 };
59283
59284-extern struct kgdb_arch arch_kgdb_ops;
59285+extern const struct kgdb_arch arch_kgdb_ops;
59286
59287 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
59288
59289-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
59290-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
59291+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
59292+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
59293
59294 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
59295 extern int kgdb_mem2hex(char *mem, char *buf, int count);
59296diff -urNp linux-2.6.32.45/include/linux/kmod.h linux-2.6.32.45/include/linux/kmod.h
59297--- linux-2.6.32.45/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
59298+++ linux-2.6.32.45/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
59299@@ -31,6 +31,8 @@
59300 * usually useless though. */
59301 extern int __request_module(bool wait, const char *name, ...) \
59302 __attribute__((format(printf, 2, 3)));
59303+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59304+ __attribute__((format(printf, 3, 4)));
59305 #define request_module(mod...) __request_module(true, mod)
59306 #define request_module_nowait(mod...) __request_module(false, mod)
59307 #define try_then_request_module(x, mod...) \
59308diff -urNp linux-2.6.32.45/include/linux/kobject.h linux-2.6.32.45/include/linux/kobject.h
59309--- linux-2.6.32.45/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
59310+++ linux-2.6.32.45/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
59311@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
59312
59313 struct kobj_type {
59314 void (*release)(struct kobject *kobj);
59315- struct sysfs_ops *sysfs_ops;
59316+ const struct sysfs_ops *sysfs_ops;
59317 struct attribute **default_attrs;
59318 };
59319
59320@@ -118,9 +118,9 @@ struct kobj_uevent_env {
59321 };
59322
59323 struct kset_uevent_ops {
59324- int (*filter)(struct kset *kset, struct kobject *kobj);
59325- const char *(*name)(struct kset *kset, struct kobject *kobj);
59326- int (*uevent)(struct kset *kset, struct kobject *kobj,
59327+ int (* const filter)(struct kset *kset, struct kobject *kobj);
59328+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
59329+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
59330 struct kobj_uevent_env *env);
59331 };
59332
59333@@ -132,7 +132,7 @@ struct kobj_attribute {
59334 const char *buf, size_t count);
59335 };
59336
59337-extern struct sysfs_ops kobj_sysfs_ops;
59338+extern const struct sysfs_ops kobj_sysfs_ops;
59339
59340 /**
59341 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
59342@@ -155,14 +155,14 @@ struct kset {
59343 struct list_head list;
59344 spinlock_t list_lock;
59345 struct kobject kobj;
59346- struct kset_uevent_ops *uevent_ops;
59347+ const struct kset_uevent_ops *uevent_ops;
59348 };
59349
59350 extern void kset_init(struct kset *kset);
59351 extern int __must_check kset_register(struct kset *kset);
59352 extern void kset_unregister(struct kset *kset);
59353 extern struct kset * __must_check kset_create_and_add(const char *name,
59354- struct kset_uevent_ops *u,
59355+ const struct kset_uevent_ops *u,
59356 struct kobject *parent_kobj);
59357
59358 static inline struct kset *to_kset(struct kobject *kobj)
59359diff -urNp linux-2.6.32.45/include/linux/kvm_host.h linux-2.6.32.45/include/linux/kvm_host.h
59360--- linux-2.6.32.45/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
59361+++ linux-2.6.32.45/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
59362@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59363 void vcpu_load(struct kvm_vcpu *vcpu);
59364 void vcpu_put(struct kvm_vcpu *vcpu);
59365
59366-int kvm_init(void *opaque, unsigned int vcpu_size,
59367+int kvm_init(const void *opaque, unsigned int vcpu_size,
59368 struct module *module);
59369 void kvm_exit(void);
59370
59371@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59372 struct kvm_guest_debug *dbg);
59373 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59374
59375-int kvm_arch_init(void *opaque);
59376+int kvm_arch_init(const void *opaque);
59377 void kvm_arch_exit(void);
59378
59379 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59380diff -urNp linux-2.6.32.45/include/linux/libata.h linux-2.6.32.45/include/linux/libata.h
59381--- linux-2.6.32.45/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
59382+++ linux-2.6.32.45/include/linux/libata.h 2011-08-05 20:33:55.000000000 -0400
59383@@ -525,11 +525,11 @@ struct ata_ioports {
59384
59385 struct ata_host {
59386 spinlock_t lock;
59387- struct device *dev;
59388+ struct device *dev;
59389 void __iomem * const *iomap;
59390 unsigned int n_ports;
59391 void *private_data;
59392- struct ata_port_operations *ops;
59393+ const struct ata_port_operations *ops;
59394 unsigned long flags;
59395 #ifdef CONFIG_ATA_ACPI
59396 acpi_handle acpi_handle;
59397@@ -710,7 +710,7 @@ struct ata_link {
59398
59399 struct ata_port {
59400 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
59401- struct ata_port_operations *ops;
59402+ const struct ata_port_operations *ops;
59403 spinlock_t *lock;
59404 /* Flags owned by the EH context. Only EH should touch these once the
59405 port is active */
59406@@ -883,7 +883,7 @@ struct ata_port_operations {
59407 * ->inherits must be the last field and all the preceding
59408 * fields must be pointers.
59409 */
59410- const struct ata_port_operations *inherits;
59411+ const struct ata_port_operations * const inherits;
59412 };
59413
59414 struct ata_port_info {
59415@@ -892,7 +892,7 @@ struct ata_port_info {
59416 unsigned long pio_mask;
59417 unsigned long mwdma_mask;
59418 unsigned long udma_mask;
59419- struct ata_port_operations *port_ops;
59420+ const struct ata_port_operations *port_ops;
59421 void *private_data;
59422 };
59423
59424@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
59425 extern const unsigned long sata_deb_timing_hotplug[];
59426 extern const unsigned long sata_deb_timing_long[];
59427
59428-extern struct ata_port_operations ata_dummy_port_ops;
59429+extern const struct ata_port_operations ata_dummy_port_ops;
59430 extern const struct ata_port_info ata_dummy_port_info;
59431
59432 static inline const unsigned long *
59433@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
59434 struct scsi_host_template *sht);
59435 extern void ata_host_detach(struct ata_host *host);
59436 extern void ata_host_init(struct ata_host *, struct device *,
59437- unsigned long, struct ata_port_operations *);
59438+ unsigned long, const struct ata_port_operations *);
59439 extern int ata_scsi_detect(struct scsi_host_template *sht);
59440 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
59441 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
59442diff -urNp linux-2.6.32.45/include/linux/lockd/bind.h linux-2.6.32.45/include/linux/lockd/bind.h
59443--- linux-2.6.32.45/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
59444+++ linux-2.6.32.45/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
59445@@ -23,13 +23,13 @@ struct svc_rqst;
59446 * This is the set of functions for lockd->nfsd communication
59447 */
59448 struct nlmsvc_binding {
59449- __be32 (*fopen)(struct svc_rqst *,
59450+ __be32 (* const fopen)(struct svc_rqst *,
59451 struct nfs_fh *,
59452 struct file **);
59453- void (*fclose)(struct file *);
59454+ void (* const fclose)(struct file *);
59455 };
59456
59457-extern struct nlmsvc_binding * nlmsvc_ops;
59458+extern const struct nlmsvc_binding * nlmsvc_ops;
59459
59460 /*
59461 * Similar to nfs_client_initdata, but without the NFS-specific
59462diff -urNp linux-2.6.32.45/include/linux/mca.h linux-2.6.32.45/include/linux/mca.h
59463--- linux-2.6.32.45/include/linux/mca.h 2011-03-27 14:31:47.000000000 -0400
59464+++ linux-2.6.32.45/include/linux/mca.h 2011-08-05 20:33:55.000000000 -0400
59465@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59466 int region);
59467 void * (*mca_transform_memory)(struct mca_device *,
59468 void *memory);
59469-};
59470+} __no_const;
59471
59472 struct mca_bus {
59473 u64 default_dma_mask;
59474diff -urNp linux-2.6.32.45/include/linux/memory.h linux-2.6.32.45/include/linux/memory.h
59475--- linux-2.6.32.45/include/linux/memory.h 2011-03-27 14:31:47.000000000 -0400
59476+++ linux-2.6.32.45/include/linux/memory.h 2011-08-05 20:33:55.000000000 -0400
59477@@ -108,7 +108,7 @@ struct memory_accessor {
59478 size_t count);
59479 ssize_t (*write)(struct memory_accessor *, const char *buf,
59480 off_t offset, size_t count);
59481-};
59482+} __no_const;
59483
59484 /*
59485 * Kernel text modification mutex, used for code patching. Users of this lock
59486diff -urNp linux-2.6.32.45/include/linux/mm.h linux-2.6.32.45/include/linux/mm.h
59487--- linux-2.6.32.45/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
59488+++ linux-2.6.32.45/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
59489@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
59490
59491 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59492 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59493+
59494+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59495+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59496+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59497+#else
59498 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59499+#endif
59500+
59501 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59502 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59503
59504@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
59505 int set_page_dirty_lock(struct page *page);
59506 int clear_page_dirty_for_io(struct page *page);
59507
59508-/* Is the vma a continuation of the stack vma above it? */
59509-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
59510-{
59511- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59512-}
59513-
59514 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59515 unsigned long old_addr, struct vm_area_struct *new_vma,
59516 unsigned long new_addr, unsigned long len);
59517@@ -890,6 +891,8 @@ struct shrinker {
59518 extern void register_shrinker(struct shrinker *);
59519 extern void unregister_shrinker(struct shrinker *);
59520
59521+pgprot_t vm_get_page_prot(unsigned long vm_flags);
59522+
59523 int vma_wants_writenotify(struct vm_area_struct *vma);
59524
59525 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
59526@@ -1162,6 +1165,7 @@ out:
59527 }
59528
59529 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59530+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59531
59532 extern unsigned long do_brk(unsigned long, unsigned long);
59533
59534@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
59535 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59536 struct vm_area_struct **pprev);
59537
59538+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59539+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59540+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59541+
59542 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59543 NULL if none. Assume start_addr < end_addr. */
59544 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59545@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
59546 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59547 }
59548
59549-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59550 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59551 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59552 unsigned long pfn, unsigned long size, pgprot_t);
59553@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
59554 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
59555 extern int sysctl_memory_failure_early_kill;
59556 extern int sysctl_memory_failure_recovery;
59557-extern atomic_long_t mce_bad_pages;
59558+extern atomic_long_unchecked_t mce_bad_pages;
59559+
59560+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59561+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59562+#else
59563+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59564+#endif
59565
59566 #endif /* __KERNEL__ */
59567 #endif /* _LINUX_MM_H */
59568diff -urNp linux-2.6.32.45/include/linux/mm_types.h linux-2.6.32.45/include/linux/mm_types.h
59569--- linux-2.6.32.45/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
59570+++ linux-2.6.32.45/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
59571@@ -186,6 +186,8 @@ struct vm_area_struct {
59572 #ifdef CONFIG_NUMA
59573 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59574 #endif
59575+
59576+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59577 };
59578
59579 struct core_thread {
59580@@ -287,6 +289,24 @@ struct mm_struct {
59581 #ifdef CONFIG_MMU_NOTIFIER
59582 struct mmu_notifier_mm *mmu_notifier_mm;
59583 #endif
59584+
59585+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59586+ unsigned long pax_flags;
59587+#endif
59588+
59589+#ifdef CONFIG_PAX_DLRESOLVE
59590+ unsigned long call_dl_resolve;
59591+#endif
59592+
59593+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59594+ unsigned long call_syscall;
59595+#endif
59596+
59597+#ifdef CONFIG_PAX_ASLR
59598+ unsigned long delta_mmap; /* randomized offset */
59599+ unsigned long delta_stack; /* randomized offset */
59600+#endif
59601+
59602 };
59603
59604 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
59605diff -urNp linux-2.6.32.45/include/linux/mmu_notifier.h linux-2.6.32.45/include/linux/mmu_notifier.h
59606--- linux-2.6.32.45/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
59607+++ linux-2.6.32.45/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
59608@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
59609 */
59610 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59611 ({ \
59612- pte_t __pte; \
59613+ pte_t ___pte; \
59614 struct vm_area_struct *___vma = __vma; \
59615 unsigned long ___address = __address; \
59616- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59617+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59618 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59619- __pte; \
59620+ ___pte; \
59621 })
59622
59623 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
59624diff -urNp linux-2.6.32.45/include/linux/mmzone.h linux-2.6.32.45/include/linux/mmzone.h
59625--- linux-2.6.32.45/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
59626+++ linux-2.6.32.45/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
59627@@ -350,7 +350,7 @@ struct zone {
59628 unsigned long flags; /* zone flags, see below */
59629
59630 /* Zone statistics */
59631- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59632+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59633
59634 /*
59635 * prev_priority holds the scanning priority for this zone. It is
59636diff -urNp linux-2.6.32.45/include/linux/mod_devicetable.h linux-2.6.32.45/include/linux/mod_devicetable.h
59637--- linux-2.6.32.45/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
59638+++ linux-2.6.32.45/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
59639@@ -12,7 +12,7 @@
59640 typedef unsigned long kernel_ulong_t;
59641 #endif
59642
59643-#define PCI_ANY_ID (~0)
59644+#define PCI_ANY_ID ((__u16)~0)
59645
59646 struct pci_device_id {
59647 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59648@@ -131,7 +131,7 @@ struct usb_device_id {
59649 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59650 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59651
59652-#define HID_ANY_ID (~0)
59653+#define HID_ANY_ID (~0U)
59654
59655 struct hid_device_id {
59656 __u16 bus;
59657diff -urNp linux-2.6.32.45/include/linux/module.h linux-2.6.32.45/include/linux/module.h
59658--- linux-2.6.32.45/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
59659+++ linux-2.6.32.45/include/linux/module.h 2011-08-05 20:33:55.000000000 -0400
59660@@ -16,6 +16,7 @@
59661 #include <linux/kobject.h>
59662 #include <linux/moduleparam.h>
59663 #include <linux/tracepoint.h>
59664+#include <linux/fs.h>
59665
59666 #include <asm/local.h>
59667 #include <asm/module.h>
59668@@ -287,16 +288,16 @@ struct module
59669 int (*init)(void);
59670
59671 /* If this is non-NULL, vfree after init() returns */
59672- void *module_init;
59673+ void *module_init_rx, *module_init_rw;
59674
59675 /* Here is the actual code + data, vfree'd on unload. */
59676- void *module_core;
59677+ void *module_core_rx, *module_core_rw;
59678
59679 /* Here are the sizes of the init and core sections */
59680- unsigned int init_size, core_size;
59681+ unsigned int init_size_rw, core_size_rw;
59682
59683 /* The size of the executable code in each section. */
59684- unsigned int init_text_size, core_text_size;
59685+ unsigned int init_size_rx, core_size_rx;
59686
59687 /* Arch-specific module values */
59688 struct mod_arch_specific arch;
59689@@ -345,6 +346,10 @@ struct module
59690 #ifdef CONFIG_EVENT_TRACING
59691 struct ftrace_event_call *trace_events;
59692 unsigned int num_trace_events;
59693+ struct file_operations trace_id;
59694+ struct file_operations trace_enable;
59695+ struct file_operations trace_format;
59696+ struct file_operations trace_filter;
59697 #endif
59698 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59699 unsigned long *ftrace_callsites;
59700@@ -393,16 +398,46 @@ struct module *__module_address(unsigned
59701 bool is_module_address(unsigned long addr);
59702 bool is_module_text_address(unsigned long addr);
59703
59704+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59705+{
59706+
59707+#ifdef CONFIG_PAX_KERNEXEC
59708+ if (ktla_ktva(addr) >= (unsigned long)start &&
59709+ ktla_ktva(addr) < (unsigned long)start + size)
59710+ return 1;
59711+#endif
59712+
59713+ return ((void *)addr >= start && (void *)addr < start + size);
59714+}
59715+
59716+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59717+{
59718+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59719+}
59720+
59721+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59722+{
59723+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59724+}
59725+
59726+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59727+{
59728+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59729+}
59730+
59731+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59732+{
59733+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59734+}
59735+
59736 static inline int within_module_core(unsigned long addr, struct module *mod)
59737 {
59738- return (unsigned long)mod->module_core <= addr &&
59739- addr < (unsigned long)mod->module_core + mod->core_size;
59740+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59741 }
59742
59743 static inline int within_module_init(unsigned long addr, struct module *mod)
59744 {
59745- return (unsigned long)mod->module_init <= addr &&
59746- addr < (unsigned long)mod->module_init + mod->init_size;
59747+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59748 }
59749
59750 /* Search for module by name: must hold module_mutex. */
59751diff -urNp linux-2.6.32.45/include/linux/moduleloader.h linux-2.6.32.45/include/linux/moduleloader.h
59752--- linux-2.6.32.45/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
59753+++ linux-2.6.32.45/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
59754@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59755 sections. Returns NULL on failure. */
59756 void *module_alloc(unsigned long size);
59757
59758+#ifdef CONFIG_PAX_KERNEXEC
59759+void *module_alloc_exec(unsigned long size);
59760+#else
59761+#define module_alloc_exec(x) module_alloc(x)
59762+#endif
59763+
59764 /* Free memory returned from module_alloc. */
59765 void module_free(struct module *mod, void *module_region);
59766
59767+#ifdef CONFIG_PAX_KERNEXEC
59768+void module_free_exec(struct module *mod, void *module_region);
59769+#else
59770+#define module_free_exec(x, y) module_free((x), (y))
59771+#endif
59772+
59773 /* Apply the given relocation to the (simplified) ELF. Return -error
59774 or 0. */
59775 int apply_relocate(Elf_Shdr *sechdrs,
59776diff -urNp linux-2.6.32.45/include/linux/moduleparam.h linux-2.6.32.45/include/linux/moduleparam.h
59777--- linux-2.6.32.45/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
59778+++ linux-2.6.32.45/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
59779@@ -132,7 +132,7 @@ struct kparam_array
59780
59781 /* Actually copy string: maxlen param is usually sizeof(string). */
59782 #define module_param_string(name, string, len, perm) \
59783- static const struct kparam_string __param_string_##name \
59784+ static const struct kparam_string __param_string_##name __used \
59785 = { len, string }; \
59786 __module_param_call(MODULE_PARAM_PREFIX, name, \
59787 param_set_copystring, param_get_string, \
59788@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
59789
59790 /* Comma-separated array: *nump is set to number they actually specified. */
59791 #define module_param_array_named(name, array, type, nump, perm) \
59792- static const struct kparam_array __param_arr_##name \
59793+ static const struct kparam_array __param_arr_##name __used \
59794 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
59795 sizeof(array[0]), array }; \
59796 __module_param_call(MODULE_PARAM_PREFIX, name, \
59797diff -urNp linux-2.6.32.45/include/linux/mutex.h linux-2.6.32.45/include/linux/mutex.h
59798--- linux-2.6.32.45/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
59799+++ linux-2.6.32.45/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
59800@@ -51,7 +51,7 @@ struct mutex {
59801 spinlock_t wait_lock;
59802 struct list_head wait_list;
59803 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
59804- struct thread_info *owner;
59805+ struct task_struct *owner;
59806 #endif
59807 #ifdef CONFIG_DEBUG_MUTEXES
59808 const char *name;
59809diff -urNp linux-2.6.32.45/include/linux/namei.h linux-2.6.32.45/include/linux/namei.h
59810--- linux-2.6.32.45/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
59811+++ linux-2.6.32.45/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
59812@@ -22,7 +22,7 @@ struct nameidata {
59813 unsigned int flags;
59814 int last_type;
59815 unsigned depth;
59816- char *saved_names[MAX_NESTED_LINKS + 1];
59817+ const char *saved_names[MAX_NESTED_LINKS + 1];
59818
59819 /* Intent data */
59820 union {
59821@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
59822 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59823 extern void unlock_rename(struct dentry *, struct dentry *);
59824
59825-static inline void nd_set_link(struct nameidata *nd, char *path)
59826+static inline void nd_set_link(struct nameidata *nd, const char *path)
59827 {
59828 nd->saved_names[nd->depth] = path;
59829 }
59830
59831-static inline char *nd_get_link(struct nameidata *nd)
59832+static inline const char *nd_get_link(const struct nameidata *nd)
59833 {
59834 return nd->saved_names[nd->depth];
59835 }
59836diff -urNp linux-2.6.32.45/include/linux/netfilter/xt_gradm.h linux-2.6.32.45/include/linux/netfilter/xt_gradm.h
59837--- linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59838+++ linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
59839@@ -0,0 +1,9 @@
59840+#ifndef _LINUX_NETFILTER_XT_GRADM_H
59841+#define _LINUX_NETFILTER_XT_GRADM_H 1
59842+
59843+struct xt_gradm_mtinfo {
59844+ __u16 flags;
59845+ __u16 invflags;
59846+};
59847+
59848+#endif
59849diff -urNp linux-2.6.32.45/include/linux/nodemask.h linux-2.6.32.45/include/linux/nodemask.h
59850--- linux-2.6.32.45/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
59851+++ linux-2.6.32.45/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
59852@@ -464,11 +464,11 @@ static inline int num_node_state(enum no
59853
59854 #define any_online_node(mask) \
59855 ({ \
59856- int node; \
59857- for_each_node_mask(node, (mask)) \
59858- if (node_online(node)) \
59859+ int __node; \
59860+ for_each_node_mask(__node, (mask)) \
59861+ if (node_online(__node)) \
59862 break; \
59863- node; \
59864+ __node; \
59865 })
59866
59867 #define num_online_nodes() num_node_state(N_ONLINE)
59868diff -urNp linux-2.6.32.45/include/linux/oprofile.h linux-2.6.32.45/include/linux/oprofile.h
59869--- linux-2.6.32.45/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
59870+++ linux-2.6.32.45/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
59871@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
59872 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59873 char const * name, ulong * val);
59874
59875-/** Create a file for read-only access to an atomic_t. */
59876+/** Create a file for read-only access to an atomic_unchecked_t. */
59877 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59878- char const * name, atomic_t * val);
59879+ char const * name, atomic_unchecked_t * val);
59880
59881 /** create a directory */
59882 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59883diff -urNp linux-2.6.32.45/include/linux/perf_event.h linux-2.6.32.45/include/linux/perf_event.h
59884--- linux-2.6.32.45/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
59885+++ linux-2.6.32.45/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
59886@@ -476,7 +476,7 @@ struct hw_perf_event {
59887 struct hrtimer hrtimer;
59888 };
59889 };
59890- atomic64_t prev_count;
59891+ atomic64_unchecked_t prev_count;
59892 u64 sample_period;
59893 u64 last_period;
59894 atomic64_t period_left;
59895@@ -557,7 +557,7 @@ struct perf_event {
59896 const struct pmu *pmu;
59897
59898 enum perf_event_active_state state;
59899- atomic64_t count;
59900+ atomic64_unchecked_t count;
59901
59902 /*
59903 * These are the total time in nanoseconds that the event
59904@@ -595,8 +595,8 @@ struct perf_event {
59905 * These accumulate total time (in nanoseconds) that children
59906 * events have been enabled and running, respectively.
59907 */
59908- atomic64_t child_total_time_enabled;
59909- atomic64_t child_total_time_running;
59910+ atomic64_unchecked_t child_total_time_enabled;
59911+ atomic64_unchecked_t child_total_time_running;
59912
59913 /*
59914 * Protect attach/detach and child_list:
59915diff -urNp linux-2.6.32.45/include/linux/pipe_fs_i.h linux-2.6.32.45/include/linux/pipe_fs_i.h
59916--- linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
59917+++ linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
59918@@ -46,9 +46,9 @@ struct pipe_inode_info {
59919 wait_queue_head_t wait;
59920 unsigned int nrbufs, curbuf;
59921 struct page *tmp_page;
59922- unsigned int readers;
59923- unsigned int writers;
59924- unsigned int waiting_writers;
59925+ atomic_t readers;
59926+ atomic_t writers;
59927+ atomic_t waiting_writers;
59928 unsigned int r_counter;
59929 unsigned int w_counter;
59930 struct fasync_struct *fasync_readers;
59931diff -urNp linux-2.6.32.45/include/linux/poison.h linux-2.6.32.45/include/linux/poison.h
59932--- linux-2.6.32.45/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
59933+++ linux-2.6.32.45/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
59934@@ -19,8 +19,8 @@
59935 * under normal circumstances, used to verify that nobody uses
59936 * non-initialized list entries.
59937 */
59938-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59939-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59940+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59941+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59942
59943 /********** include/linux/timer.h **********/
59944 /*
59945diff -urNp linux-2.6.32.45/include/linux/posix-timers.h linux-2.6.32.45/include/linux/posix-timers.h
59946--- linux-2.6.32.45/include/linux/posix-timers.h 2011-03-27 14:31:47.000000000 -0400
59947+++ linux-2.6.32.45/include/linux/posix-timers.h 2011-08-05 20:33:55.000000000 -0400
59948@@ -67,7 +67,7 @@ struct k_itimer {
59949 };
59950
59951 struct k_clock {
59952- int res; /* in nanoseconds */
59953+ const int res; /* in nanoseconds */
59954 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
59955 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
59956 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
59957diff -urNp linux-2.6.32.45/include/linux/preempt.h linux-2.6.32.45/include/linux/preempt.h
59958--- linux-2.6.32.45/include/linux/preempt.h 2011-03-27 14:31:47.000000000 -0400
59959+++ linux-2.6.32.45/include/linux/preempt.h 2011-08-05 20:33:55.000000000 -0400
59960@@ -110,7 +110,7 @@ struct preempt_ops {
59961 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59962 void (*sched_out)(struct preempt_notifier *notifier,
59963 struct task_struct *next);
59964-};
59965+} __no_const;
59966
59967 /**
59968 * preempt_notifier - key for installing preemption notifiers
59969diff -urNp linux-2.6.32.45/include/linux/proc_fs.h linux-2.6.32.45/include/linux/proc_fs.h
59970--- linux-2.6.32.45/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
59971+++ linux-2.6.32.45/include/linux/proc_fs.h 2011-08-05 20:33:55.000000000 -0400
59972@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
59973 return proc_create_data(name, mode, parent, proc_fops, NULL);
59974 }
59975
59976+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
59977+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
59978+{
59979+#ifdef CONFIG_GRKERNSEC_PROC_USER
59980+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
59981+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59982+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
59983+#else
59984+ return proc_create_data(name, mode, parent, proc_fops, NULL);
59985+#endif
59986+}
59987+
59988+
59989 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
59990 mode_t mode, struct proc_dir_entry *base,
59991 read_proc_t *read_proc, void * data)
59992@@ -256,7 +269,7 @@ union proc_op {
59993 int (*proc_show)(struct seq_file *m,
59994 struct pid_namespace *ns, struct pid *pid,
59995 struct task_struct *task);
59996-};
59997+} __no_const;
59998
59999 struct ctl_table_header;
60000 struct ctl_table;
60001diff -urNp linux-2.6.32.45/include/linux/ptrace.h linux-2.6.32.45/include/linux/ptrace.h
60002--- linux-2.6.32.45/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
60003+++ linux-2.6.32.45/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
60004@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
60005 extern void exit_ptrace(struct task_struct *tracer);
60006 #define PTRACE_MODE_READ 1
60007 #define PTRACE_MODE_ATTACH 2
60008-/* Returns 0 on success, -errno on denial. */
60009-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60010 /* Returns true on success, false on denial. */
60011 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60012+/* Returns true on success, false on denial. */
60013+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60014
60015 static inline int ptrace_reparented(struct task_struct *child)
60016 {
60017diff -urNp linux-2.6.32.45/include/linux/random.h linux-2.6.32.45/include/linux/random.h
60018--- linux-2.6.32.45/include/linux/random.h 2011-08-16 20:37:25.000000000 -0400
60019+++ linux-2.6.32.45/include/linux/random.h 2011-08-07 19:48:09.000000000 -0400
60020@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned l
60021 u32 random32(void);
60022 void srandom32(u32 seed);
60023
60024+static inline unsigned long pax_get_random_long(void)
60025+{
60026+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60027+}
60028+
60029 #endif /* __KERNEL___ */
60030
60031 #endif /* _LINUX_RANDOM_H */
60032diff -urNp linux-2.6.32.45/include/linux/reboot.h linux-2.6.32.45/include/linux/reboot.h
60033--- linux-2.6.32.45/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
60034+++ linux-2.6.32.45/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
60035@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
60036 * Architecture-specific implementations of sys_reboot commands.
60037 */
60038
60039-extern void machine_restart(char *cmd);
60040-extern void machine_halt(void);
60041-extern void machine_power_off(void);
60042+extern void machine_restart(char *cmd) __noreturn;
60043+extern void machine_halt(void) __noreturn;
60044+extern void machine_power_off(void) __noreturn;
60045
60046 extern void machine_shutdown(void);
60047 struct pt_regs;
60048@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
60049 */
60050
60051 extern void kernel_restart_prepare(char *cmd);
60052-extern void kernel_restart(char *cmd);
60053-extern void kernel_halt(void);
60054-extern void kernel_power_off(void);
60055+extern void kernel_restart(char *cmd) __noreturn;
60056+extern void kernel_halt(void) __noreturn;
60057+extern void kernel_power_off(void) __noreturn;
60058
60059 void ctrl_alt_del(void);
60060
60061@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
60062 * Emergency restart, callable from an interrupt handler.
60063 */
60064
60065-extern void emergency_restart(void);
60066+extern void emergency_restart(void) __noreturn;
60067 #include <asm/emergency-restart.h>
60068
60069 #endif
60070diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs.h linux-2.6.32.45/include/linux/reiserfs_fs.h
60071--- linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
60072+++ linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
60073@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
60074 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60075
60076 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60077-#define get_generation(s) atomic_read (&fs_generation(s))
60078+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60079 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60080 #define __fs_changed(gen,s) (gen != get_generation (s))
60081 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
60082@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
60083 */
60084
60085 struct item_operations {
60086- int (*bytes_number) (struct item_head * ih, int block_size);
60087- void (*decrement_key) (struct cpu_key *);
60088- int (*is_left_mergeable) (struct reiserfs_key * ih,
60089+ int (* const bytes_number) (struct item_head * ih, int block_size);
60090+ void (* const decrement_key) (struct cpu_key *);
60091+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
60092 unsigned long bsize);
60093- void (*print_item) (struct item_head *, char *item);
60094- void (*check_item) (struct item_head *, char *item);
60095+ void (* const print_item) (struct item_head *, char *item);
60096+ void (* const check_item) (struct item_head *, char *item);
60097
60098- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60099+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60100 int is_affected, int insert_size);
60101- int (*check_left) (struct virtual_item * vi, int free,
60102+ int (* const check_left) (struct virtual_item * vi, int free,
60103 int start_skip, int end_skip);
60104- int (*check_right) (struct virtual_item * vi, int free);
60105- int (*part_size) (struct virtual_item * vi, int from, int to);
60106- int (*unit_num) (struct virtual_item * vi);
60107- void (*print_vi) (struct virtual_item * vi);
60108+ int (* const check_right) (struct virtual_item * vi, int free);
60109+ int (* const part_size) (struct virtual_item * vi, int from, int to);
60110+ int (* const unit_num) (struct virtual_item * vi);
60111+ void (* const print_vi) (struct virtual_item * vi);
60112 };
60113
60114-extern struct item_operations *item_ops[TYPE_ANY + 1];
60115+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
60116
60117 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
60118 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
60119diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs_sb.h linux-2.6.32.45/include/linux/reiserfs_fs_sb.h
60120--- linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
60121+++ linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
60122@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
60123 /* Comment? -Hans */
60124 wait_queue_head_t s_wait;
60125 /* To be obsoleted soon by per buffer seals.. -Hans */
60126- atomic_t s_generation_counter; // increased by one every time the
60127+ atomic_unchecked_t s_generation_counter; // increased by one every time the
60128 // tree gets re-balanced
60129 unsigned long s_properties; /* File system properties. Currently holds
60130 on-disk FS format */
60131diff -urNp linux-2.6.32.45/include/linux/relay.h linux-2.6.32.45/include/linux/relay.h
60132--- linux-2.6.32.45/include/linux/relay.h 2011-03-27 14:31:47.000000000 -0400
60133+++ linux-2.6.32.45/include/linux/relay.h 2011-08-05 20:33:55.000000000 -0400
60134@@ -159,7 +159,7 @@ struct rchan_callbacks
60135 * The callback should return 0 if successful, negative if not.
60136 */
60137 int (*remove_buf_file)(struct dentry *dentry);
60138-};
60139+} __no_const;
60140
60141 /*
60142 * CONFIG_RELAY kernel API, kernel/relay.c
60143diff -urNp linux-2.6.32.45/include/linux/sched.h linux-2.6.32.45/include/linux/sched.h
60144--- linux-2.6.32.45/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
60145+++ linux-2.6.32.45/include/linux/sched.h 2011-08-11 19:48:55.000000000 -0400
60146@@ -101,6 +101,7 @@ struct bio;
60147 struct fs_struct;
60148 struct bts_context;
60149 struct perf_event_context;
60150+struct linux_binprm;
60151
60152 /*
60153 * List of flags we want to share for kernel threads,
60154@@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
60155 extern signed long schedule_timeout_uninterruptible(signed long timeout);
60156 asmlinkage void __schedule(void);
60157 asmlinkage void schedule(void);
60158-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
60159+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
60160
60161 struct nsproxy;
60162 struct user_namespace;
60163@@ -371,9 +372,12 @@ struct user_namespace;
60164 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60165
60166 extern int sysctl_max_map_count;
60167+extern unsigned long sysctl_heap_stack_gap;
60168
60169 #include <linux/aio.h>
60170
60171+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60172+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60173 extern unsigned long
60174 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60175 unsigned long, unsigned long);
60176@@ -666,6 +670,16 @@ struct signal_struct {
60177 struct tty_audit_buf *tty_audit_buf;
60178 #endif
60179
60180+#ifdef CONFIG_GRKERNSEC
60181+ u32 curr_ip;
60182+ u32 saved_ip;
60183+ u32 gr_saddr;
60184+ u32 gr_daddr;
60185+ u16 gr_sport;
60186+ u16 gr_dport;
60187+ u8 used_accept:1;
60188+#endif
60189+
60190 int oom_adj; /* OOM kill score adjustment (bit shift) */
60191 };
60192
60193@@ -723,6 +737,11 @@ struct user_struct {
60194 struct key *session_keyring; /* UID's default session keyring */
60195 #endif
60196
60197+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60198+ unsigned int banned;
60199+ unsigned long ban_expires;
60200+#endif
60201+
60202 /* Hash table maintenance information */
60203 struct hlist_node uidhash_node;
60204 uid_t uid;
60205@@ -1328,8 +1347,8 @@ struct task_struct {
60206 struct list_head thread_group;
60207
60208 struct completion *vfork_done; /* for vfork() */
60209- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60210- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60211+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60212+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60213
60214 cputime_t utime, stime, utimescaled, stimescaled;
60215 cputime_t gtime;
60216@@ -1343,16 +1362,6 @@ struct task_struct {
60217 struct task_cputime cputime_expires;
60218 struct list_head cpu_timers[3];
60219
60220-/* process credentials */
60221- const struct cred *real_cred; /* objective and real subjective task
60222- * credentials (COW) */
60223- const struct cred *cred; /* effective (overridable) subjective task
60224- * credentials (COW) */
60225- struct mutex cred_guard_mutex; /* guard against foreign influences on
60226- * credential calculations
60227- * (notably. ptrace) */
60228- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60229-
60230 char comm[TASK_COMM_LEN]; /* executable name excluding path
60231 - access with [gs]et_task_comm (which lock
60232 it with task_lock())
60233@@ -1369,6 +1378,10 @@ struct task_struct {
60234 #endif
60235 /* CPU-specific state of this task */
60236 struct thread_struct thread;
60237+/* thread_info moved to task_struct */
60238+#ifdef CONFIG_X86
60239+ struct thread_info tinfo;
60240+#endif
60241 /* filesystem information */
60242 struct fs_struct *fs;
60243 /* open file information */
60244@@ -1436,6 +1449,15 @@ struct task_struct {
60245 int hardirq_context;
60246 int softirq_context;
60247 #endif
60248+
60249+/* process credentials */
60250+ const struct cred *real_cred; /* objective and real subjective task
60251+ * credentials (COW) */
60252+ struct mutex cred_guard_mutex; /* guard against foreign influences on
60253+ * credential calculations
60254+ * (notably. ptrace) */
60255+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60256+
60257 #ifdef CONFIG_LOCKDEP
60258 # define MAX_LOCK_DEPTH 48UL
60259 u64 curr_chain_key;
60260@@ -1456,6 +1478,9 @@ struct task_struct {
60261
60262 struct backing_dev_info *backing_dev_info;
60263
60264+ const struct cred *cred; /* effective (overridable) subjective task
60265+ * credentials (COW) */
60266+
60267 struct io_context *io_context;
60268
60269 unsigned long ptrace_message;
60270@@ -1519,6 +1544,21 @@ struct task_struct {
60271 unsigned long default_timer_slack_ns;
60272
60273 struct list_head *scm_work_list;
60274+
60275+#ifdef CONFIG_GRKERNSEC
60276+ /* grsecurity */
60277+ struct dentry *gr_chroot_dentry;
60278+ struct acl_subject_label *acl;
60279+ struct acl_role_label *role;
60280+ struct file *exec_file;
60281+ u16 acl_role_id;
60282+ /* is this the task that authenticated to the special role */
60283+ u8 acl_sp_role;
60284+ u8 is_writable;
60285+ u8 brute;
60286+ u8 gr_is_chrooted;
60287+#endif
60288+
60289 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60290 /* Index of current stored adress in ret_stack */
60291 int curr_ret_stack;
60292@@ -1542,6 +1582,57 @@ struct task_struct {
60293 #endif /* CONFIG_TRACING */
60294 };
60295
60296+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60297+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60298+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60299+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60300+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60301+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60302+
60303+#ifdef CONFIG_PAX_SOFTMODE
60304+extern int pax_softmode;
60305+#endif
60306+
60307+extern int pax_check_flags(unsigned long *);
60308+
60309+/* if tsk != current then task_lock must be held on it */
60310+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60311+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60312+{
60313+ if (likely(tsk->mm))
60314+ return tsk->mm->pax_flags;
60315+ else
60316+ return 0UL;
60317+}
60318+
60319+/* if tsk != current then task_lock must be held on it */
60320+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60321+{
60322+ if (likely(tsk->mm)) {
60323+ tsk->mm->pax_flags = flags;
60324+ return 0;
60325+ }
60326+ return -EINVAL;
60327+}
60328+#endif
60329+
60330+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60331+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60332+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60333+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60334+#endif
60335+
60336+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60337+extern void pax_report_insns(void *pc, void *sp);
60338+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60339+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60340+
60341+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60342+extern void pax_track_stack(void);
60343+#else
60344+static inline void pax_track_stack(void) {}
60345+#endif
60346+
60347 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60348 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
60349
60350@@ -1740,7 +1831,7 @@ extern void thread_group_times(struct ta
60351 #define PF_DUMPCORE 0x00000200 /* dumped core */
60352 #define PF_SIGNALED 0x00000400 /* killed by a signal */
60353 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
60354-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
60355+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
60356 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
60357 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
60358 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
60359@@ -1978,7 +2069,9 @@ void yield(void);
60360 extern struct exec_domain default_exec_domain;
60361
60362 union thread_union {
60363+#ifndef CONFIG_X86
60364 struct thread_info thread_info;
60365+#endif
60366 unsigned long stack[THREAD_SIZE/sizeof(long)];
60367 };
60368
60369@@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
60370 */
60371
60372 extern struct task_struct *find_task_by_vpid(pid_t nr);
60373+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60374 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60375 struct pid_namespace *ns);
60376
60377@@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
60378 extern void exit_itimers(struct signal_struct *);
60379 extern void flush_itimer_signals(void);
60380
60381-extern NORET_TYPE void do_group_exit(int);
60382+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60383
60384 extern void daemonize(const char *, ...);
60385 extern int allow_signal(int);
60386@@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
60387
60388 #endif
60389
60390-static inline int object_is_on_stack(void *obj)
60391+static inline int object_starts_on_stack(void *obj)
60392 {
60393- void *stack = task_stack_page(current);
60394+ const void *stack = task_stack_page(current);
60395
60396 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60397 }
60398
60399+#ifdef CONFIG_PAX_USERCOPY
60400+extern int object_is_on_stack(const void *obj, unsigned long len);
60401+#endif
60402+
60403 extern void thread_info_cache_init(void);
60404
60405 #ifdef CONFIG_DEBUG_STACK_USAGE
60406diff -urNp linux-2.6.32.45/include/linux/screen_info.h linux-2.6.32.45/include/linux/screen_info.h
60407--- linux-2.6.32.45/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
60408+++ linux-2.6.32.45/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
60409@@ -42,7 +42,8 @@ struct screen_info {
60410 __u16 pages; /* 0x32 */
60411 __u16 vesa_attributes; /* 0x34 */
60412 __u32 capabilities; /* 0x36 */
60413- __u8 _reserved[6]; /* 0x3a */
60414+ __u16 vesapm_size; /* 0x3a */
60415+ __u8 _reserved[4]; /* 0x3c */
60416 } __attribute__((packed));
60417
60418 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60419diff -urNp linux-2.6.32.45/include/linux/security.h linux-2.6.32.45/include/linux/security.h
60420--- linux-2.6.32.45/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
60421+++ linux-2.6.32.45/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
60422@@ -34,6 +34,7 @@
60423 #include <linux/key.h>
60424 #include <linux/xfrm.h>
60425 #include <linux/gfp.h>
60426+#include <linux/grsecurity.h>
60427 #include <net/flow.h>
60428
60429 /* Maximum number of letters for an LSM name string */
60430diff -urNp linux-2.6.32.45/include/linux/shm.h linux-2.6.32.45/include/linux/shm.h
60431--- linux-2.6.32.45/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
60432+++ linux-2.6.32.45/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
60433@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60434 pid_t shm_cprid;
60435 pid_t shm_lprid;
60436 struct user_struct *mlock_user;
60437+#ifdef CONFIG_GRKERNSEC
60438+ time_t shm_createtime;
60439+ pid_t shm_lapid;
60440+#endif
60441 };
60442
60443 /* shm_mode upper byte flags */
60444diff -urNp linux-2.6.32.45/include/linux/skbuff.h linux-2.6.32.45/include/linux/skbuff.h
60445--- linux-2.6.32.45/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
60446+++ linux-2.6.32.45/include/linux/skbuff.h 2011-07-06 19:53:33.000000000 -0400
60447@@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
60448 */
60449 static inline int skb_queue_empty(const struct sk_buff_head *list)
60450 {
60451- return list->next == (struct sk_buff *)list;
60452+ return list->next == (const struct sk_buff *)list;
60453 }
60454
60455 /**
60456@@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
60457 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60458 const struct sk_buff *skb)
60459 {
60460- return (skb->next == (struct sk_buff *) list);
60461+ return (skb->next == (const struct sk_buff *) list);
60462 }
60463
60464 /**
60465@@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
60466 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60467 const struct sk_buff *skb)
60468 {
60469- return (skb->prev == (struct sk_buff *) list);
60470+ return (skb->prev == (const struct sk_buff *) list);
60471 }
60472
60473 /**
60474@@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
60475 * headroom, you should not reduce this.
60476 */
60477 #ifndef NET_SKB_PAD
60478-#define NET_SKB_PAD 32
60479+#define NET_SKB_PAD (_AC(32,UL))
60480 #endif
60481
60482 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60483diff -urNp linux-2.6.32.45/include/linux/slab_def.h linux-2.6.32.45/include/linux/slab_def.h
60484--- linux-2.6.32.45/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
60485+++ linux-2.6.32.45/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
60486@@ -69,10 +69,10 @@ struct kmem_cache {
60487 unsigned long node_allocs;
60488 unsigned long node_frees;
60489 unsigned long node_overflow;
60490- atomic_t allochit;
60491- atomic_t allocmiss;
60492- atomic_t freehit;
60493- atomic_t freemiss;
60494+ atomic_unchecked_t allochit;
60495+ atomic_unchecked_t allocmiss;
60496+ atomic_unchecked_t freehit;
60497+ atomic_unchecked_t freemiss;
60498
60499 /*
60500 * If debugging is enabled, then the allocator can add additional
60501diff -urNp linux-2.6.32.45/include/linux/slab.h linux-2.6.32.45/include/linux/slab.h
60502--- linux-2.6.32.45/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
60503+++ linux-2.6.32.45/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
60504@@ -11,12 +11,20 @@
60505
60506 #include <linux/gfp.h>
60507 #include <linux/types.h>
60508+#include <linux/err.h>
60509
60510 /*
60511 * Flags to pass to kmem_cache_create().
60512 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60513 */
60514 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60515+
60516+#ifdef CONFIG_PAX_USERCOPY
60517+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60518+#else
60519+#define SLAB_USERCOPY 0x00000000UL
60520+#endif
60521+
60522 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60523 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60524 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60525@@ -82,10 +90,13 @@
60526 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60527 * Both make kfree a no-op.
60528 */
60529-#define ZERO_SIZE_PTR ((void *)16)
60530+#define ZERO_SIZE_PTR \
60531+({ \
60532+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60533+ (void *)(-MAX_ERRNO-1L); \
60534+})
60535
60536-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60537- (unsigned long)ZERO_SIZE_PTR)
60538+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60539
60540 /*
60541 * struct kmem_cache related prototypes
60542@@ -138,6 +149,7 @@ void * __must_check krealloc(const void
60543 void kfree(const void *);
60544 void kzfree(const void *);
60545 size_t ksize(const void *);
60546+void check_object_size(const void *ptr, unsigned long n, bool to);
60547
60548 /*
60549 * Allocator specific definitions. These are mainly used to establish optimized
60550@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
60551
60552 void __init kmem_cache_init_late(void);
60553
60554+#define kmalloc(x, y) \
60555+({ \
60556+ void *___retval; \
60557+ intoverflow_t ___x = (intoverflow_t)x; \
60558+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
60559+ ___retval = NULL; \
60560+ else \
60561+ ___retval = kmalloc((size_t)___x, (y)); \
60562+ ___retval; \
60563+})
60564+
60565+#define kmalloc_node(x, y, z) \
60566+({ \
60567+ void *___retval; \
60568+ intoverflow_t ___x = (intoverflow_t)x; \
60569+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60570+ ___retval = NULL; \
60571+ else \
60572+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60573+ ___retval; \
60574+})
60575+
60576+#define kzalloc(x, y) \
60577+({ \
60578+ void *___retval; \
60579+ intoverflow_t ___x = (intoverflow_t)x; \
60580+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
60581+ ___retval = NULL; \
60582+ else \
60583+ ___retval = kzalloc((size_t)___x, (y)); \
60584+ ___retval; \
60585+})
60586+
60587 #endif /* _LINUX_SLAB_H */
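
Three separate things happen in the slab.h hunk: a SLAB_USERCOPY cache flag is reserved for PAX_USERCOPY (defined to 0 otherwise, so callers need no #ifdefs); ZERO_SIZE_PTR moves from (void *)16 up to (void *)(-MAX_ERRNO-1), directly below the ERR_PTR range, so the reworked ZERO_OR_NULL_PTR() catches NULL, the zero-size cookie and error-pointer values with a single unsigned comparison; and kmalloc/kmalloc_node/kzalloc are shadowed by statement-expression macros that evaluate the requested size in intoverflow_t (an integer type the patch introduces elsewhere) and refuse any request whose value exceeds ULONG_MAX. A user-space sketch of that last guard follows; checked_alloc and check_t are illustrative names chosen so the overflow branch can actually be exercised, not names from the patch:

/* Sketch of the overflow-guarded allocation wrapper. A 64-bit check type
 * guards a deliberately 32-bit "allocation size" so the rejection path can
 * be demonstrated portably; the GNU statement expression matches the form
 * used in the patch. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long check_t;     /* stand-in for intoverflow_t */
#define ALLOC_MAX UINT_MAX              /* stand-in for ULONG_MAX */

#define checked_alloc(x)                                     \
({                                                           \
    void *___retval;                                         \
    check_t ___x = (check_t)(x);                             \
    if (___x > ALLOC_MAX) {                                  \
        fprintf(stderr, "alloc size overflow\n");            \
        ___retval = NULL;                                    \
    } else {                                                 \
        ___retval = malloc((unsigned int)___x);              \
    }                                                        \
    ___retval;                                               \
})

int main(void)
{
    check_t too_big = (check_t)UINT_MAX + 16;  /* exceeds the narrow size type */
    void *p = checked_alloc(too_big);          /* rejected, returns NULL */
    void *q = checked_alloc(64);               /* accepted */

    free(q);
    return p == NULL ? 0 : 1;
}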
60588diff -urNp linux-2.6.32.45/include/linux/slub_def.h linux-2.6.32.45/include/linux/slub_def.h
60589--- linux-2.6.32.45/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
60590+++ linux-2.6.32.45/include/linux/slub_def.h 2011-08-05 20:33:55.000000000 -0400
60591@@ -86,7 +86,7 @@ struct kmem_cache {
60592 struct kmem_cache_order_objects max;
60593 struct kmem_cache_order_objects min;
60594 gfp_t allocflags; /* gfp flags to use on each alloc */
60595- int refcount; /* Refcount for slab cache destroy */
60596+ atomic_t refcount; /* Refcount for slab cache destroy */
60597 void (*ctor)(void *);
60598 int inuse; /* Offset to metadata */
60599 int align; /* Alignment */
60600@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache
60601 #endif
60602
60603 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60604-void *__kmalloc(size_t size, gfp_t flags);
60605+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60606
60607 #ifdef CONFIG_KMEMTRACE
60608 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
60609diff -urNp linux-2.6.32.45/include/linux/sonet.h linux-2.6.32.45/include/linux/sonet.h
60610--- linux-2.6.32.45/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
60611+++ linux-2.6.32.45/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
60612@@ -61,7 +61,7 @@ struct sonet_stats {
60613 #include <asm/atomic.h>
60614
60615 struct k_sonet_stats {
60616-#define __HANDLE_ITEM(i) atomic_t i
60617+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60618 __SONET_ITEMS
60619 #undef __HANDLE_ITEM
60620 };
60621diff -urNp linux-2.6.32.45/include/linux/sunrpc/cache.h linux-2.6.32.45/include/linux/sunrpc/cache.h
60622--- linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-03-27 14:31:47.000000000 -0400
60623+++ linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-08-05 20:33:55.000000000 -0400
60624@@ -125,7 +125,7 @@ struct cache_detail {
60625 */
60626 struct cache_req {
60627 struct cache_deferred_req *(*defer)(struct cache_req *req);
60628-};
60629+} __no_const;
60630 /* this must be embedded in a deferred_request that is being
60631 * delayed awaiting cache-fill
60632 */
60633diff -urNp linux-2.6.32.45/include/linux/sunrpc/clnt.h linux-2.6.32.45/include/linux/sunrpc/clnt.h
60634--- linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
60635+++ linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
60636@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
60637 {
60638 switch (sap->sa_family) {
60639 case AF_INET:
60640- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60641+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60642 case AF_INET6:
60643- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60644+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60645 }
60646 return 0;
60647 }
60648@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
60649 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60650 const struct sockaddr *src)
60651 {
60652- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60653+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60654 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60655
60656 dsin->sin_family = ssin->sin_family;
60657@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
60658 if (sa->sa_family != AF_INET6)
60659 return 0;
60660
60661- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60662+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60663 }
60664
60665 #endif /* __KERNEL__ */
60666diff -urNp linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h
60667--- linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
60668+++ linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
60669@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60670 extern unsigned int svcrdma_max_requests;
60671 extern unsigned int svcrdma_max_req_size;
60672
60673-extern atomic_t rdma_stat_recv;
60674-extern atomic_t rdma_stat_read;
60675-extern atomic_t rdma_stat_write;
60676-extern atomic_t rdma_stat_sq_starve;
60677-extern atomic_t rdma_stat_rq_starve;
60678-extern atomic_t rdma_stat_rq_poll;
60679-extern atomic_t rdma_stat_rq_prod;
60680-extern atomic_t rdma_stat_sq_poll;
60681-extern atomic_t rdma_stat_sq_prod;
60682+extern atomic_unchecked_t rdma_stat_recv;
60683+extern atomic_unchecked_t rdma_stat_read;
60684+extern atomic_unchecked_t rdma_stat_write;
60685+extern atomic_unchecked_t rdma_stat_sq_starve;
60686+extern atomic_unchecked_t rdma_stat_rq_starve;
60687+extern atomic_unchecked_t rdma_stat_rq_poll;
60688+extern atomic_unchecked_t rdma_stat_rq_prod;
60689+extern atomic_unchecked_t rdma_stat_sq_poll;
60690+extern atomic_unchecked_t rdma_stat_sq_prod;
60691
60692 #define RPCRDMA_VERSION 1
60693
60694diff -urNp linux-2.6.32.45/include/linux/suspend.h linux-2.6.32.45/include/linux/suspend.h
60695--- linux-2.6.32.45/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
60696+++ linux-2.6.32.45/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
60697@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
60698 * which require special recovery actions in that situation.
60699 */
60700 struct platform_suspend_ops {
60701- int (*valid)(suspend_state_t state);
60702- int (*begin)(suspend_state_t state);
60703- int (*prepare)(void);
60704- int (*prepare_late)(void);
60705- int (*enter)(suspend_state_t state);
60706- void (*wake)(void);
60707- void (*finish)(void);
60708- void (*end)(void);
60709- void (*recover)(void);
60710+ int (* const valid)(suspend_state_t state);
60711+ int (* const begin)(suspend_state_t state);
60712+ int (* const prepare)(void);
60713+ int (* const prepare_late)(void);
60714+ int (* const enter)(suspend_state_t state);
60715+ void (* const wake)(void);
60716+ void (* const finish)(void);
60717+ void (* const end)(void);
60718+ void (* const recover)(void);
60719 };
60720
60721 #ifdef CONFIG_SUSPEND
60722@@ -120,7 +120,7 @@ struct platform_suspend_ops {
60723 * suspend_set_ops - set platform dependent suspend operations
60724 * @ops: The new suspend operations to set.
60725 */
60726-extern void suspend_set_ops(struct platform_suspend_ops *ops);
60727+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
60728 extern int suspend_valid_only_mem(suspend_state_t state);
60729
60730 /**
60731@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
60732 #else /* !CONFIG_SUSPEND */
60733 #define suspend_valid_only_mem NULL
60734
60735-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
60736+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
60737 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
60738 #endif /* !CONFIG_SUSPEND */
60739
60740@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
60741 * platforms which require special recovery actions in that situation.
60742 */
60743 struct platform_hibernation_ops {
60744- int (*begin)(void);
60745- void (*end)(void);
60746- int (*pre_snapshot)(void);
60747- void (*finish)(void);
60748- int (*prepare)(void);
60749- int (*enter)(void);
60750- void (*leave)(void);
60751- int (*pre_restore)(void);
60752- void (*restore_cleanup)(void);
60753- void (*recover)(void);
60754+ int (* const begin)(void);
60755+ void (* const end)(void);
60756+ int (* const pre_snapshot)(void);
60757+ void (* const finish)(void);
60758+ int (* const prepare)(void);
60759+ int (* const enter)(void);
60760+ void (* const leave)(void);
60761+ int (* const pre_restore)(void);
60762+ void (* const restore_cleanup)(void);
60763+ void (* const recover)(void);
60764 };
60765
60766 #ifdef CONFIG_HIBERNATION
60767@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
60768 extern void swsusp_unset_page_free(struct page *);
60769 extern unsigned long get_safe_page(gfp_t gfp_mask);
60770
60771-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
60772+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
60773 extern int hibernate(void);
60774 extern bool system_entering_hibernation(void);
60775 #else /* CONFIG_HIBERNATION */
60776@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
60777 static inline void swsusp_set_page_free(struct page *p) {}
60778 static inline void swsusp_unset_page_free(struct page *p) {}
60779
60780-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
60781+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
60782 static inline int hibernate(void) { return -ENOSYS; }
60783 static inline bool system_entering_hibernation(void) { return false; }
60784 #endif /* CONFIG_HIBERNATION */
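
This hunk, like the sysfs_ops, neigh_ops and snd_ac97_build_ops hunks elsewhere in the patch, constifies the callback members of an ops structure and makes the registration functions take const pointers, so an ops instance can be defined once (typically ending up in .rodata) and its callbacks can no longer be retargeted at run time. A minimal sketch of what const function-pointer members buy, with illustrative names rather than kernel API:

/* The "const function pointer members" hardening pattern: every field must
 * be set when the object is defined, and any later attempt to retarget a
 * callback is rejected by the compiler. */
#include <stdio.h>

struct ops {
    int  (* const prepare)(void);
    void (* const finish)(void);
};

static int  my_prepare(void) { puts("prepare"); return 0; }
static void my_finish(void)  { puts("finish"); }

/* Usually also declared const so the whole object can live in .rodata. */
static const struct ops my_ops = {
    .prepare = my_prepare,
    .finish  = my_finish,
};

int main(void)
{
    my_ops.prepare();
    my_ops.finish();
    /* my_ops.finish = NULL;  -- would not compile: the member is const */
    return 0;
}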
60785diff -urNp linux-2.6.32.45/include/linux/sysctl.h linux-2.6.32.45/include/linux/sysctl.h
60786--- linux-2.6.32.45/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
60787+++ linux-2.6.32.45/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
60788@@ -164,7 +164,11 @@ enum
60789 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60790 };
60791
60792-
60793+#ifdef CONFIG_PAX_SOFTMODE
60794+enum {
60795+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60796+};
60797+#endif
60798
60799 /* CTL_VM names: */
60800 enum
60801@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
60802
60803 extern int proc_dostring(struct ctl_table *, int,
60804 void __user *, size_t *, loff_t *);
60805+extern int proc_dostring_modpriv(struct ctl_table *, int,
60806+ void __user *, size_t *, loff_t *);
60807 extern int proc_dointvec(struct ctl_table *, int,
60808 void __user *, size_t *, loff_t *);
60809 extern int proc_dointvec_minmax(struct ctl_table *, int,
60810@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
60811
60812 extern ctl_handler sysctl_data;
60813 extern ctl_handler sysctl_string;
60814+extern ctl_handler sysctl_string_modpriv;
60815 extern ctl_handler sysctl_intvec;
60816 extern ctl_handler sysctl_jiffies;
60817 extern ctl_handler sysctl_ms_jiffies;
60818diff -urNp linux-2.6.32.45/include/linux/sysfs.h linux-2.6.32.45/include/linux/sysfs.h
60819--- linux-2.6.32.45/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
60820+++ linux-2.6.32.45/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
60821@@ -75,8 +75,8 @@ struct bin_attribute {
60822 };
60823
60824 struct sysfs_ops {
60825- ssize_t (*show)(struct kobject *, struct attribute *,char *);
60826- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
60827+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
60828+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
60829 };
60830
60831 struct sysfs_dirent;
60832diff -urNp linux-2.6.32.45/include/linux/thread_info.h linux-2.6.32.45/include/linux/thread_info.h
60833--- linux-2.6.32.45/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
60834+++ linux-2.6.32.45/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
60835@@ -23,7 +23,7 @@ struct restart_block {
60836 };
60837 /* For futex_wait and futex_wait_requeue_pi */
60838 struct {
60839- u32 *uaddr;
60840+ u32 __user *uaddr;
60841 u32 val;
60842 u32 flags;
60843 u32 bitset;
60844diff -urNp linux-2.6.32.45/include/linux/tty.h linux-2.6.32.45/include/linux/tty.h
60845--- linux-2.6.32.45/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
60846+++ linux-2.6.32.45/include/linux/tty.h 2011-08-05 20:33:55.000000000 -0400
60847@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
60848 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
60849 extern void tty_ldisc_enable(struct tty_struct *tty);
60850
60851-
60852 /* n_tty.c */
60853 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
60854
60855diff -urNp linux-2.6.32.45/include/linux/tty_ldisc.h linux-2.6.32.45/include/linux/tty_ldisc.h
60856--- linux-2.6.32.45/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
60857+++ linux-2.6.32.45/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
60858@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
60859
60860 struct module *owner;
60861
60862- int refcount;
60863+ atomic_t refcount;
60864 };
60865
60866 struct tty_ldisc {
60867diff -urNp linux-2.6.32.45/include/linux/types.h linux-2.6.32.45/include/linux/types.h
60868--- linux-2.6.32.45/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
60869+++ linux-2.6.32.45/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
60870@@ -191,10 +191,26 @@ typedef struct {
60871 volatile int counter;
60872 } atomic_t;
60873
60874+#ifdef CONFIG_PAX_REFCOUNT
60875+typedef struct {
60876+ volatile int counter;
60877+} atomic_unchecked_t;
60878+#else
60879+typedef atomic_t atomic_unchecked_t;
60880+#endif
60881+
60882 #ifdef CONFIG_64BIT
60883 typedef struct {
60884 volatile long counter;
60885 } atomic64_t;
60886+
60887+#ifdef CONFIG_PAX_REFCOUNT
60888+typedef struct {
60889+ volatile long counter;
60890+} atomic64_unchecked_t;
60891+#else
60892+typedef atomic64_t atomic64_unchecked_t;
60893+#endif
60894 #endif
60895
60896 struct ustat {
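
atomic_unchecked_t is the opt-out type for PAX_REFCOUNT: with that option enabled the regular atomic_t operations gain overflow detection, so counters that are allowed to wrap or are purely statistical (sk_drops, vm_stat, the slab and RDMA counters converted above) are switched to the unchecked variant, while without PAX_REFCOUNT it is simply an alias for atomic_t, as the #else branch shows. A user-space sketch of the distinction, with a saturating check standing in for the real architecture-level overflow handling:

/* Conceptual model of the checked/unchecked split. The real PAX_REFCOUNT
 * code detects the wrap inside the architecture's atomic asm and reports a
 * refcount overflow; this sketch merely saturates so the difference is
 * visible from user space. */
#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)                      /* checked: refuses to wrap */
{
    if (v->counter == INT_MAX) {
        fprintf(stderr, "refcount overflow detected\n");
        return;
    }
    v->counter++;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)  /* statistics: no check */
{
    v->counter++;    /* in the kernel this one is allowed to wrap */
}

int main(void)
{
    atomic_t ref             = { .counter = INT_MAX };
    atomic_unchecked_t drops = { .counter = 41 };

    atomic_inc(&ref);              /* reported, counter stays at INT_MAX */
    atomic_inc_unchecked(&drops);  /* just counts */

    printf("ref=%d drops=%d\n", ref.counter, drops.counter);
    return 0;
}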
60897diff -urNp linux-2.6.32.45/include/linux/uaccess.h linux-2.6.32.45/include/linux/uaccess.h
60898--- linux-2.6.32.45/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
60899+++ linux-2.6.32.45/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
60900@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60901 long ret; \
60902 mm_segment_t old_fs = get_fs(); \
60903 \
60904- set_fs(KERNEL_DS); \
60905 pagefault_disable(); \
60906+ set_fs(KERNEL_DS); \
60907 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60908- pagefault_enable(); \
60909 set_fs(old_fs); \
60910+ pagefault_enable(); \
60911 ret; \
60912 })
60913
60914@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
60915 * Safely read from address @src to the buffer at @dst. If a kernel fault
60916 * happens, handle that and return -EFAULT.
60917 */
60918-extern long probe_kernel_read(void *dst, void *src, size_t size);
60919+extern long probe_kernel_read(void *dst, const void *src, size_t size);
60920
60921 /*
60922 * probe_kernel_write(): safely attempt to write to a location
60923@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
60924 * Safely write to address @dst from the buffer at @src. If a kernel fault
60925 * happens, handle that and return -EFAULT.
60926 */
60927-extern long probe_kernel_write(void *dst, void *src, size_t size);
60928+extern long probe_kernel_write(void *dst, const void *src, size_t size);
60929
60930 #endif /* __LINUX_UACCESS_H__ */
60931diff -urNp linux-2.6.32.45/include/linux/unaligned/access_ok.h linux-2.6.32.45/include/linux/unaligned/access_ok.h
60932--- linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
60933+++ linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
60934@@ -6,32 +6,32 @@
60935
60936 static inline u16 get_unaligned_le16(const void *p)
60937 {
60938- return le16_to_cpup((__le16 *)p);
60939+ return le16_to_cpup((const __le16 *)p);
60940 }
60941
60942 static inline u32 get_unaligned_le32(const void *p)
60943 {
60944- return le32_to_cpup((__le32 *)p);
60945+ return le32_to_cpup((const __le32 *)p);
60946 }
60947
60948 static inline u64 get_unaligned_le64(const void *p)
60949 {
60950- return le64_to_cpup((__le64 *)p);
60951+ return le64_to_cpup((const __le64 *)p);
60952 }
60953
60954 static inline u16 get_unaligned_be16(const void *p)
60955 {
60956- return be16_to_cpup((__be16 *)p);
60957+ return be16_to_cpup((const __be16 *)p);
60958 }
60959
60960 static inline u32 get_unaligned_be32(const void *p)
60961 {
60962- return be32_to_cpup((__be32 *)p);
60963+ return be32_to_cpup((const __be32 *)p);
60964 }
60965
60966 static inline u64 get_unaligned_be64(const void *p)
60967 {
60968- return be64_to_cpup((__be64 *)p);
60969+ return be64_to_cpup((const __be64 *)p);
60970 }
60971
60972 static inline void put_unaligned_le16(u16 val, void *p)
60973diff -urNp linux-2.6.32.45/include/linux/vmalloc.h linux-2.6.32.45/include/linux/vmalloc.h
60974--- linux-2.6.32.45/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
60975+++ linux-2.6.32.45/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
60976@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
60977 #define VM_MAP 0x00000004 /* vmap()ed pages */
60978 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60979 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60980+
60981+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60982+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
60983+#endif
60984+
60985 /* bits [20..32] reserved for arch specific ioremap internals */
60986
60987 /*
60988@@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
60989
60990 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
60991
60992+#define vmalloc(x) \
60993+({ \
60994+ void *___retval; \
60995+ intoverflow_t ___x = (intoverflow_t)x; \
60996+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60997+ ___retval = NULL; \
60998+ else \
60999+ ___retval = vmalloc((unsigned long)___x); \
61000+ ___retval; \
61001+})
61002+
61003+#define __vmalloc(x, y, z) \
61004+({ \
61005+ void *___retval; \
61006+ intoverflow_t ___x = (intoverflow_t)x; \
61007+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61008+ ___retval = NULL; \
61009+ else \
61010+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61011+ ___retval; \
61012+})
61013+
61014+#define vmalloc_user(x) \
61015+({ \
61016+ void *___retval; \
61017+ intoverflow_t ___x = (intoverflow_t)x; \
61018+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61019+ ___retval = NULL; \
61020+ else \
61021+ ___retval = vmalloc_user((unsigned long)___x); \
61022+ ___retval; \
61023+})
61024+
61025+#define vmalloc_exec(x) \
61026+({ \
61027+ void *___retval; \
61028+ intoverflow_t ___x = (intoverflow_t)x; \
61029+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61030+ ___retval = NULL; \
61031+ else \
61032+ ___retval = vmalloc_exec((unsigned long)___x); \
61033+ ___retval; \
61034+})
61035+
61036+#define vmalloc_node(x, y) \
61037+({ \
61038+ void *___retval; \
61039+ intoverflow_t ___x = (intoverflow_t)x; \
61040+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61041+ ___retval = NULL; \
61042+ else \
61043+ ___retval = vmalloc_node((unsigned long)___x, (y));\
61044+ ___retval; \
61045+})
61046+
61047+#define vmalloc_32(x) \
61048+({ \
61049+ void *___retval; \
61050+ intoverflow_t ___x = (intoverflow_t)x; \
61051+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61052+ ___retval = NULL; \
61053+ else \
61054+ ___retval = vmalloc_32((unsigned long)___x); \
61055+ ___retval; \
61056+})
61057+
61058+#define vmalloc_32_user(x) \
61059+({ \
61060+ void *___retval; \
61061+ intoverflow_t ___x = (intoverflow_t)x; \
61062+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61063+ ___retval = NULL; \
61064+ else \
61065+ ___retval = vmalloc_32_user((unsigned long)___x);\
61066+ ___retval; \
61067+})
61068+
61069 #endif /* _LINUX_VMALLOC_H */
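
The vmalloc wrappers are the same size-overflow guard as the kmalloc family in the slab.h hunk earlier. One detail both rely on: each macro shares its name with the function it wraps, and the call inside the replacement list still reaches the real function because the C preprocessor never re-expands a function-like macro within its own expansion. A small demonstration, with xmalloc as a made-up name:

/* The self-named macro trick used by the kmalloc/vmalloc wrappers: a
 * function-like macro may call the function of the same name inside its own
 * body; the preprocessor does not expand that inner use recursively. */
#include <stdio.h>
#include <stdlib.h>

static void *xmalloc(size_t n)
{
    printf("real xmalloc(%zu)\n", n);
    return malloc(n);
}

#define xmalloc(n)                                   \
({                                                   \
    size_t ___n = (n);                               \
    printf("wrapper checked %zu\n", ___n);           \
    xmalloc(___n);   /* calls the real function */   \
})

int main(void)
{
    void *p = xmalloc(128);   /* prints both the wrapper and the real line */
    free(p);
    return 0;
}

This is why the declarations above the macros stay untouched and every existing call site picks up the check without modification.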
61070diff -urNp linux-2.6.32.45/include/linux/vmstat.h linux-2.6.32.45/include/linux/vmstat.h
61071--- linux-2.6.32.45/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
61072+++ linux-2.6.32.45/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
61073@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
61074 /*
61075 * Zone based page accounting with per cpu differentials.
61076 */
61077-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61078+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61079
61080 static inline void zone_page_state_add(long x, struct zone *zone,
61081 enum zone_stat_item item)
61082 {
61083- atomic_long_add(x, &zone->vm_stat[item]);
61084- atomic_long_add(x, &vm_stat[item]);
61085+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61086+ atomic_long_add_unchecked(x, &vm_stat[item]);
61087 }
61088
61089 static inline unsigned long global_page_state(enum zone_stat_item item)
61090 {
61091- long x = atomic_long_read(&vm_stat[item]);
61092+ long x = atomic_long_read_unchecked(&vm_stat[item]);
61093 #ifdef CONFIG_SMP
61094 if (x < 0)
61095 x = 0;
61096@@ -158,7 +158,7 @@ static inline unsigned long global_page_
61097 static inline unsigned long zone_page_state(struct zone *zone,
61098 enum zone_stat_item item)
61099 {
61100- long x = atomic_long_read(&zone->vm_stat[item]);
61101+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61102 #ifdef CONFIG_SMP
61103 if (x < 0)
61104 x = 0;
61105@@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
61106 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61107 enum zone_stat_item item)
61108 {
61109- long x = atomic_long_read(&zone->vm_stat[item]);
61110+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61111
61112 #ifdef CONFIG_SMP
61113 int cpu;
61114@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
61115
61116 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61117 {
61118- atomic_long_inc(&zone->vm_stat[item]);
61119- atomic_long_inc(&vm_stat[item]);
61120+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
61121+ atomic_long_inc_unchecked(&vm_stat[item]);
61122 }
61123
61124 static inline void __inc_zone_page_state(struct page *page,
61125@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
61126
61127 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61128 {
61129- atomic_long_dec(&zone->vm_stat[item]);
61130- atomic_long_dec(&vm_stat[item]);
61131+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
61132+ atomic_long_dec_unchecked(&vm_stat[item]);
61133 }
61134
61135 static inline void __dec_zone_page_state(struct page *page,
61136diff -urNp linux-2.6.32.45/include/media/v4l2-dev.h linux-2.6.32.45/include/media/v4l2-dev.h
61137--- linux-2.6.32.45/include/media/v4l2-dev.h 2011-03-27 14:31:47.000000000 -0400
61138+++ linux-2.6.32.45/include/media/v4l2-dev.h 2011-08-05 20:33:55.000000000 -0400
61139@@ -34,7 +34,7 @@ struct v4l2_device;
61140 #define V4L2_FL_UNREGISTERED (0)
61141
61142 struct v4l2_file_operations {
61143- struct module *owner;
61144+ struct module * const owner;
61145 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61146 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61147 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61148diff -urNp linux-2.6.32.45/include/media/v4l2-device.h linux-2.6.32.45/include/media/v4l2-device.h
61149--- linux-2.6.32.45/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
61150+++ linux-2.6.32.45/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
61151@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
61152 this function returns 0. If the name ends with a digit (e.g. cx18),
61153 then the name will be set to cx18-0 since cx180 looks really odd. */
61154 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
61155- atomic_t *instance);
61156+ atomic_unchecked_t *instance);
61157
61158 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
61159 Since the parent disappears this ensures that v4l2_dev doesn't have an
61160diff -urNp linux-2.6.32.45/include/net/flow.h linux-2.6.32.45/include/net/flow.h
61161--- linux-2.6.32.45/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
61162+++ linux-2.6.32.45/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
61163@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
61164 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
61165 u8 dir, flow_resolve_t resolver);
61166 extern void flow_cache_flush(void);
61167-extern atomic_t flow_cache_genid;
61168+extern atomic_unchecked_t flow_cache_genid;
61169
61170 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
61171 {
61172diff -urNp linux-2.6.32.45/include/net/inetpeer.h linux-2.6.32.45/include/net/inetpeer.h
61173--- linux-2.6.32.45/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
61174+++ linux-2.6.32.45/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
61175@@ -24,7 +24,7 @@ struct inet_peer
61176 __u32 dtime; /* the time of last use of not
61177 * referenced entries */
61178 atomic_t refcnt;
61179- atomic_t rid; /* Frag reception counter */
61180+ atomic_unchecked_t rid; /* Frag reception counter */
61181 __u32 tcp_ts;
61182 unsigned long tcp_ts_stamp;
61183 };
61184diff -urNp linux-2.6.32.45/include/net/ip_vs.h linux-2.6.32.45/include/net/ip_vs.h
61185--- linux-2.6.32.45/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
61186+++ linux-2.6.32.45/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
61187@@ -365,7 +365,7 @@ struct ip_vs_conn {
61188 struct ip_vs_conn *control; /* Master control connection */
61189 atomic_t n_control; /* Number of controlled ones */
61190 struct ip_vs_dest *dest; /* real server */
61191- atomic_t in_pkts; /* incoming packet counter */
61192+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61193
61194 /* packet transmitter for different forwarding methods. If it
61195 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61196@@ -466,7 +466,7 @@ struct ip_vs_dest {
61197 union nf_inet_addr addr; /* IP address of the server */
61198 __be16 port; /* port number of the server */
61199 volatile unsigned flags; /* dest status flags */
61200- atomic_t conn_flags; /* flags to copy to conn */
61201+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61202 atomic_t weight; /* server weight */
61203
61204 atomic_t refcnt; /* reference counter */
61205diff -urNp linux-2.6.32.45/include/net/irda/ircomm_core.h linux-2.6.32.45/include/net/irda/ircomm_core.h
61206--- linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-03-27 14:31:47.000000000 -0400
61207+++ linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-08-05 20:33:55.000000000 -0400
61208@@ -51,7 +51,7 @@ typedef struct {
61209 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61210 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61211 struct ircomm_info *);
61212-} call_t;
61213+} __no_const call_t;
61214
61215 struct ircomm_cb {
61216 irda_queue_t queue;
61217diff -urNp linux-2.6.32.45/include/net/irda/ircomm_tty.h linux-2.6.32.45/include/net/irda/ircomm_tty.h
61218--- linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
61219+++ linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
61220@@ -35,6 +35,7 @@
61221 #include <linux/termios.h>
61222 #include <linux/timer.h>
61223 #include <linux/tty.h> /* struct tty_struct */
61224+#include <asm/local.h>
61225
61226 #include <net/irda/irias_object.h>
61227 #include <net/irda/ircomm_core.h>
61228@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61229 unsigned short close_delay;
61230 unsigned short closing_wait; /* time to wait before closing */
61231
61232- int open_count;
61233- int blocked_open; /* # of blocked opens */
61234+ local_t open_count;
61235+ local_t blocked_open; /* # of blocked opens */
61236
61237 /* Protect concurent access to :
61238 * o self->open_count
61239diff -urNp linux-2.6.32.45/include/net/iucv/af_iucv.h linux-2.6.32.45/include/net/iucv/af_iucv.h
61240--- linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
61241+++ linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
61242@@ -87,7 +87,7 @@ struct iucv_sock {
61243 struct iucv_sock_list {
61244 struct hlist_head head;
61245 rwlock_t lock;
61246- atomic_t autobind_name;
61247+ atomic_unchecked_t autobind_name;
61248 };
61249
61250 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61251diff -urNp linux-2.6.32.45/include/net/lapb.h linux-2.6.32.45/include/net/lapb.h
61252--- linux-2.6.32.45/include/net/lapb.h 2011-03-27 14:31:47.000000000 -0400
61253+++ linux-2.6.32.45/include/net/lapb.h 2011-08-05 20:33:55.000000000 -0400
61254@@ -95,7 +95,7 @@ struct lapb_cb {
61255 struct sk_buff_head write_queue;
61256 struct sk_buff_head ack_queue;
61257 unsigned char window;
61258- struct lapb_register_struct callbacks;
61259+ struct lapb_register_struct *callbacks;
61260
61261 /* FRMR control information */
61262 struct lapb_frame frmr_data;
61263diff -urNp linux-2.6.32.45/include/net/neighbour.h linux-2.6.32.45/include/net/neighbour.h
61264--- linux-2.6.32.45/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
61265+++ linux-2.6.32.45/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
61266@@ -125,12 +125,12 @@ struct neighbour
61267 struct neigh_ops
61268 {
61269 int family;
61270- void (*solicit)(struct neighbour *, struct sk_buff*);
61271- void (*error_report)(struct neighbour *, struct sk_buff*);
61272- int (*output)(struct sk_buff*);
61273- int (*connected_output)(struct sk_buff*);
61274- int (*hh_output)(struct sk_buff*);
61275- int (*queue_xmit)(struct sk_buff*);
61276+ void (* const solicit)(struct neighbour *, struct sk_buff*);
61277+ void (* const error_report)(struct neighbour *, struct sk_buff*);
61278+ int (* const output)(struct sk_buff*);
61279+ int (* const connected_output)(struct sk_buff*);
61280+ int (* const hh_output)(struct sk_buff*);
61281+ int (* const queue_xmit)(struct sk_buff*);
61282 };
61283
61284 struct pneigh_entry
61285diff -urNp linux-2.6.32.45/include/net/netlink.h linux-2.6.32.45/include/net/netlink.h
61286--- linux-2.6.32.45/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
61287+++ linux-2.6.32.45/include/net/netlink.h 2011-07-13 17:23:19.000000000 -0400
61288@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
61289 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61290 {
61291 if (mark)
61292- skb_trim(skb, (unsigned char *) mark - skb->data);
61293+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61294 }
61295
61296 /**
61297diff -urNp linux-2.6.32.45/include/net/netns/ipv4.h linux-2.6.32.45/include/net/netns/ipv4.h
61298--- linux-2.6.32.45/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
61299+++ linux-2.6.32.45/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
61300@@ -54,7 +54,7 @@ struct netns_ipv4 {
61301 int current_rt_cache_rebuild_count;
61302
61303 struct timer_list rt_secret_timer;
61304- atomic_t rt_genid;
61305+ atomic_unchecked_t rt_genid;
61306
61307 #ifdef CONFIG_IP_MROUTE
61308 struct sock *mroute_sk;
61309diff -urNp linux-2.6.32.45/include/net/sctp/sctp.h linux-2.6.32.45/include/net/sctp/sctp.h
61310--- linux-2.6.32.45/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
61311+++ linux-2.6.32.45/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
61312@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
61313
61314 #else /* SCTP_DEBUG */
61315
61316-#define SCTP_DEBUG_PRINTK(whatever...)
61317-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61318+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61319+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61320 #define SCTP_ENABLE_DEBUG
61321 #define SCTP_DISABLE_DEBUG
61322 #define SCTP_ASSERT(expr, str, func)
61323diff -urNp linux-2.6.32.45/include/net/secure_seq.h linux-2.6.32.45/include/net/secure_seq.h
61324--- linux-2.6.32.45/include/net/secure_seq.h 2011-08-16 20:37:25.000000000 -0400
61325+++ linux-2.6.32.45/include/net/secure_seq.h 2011-08-07 19:48:09.000000000 -0400
61326@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
61327 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
61328 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
61329 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
61330- __be16 dport);
61331+ __be16 dport);
61332 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
61333 __be16 sport, __be16 dport);
61334 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61335- __be16 sport, __be16 dport);
61336+ __be16 sport, __be16 dport);
61337 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
61338- __be16 sport, __be16 dport);
61339+ __be16 sport, __be16 dport);
61340 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61341- __be16 sport, __be16 dport);
61342+ __be16 sport, __be16 dport);
61343
61344 #endif /* _NET_SECURE_SEQ */
61345diff -urNp linux-2.6.32.45/include/net/sock.h linux-2.6.32.45/include/net/sock.h
61346--- linux-2.6.32.45/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
61347+++ linux-2.6.32.45/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
61348@@ -272,7 +272,7 @@ struct sock {
61349 rwlock_t sk_callback_lock;
61350 int sk_err,
61351 sk_err_soft;
61352- atomic_t sk_drops;
61353+ atomic_unchecked_t sk_drops;
61354 unsigned short sk_ack_backlog;
61355 unsigned short sk_max_ack_backlog;
61356 __u32 sk_priority;
61357diff -urNp linux-2.6.32.45/include/net/tcp.h linux-2.6.32.45/include/net/tcp.h
61358--- linux-2.6.32.45/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
61359+++ linux-2.6.32.45/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
61360@@ -1444,6 +1444,7 @@ enum tcp_seq_states {
61361 struct tcp_seq_afinfo {
61362 char *name;
61363 sa_family_t family;
61364+ /* cannot be const */
61365 struct file_operations seq_fops;
61366 struct seq_operations seq_ops;
61367 };
61368diff -urNp linux-2.6.32.45/include/net/udp.h linux-2.6.32.45/include/net/udp.h
61369--- linux-2.6.32.45/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
61370+++ linux-2.6.32.45/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
61371@@ -187,6 +187,7 @@ struct udp_seq_afinfo {
61372 char *name;
61373 sa_family_t family;
61374 struct udp_table *udp_table;
61375+ /* cannot be const */
61376 struct file_operations seq_fops;
61377 struct seq_operations seq_ops;
61378 };
61379diff -urNp linux-2.6.32.45/include/rdma/iw_cm.h linux-2.6.32.45/include/rdma/iw_cm.h
61380--- linux-2.6.32.45/include/rdma/iw_cm.h 2011-03-27 14:31:47.000000000 -0400
61381+++ linux-2.6.32.45/include/rdma/iw_cm.h 2011-08-05 20:33:55.000000000 -0400
61382@@ -129,7 +129,7 @@ struct iw_cm_verbs {
61383 int backlog);
61384
61385 int (*destroy_listen)(struct iw_cm_id *cm_id);
61386-};
61387+} __no_const;
61388
61389 /**
61390 * iw_create_cm_id - Create an IW CM identifier.
61391diff -urNp linux-2.6.32.45/include/scsi/scsi_device.h linux-2.6.32.45/include/scsi/scsi_device.h
61392--- linux-2.6.32.45/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
61393+++ linux-2.6.32.45/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
61394@@ -156,9 +156,9 @@ struct scsi_device {
61395 unsigned int max_device_blocked; /* what device_blocked counts down from */
61396 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61397
61398- atomic_t iorequest_cnt;
61399- atomic_t iodone_cnt;
61400- atomic_t ioerr_cnt;
61401+ atomic_unchecked_t iorequest_cnt;
61402+ atomic_unchecked_t iodone_cnt;
61403+ atomic_unchecked_t ioerr_cnt;
61404
61405 struct device sdev_gendev,
61406 sdev_dev;
61407diff -urNp linux-2.6.32.45/include/scsi/scsi_transport_fc.h linux-2.6.32.45/include/scsi/scsi_transport_fc.h
61408--- linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-03-27 14:31:47.000000000 -0400
61409+++ linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-08-05 20:33:55.000000000 -0400
61410@@ -663,9 +663,9 @@ struct fc_function_template {
61411 int (*bsg_timeout)(struct fc_bsg_job *);
61412
61413 /* allocation lengths for host-specific data */
61414- u32 dd_fcrport_size;
61415- u32 dd_fcvport_size;
61416- u32 dd_bsg_size;
61417+ const u32 dd_fcrport_size;
61418+ const u32 dd_fcvport_size;
61419+ const u32 dd_bsg_size;
61420
61421 /*
61422 * The driver sets these to tell the transport class it
61423@@ -675,39 +675,39 @@ struct fc_function_template {
61424 */
61425
61426 /* remote port fixed attributes */
61427- unsigned long show_rport_maxframe_size:1;
61428- unsigned long show_rport_supported_classes:1;
61429- unsigned long show_rport_dev_loss_tmo:1;
61430+ const unsigned long show_rport_maxframe_size:1;
61431+ const unsigned long show_rport_supported_classes:1;
61432+ const unsigned long show_rport_dev_loss_tmo:1;
61433
61434 /*
61435 * target dynamic attributes
61436 * These should all be "1" if the driver uses the remote port
61437 * add/delete functions (so attributes reflect rport values).
61438 */
61439- unsigned long show_starget_node_name:1;
61440- unsigned long show_starget_port_name:1;
61441- unsigned long show_starget_port_id:1;
61442+ const unsigned long show_starget_node_name:1;
61443+ const unsigned long show_starget_port_name:1;
61444+ const unsigned long show_starget_port_id:1;
61445
61446 /* host fixed attributes */
61447- unsigned long show_host_node_name:1;
61448- unsigned long show_host_port_name:1;
61449- unsigned long show_host_permanent_port_name:1;
61450- unsigned long show_host_supported_classes:1;
61451- unsigned long show_host_supported_fc4s:1;
61452- unsigned long show_host_supported_speeds:1;
61453- unsigned long show_host_maxframe_size:1;
61454- unsigned long show_host_serial_number:1;
61455+ const unsigned long show_host_node_name:1;
61456+ const unsigned long show_host_port_name:1;
61457+ const unsigned long show_host_permanent_port_name:1;
61458+ const unsigned long show_host_supported_classes:1;
61459+ const unsigned long show_host_supported_fc4s:1;
61460+ const unsigned long show_host_supported_speeds:1;
61461+ const unsigned long show_host_maxframe_size:1;
61462+ const unsigned long show_host_serial_number:1;
61463 /* host dynamic attributes */
61464- unsigned long show_host_port_id:1;
61465- unsigned long show_host_port_type:1;
61466- unsigned long show_host_port_state:1;
61467- unsigned long show_host_active_fc4s:1;
61468- unsigned long show_host_speed:1;
61469- unsigned long show_host_fabric_name:1;
61470- unsigned long show_host_symbolic_name:1;
61471- unsigned long show_host_system_hostname:1;
61472+ const unsigned long show_host_port_id:1;
61473+ const unsigned long show_host_port_type:1;
61474+ const unsigned long show_host_port_state:1;
61475+ const unsigned long show_host_active_fc4s:1;
61476+ const unsigned long show_host_speed:1;
61477+ const unsigned long show_host_fabric_name:1;
61478+ const unsigned long show_host_symbolic_name:1;
61479+ const unsigned long show_host_system_hostname:1;
61480
61481- unsigned long disable_target_scan:1;
61482+ const unsigned long disable_target_scan:1;
61483 };
61484
61485
61486diff -urNp linux-2.6.32.45/include/sound/ac97_codec.h linux-2.6.32.45/include/sound/ac97_codec.h
61487--- linux-2.6.32.45/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
61488+++ linux-2.6.32.45/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
61489@@ -419,15 +419,15 @@
61490 struct snd_ac97;
61491
61492 struct snd_ac97_build_ops {
61493- int (*build_3d) (struct snd_ac97 *ac97);
61494- int (*build_specific) (struct snd_ac97 *ac97);
61495- int (*build_spdif) (struct snd_ac97 *ac97);
61496- int (*build_post_spdif) (struct snd_ac97 *ac97);
61497+ int (* const build_3d) (struct snd_ac97 *ac97);
61498+ int (* const build_specific) (struct snd_ac97 *ac97);
61499+ int (* const build_spdif) (struct snd_ac97 *ac97);
61500+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
61501 #ifdef CONFIG_PM
61502- void (*suspend) (struct snd_ac97 *ac97);
61503- void (*resume) (struct snd_ac97 *ac97);
61504+ void (* const suspend) (struct snd_ac97 *ac97);
61505+ void (* const resume) (struct snd_ac97 *ac97);
61506 #endif
61507- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61508+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61509 };
61510
61511 struct snd_ac97_bus_ops {
61512@@ -477,7 +477,7 @@ struct snd_ac97_template {
61513
61514 struct snd_ac97 {
61515 /* -- lowlevel (hardware) driver specific -- */
61516- struct snd_ac97_build_ops * build_ops;
61517+ const struct snd_ac97_build_ops * build_ops;
61518 void *private_data;
61519 void (*private_free) (struct snd_ac97 *ac97);
61520 /* --- */
61521diff -urNp linux-2.6.32.45/include/sound/ak4xxx-adda.h linux-2.6.32.45/include/sound/ak4xxx-adda.h
61522--- linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-03-27 14:31:47.000000000 -0400
61523+++ linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-08-05 20:33:55.000000000 -0400
61524@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61525 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61526 unsigned char val);
61527 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61528-};
61529+} __no_const;
61530
61531 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61532
61533diff -urNp linux-2.6.32.45/include/sound/hwdep.h linux-2.6.32.45/include/sound/hwdep.h
61534--- linux-2.6.32.45/include/sound/hwdep.h 2011-03-27 14:31:47.000000000 -0400
61535+++ linux-2.6.32.45/include/sound/hwdep.h 2011-08-05 20:33:55.000000000 -0400
61536@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61537 struct snd_hwdep_dsp_status *status);
61538 int (*dsp_load)(struct snd_hwdep *hw,
61539 struct snd_hwdep_dsp_image *image);
61540-};
61541+} __no_const;
61542
61543 struct snd_hwdep {
61544 struct snd_card *card;
61545diff -urNp linux-2.6.32.45/include/sound/info.h linux-2.6.32.45/include/sound/info.h
61546--- linux-2.6.32.45/include/sound/info.h 2011-03-27 14:31:47.000000000 -0400
61547+++ linux-2.6.32.45/include/sound/info.h 2011-08-05 20:33:55.000000000 -0400
61548@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61549 struct snd_info_buffer *buffer);
61550 void (*write)(struct snd_info_entry *entry,
61551 struct snd_info_buffer *buffer);
61552-};
61553+} __no_const;
61554
61555 struct snd_info_entry_ops {
61556 int (*open)(struct snd_info_entry *entry,
61557diff -urNp linux-2.6.32.45/include/sound/sb16_csp.h linux-2.6.32.45/include/sound/sb16_csp.h
61558--- linux-2.6.32.45/include/sound/sb16_csp.h 2011-03-27 14:31:47.000000000 -0400
61559+++ linux-2.6.32.45/include/sound/sb16_csp.h 2011-08-05 20:33:55.000000000 -0400
61560@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
61561 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61562 int (*csp_stop) (struct snd_sb_csp * p);
61563 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61564-};
61565+} __no_const;
61566
61567 /*
61568 * CSP private data
61569diff -urNp linux-2.6.32.45/include/sound/ymfpci.h linux-2.6.32.45/include/sound/ymfpci.h
61570--- linux-2.6.32.45/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
61571+++ linux-2.6.32.45/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
61572@@ -358,7 +358,7 @@ struct snd_ymfpci {
61573 spinlock_t reg_lock;
61574 spinlock_t voice_lock;
61575 wait_queue_head_t interrupt_sleep;
61576- atomic_t interrupt_sleep_count;
61577+ atomic_unchecked_t interrupt_sleep_count;
61578 struct snd_info_entry *proc_entry;
61579 const struct firmware *dsp_microcode;
61580 const struct firmware *controller_microcode;
61581diff -urNp linux-2.6.32.45/include/trace/events/irq.h linux-2.6.32.45/include/trace/events/irq.h
61582--- linux-2.6.32.45/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
61583+++ linux-2.6.32.45/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
61584@@ -34,7 +34,7 @@
61585 */
61586 TRACE_EVENT(irq_handler_entry,
61587
61588- TP_PROTO(int irq, struct irqaction *action),
61589+ TP_PROTO(int irq, const struct irqaction *action),
61590
61591 TP_ARGS(irq, action),
61592
61593@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
61594 */
61595 TRACE_EVENT(irq_handler_exit,
61596
61597- TP_PROTO(int irq, struct irqaction *action, int ret),
61598+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61599
61600 TP_ARGS(irq, action, ret),
61601
61602@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
61603 */
61604 TRACE_EVENT(softirq_entry,
61605
61606- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61607+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61608
61609 TP_ARGS(h, vec),
61610
61611@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
61612 */
61613 TRACE_EVENT(softirq_exit,
61614
61615- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61616+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61617
61618 TP_ARGS(h, vec),
61619
61620diff -urNp linux-2.6.32.45/include/video/uvesafb.h linux-2.6.32.45/include/video/uvesafb.h
61621--- linux-2.6.32.45/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
61622+++ linux-2.6.32.45/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
61623@@ -177,6 +177,7 @@ struct uvesafb_par {
61624 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61625 u8 pmi_setpal; /* PMI for palette changes */
61626 u16 *pmi_base; /* protected mode interface location */
61627+ u8 *pmi_code; /* protected mode code location */
61628 void *pmi_start;
61629 void *pmi_pal;
61630 u8 *vbe_state_orig; /*
61631diff -urNp linux-2.6.32.45/init/do_mounts.c linux-2.6.32.45/init/do_mounts.c
61632--- linux-2.6.32.45/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
61633+++ linux-2.6.32.45/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
61634@@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
61635
61636 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61637 {
61638- int err = sys_mount(name, "/root", fs, flags, data);
61639+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
61640 if (err)
61641 return err;
61642
61643- sys_chdir("/root");
61644+ sys_chdir((__force const char __user *)"/root");
61645 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61646 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61647 current->fs->pwd.mnt->mnt_sb->s_type->name,
61648@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
61649 va_start(args, fmt);
61650 vsprintf(buf, fmt, args);
61651 va_end(args);
61652- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61653+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61654 if (fd >= 0) {
61655 sys_ioctl(fd, FDEJECT, 0);
61656 sys_close(fd);
61657 }
61658 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61659- fd = sys_open("/dev/console", O_RDWR, 0);
61660+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
61661 if (fd >= 0) {
61662 sys_ioctl(fd, TCGETS, (long)&termios);
61663 termios.c_lflag &= ~ICANON;
61664 sys_ioctl(fd, TCSETSF, (long)&termios);
61665- sys_read(fd, &c, 1);
61666+ sys_read(fd, (char __user *)&c, 1);
61667 termios.c_lflag |= ICANON;
61668 sys_ioctl(fd, TCSETSF, (long)&termios);
61669 sys_close(fd);
61670@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
61671 mount_root();
61672 out:
61673 devtmpfs_mount("dev");
61674- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61675- sys_chroot(".");
61676+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61677+ sys_chroot((__force char __user *)".");
61678 }
61679diff -urNp linux-2.6.32.45/init/do_mounts.h linux-2.6.32.45/init/do_mounts.h
61680--- linux-2.6.32.45/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
61681+++ linux-2.6.32.45/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
61682@@ -15,15 +15,15 @@ extern int root_mountflags;
61683
61684 static inline int create_dev(char *name, dev_t dev)
61685 {
61686- sys_unlink(name);
61687- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61688+ sys_unlink((__force char __user *)name);
61689+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
61690 }
61691
61692 #if BITS_PER_LONG == 32
61693 static inline u32 bstat(char *name)
61694 {
61695 struct stat64 stat;
61696- if (sys_stat64(name, &stat) != 0)
61697+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
61698 return 0;
61699 if (!S_ISBLK(stat.st_mode))
61700 return 0;
61701diff -urNp linux-2.6.32.45/init/do_mounts_initrd.c linux-2.6.32.45/init/do_mounts_initrd.c
61702--- linux-2.6.32.45/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
61703+++ linux-2.6.32.45/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
61704@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
61705 sys_close(old_fd);sys_close(root_fd);
61706 sys_close(0);sys_close(1);sys_close(2);
61707 sys_setsid();
61708- (void) sys_open("/dev/console",O_RDWR,0);
61709+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
61710 (void) sys_dup(0);
61711 (void) sys_dup(0);
61712 return kernel_execve(shell, argv, envp_init);
61713@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
61714 create_dev("/dev/root.old", Root_RAM0);
61715 /* mount initrd on rootfs' /root */
61716 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61717- sys_mkdir("/old", 0700);
61718- root_fd = sys_open("/", 0, 0);
61719- old_fd = sys_open("/old", 0, 0);
61720+ sys_mkdir((__force const char __user *)"/old", 0700);
61721+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
61722+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
61723 /* move initrd over / and chdir/chroot in initrd root */
61724- sys_chdir("/root");
61725- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61726- sys_chroot(".");
61727+ sys_chdir((__force const char __user *)"/root");
61728+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61729+ sys_chroot((__force const char __user *)".");
61730
61731 /*
61732 * In case that a resume from disk is carried out by linuxrc or one of
61733@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
61734
61735 /* move initrd to rootfs' /old */
61736 sys_fchdir(old_fd);
61737- sys_mount("/", ".", NULL, MS_MOVE, NULL);
61738+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
61739 /* switch root and cwd back to / of rootfs */
61740 sys_fchdir(root_fd);
61741- sys_chroot(".");
61742+ sys_chroot((__force const char __user *)".");
61743 sys_close(old_fd);
61744 sys_close(root_fd);
61745
61746 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61747- sys_chdir("/old");
61748+ sys_chdir((__force const char __user *)"/old");
61749 return;
61750 }
61751
61752@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
61753 mount_root();
61754
61755 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61756- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61757+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
61758 if (!error)
61759 printk("okay\n");
61760 else {
61761- int fd = sys_open("/dev/root.old", O_RDWR, 0);
61762+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
61763 if (error == -ENOENT)
61764 printk("/initrd does not exist. Ignored.\n");
61765 else
61766 printk("failed\n");
61767 printk(KERN_NOTICE "Unmounting old root\n");
61768- sys_umount("/old", MNT_DETACH);
61769+ sys_umount((__force char __user *)"/old", MNT_DETACH);
61770 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61771 if (fd < 0) {
61772 error = fd;
61773@@ -119,11 +119,11 @@ int __init initrd_load(void)
61774 * mounted in the normal path.
61775 */
61776 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61777- sys_unlink("/initrd.image");
61778+ sys_unlink((__force const char __user *)"/initrd.image");
61779 handle_initrd();
61780 return 1;
61781 }
61782 }
61783- sys_unlink("/initrd.image");
61784+ sys_unlink((__force const char __user *)"/initrd.image");
61785 return 0;
61786 }
61787diff -urNp linux-2.6.32.45/init/do_mounts_md.c linux-2.6.32.45/init/do_mounts_md.c
61788--- linux-2.6.32.45/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
61789+++ linux-2.6.32.45/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
61790@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61791 partitioned ? "_d" : "", minor,
61792 md_setup_args[ent].device_names);
61793
61794- fd = sys_open(name, 0, 0);
61795+ fd = sys_open((__force char __user *)name, 0, 0);
61796 if (fd < 0) {
61797 printk(KERN_ERR "md: open failed - cannot start "
61798 "array %s\n", name);
61799@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61800 * array without it
61801 */
61802 sys_close(fd);
61803- fd = sys_open(name, 0, 0);
61804+ fd = sys_open((__force char __user *)name, 0, 0);
61805 sys_ioctl(fd, BLKRRPART, 0);
61806 }
61807 sys_close(fd);
61808@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61809
61810 wait_for_device_probe();
61811
61812- fd = sys_open("/dev/md0", 0, 0);
61813+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
61814 if (fd >= 0) {
61815 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61816 sys_close(fd);
61817diff -urNp linux-2.6.32.45/init/initramfs.c linux-2.6.32.45/init/initramfs.c
61818--- linux-2.6.32.45/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
61819+++ linux-2.6.32.45/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
61820@@ -74,7 +74,7 @@ static void __init free_hash(void)
61821 }
61822 }
61823
61824-static long __init do_utime(char __user *filename, time_t mtime)
61825+static long __init do_utime(__force char __user *filename, time_t mtime)
61826 {
61827 struct timespec t[2];
61828
61829@@ -109,7 +109,7 @@ static void __init dir_utime(void)
61830 struct dir_entry *de, *tmp;
61831 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61832 list_del(&de->list);
61833- do_utime(de->name, de->mtime);
61834+ do_utime((__force char __user *)de->name, de->mtime);
61835 kfree(de->name);
61836 kfree(de);
61837 }
61838@@ -271,7 +271,7 @@ static int __init maybe_link(void)
61839 if (nlink >= 2) {
61840 char *old = find_link(major, minor, ino, mode, collected);
61841 if (old)
61842- return (sys_link(old, collected) < 0) ? -1 : 1;
61843+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
61844 }
61845 return 0;
61846 }
61847@@ -280,11 +280,11 @@ static void __init clean_path(char *path
61848 {
61849 struct stat st;
61850
61851- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61852+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
61853 if (S_ISDIR(st.st_mode))
61854- sys_rmdir(path);
61855+ sys_rmdir((__force char __user *)path);
61856 else
61857- sys_unlink(path);
61858+ sys_unlink((__force char __user *)path);
61859 }
61860 }
61861
61862@@ -305,7 +305,7 @@ static int __init do_name(void)
61863 int openflags = O_WRONLY|O_CREAT;
61864 if (ml != 1)
61865 openflags |= O_TRUNC;
61866- wfd = sys_open(collected, openflags, mode);
61867+ wfd = sys_open((__force char __user *)collected, openflags, mode);
61868
61869 if (wfd >= 0) {
61870 sys_fchown(wfd, uid, gid);
61871@@ -317,17 +317,17 @@ static int __init do_name(void)
61872 }
61873 }
61874 } else if (S_ISDIR(mode)) {
61875- sys_mkdir(collected, mode);
61876- sys_chown(collected, uid, gid);
61877- sys_chmod(collected, mode);
61878+ sys_mkdir((__force char __user *)collected, mode);
61879+ sys_chown((__force char __user *)collected, uid, gid);
61880+ sys_chmod((__force char __user *)collected, mode);
61881 dir_add(collected, mtime);
61882 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61883 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61884 if (maybe_link() == 0) {
61885- sys_mknod(collected, mode, rdev);
61886- sys_chown(collected, uid, gid);
61887- sys_chmod(collected, mode);
61888- do_utime(collected, mtime);
61889+ sys_mknod((__force char __user *)collected, mode, rdev);
61890+ sys_chown((__force char __user *)collected, uid, gid);
61891+ sys_chmod((__force char __user *)collected, mode);
61892+ do_utime((__force char __user *)collected, mtime);
61893 }
61894 }
61895 return 0;
61896@@ -336,15 +336,15 @@ static int __init do_name(void)
61897 static int __init do_copy(void)
61898 {
61899 if (count >= body_len) {
61900- sys_write(wfd, victim, body_len);
61901+ sys_write(wfd, (__force char __user *)victim, body_len);
61902 sys_close(wfd);
61903- do_utime(vcollected, mtime);
61904+ do_utime((__force char __user *)vcollected, mtime);
61905 kfree(vcollected);
61906 eat(body_len);
61907 state = SkipIt;
61908 return 0;
61909 } else {
61910- sys_write(wfd, victim, count);
61911+ sys_write(wfd, (__force char __user *)victim, count);
61912 body_len -= count;
61913 eat(count);
61914 return 1;
61915@@ -355,9 +355,9 @@ static int __init do_symlink(void)
61916 {
61917 collected[N_ALIGN(name_len) + body_len] = '\0';
61918 clean_path(collected, 0);
61919- sys_symlink(collected + N_ALIGN(name_len), collected);
61920- sys_lchown(collected, uid, gid);
61921- do_utime(collected, mtime);
61922+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
61923+ sys_lchown((__force char __user *)collected, uid, gid);
61924+ do_utime((__force char __user *)collected, mtime);
61925 state = SkipIt;
61926 next_state = Reset;
61927 return 0;
61928diff -urNp linux-2.6.32.45/init/Kconfig linux-2.6.32.45/init/Kconfig
61929--- linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
61930+++ linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
61931@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
61932
61933 config COMPAT_BRK
61934 bool "Disable heap randomization"
61935- default y
61936+ default n
61937 help
61938 Randomizing heap placement makes heap exploits harder, but it
61939 also breaks ancient binaries (including anything libc5 based).
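
The Kconfig hunk simply flips the COMPAT_BRK default from y to n, so heap (brk) randomization is on unless the user explicitly opts out for libc5-era binaries. One hedged way to observe the effect from userspace, assuming kernel.randomize_va_space is left at its usual default of 2, is to print the initial program break across several runs:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	/* With brk randomization active (COMPAT_BRK=n) this address should
    	 * differ between runs; with COMPAT_BRK=y it is typically stable for
    	 * a given (non-PIE) binary. */
    	printf("initial program break: %p\n", sbrk(0));
    	return 0;
    }
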
61940diff -urNp linux-2.6.32.45/init/main.c linux-2.6.32.45/init/main.c
61941--- linux-2.6.32.45/init/main.c 2011-05-10 22:12:01.000000000 -0400
61942+++ linux-2.6.32.45/init/main.c 2011-08-05 20:33:55.000000000 -0400
61943@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
61944 #ifdef CONFIG_TC
61945 extern void tc_init(void);
61946 #endif
61947+extern void grsecurity_init(void);
61948
61949 enum system_states system_state __read_mostly;
61950 EXPORT_SYMBOL(system_state);
61951@@ -183,6 +184,49 @@ static int __init set_reset_devices(char
61952
61953 __setup("reset_devices", set_reset_devices);
61954
61955+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61956+extern char pax_enter_kernel_user[];
61957+extern char pax_exit_kernel_user[];
61958+extern pgdval_t clone_pgd_mask;
61959+#endif
61960+
61961+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61962+static int __init setup_pax_nouderef(char *str)
61963+{
61964+#ifdef CONFIG_X86_32
61965+ unsigned int cpu;
61966+ struct desc_struct *gdt;
61967+
61968+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
61969+ gdt = get_cpu_gdt_table(cpu);
61970+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61971+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61972+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61973+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61974+ }
61975+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61976+#else
61977+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61978+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61979+ clone_pgd_mask = ~(pgdval_t)0UL;
61980+#endif
61981+
61982+ return 0;
61983+}
61984+early_param("pax_nouderef", setup_pax_nouderef);
61985+#endif
61986+
61987+#ifdef CONFIG_PAX_SOFTMODE
61988+int pax_softmode;
61989+
61990+static int __init setup_pax_softmode(char *str)
61991+{
61992+ get_option(&str, &pax_softmode);
61993+ return 1;
61994+}
61995+__setup("pax_softmode=", setup_pax_softmode);
61996+#endif
61997+
61998 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61999 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62000 static const char *panic_later, *panic_param;
62001@@ -705,52 +749,53 @@ int initcall_debug;
62002 core_param(initcall_debug, initcall_debug, bool, 0644);
62003
62004 static char msgbuf[64];
62005-static struct boot_trace_call call;
62006-static struct boot_trace_ret ret;
62007+static struct boot_trace_call trace_call;
62008+static struct boot_trace_ret trace_ret;
62009
62010 int do_one_initcall(initcall_t fn)
62011 {
62012 int count = preempt_count();
62013 ktime_t calltime, delta, rettime;
62014+ const char *msg1 = "", *msg2 = "";
62015
62016 if (initcall_debug) {
62017- call.caller = task_pid_nr(current);
62018- printk("calling %pF @ %i\n", fn, call.caller);
62019+ trace_call.caller = task_pid_nr(current);
62020+ printk("calling %pF @ %i\n", fn, trace_call.caller);
62021 calltime = ktime_get();
62022- trace_boot_call(&call, fn);
62023+ trace_boot_call(&trace_call, fn);
62024 enable_boot_trace();
62025 }
62026
62027- ret.result = fn();
62028+ trace_ret.result = fn();
62029
62030 if (initcall_debug) {
62031 disable_boot_trace();
62032 rettime = ktime_get();
62033 delta = ktime_sub(rettime, calltime);
62034- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62035- trace_boot_ret(&ret, fn);
62036+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62037+ trace_boot_ret(&trace_ret, fn);
62038 printk("initcall %pF returned %d after %Ld usecs\n", fn,
62039- ret.result, ret.duration);
62040+ trace_ret.result, trace_ret.duration);
62041 }
62042
62043 msgbuf[0] = 0;
62044
62045- if (ret.result && ret.result != -ENODEV && initcall_debug)
62046- sprintf(msgbuf, "error code %d ", ret.result);
62047+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
62048+ sprintf(msgbuf, "error code %d ", trace_ret.result);
62049
62050 if (preempt_count() != count) {
62051- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62052+ msg1 = " preemption imbalance";
62053 preempt_count() = count;
62054 }
62055 if (irqs_disabled()) {
62056- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62057+ msg2 = " disabled interrupts";
62058 local_irq_enable();
62059 }
62060- if (msgbuf[0]) {
62061- printk("initcall %pF returned with %s\n", fn, msgbuf);
62062+ if (msgbuf[0] || *msg1 || *msg2) {
62063+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62064 }
62065
62066- return ret.result;
62067+ return trace_ret.result;
62068 }
62069
62070
62071@@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
62072 if (!ramdisk_execute_command)
62073 ramdisk_execute_command = "/init";
62074
62075- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62076+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
62077 ramdisk_execute_command = NULL;
62078 prepare_namespace();
62079 }
62080
62081+ grsecurity_init();
62082+
62083 /*
62084 * Ok, we have completed the initial bootup, and
62085 * we're essentially up and running. Get rid of the
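
Besides the grsecurity_init() hook and the boot-trace struct renames, the init/main.c hunk registers two boot-line switches: pax_nouderef via early_param() and pax_softmode= via __setup(). Both follow the usual kernel pattern of a tiny parser that receives only the text after the option name. The section-placement half of those macros cannot be reproduced outside the kernel, but the parsing half is ordinary C; a hedged, userspace-only sketch, with an invented command line and helper names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int pax_softmode;

    /* Mimics the __setup("pax_softmode=", setup_pax_softmode) handler:
     * it receives only the text after the '=' and stores the integer. */
    static int setup_pax_softmode(const char *str)
    {
    	pax_softmode = atoi(str);
    	return 1;
    }

    int main(void)
    {
    	/* Pretend the kernel command line contained "pax_softmode=1". */
    	const char *cmdline = "ro quiet pax_softmode=1";
    	const char *p = strstr(cmdline, "pax_softmode=");

    	if (p)
    		setup_pax_softmode(p + strlen("pax_softmode="));
    	printf("pax_softmode = %d\n", pax_softmode);
    	return 0;
    }
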
62086diff -urNp linux-2.6.32.45/init/noinitramfs.c linux-2.6.32.45/init/noinitramfs.c
62087--- linux-2.6.32.45/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
62088+++ linux-2.6.32.45/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
62089@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
62090 {
62091 int err;
62092
62093- err = sys_mkdir("/dev", 0755);
62094+ err = sys_mkdir((const char __user *)"/dev", 0755);
62095 if (err < 0)
62096 goto out;
62097
62098@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
62099 if (err < 0)
62100 goto out;
62101
62102- err = sys_mkdir("/root", 0700);
62103+ err = sys_mkdir((const char __user *)"/root", 0700);
62104 if (err < 0)
62105 goto out;
62106
62107diff -urNp linux-2.6.32.45/ipc/mqueue.c linux-2.6.32.45/ipc/mqueue.c
62108--- linux-2.6.32.45/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
62109+++ linux-2.6.32.45/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
62110@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
62111 mq_bytes = (mq_msg_tblsz +
62112 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62113
62114+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62115 spin_lock(&mq_lock);
62116 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62117 u->mq_bytes + mq_bytes >
62118diff -urNp linux-2.6.32.45/ipc/msg.c linux-2.6.32.45/ipc/msg.c
62119--- linux-2.6.32.45/ipc/msg.c 2011-03-27 14:31:47.000000000 -0400
62120+++ linux-2.6.32.45/ipc/msg.c 2011-08-05 20:33:55.000000000 -0400
62121@@ -310,18 +310,19 @@ static inline int msg_security(struct ke
62122 return security_msg_queue_associate(msq, msgflg);
62123 }
62124
62125+static struct ipc_ops msg_ops = {
62126+ .getnew = newque,
62127+ .associate = msg_security,
62128+ .more_checks = NULL
62129+};
62130+
62131 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62132 {
62133 struct ipc_namespace *ns;
62134- struct ipc_ops msg_ops;
62135 struct ipc_params msg_params;
62136
62137 ns = current->nsproxy->ipc_ns;
62138
62139- msg_ops.getnew = newque;
62140- msg_ops.associate = msg_security;
62141- msg_ops.more_checks = NULL;
62142-
62143 msg_params.key = key;
62144 msg_params.flg = msgflg;
62145
62146diff -urNp linux-2.6.32.45/ipc/sem.c linux-2.6.32.45/ipc/sem.c
62147--- linux-2.6.32.45/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
62148+++ linux-2.6.32.45/ipc/sem.c 2011-08-05 20:33:55.000000000 -0400
62149@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct
62150 return 0;
62151 }
62152
62153+static struct ipc_ops sem_ops = {
62154+ .getnew = newary,
62155+ .associate = sem_security,
62156+ .more_checks = sem_more_checks
62157+};
62158+
62159 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62160 {
62161 struct ipc_namespace *ns;
62162- struct ipc_ops sem_ops;
62163 struct ipc_params sem_params;
62164
62165 ns = current->nsproxy->ipc_ns;
62166@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62167 if (nsems < 0 || nsems > ns->sc_semmsl)
62168 return -EINVAL;
62169
62170- sem_ops.getnew = newary;
62171- sem_ops.associate = sem_security;
62172- sem_ops.more_checks = sem_more_checks;
62173-
62174 sem_params.key = key;
62175 sem_params.flg = semflg;
62176 sem_params.u.nsems = nsems;
62177@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namesp
62178 ushort* sem_io = fast_sem_io;
62179 int nsems;
62180
62181+ pax_track_stack();
62182+
62183 sma = sem_lock_check(ns, semid);
62184 if (IS_ERR(sma))
62185 return PTR_ERR(sma);
62186@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62187 unsigned long jiffies_left = 0;
62188 struct ipc_namespace *ns;
62189
62190+ pax_track_stack();
62191+
62192 ns = current->nsproxy->ipc_ns;
62193
62194 if (nsops < 1 || semid < 0)
62195diff -urNp linux-2.6.32.45/ipc/shm.c linux-2.6.32.45/ipc/shm.c
62196--- linux-2.6.32.45/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
62197+++ linux-2.6.32.45/ipc/shm.c 2011-08-05 20:33:55.000000000 -0400
62198@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
62199 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62200 #endif
62201
62202+#ifdef CONFIG_GRKERNSEC
62203+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62204+ const time_t shm_createtime, const uid_t cuid,
62205+ const int shmid);
62206+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62207+ const time_t shm_createtime);
62208+#endif
62209+
62210 void shm_init_ns(struct ipc_namespace *ns)
62211 {
62212 ns->shm_ctlmax = SHMMAX;
62213@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
62214 shp->shm_lprid = 0;
62215 shp->shm_atim = shp->shm_dtim = 0;
62216 shp->shm_ctim = get_seconds();
62217+#ifdef CONFIG_GRKERNSEC
62218+ {
62219+ struct timespec timeval;
62220+ do_posix_clock_monotonic_gettime(&timeval);
62221+
62222+ shp->shm_createtime = timeval.tv_sec;
62223+ }
62224+#endif
62225 shp->shm_segsz = size;
62226 shp->shm_nattch = 0;
62227 shp->shm_file = file;
62228@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct
62229 return 0;
62230 }
62231
62232+static struct ipc_ops shm_ops = {
62233+ .getnew = newseg,
62234+ .associate = shm_security,
62235+ .more_checks = shm_more_checks
62236+};
62237+
62238 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62239 {
62240 struct ipc_namespace *ns;
62241- struct ipc_ops shm_ops;
62242 struct ipc_params shm_params;
62243
62244 ns = current->nsproxy->ipc_ns;
62245
62246- shm_ops.getnew = newseg;
62247- shm_ops.associate = shm_security;
62248- shm_ops.more_checks = shm_more_checks;
62249-
62250 shm_params.key = key;
62251 shm_params.flg = shmflg;
62252 shm_params.u.size = size;
62253@@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *sh
62254 if (err)
62255 goto out_unlock;
62256
62257+#ifdef CONFIG_GRKERNSEC
62258+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62259+ shp->shm_perm.cuid, shmid) ||
62260+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62261+ err = -EACCES;
62262+ goto out_unlock;
62263+ }
62264+#endif
62265+
62266 path.dentry = dget(shp->shm_file->f_path.dentry);
62267 path.mnt = shp->shm_file->f_path.mnt;
62268 shp->shm_nattch++;
62269+#ifdef CONFIG_GRKERNSEC
62270+ shp->shm_lapid = current->pid;
62271+#endif
62272 size = i_size_read(path.dentry->d_inode);
62273 shm_unlock(shp);
62274
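
The three sysvipc hunks above (ipc/msg.c, ipc/sem.c, ipc/shm.c) apply one repeated transformation: the struct ipc_ops that each *get() syscall used to assemble on its own stack is replaced by a single file-scope instance with designated initializers, so the callback table is built once at compile time instead of on every call. A reduced standalone illustration of the before/after shape; the struct and callbacks are made up:

    #include <stdio.h>

    struct ops {
    	int (*getnew)(int);
    	int (*associate)(int);
    	int (*more_checks)(int);
    };

    static int getnew_impl(int x)    { return x + 1; }
    static int associate_impl(int x) { return x * 2; }

    /* After: one file-scope table, filled in by the compiler. */
    static struct ops msg_ops = {
    	.getnew      = getnew_impl,
    	.associate   = associate_impl,
    	.more_checks = NULL,
    };

    int main(void)
    {
    	/* Before: the same struct was declared inside the syscall and each
    	 * member assigned by hand just before use, costing stack space on
    	 * every call. */
    	printf("%d %d\n", msg_ops.getnew(1), msg_ops.associate(3));
    	return 0;
    }
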
62275diff -urNp linux-2.6.32.45/kernel/acct.c linux-2.6.32.45/kernel/acct.c
62276--- linux-2.6.32.45/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
62277+++ linux-2.6.32.45/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
62278@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
62279 */
62280 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62281 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62282- file->f_op->write(file, (char *)&ac,
62283+ file->f_op->write(file, (__force char __user *)&ac,
62284 sizeof(acct_t), &file->f_pos);
62285 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62286 set_fs(fs);
62287diff -urNp linux-2.6.32.45/kernel/audit.c linux-2.6.32.45/kernel/audit.c
62288--- linux-2.6.32.45/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
62289+++ linux-2.6.32.45/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
62290@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
62291 3) suppressed due to audit_rate_limit
62292 4) suppressed due to audit_backlog_limit
62293 */
62294-static atomic_t audit_lost = ATOMIC_INIT(0);
62295+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62296
62297 /* The netlink socket. */
62298 static struct sock *audit_sock;
62299@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
62300 unsigned long now;
62301 int print;
62302
62303- atomic_inc(&audit_lost);
62304+ atomic_inc_unchecked(&audit_lost);
62305
62306 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62307
62308@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
62309 printk(KERN_WARNING
62310 "audit: audit_lost=%d audit_rate_limit=%d "
62311 "audit_backlog_limit=%d\n",
62312- atomic_read(&audit_lost),
62313+ atomic_read_unchecked(&audit_lost),
62314 audit_rate_limit,
62315 audit_backlog_limit);
62316 audit_panic(message);
62317@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
62318 status_set.pid = audit_pid;
62319 status_set.rate_limit = audit_rate_limit;
62320 status_set.backlog_limit = audit_backlog_limit;
62321- status_set.lost = atomic_read(&audit_lost);
62322+ status_set.lost = atomic_read_unchecked(&audit_lost);
62323 status_set.backlog = skb_queue_len(&audit_skb_queue);
62324 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62325 &status_set, sizeof(status_set));
62326@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
62327 spin_unlock_irq(&tsk->sighand->siglock);
62328 }
62329 read_unlock(&tasklist_lock);
62330- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
62331- &s, sizeof(s));
62332+
62333+ if (!err)
62334+ audit_send_reply(NETLINK_CB(skb).pid, seq,
62335+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
62336 break;
62337 }
62338 case AUDIT_TTY_SET: {
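
This audit.c hunk and several later ones (auditsc.c, kgdb.c, the lockdep files) share another recurring pattern: counters and flags that are not reference counts (audit_lost, session_id, the lockdep event counters) move from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening the plain atomic operations detect signed overflow to catch reference-count wraps, so values that are allowed to wrap must use the unchecked variants. A rough userspace analogy of that distinction, using GCC/Clang overflow builtins rather than the kernel's implementation:

    #include <stdio.h>
    #include <limits.h>

    static int checked_inc(int *v)
    {
    	int out;

    	if (__builtin_add_overflow(*v, 1, &out)) {
    		/* The hardened kernel would report this instead of
    		 * letting the count wrap. */
    		fprintf(stderr, "refcount overflow detected\n");
    		return -1;
    	}
    	*v = out;
    	return 0;
    }

    static void unchecked_inc(unsigned int *v)
    {
    	(*v)++;			/* statistics may simply wrap */
    }

    int main(void)
    {
    	int ref = INT_MAX;
    	unsigned int stat = UINT_MAX;

    	checked_inc(&ref);	/* reports the would-be wrap */
    	unchecked_inc(&stat);	/* silently wraps to 0 */
    	printf("stat wrapped to %u\n", stat);
    	return 0;
    }
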
62339diff -urNp linux-2.6.32.45/kernel/auditsc.c linux-2.6.32.45/kernel/auditsc.c
62340--- linux-2.6.32.45/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
62341+++ linux-2.6.32.45/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
62342@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
62343 }
62344
62345 /* global counter which is incremented every time something logs in */
62346-static atomic_t session_id = ATOMIC_INIT(0);
62347+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62348
62349 /**
62350 * audit_set_loginuid - set a task's audit_context loginuid
62351@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
62352 */
62353 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62354 {
62355- unsigned int sessionid = atomic_inc_return(&session_id);
62356+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62357 struct audit_context *context = task->audit_context;
62358
62359 if (context && context->in_syscall) {
62360diff -urNp linux-2.6.32.45/kernel/capability.c linux-2.6.32.45/kernel/capability.c
62361--- linux-2.6.32.45/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
62362+++ linux-2.6.32.45/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
62363@@ -305,10 +305,26 @@ int capable(int cap)
62364 BUG();
62365 }
62366
62367- if (security_capable(cap) == 0) {
62368+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
62369 current->flags |= PF_SUPERPRIV;
62370 return 1;
62371 }
62372 return 0;
62373 }
62374+
62375+int capable_nolog(int cap)
62376+{
62377+ if (unlikely(!cap_valid(cap))) {
62378+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62379+ BUG();
62380+ }
62381+
62382+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
62383+ current->flags |= PF_SUPERPRIV;
62384+ return 1;
62385+ }
62386+ return 0;
62387+}
62388+
62389 EXPORT_SYMBOL(capable);
62390+EXPORT_SYMBOL(capable_nolog);
62391diff -urNp linux-2.6.32.45/kernel/cgroup.c linux-2.6.32.45/kernel/cgroup.c
62392--- linux-2.6.32.45/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
62393+++ linux-2.6.32.45/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
62394@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
62395 struct hlist_head *hhead;
62396 struct cg_cgroup_link *link;
62397
62398+ pax_track_stack();
62399+
62400 /* First see if we already have a cgroup group that matches
62401 * the desired set */
62402 read_lock(&css_set_lock);
62403diff -urNp linux-2.6.32.45/kernel/configs.c linux-2.6.32.45/kernel/configs.c
62404--- linux-2.6.32.45/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
62405+++ linux-2.6.32.45/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
62406@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
62407 struct proc_dir_entry *entry;
62408
62409 /* create the current config file */
62410+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62411+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62412+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62413+ &ikconfig_file_ops);
62414+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62415+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62416+ &ikconfig_file_ops);
62417+#endif
62418+#else
62419 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62420 &ikconfig_file_ops);
62421+#endif
62422+
62423 if (!entry)
62424 return -ENOMEM;
62425
62426diff -urNp linux-2.6.32.45/kernel/cpu.c linux-2.6.32.45/kernel/cpu.c
62427--- linux-2.6.32.45/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
62428+++ linux-2.6.32.45/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
62429@@ -19,7 +19,7 @@
62430 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
62431 static DEFINE_MUTEX(cpu_add_remove_lock);
62432
62433-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
62434+static RAW_NOTIFIER_HEAD(cpu_chain);
62435
62436 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
62437 * Should always be manipulated under cpu_add_remove_lock
62438diff -urNp linux-2.6.32.45/kernel/cred.c linux-2.6.32.45/kernel/cred.c
62439--- linux-2.6.32.45/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
62440+++ linux-2.6.32.45/kernel/cred.c 2011-08-11 19:49:38.000000000 -0400
62441@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
62442 */
62443 void __put_cred(struct cred *cred)
62444 {
62445+ pax_track_stack();
62446+
62447 kdebug("__put_cred(%p{%d,%d})", cred,
62448 atomic_read(&cred->usage),
62449 read_cred_subscribers(cred));
62450@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
62451 {
62452 struct cred *cred;
62453
62454+ pax_track_stack();
62455+
62456 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62457 atomic_read(&tsk->cred->usage),
62458 read_cred_subscribers(tsk->cred));
62459@@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
62460 {
62461 const struct cred *cred;
62462
62463+ pax_track_stack();
62464+
62465 rcu_read_lock();
62466
62467 do {
62468@@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
62469 {
62470 struct cred *new;
62471
62472+ pax_track_stack();
62473+
62474 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62475 if (!new)
62476 return NULL;
62477@@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
62478 const struct cred *old;
62479 struct cred *new;
62480
62481+ pax_track_stack();
62482+
62483 validate_process_creds();
62484
62485 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62486@@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
62487 struct thread_group_cred *tgcred = NULL;
62488 struct cred *new;
62489
62490+ pax_track_stack();
62491+
62492 #ifdef CONFIG_KEYS
62493 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62494 if (!tgcred)
62495@@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
62496 struct cred *new;
62497 int ret;
62498
62499+ pax_track_stack();
62500+
62501 mutex_init(&p->cred_guard_mutex);
62502
62503 if (
62504@@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
62505 struct task_struct *task = current;
62506 const struct cred *old = task->real_cred;
62507
62508+ pax_track_stack();
62509+
62510 kdebug("commit_creds(%p{%d,%d})", new,
62511 atomic_read(&new->usage),
62512 read_cred_subscribers(new));
62513@@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
62514
62515 get_cred(new); /* we will require a ref for the subj creds too */
62516
62517+ gr_set_role_label(task, new->uid, new->gid);
62518+
62519 /* dumpability changes */
62520 if (old->euid != new->euid ||
62521 old->egid != new->egid ||
62522@@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
62523 key_fsgid_changed(task);
62524
62525 /* do it
62526- * - What if a process setreuid()'s and this brings the
62527- * new uid over his NPROC rlimit? We can check this now
62528- * cheaply with the new uid cache, so if it matters
62529- * we should be checking for it. -DaveM
62530+ * RLIMIT_NPROC limits on user->processes have already been checked
62531+ * in set_user().
62532 */
62533 alter_cred_subscribers(new, 2);
62534 if (new->user != old->user)
62535@@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
62536 */
62537 void abort_creds(struct cred *new)
62538 {
62539+ pax_track_stack();
62540+
62541 kdebug("abort_creds(%p{%d,%d})", new,
62542 atomic_read(&new->usage),
62543 read_cred_subscribers(new));
62544@@ -629,6 +647,8 @@ const struct cred *override_creds(const
62545 {
62546 const struct cred *old = current->cred;
62547
62548+ pax_track_stack();
62549+
62550 kdebug("override_creds(%p{%d,%d})", new,
62551 atomic_read(&new->usage),
62552 read_cred_subscribers(new));
62553@@ -658,6 +678,8 @@ void revert_creds(const struct cred *old
62554 {
62555 const struct cred *override = current->cred;
62556
62557+ pax_track_stack();
62558+
62559 kdebug("revert_creds(%p{%d,%d})", old,
62560 atomic_read(&old->usage),
62561 read_cred_subscribers(old));
62562@@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct
62563 const struct cred *old;
62564 struct cred *new;
62565
62566+ pax_track_stack();
62567+
62568 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62569 if (!new)
62570 return NULL;
62571@@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62572 */
62573 int set_security_override(struct cred *new, u32 secid)
62574 {
62575+ pax_track_stack();
62576+
62577 return security_kernel_act_as(new, secid);
62578 }
62579 EXPORT_SYMBOL(set_security_override);
62580@@ -777,6 +803,8 @@ int set_security_override_from_ctx(struc
62581 u32 secid;
62582 int ret;
62583
62584+ pax_track_stack();
62585+
62586 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62587 if (ret < 0)
62588 return ret;
62589diff -urNp linux-2.6.32.45/kernel/exit.c linux-2.6.32.45/kernel/exit.c
62590--- linux-2.6.32.45/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
62591+++ linux-2.6.32.45/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
62592@@ -55,6 +55,10 @@
62593 #include <asm/pgtable.h>
62594 #include <asm/mmu_context.h>
62595
62596+#ifdef CONFIG_GRKERNSEC
62597+extern rwlock_t grsec_exec_file_lock;
62598+#endif
62599+
62600 static void exit_mm(struct task_struct * tsk);
62601
62602 static void __unhash_process(struct task_struct *p)
62603@@ -174,6 +178,8 @@ void release_task(struct task_struct * p
62604 struct task_struct *leader;
62605 int zap_leader;
62606 repeat:
62607+ gr_del_task_from_ip_table(p);
62608+
62609 tracehook_prepare_release_task(p);
62610 /* don't need to get the RCU readlock here - the process is dead and
62611 * can't be modifying its own credentials */
62612@@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
62613 {
62614 write_lock_irq(&tasklist_lock);
62615
62616+#ifdef CONFIG_GRKERNSEC
62617+ write_lock(&grsec_exec_file_lock);
62618+ if (current->exec_file) {
62619+ fput(current->exec_file);
62620+ current->exec_file = NULL;
62621+ }
62622+ write_unlock(&grsec_exec_file_lock);
62623+#endif
62624+
62625 ptrace_unlink(current);
62626 /* Reparent to init */
62627 current->real_parent = current->parent = kthreadd_task;
62628 list_move_tail(&current->sibling, &current->real_parent->children);
62629
62630+ gr_set_kernel_label(current);
62631+
62632 /* Set the exit signal to SIGCHLD so we signal init on exit */
62633 current->exit_signal = SIGCHLD;
62634
62635@@ -397,7 +414,7 @@ int allow_signal(int sig)
62636 * know it'll be handled, so that they don't get converted to
62637 * SIGKILL or just silently dropped.
62638 */
62639- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62640+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62641 recalc_sigpending();
62642 spin_unlock_irq(&current->sighand->siglock);
62643 return 0;
62644@@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
62645 vsnprintf(current->comm, sizeof(current->comm), name, args);
62646 va_end(args);
62647
62648+#ifdef CONFIG_GRKERNSEC
62649+ write_lock(&grsec_exec_file_lock);
62650+ if (current->exec_file) {
62651+ fput(current->exec_file);
62652+ current->exec_file = NULL;
62653+ }
62654+ write_unlock(&grsec_exec_file_lock);
62655+#endif
62656+
62657+ gr_set_kernel_label(current);
62658+
62659 /*
62660 * If we were started as result of loading a module, close all of the
62661 * user space pages. We don't need them, and if we didn't close them
62662@@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
62663 struct task_struct *tsk = current;
62664 int group_dead;
62665
62666- profile_task_exit(tsk);
62667-
62668- WARN_ON(atomic_read(&tsk->fs_excl));
62669-
62670+ /*
62671+ * Check this first since set_fs() below depends on
62672+ * current_thread_info(), which we better not access when we're in
62673+ * interrupt context. Other than that, we want to do the set_fs()
62674+ * as early as possible.
62675+ */
62676 if (unlikely(in_interrupt()))
62677 panic("Aiee, killing interrupt handler!");
62678- if (unlikely(!tsk->pid))
62679- panic("Attempted to kill the idle task!");
62680
62681 /*
62682- * If do_exit is called because this processes oopsed, it's possible
62683+ * If do_exit is called because this processes Oops'ed, it's possible
62684 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
62685 * continuing. Amongst other possible reasons, this is to prevent
62686 * mm_release()->clear_child_tid() from writing to a user-controlled
62687@@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
62688 */
62689 set_fs(USER_DS);
62690
62691+ profile_task_exit(tsk);
62692+
62693+ WARN_ON(atomic_read(&tsk->fs_excl));
62694+
62695+ if (unlikely(!tsk->pid))
62696+ panic("Attempted to kill the idle task!");
62697+
62698 tracehook_report_exit(&code);
62699
62700 validate_creds_for_do_exit(tsk);
62701@@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
62702 tsk->exit_code = code;
62703 taskstats_exit(tsk, group_dead);
62704
62705+ gr_acl_handle_psacct(tsk, code);
62706+ gr_acl_handle_exit();
62707+
62708 exit_mm(tsk);
62709
62710 if (group_dead)
62711@@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
62712
62713 if (unlikely(wo->wo_flags & WNOWAIT)) {
62714 int exit_code = p->exit_code;
62715- int why, status;
62716+ int why;
62717
62718 get_task_struct(p);
62719 read_unlock(&tasklist_lock);
62720diff -urNp linux-2.6.32.45/kernel/fork.c linux-2.6.32.45/kernel/fork.c
62721--- linux-2.6.32.45/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
62722+++ linux-2.6.32.45/kernel/fork.c 2011-08-11 19:50:07.000000000 -0400
62723@@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
62724 *stackend = STACK_END_MAGIC; /* for overflow detection */
62725
62726 #ifdef CONFIG_CC_STACKPROTECTOR
62727- tsk->stack_canary = get_random_int();
62728+ tsk->stack_canary = pax_get_random_long();
62729 #endif
62730
62731 /* One for us, one for whoever does the "release_task()" (usually parent) */
62732@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
62733 mm->locked_vm = 0;
62734 mm->mmap = NULL;
62735 mm->mmap_cache = NULL;
62736- mm->free_area_cache = oldmm->mmap_base;
62737- mm->cached_hole_size = ~0UL;
62738+ mm->free_area_cache = oldmm->free_area_cache;
62739+ mm->cached_hole_size = oldmm->cached_hole_size;
62740 mm->map_count = 0;
62741 cpumask_clear(mm_cpumask(mm));
62742 mm->mm_rb = RB_ROOT;
62743@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
62744 tmp->vm_flags &= ~VM_LOCKED;
62745 tmp->vm_mm = mm;
62746 tmp->vm_next = tmp->vm_prev = NULL;
62747+ tmp->vm_mirror = NULL;
62748 anon_vma_link(tmp);
62749 file = tmp->vm_file;
62750 if (file) {
62751@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
62752 if (retval)
62753 goto out;
62754 }
62755+
62756+#ifdef CONFIG_PAX_SEGMEXEC
62757+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
62758+ struct vm_area_struct *mpnt_m;
62759+
62760+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
62761+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
62762+
62763+ if (!mpnt->vm_mirror)
62764+ continue;
62765+
62766+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
62767+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
62768+ mpnt->vm_mirror = mpnt_m;
62769+ } else {
62770+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
62771+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
62772+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
62773+ mpnt->vm_mirror->vm_mirror = mpnt;
62774+ }
62775+ }
62776+ BUG_ON(mpnt_m);
62777+ }
62778+#endif
62779+
62780 /* a new mm has just been created */
62781 arch_dup_mmap(oldmm, mm);
62782 retval = 0;
62783@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
62784 write_unlock(&fs->lock);
62785 return -EAGAIN;
62786 }
62787- fs->users++;
62788+ atomic_inc(&fs->users);
62789 write_unlock(&fs->lock);
62790 return 0;
62791 }
62792 tsk->fs = copy_fs_struct(fs);
62793 if (!tsk->fs)
62794 return -ENOMEM;
62795+ gr_set_chroot_entries(tsk, &tsk->fs->root);
62796 return 0;
62797 }
62798
62799@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(
62800 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
62801 #endif
62802 retval = -EAGAIN;
62803+
62804+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
62805+
62806 if (atomic_read(&p->real_cred->user->processes) >=
62807 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
62808- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
62809- p->real_cred->user != INIT_USER)
62810+ if (p->real_cred->user != INIT_USER &&
62811+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
62812 goto bad_fork_free;
62813 }
62814+ current->flags &= ~PF_NPROC_EXCEEDED;
62815
62816 retval = copy_creds(p, clone_flags);
62817 if (retval < 0)
62818@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(
62819 goto bad_fork_free_pid;
62820 }
62821
62822+ gr_copy_label(p);
62823+
62824 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
62825 /*
62826 * Clear TID on mm_release()?
62827@@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
62828 bad_fork_free:
62829 free_task(p);
62830 fork_out:
62831+ gr_log_forkfail(retval);
62832+
62833 return ERR_PTR(retval);
62834 }
62835
62836@@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
62837 if (clone_flags & CLONE_PARENT_SETTID)
62838 put_user(nr, parent_tidptr);
62839
62840+ gr_handle_brute_check();
62841+
62842 if (clone_flags & CLONE_VFORK) {
62843 p->vfork_done = &vfork;
62844 init_completion(&vfork);
62845@@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unsh
62846 return 0;
62847
62848 /* don't need lock here; in the worst case we'll do useless copy */
62849- if (fs->users == 1)
62850+ if (atomic_read(&fs->users) == 1)
62851 return 0;
62852
62853 *new_fsp = copy_fs_struct(fs);
62854@@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
62855 fs = current->fs;
62856 write_lock(&fs->lock);
62857 current->fs = new_fs;
62858- if (--fs->users)
62859+ gr_set_chroot_entries(current, &current->fs->root);
62860+ if (atomic_dec_return(&fs->users))
62861 new_fs = NULL;
62862 else
62863 new_fs = fs;
62864diff -urNp linux-2.6.32.45/kernel/futex.c linux-2.6.32.45/kernel/futex.c
62865--- linux-2.6.32.45/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
62866+++ linux-2.6.32.45/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
62867@@ -54,6 +54,7 @@
62868 #include <linux/mount.h>
62869 #include <linux/pagemap.h>
62870 #include <linux/syscalls.h>
62871+#include <linux/ptrace.h>
62872 #include <linux/signal.h>
62873 #include <linux/module.h>
62874 #include <linux/magic.h>
62875@@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
62876 struct page *page;
62877 int err;
62878
62879+#ifdef CONFIG_PAX_SEGMEXEC
62880+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
62881+ return -EFAULT;
62882+#endif
62883+
62884 /*
62885 * The futex address must be "naturally" aligned.
62886 */
62887@@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
62888 struct futex_q q;
62889 int ret;
62890
62891+ pax_track_stack();
62892+
62893 if (!bitset)
62894 return -EINVAL;
62895
62896@@ -1841,7 +1849,7 @@ retry:
62897
62898 restart = &current_thread_info()->restart_block;
62899 restart->fn = futex_wait_restart;
62900- restart->futex.uaddr = (u32 *)uaddr;
62901+ restart->futex.uaddr = uaddr;
62902 restart->futex.val = val;
62903 restart->futex.time = abs_time->tv64;
62904 restart->futex.bitset = bitset;
62905@@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
62906 struct futex_q q;
62907 int res, ret;
62908
62909+ pax_track_stack();
62910+
62911 if (!bitset)
62912 return -EINVAL;
62913
62914@@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62915 {
62916 struct robust_list_head __user *head;
62917 unsigned long ret;
62918+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62919 const struct cred *cred = current_cred(), *pcred;
62920+#endif
62921
62922 if (!futex_cmpxchg_enabled)
62923 return -ENOSYS;
62924@@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62925 if (!p)
62926 goto err_unlock;
62927 ret = -EPERM;
62928+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62929+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
62930+ goto err_unlock;
62931+#else
62932 pcred = __task_cred(p);
62933 if (cred->euid != pcred->euid &&
62934 cred->euid != pcred->uid &&
62935 !capable(CAP_SYS_PTRACE))
62936 goto err_unlock;
62937+#endif
62938 head = p->robust_list;
62939 rcu_read_unlock();
62940 }
62941@@ -2459,7 +2476,7 @@ retry:
62942 */
62943 static inline int fetch_robust_entry(struct robust_list __user **entry,
62944 struct robust_list __user * __user *head,
62945- int *pi)
62946+ unsigned int *pi)
62947 {
62948 unsigned long uentry;
62949
62950@@ -2640,6 +2657,7 @@ static int __init futex_init(void)
62951 {
62952 u32 curval;
62953 int i;
62954+ mm_segment_t oldfs;
62955
62956 /*
62957 * This will fail and we want it. Some arch implementations do
62958@@ -2651,7 +2669,10 @@ static int __init futex_init(void)
62959 * implementation, the non functional ones will return
62960 * -ENOSYS.
62961 */
62962+ oldfs = get_fs();
62963+ set_fs(USER_DS);
62964 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
62965+ set_fs(oldfs);
62966 if (curval == -EFAULT)
62967 futex_cmpxchg_enabled = 1;
62968
62969diff -urNp linux-2.6.32.45/kernel/futex_compat.c linux-2.6.32.45/kernel/futex_compat.c
62970--- linux-2.6.32.45/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
62971+++ linux-2.6.32.45/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
62972@@ -10,6 +10,7 @@
62973 #include <linux/compat.h>
62974 #include <linux/nsproxy.h>
62975 #include <linux/futex.h>
62976+#include <linux/ptrace.h>
62977
62978 #include <asm/uaccess.h>
62979
62980@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
62981 {
62982 struct compat_robust_list_head __user *head;
62983 unsigned long ret;
62984- const struct cred *cred = current_cred(), *pcred;
62985+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62986+ const struct cred *cred = current_cred();
62987+ const struct cred *pcred;
62988+#endif
62989
62990 if (!futex_cmpxchg_enabled)
62991 return -ENOSYS;
62992@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
62993 if (!p)
62994 goto err_unlock;
62995 ret = -EPERM;
62996+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62997+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
62998+ goto err_unlock;
62999+#else
63000 pcred = __task_cred(p);
63001 if (cred->euid != pcred->euid &&
63002 cred->euid != pcred->uid &&
63003 !capable(CAP_SYS_PTRACE))
63004 goto err_unlock;
63005+#endif
63006 head = p->compat_robust_list;
63007 read_unlock(&tasklist_lock);
63008 }
63009diff -urNp linux-2.6.32.45/kernel/gcov/base.c linux-2.6.32.45/kernel/gcov/base.c
63010--- linux-2.6.32.45/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
63011+++ linux-2.6.32.45/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
63012@@ -102,11 +102,6 @@ void gcov_enable_events(void)
63013 }
63014
63015 #ifdef CONFIG_MODULES
63016-static inline int within(void *addr, void *start, unsigned long size)
63017-{
63018- return ((addr >= start) && (addr < start + size));
63019-}
63020-
63021 /* Update list and generate events when modules are unloaded. */
63022 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63023 void *data)
63024@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
63025 prev = NULL;
63026 /* Remove entries located in module from linked list. */
63027 for (info = gcov_info_head; info; info = info->next) {
63028- if (within(info, mod->module_core, mod->core_size)) {
63029+ if (within_module_core_rw((unsigned long)info, mod)) {
63030 if (prev)
63031 prev->next = info->next;
63032 else
63033diff -urNp linux-2.6.32.45/kernel/hrtimer.c linux-2.6.32.45/kernel/hrtimer.c
63034--- linux-2.6.32.45/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
63035+++ linux-2.6.32.45/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
63036@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
63037 local_irq_restore(flags);
63038 }
63039
63040-static void run_hrtimer_softirq(struct softirq_action *h)
63041+static void run_hrtimer_softirq(void)
63042 {
63043 hrtimer_peek_ahead_timers();
63044 }
63045diff -urNp linux-2.6.32.45/kernel/kallsyms.c linux-2.6.32.45/kernel/kallsyms.c
63046--- linux-2.6.32.45/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
63047+++ linux-2.6.32.45/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
63048@@ -11,6 +11,9 @@
63049 * Changed the compression method from stem compression to "table lookup"
63050 * compression (see scripts/kallsyms.c for a more complete description)
63051 */
63052+#ifdef CONFIG_GRKERNSEC_HIDESYM
63053+#define __INCLUDED_BY_HIDESYM 1
63054+#endif
63055 #include <linux/kallsyms.h>
63056 #include <linux/module.h>
63057 #include <linux/init.h>
63058@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
63059
63060 static inline int is_kernel_inittext(unsigned long addr)
63061 {
63062+ if (system_state != SYSTEM_BOOTING)
63063+ return 0;
63064+
63065 if (addr >= (unsigned long)_sinittext
63066 && addr <= (unsigned long)_einittext)
63067 return 1;
63068 return 0;
63069 }
63070
63071+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63072+#ifdef CONFIG_MODULES
63073+static inline int is_module_text(unsigned long addr)
63074+{
63075+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63076+ return 1;
63077+
63078+ addr = ktla_ktva(addr);
63079+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63080+}
63081+#else
63082+static inline int is_module_text(unsigned long addr)
63083+{
63084+ return 0;
63085+}
63086+#endif
63087+#endif
63088+
63089 static inline int is_kernel_text(unsigned long addr)
63090 {
63091 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63092@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
63093
63094 static inline int is_kernel(unsigned long addr)
63095 {
63096+
63097+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63098+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63099+ return 1;
63100+
63101+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63102+#else
63103 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63104+#endif
63105+
63106 return 1;
63107 return in_gate_area_no_task(addr);
63108 }
63109
63110 static int is_ksym_addr(unsigned long addr)
63111 {
63112+
63113+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63114+ if (is_module_text(addr))
63115+ return 0;
63116+#endif
63117+
63118 if (all_var)
63119 return is_kernel(addr);
63120
63121@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
63122
63123 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63124 {
63125- iter->name[0] = '\0';
63126 iter->nameoff = get_symbol_offset(new_pos);
63127 iter->pos = new_pos;
63128 }
63129@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
63130 {
63131 struct kallsym_iter *iter = m->private;
63132
63133+#ifdef CONFIG_GRKERNSEC_HIDESYM
63134+ if (current_uid())
63135+ return 0;
63136+#endif
63137+
63138 /* Some debugging symbols have no name. Ignore them. */
63139 if (!iter->name[0])
63140 return 0;
63141@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
63142 struct kallsym_iter *iter;
63143 int ret;
63144
63145- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63146+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63147 if (!iter)
63148 return -ENOMEM;
63149 reset_iter(iter, 0);
63150diff -urNp linux-2.6.32.45/kernel/kgdb.c linux-2.6.32.45/kernel/kgdb.c
63151--- linux-2.6.32.45/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
63152+++ linux-2.6.32.45/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
63153@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
63154 /* Guard for recursive entry */
63155 static int exception_level;
63156
63157-static struct kgdb_io *kgdb_io_ops;
63158+static const struct kgdb_io *kgdb_io_ops;
63159 static DEFINE_SPINLOCK(kgdb_registration_lock);
63160
63161 /* kgdb console driver is loaded */
63162@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
63163 */
63164 static atomic_t passive_cpu_wait[NR_CPUS];
63165 static atomic_t cpu_in_kgdb[NR_CPUS];
63166-atomic_t kgdb_setting_breakpoint;
63167+atomic_unchecked_t kgdb_setting_breakpoint;
63168
63169 struct task_struct *kgdb_usethread;
63170 struct task_struct *kgdb_contthread;
63171@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
63172 sizeof(unsigned long)];
63173
63174 /* to keep track of the CPU which is doing the single stepping*/
63175-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63176+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63177
63178 /*
63179 * If you are debugging a problem where roundup (the collection of
63180@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
63181 return 0;
63182 if (kgdb_connected)
63183 return 1;
63184- if (atomic_read(&kgdb_setting_breakpoint))
63185+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
63186 return 1;
63187 if (print_wait)
63188 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
63189@@ -1426,8 +1426,8 @@ acquirelock:
63190 * instance of the exception handler wanted to come into the
63191 * debugger on a different CPU via a single step
63192 */
63193- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63194- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
63195+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63196+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
63197
63198 atomic_set(&kgdb_active, -1);
63199 touch_softlockup_watchdog();
63200@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
63201 *
63202 * Register it with the KGDB core.
63203 */
63204-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
63205+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
63206 {
63207 int err;
63208
63209@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
63210 *
63211 * Unregister it with the KGDB core.
63212 */
63213-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
63214+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
63215 {
63216 BUG_ON(kgdb_connected);
63217
63218@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
63219 */
63220 void kgdb_breakpoint(void)
63221 {
63222- atomic_set(&kgdb_setting_breakpoint, 1);
63223+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
63224 wmb(); /* Sync point before breakpoint */
63225 arch_kgdb_breakpoint();
63226 wmb(); /* Sync point after breakpoint */
63227- atomic_set(&kgdb_setting_breakpoint, 0);
63228+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
63229 }
63230 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
63231
63232diff -urNp linux-2.6.32.45/kernel/kmod.c linux-2.6.32.45/kernel/kmod.c
63233--- linux-2.6.32.45/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
63234+++ linux-2.6.32.45/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
63235@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63236 * If module auto-loading support is disabled then this function
63237 * becomes a no-operation.
63238 */
63239-int __request_module(bool wait, const char *fmt, ...)
63240+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63241 {
63242- va_list args;
63243 char module_name[MODULE_NAME_LEN];
63244 unsigned int max_modprobes;
63245 int ret;
63246- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63247+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63248 static char *envp[] = { "HOME=/",
63249 "TERM=linux",
63250 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63251@@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
63252 if (ret)
63253 return ret;
63254
63255- va_start(args, fmt);
63256- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63257- va_end(args);
63258+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63259 if (ret >= MODULE_NAME_LEN)
63260 return -ENAMETOOLONG;
63261
63262+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63263+ if (!current_uid()) {
63264+ /* hack to workaround consolekit/udisks stupidity */
63265+ read_lock(&tasklist_lock);
63266+ if (!strcmp(current->comm, "mount") &&
63267+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63268+ read_unlock(&tasklist_lock);
63269+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63270+ return -EPERM;
63271+ }
63272+ read_unlock(&tasklist_lock);
63273+ }
63274+#endif
63275+
63276 /* If modprobe needs a service that is in a module, we get a recursive
63277 * loop. Limit the number of running kmod threads to max_threads/2 or
63278 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63279@@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
63280 atomic_dec(&kmod_concurrent);
63281 return ret;
63282 }
63283+
63284+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63285+{
63286+ va_list args;
63287+ int ret;
63288+
63289+ va_start(args, fmt);
63290+ ret = ____request_module(wait, module_param, fmt, args);
63291+ va_end(args);
63292+
63293+ return ret;
63294+}
63295+
63296+int __request_module(bool wait, const char *fmt, ...)
63297+{
63298+ va_list args;
63299+ int ret;
63300+
63301+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63302+ if (current_uid()) {
63303+ char module_param[MODULE_NAME_LEN];
63304+
63305+ memset(module_param, 0, sizeof(module_param));
63306+
63307+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63308+
63309+ va_start(args, fmt);
63310+ ret = ____request_module(wait, module_param, fmt, args);
63311+ va_end(args);
63312+
63313+ return ret;
63314+ }
63315+#endif
63316+
63317+ va_start(args, fmt);
63318+ ret = ____request_module(wait, NULL, fmt, args);
63319+ va_end(args);
63320+
63321+ return ret;
63322+}
63323+
63324+
63325 EXPORT_SYMBOL(__request_module);
63326 #endif /* CONFIG_MODULES */
63327
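
The kmod.c hunk splits __request_module() into a va_list core (____request_module) plus thin variadic wrappers, which lets the MODHARDEN code append an extra per-uid argument ("grsec_modharden_normal<uid>_") to the modprobe command line for requests coming from unprivileged users. The vararg-forwarding split itself is plain C; a hedged standalone sketch of the shape, with illustrative helper names and a printed command in place of the real usermode helper:

    #include <stdarg.h>
    #include <stdio.h>

    static int request_core(const char *extra_arg, const char *fmt, va_list ap)
    {
    	char name[64];

    	vsnprintf(name, sizeof(name), fmt, ap);
    	printf("would exec: modprobe -q -- %s %s\n",
    	       name, extra_arg ? extra_arg : "");
    	return 0;
    }

    /* Wrapper used for "hardened" requests carrying an extra argument. */
    static int request_module_tagged(const char *extra_arg, const char *fmt, ...)
    {
    	va_list ap;
    	int ret;

    	va_start(ap, fmt);
    	ret = request_core(extra_arg, fmt, ap);
    	va_end(ap);
    	return ret;
    }

    /* Plain wrapper, equivalent to the original entry point. */
    static int request_module_plain(const char *fmt, ...)
    {
    	va_list ap;
    	int ret;

    	va_start(ap, fmt);
    	ret = request_core(NULL, fmt, ap);
    	va_end(ap);
    	return ret;
    }

    int main(void)
    {
    	request_module_plain("fs-%s", "ext4");
    	request_module_tagged("grsec_modharden_normal1000_", "fs-%s", "ext4");
    	return 0;
    }
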
63328diff -urNp linux-2.6.32.45/kernel/kprobes.c linux-2.6.32.45/kernel/kprobes.c
63329--- linux-2.6.32.45/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
63330+++ linux-2.6.32.45/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
63331@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
63332 * kernel image and loaded module images reside. This is required
63333 * so x86_64 can correctly handle the %rip-relative fixups.
63334 */
63335- kip->insns = module_alloc(PAGE_SIZE);
63336+ kip->insns = module_alloc_exec(PAGE_SIZE);
63337 if (!kip->insns) {
63338 kfree(kip);
63339 return NULL;
63340@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
63341 */
63342 if (!list_is_singular(&kprobe_insn_pages)) {
63343 list_del(&kip->list);
63344- module_free(NULL, kip->insns);
63345+ module_free_exec(NULL, kip->insns);
63346 kfree(kip);
63347 }
63348 return 1;
63349@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
63350 {
63351 int i, err = 0;
63352 unsigned long offset = 0, size = 0;
63353- char *modname, namebuf[128];
63354+ char *modname, namebuf[KSYM_NAME_LEN];
63355 const char *symbol_name;
63356 void *addr;
63357 struct kprobe_blackpoint *kb;
63358@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
63359 const char *sym = NULL;
63360 unsigned int i = *(loff_t *) v;
63361 unsigned long offset = 0;
63362- char *modname, namebuf[128];
63363+ char *modname, namebuf[KSYM_NAME_LEN];
63364
63365 head = &kprobe_table[i];
63366 preempt_disable();
63367diff -urNp linux-2.6.32.45/kernel/lockdep.c linux-2.6.32.45/kernel/lockdep.c
63368--- linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
63369+++ linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
63370@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
63371 /*
63372 * Various lockdep statistics:
63373 */
63374-atomic_t chain_lookup_hits;
63375-atomic_t chain_lookup_misses;
63376-atomic_t hardirqs_on_events;
63377-atomic_t hardirqs_off_events;
63378-atomic_t redundant_hardirqs_on;
63379-atomic_t redundant_hardirqs_off;
63380-atomic_t softirqs_on_events;
63381-atomic_t softirqs_off_events;
63382-atomic_t redundant_softirqs_on;
63383-atomic_t redundant_softirqs_off;
63384-atomic_t nr_unused_locks;
63385-atomic_t nr_cyclic_checks;
63386-atomic_t nr_find_usage_forwards_checks;
63387-atomic_t nr_find_usage_backwards_checks;
63388+atomic_unchecked_t chain_lookup_hits;
63389+atomic_unchecked_t chain_lookup_misses;
63390+atomic_unchecked_t hardirqs_on_events;
63391+atomic_unchecked_t hardirqs_off_events;
63392+atomic_unchecked_t redundant_hardirqs_on;
63393+atomic_unchecked_t redundant_hardirqs_off;
63394+atomic_unchecked_t softirqs_on_events;
63395+atomic_unchecked_t softirqs_off_events;
63396+atomic_unchecked_t redundant_softirqs_on;
63397+atomic_unchecked_t redundant_softirqs_off;
63398+atomic_unchecked_t nr_unused_locks;
63399+atomic_unchecked_t nr_cyclic_checks;
63400+atomic_unchecked_t nr_find_usage_forwards_checks;
63401+atomic_unchecked_t nr_find_usage_backwards_checks;
63402 #endif
63403
63404 /*
63405@@ -577,6 +577,10 @@ static int static_obj(void *obj)
63406 int i;
63407 #endif
63408
63409+#ifdef CONFIG_PAX_KERNEXEC
63410+ start = ktla_ktva(start);
63411+#endif
63412+
63413 /*
63414 * static variable?
63415 */
63416@@ -592,8 +596,7 @@ static int static_obj(void *obj)
63417 */
63418 for_each_possible_cpu(i) {
63419 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
63420- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
63421- + per_cpu_offset(i);
63422+ end = start + PERCPU_ENOUGH_ROOM;
63423
63424 if ((addr >= start) && (addr < end))
63425 return 1;
63426@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
63427 if (!static_obj(lock->key)) {
63428 debug_locks_off();
63429 printk("INFO: trying to register non-static key.\n");
63430+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63431 printk("the code is fine but needs lockdep annotation.\n");
63432 printk("turning off the locking correctness validator.\n");
63433 dump_stack();
63434@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
63435 if (!class)
63436 return 0;
63437 }
63438- debug_atomic_inc((atomic_t *)&class->ops);
63439+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
63440 if (very_verbose(class)) {
63441 printk("\nacquire class [%p] %s", class->key, class->name);
63442 if (class->name_version > 1)
63443diff -urNp linux-2.6.32.45/kernel/lockdep_internals.h linux-2.6.32.45/kernel/lockdep_internals.h
63444--- linux-2.6.32.45/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
63445+++ linux-2.6.32.45/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
63446@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
63447 /*
63448 * Various lockdep statistics:
63449 */
63450-extern atomic_t chain_lookup_hits;
63451-extern atomic_t chain_lookup_misses;
63452-extern atomic_t hardirqs_on_events;
63453-extern atomic_t hardirqs_off_events;
63454-extern atomic_t redundant_hardirqs_on;
63455-extern atomic_t redundant_hardirqs_off;
63456-extern atomic_t softirqs_on_events;
63457-extern atomic_t softirqs_off_events;
63458-extern atomic_t redundant_softirqs_on;
63459-extern atomic_t redundant_softirqs_off;
63460-extern atomic_t nr_unused_locks;
63461-extern atomic_t nr_cyclic_checks;
63462-extern atomic_t nr_cyclic_check_recursions;
63463-extern atomic_t nr_find_usage_forwards_checks;
63464-extern atomic_t nr_find_usage_forwards_recursions;
63465-extern atomic_t nr_find_usage_backwards_checks;
63466-extern atomic_t nr_find_usage_backwards_recursions;
63467-# define debug_atomic_inc(ptr) atomic_inc(ptr)
63468-# define debug_atomic_dec(ptr) atomic_dec(ptr)
63469-# define debug_atomic_read(ptr) atomic_read(ptr)
63470+extern atomic_unchecked_t chain_lookup_hits;
63471+extern atomic_unchecked_t chain_lookup_misses;
63472+extern atomic_unchecked_t hardirqs_on_events;
63473+extern atomic_unchecked_t hardirqs_off_events;
63474+extern atomic_unchecked_t redundant_hardirqs_on;
63475+extern atomic_unchecked_t redundant_hardirqs_off;
63476+extern atomic_unchecked_t softirqs_on_events;
63477+extern atomic_unchecked_t softirqs_off_events;
63478+extern atomic_unchecked_t redundant_softirqs_on;
63479+extern atomic_unchecked_t redundant_softirqs_off;
63480+extern atomic_unchecked_t nr_unused_locks;
63481+extern atomic_unchecked_t nr_cyclic_checks;
63482+extern atomic_unchecked_t nr_cyclic_check_recursions;
63483+extern atomic_unchecked_t nr_find_usage_forwards_checks;
63484+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
63485+extern atomic_unchecked_t nr_find_usage_backwards_checks;
63486+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
63487+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
63488+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
63489+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
63490 #else
63491 # define debug_atomic_inc(ptr) do { } while (0)
63492 # define debug_atomic_dec(ptr) do { } while (0)
63493diff -urNp linux-2.6.32.45/kernel/lockdep_proc.c linux-2.6.32.45/kernel/lockdep_proc.c
63494--- linux-2.6.32.45/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
63495+++ linux-2.6.32.45/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
63496@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63497
63498 static void print_name(struct seq_file *m, struct lock_class *class)
63499 {
63500- char str[128];
63501+ char str[KSYM_NAME_LEN];
63502 const char *name = class->name;
63503
63504 if (!name) {
63505diff -urNp linux-2.6.32.45/kernel/module.c linux-2.6.32.45/kernel/module.c
63506--- linux-2.6.32.45/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
63507+++ linux-2.6.32.45/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
63508@@ -55,6 +55,7 @@
63509 #include <linux/async.h>
63510 #include <linux/percpu.h>
63511 #include <linux/kmemleak.h>
63512+#include <linux/grsecurity.h>
63513
63514 #define CREATE_TRACE_POINTS
63515 #include <trace/events/module.h>
63516@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
63517 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
63518
63519 /* Bounds of module allocation, for speeding __module_address */
63520-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63521+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63522+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63523
63524 int register_module_notifier(struct notifier_block * nb)
63525 {
63526@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
63527 return true;
63528
63529 list_for_each_entry_rcu(mod, &modules, list) {
63530- struct symsearch arr[] = {
63531+ struct symsearch modarr[] = {
63532 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63533 NOT_GPL_ONLY, false },
63534 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63535@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
63536 #endif
63537 };
63538
63539- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63540+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63541 return true;
63542 }
63543 return false;
63544@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
63545 void *ptr;
63546 int cpu;
63547
63548- if (align > PAGE_SIZE) {
63549+ if (align-1 >= PAGE_SIZE) {
63550 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63551 name, align, PAGE_SIZE);
63552 align = PAGE_SIZE;
63553@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
63554 * /sys/module/foo/sections stuff
63555 * J. Corbet <corbet@lwn.net>
63556 */
63557-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
63558+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63559
63560 static inline bool sect_empty(const Elf_Shdr *sect)
63561 {
63562@@ -1545,7 +1547,8 @@ static void free_module(struct module *m
63563 destroy_params(mod->kp, mod->num_kp);
63564
63565 /* This may be NULL, but that's OK */
63566- module_free(mod, mod->module_init);
63567+ module_free(mod, mod->module_init_rw);
63568+ module_free_exec(mod, mod->module_init_rx);
63569 kfree(mod->args);
63570 if (mod->percpu)
63571 percpu_modfree(mod->percpu);
63572@@ -1554,10 +1557,12 @@ static void free_module(struct module *m
63573 percpu_modfree(mod->refptr);
63574 #endif
63575 /* Free lock-classes: */
63576- lockdep_free_key_range(mod->module_core, mod->core_size);
63577+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63578+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63579
63580 /* Finally, free the core (containing the module structure) */
63581- module_free(mod, mod->module_core);
63582+ module_free_exec(mod, mod->module_core_rx);
63583+ module_free(mod, mod->module_core_rw);
63584
63585 #ifdef CONFIG_MPU
63586 update_protections(current->mm);
63587@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
63588 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63589 int ret = 0;
63590 const struct kernel_symbol *ksym;
63591+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63592+ int is_fs_load = 0;
63593+ int register_filesystem_found = 0;
63594+ char *p;
63595+
63596+ p = strstr(mod->args, "grsec_modharden_fs");
63597+
63598+ if (p) {
63599+ char *endptr = p + strlen("grsec_modharden_fs");
63600+ /* copy \0 as well */
63601+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63602+ is_fs_load = 1;
63603+ }
63604+#endif
63605+
63606
63607 for (i = 1; i < n; i++) {
63608+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63609+ const char *name = strtab + sym[i].st_name;
63610+
63611+ /* it's a real shame this will never get ripped and copied
63612+ upstream! ;(
63613+ */
63614+ if (is_fs_load && !strcmp(name, "register_filesystem"))
63615+ register_filesystem_found = 1;
63616+#endif
63617 switch (sym[i].st_shndx) {
63618 case SHN_COMMON:
63619 /* We compiled with -fno-common. These are not
63620@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
63621 strtab + sym[i].st_name, mod);
63622 /* Ok if resolved. */
63623 if (ksym) {
63624+ pax_open_kernel();
63625 sym[i].st_value = ksym->value;
63626+ pax_close_kernel();
63627 break;
63628 }
63629
63630@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
63631 secbase = (unsigned long)mod->percpu;
63632 else
63633 secbase = sechdrs[sym[i].st_shndx].sh_addr;
63634+ pax_open_kernel();
63635 sym[i].st_value += secbase;
63636+ pax_close_kernel();
63637 break;
63638 }
63639 }
63640
63641+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63642+ if (is_fs_load && !register_filesystem_found) {
63643+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63644+ ret = -EPERM;
63645+ }
63646+#endif
63647+
63648 return ret;
63649 }
63650
63651@@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
63652 || s->sh_entsize != ~0UL
63653 || strstarts(secstrings + s->sh_name, ".init"))
63654 continue;
63655- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63656+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63657+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63658+ else
63659+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63660 DEBUGP("\t%s\n", secstrings + s->sh_name);
63661 }
63662- if (m == 0)
63663- mod->core_text_size = mod->core_size;
63664 }
63665
63666 DEBUGP("Init section allocation order:\n");
63667@@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
63668 || s->sh_entsize != ~0UL
63669 || !strstarts(secstrings + s->sh_name, ".init"))
63670 continue;
63671- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63672- | INIT_OFFSET_MASK);
63673+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63674+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63675+ else
63676+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63677+ s->sh_entsize |= INIT_OFFSET_MASK;
63678 DEBUGP("\t%s\n", secstrings + s->sh_name);
63679 }
63680- if (m == 0)
63681- mod->init_text_size = mod->init_size;
63682 }
63683 }
63684
63685@@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
63686
63687 /* As per nm */
63688 static char elf_type(const Elf_Sym *sym,
63689- Elf_Shdr *sechdrs,
63690- const char *secstrings,
63691- struct module *mod)
63692+ const Elf_Shdr *sechdrs,
63693+ const char *secstrings)
63694 {
63695 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
63696 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
63697@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
63698
63699 /* Put symbol section at end of init part of module. */
63700 symsect->sh_flags |= SHF_ALLOC;
63701- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63702+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63703 symindex) | INIT_OFFSET_MASK;
63704 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
63705
63706@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
63707 }
63708
63709 /* Append room for core symbols at end of core part. */
63710- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63711- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
63712+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63713+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
63714
63715 /* Put string table section at end of init part of module. */
63716 strsect->sh_flags |= SHF_ALLOC;
63717- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63718+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63719 strindex) | INIT_OFFSET_MASK;
63720 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
63721
63722 /* Append room for core symbols' strings at end of core part. */
63723- *pstroffs = mod->core_size;
63724+ *pstroffs = mod->core_size_rx;
63725 __set_bit(0, strmap);
63726- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
63727+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
63728
63729 return symoffs;
63730 }
63731@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
63732 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63733 mod->strtab = (void *)sechdrs[strindex].sh_addr;
63734
63735+ pax_open_kernel();
63736+
63737 /* Set types up while we still have access to sections. */
63738 for (i = 0; i < mod->num_symtab; i++)
63739 mod->symtab[i].st_info
63740- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
63741+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
63742
63743- mod->core_symtab = dst = mod->module_core + symoffs;
63744+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
63745 src = mod->symtab;
63746 *dst = *src;
63747 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63748@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
63749 }
63750 mod->core_num_syms = ndst;
63751
63752- mod->core_strtab = s = mod->module_core + stroffs;
63753+ mod->core_strtab = s = mod->module_core_rx + stroffs;
63754 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
63755 if (test_bit(i, strmap))
63756 *++s = mod->strtab[i];
63757+
63758+ pax_close_kernel();
63759 }
63760 #else
63761 static inline unsigned long layout_symtab(struct module *mod,
63762@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
63763 #endif
63764 }
63765
63766-static void *module_alloc_update_bounds(unsigned long size)
63767+static void *module_alloc_update_bounds_rw(unsigned long size)
63768 {
63769 void *ret = module_alloc(size);
63770
63771 if (ret) {
63772 /* Update module bounds. */
63773- if ((unsigned long)ret < module_addr_min)
63774- module_addr_min = (unsigned long)ret;
63775- if ((unsigned long)ret + size > module_addr_max)
63776- module_addr_max = (unsigned long)ret + size;
63777+ if ((unsigned long)ret < module_addr_min_rw)
63778+ module_addr_min_rw = (unsigned long)ret;
63779+ if ((unsigned long)ret + size > module_addr_max_rw)
63780+ module_addr_max_rw = (unsigned long)ret + size;
63781+ }
63782+ return ret;
63783+}
63784+
63785+static void *module_alloc_update_bounds_rx(unsigned long size)
63786+{
63787+ void *ret = module_alloc_exec(size);
63788+
63789+ if (ret) {
63790+ /* Update module bounds. */
63791+ if ((unsigned long)ret < module_addr_min_rx)
63792+ module_addr_min_rx = (unsigned long)ret;
63793+ if ((unsigned long)ret + size > module_addr_max_rx)
63794+ module_addr_max_rx = (unsigned long)ret + size;
63795 }
63796 return ret;
63797 }
63798@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
63799 unsigned int i;
63800
63801 /* only scan the sections containing data */
63802- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
63803- (unsigned long)mod->module_core,
63804+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
63805+ (unsigned long)mod->module_core_rw,
63806 sizeof(struct module), GFP_KERNEL);
63807
63808 for (i = 1; i < hdr->e_shnum; i++) {
63809@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
63810 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
63811 continue;
63812
63813- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
63814- (unsigned long)mod->module_core,
63815+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
63816+ (unsigned long)mod->module_core_rw,
63817 sechdrs[i].sh_size, GFP_KERNEL);
63818 }
63819 }
63820@@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
63821 secstrings, &stroffs, strmap);
63822
63823 /* Do the allocs. */
63824- ptr = module_alloc_update_bounds(mod->core_size);
63825+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
63826 /*
63827 * The pointer to this block is stored in the module structure
63828 * which is inside the block. Just mark it as not being a
63829@@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
63830 err = -ENOMEM;
63831 goto free_percpu;
63832 }
63833- memset(ptr, 0, mod->core_size);
63834- mod->module_core = ptr;
63835+ memset(ptr, 0, mod->core_size_rw);
63836+ mod->module_core_rw = ptr;
63837
63838- ptr = module_alloc_update_bounds(mod->init_size);
63839+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
63840 /*
63841 * The pointer to this block is stored in the module structure
63842 * which is inside the block. This block doesn't need to be
63843 * scanned as it contains data and code that will be freed
63844 * after the module is initialized.
63845 */
63846- kmemleak_ignore(ptr);
63847- if (!ptr && mod->init_size) {
63848+ kmemleak_not_leak(ptr);
63849+ if (!ptr && mod->init_size_rw) {
63850+ err = -ENOMEM;
63851+ goto free_core_rw;
63852+ }
63853+ memset(ptr, 0, mod->init_size_rw);
63854+ mod->module_init_rw = ptr;
63855+
63856+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
63857+ kmemleak_not_leak(ptr);
63858+ if (!ptr) {
63859 err = -ENOMEM;
63860- goto free_core;
63861+ goto free_init_rw;
63862 }
63863- memset(ptr, 0, mod->init_size);
63864- mod->module_init = ptr;
63865+
63866+ pax_open_kernel();
63867+ memset(ptr, 0, mod->core_size_rx);
63868+ pax_close_kernel();
63869+ mod->module_core_rx = ptr;
63870+
63871+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
63872+ kmemleak_not_leak(ptr);
63873+ if (!ptr && mod->init_size_rx) {
63874+ err = -ENOMEM;
63875+ goto free_core_rx;
63876+ }
63877+
63878+ pax_open_kernel();
63879+ memset(ptr, 0, mod->init_size_rx);
63880+ pax_close_kernel();
63881+ mod->module_init_rx = ptr;
63882
63883 /* Transfer each section which specifies SHF_ALLOC */
63884 DEBUGP("final section addresses:\n");
63885@@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
63886 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
63887 continue;
63888
63889- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
63890- dest = mod->module_init
63891- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63892- else
63893- dest = mod->module_core + sechdrs[i].sh_entsize;
63894+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
63895+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63896+ dest = mod->module_init_rw
63897+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63898+ else
63899+ dest = mod->module_init_rx
63900+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63901+ } else {
63902+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63903+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
63904+ else
63905+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
63906+ }
63907+
63908+ if (sechdrs[i].sh_type != SHT_NOBITS) {
63909
63910- if (sechdrs[i].sh_type != SHT_NOBITS)
63911- memcpy(dest, (void *)sechdrs[i].sh_addr,
63912- sechdrs[i].sh_size);
63913+#ifdef CONFIG_PAX_KERNEXEC
63914+#ifdef CONFIG_X86_64
63915+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
63916+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
63917+#endif
63918+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
63919+ pax_open_kernel();
63920+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63921+ pax_close_kernel();
63922+ } else
63923+#endif
63924+
63925+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63926+ }
63927 /* Update sh_addr to point to copy in image. */
63928- sechdrs[i].sh_addr = (unsigned long)dest;
63929+
63930+#ifdef CONFIG_PAX_KERNEXEC
63931+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
63932+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
63933+ else
63934+#endif
63935+
63936+ sechdrs[i].sh_addr = (unsigned long)dest;
63937 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
63938 }
63939 /* Module has been moved. */
63940@@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
63941 mod->name);
63942 if (!mod->refptr) {
63943 err = -ENOMEM;
63944- goto free_init;
63945+ goto free_init_rx;
63946 }
63947 #endif
63948 /* Now we've moved module, initialize linked lists, etc. */
63949@@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
63950 /* Set up MODINFO_ATTR fields */
63951 setup_modinfo(mod, sechdrs, infoindex);
63952
63953+ mod->args = args;
63954+
63955+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63956+ {
63957+ char *p, *p2;
63958+
63959+ if (strstr(mod->args, "grsec_modharden_netdev")) {
63960+			printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
63961+ err = -EPERM;
63962+ goto cleanup;
63963+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
63964+ p += strlen("grsec_modharden_normal");
63965+ p2 = strstr(p, "_");
63966+ if (p2) {
63967+ *p2 = '\0';
63968+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
63969+ *p2 = '_';
63970+ }
63971+ err = -EPERM;
63972+ goto cleanup;
63973+ }
63974+ }
63975+#endif
63976+
63977+
63978 /* Fix up syms, so that st_value is a pointer to location. */
63979 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
63980 mod);
63981@@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
63982
63983 /* Now do relocations. */
63984 for (i = 1; i < hdr->e_shnum; i++) {
63985- const char *strtab = (char *)sechdrs[strindex].sh_addr;
63986 unsigned int info = sechdrs[i].sh_info;
63987+ strtab = (char *)sechdrs[strindex].sh_addr;
63988
63989 /* Not a valid relocation section? */
63990 if (info >= hdr->e_shnum)
63991@@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
63992 * Do it before processing of module parameters, so the module
63993 * can provide parameter accessor functions of its own.
63994 */
63995- if (mod->module_init)
63996- flush_icache_range((unsigned long)mod->module_init,
63997- (unsigned long)mod->module_init
63998- + mod->init_size);
63999- flush_icache_range((unsigned long)mod->module_core,
64000- (unsigned long)mod->module_core + mod->core_size);
64001+ if (mod->module_init_rx)
64002+ flush_icache_range((unsigned long)mod->module_init_rx,
64003+ (unsigned long)mod->module_init_rx
64004+ + mod->init_size_rx);
64005+ flush_icache_range((unsigned long)mod->module_core_rx,
64006+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64007
64008 set_fs(old_fs);
64009
64010- mod->args = args;
64011 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
64012 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
64013 mod->name);
64014@@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
64015 free_unload:
64016 module_unload_free(mod);
64017 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
64018+ free_init_rx:
64019 percpu_modfree(mod->refptr);
64020- free_init:
64021 #endif
64022- module_free(mod, mod->module_init);
64023- free_core:
64024- module_free(mod, mod->module_core);
64025+ module_free_exec(mod, mod->module_init_rx);
64026+ free_core_rx:
64027+ module_free_exec(mod, mod->module_core_rx);
64028+ free_init_rw:
64029+ module_free(mod, mod->module_init_rw);
64030+ free_core_rw:
64031+ module_free(mod, mod->module_core_rw);
64032 /* mod will be freed with core. Don't access it beyond this line! */
64033 free_percpu:
64034 if (percpu)
64035@@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
64036 mod->symtab = mod->core_symtab;
64037 mod->strtab = mod->core_strtab;
64038 #endif
64039- module_free(mod, mod->module_init);
64040- mod->module_init = NULL;
64041- mod->init_size = 0;
64042- mod->init_text_size = 0;
64043+ module_free(mod, mod->module_init_rw);
64044+ module_free_exec(mod, mod->module_init_rx);
64045+ mod->module_init_rw = NULL;
64046+ mod->module_init_rx = NULL;
64047+ mod->init_size_rw = 0;
64048+ mod->init_size_rx = 0;
64049 mutex_unlock(&module_mutex);
64050
64051 return 0;
64052@@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
64053 unsigned long nextval;
64054
64055 /* At worse, next value is at end of module */
64056- if (within_module_init(addr, mod))
64057- nextval = (unsigned long)mod->module_init+mod->init_text_size;
64058+ if (within_module_init_rx(addr, mod))
64059+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64060+ else if (within_module_init_rw(addr, mod))
64061+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64062+ else if (within_module_core_rx(addr, mod))
64063+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64064+ else if (within_module_core_rw(addr, mod))
64065+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64066 else
64067- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64068+ return NULL;
64069
64070 /* Scan for closest preceeding symbol, and next symbol. (ELF
64071 starts real symbols at 1). */
64072@@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
64073 char buf[8];
64074
64075 seq_printf(m, "%s %u",
64076- mod->name, mod->init_size + mod->core_size);
64077+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64078 print_unload_info(m, mod);
64079
64080 /* Informative for users. */
64081@@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
64082 mod->state == MODULE_STATE_COMING ? "Loading":
64083 "Live");
64084 /* Used by oprofile and other similar tools. */
64085- seq_printf(m, " 0x%p", mod->module_core);
64086+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64087
64088 /* Taints info */
64089 if (mod->taints)
64090@@ -2981,7 +3128,17 @@ static const struct file_operations proc
64091
64092 static int __init proc_modules_init(void)
64093 {
64094+#ifndef CONFIG_GRKERNSEC_HIDESYM
64095+#ifdef CONFIG_GRKERNSEC_PROC_USER
64096+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64097+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64098+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64099+#else
64100 proc_create("modules", 0, NULL, &proc_modules_operations);
64101+#endif
64102+#else
64103+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64104+#endif
64105 return 0;
64106 }
64107 module_init(proc_modules_init);
64108@@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
64109 {
64110 struct module *mod;
64111
64112- if (addr < module_addr_min || addr > module_addr_max)
64113+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64114+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64115 return NULL;
64116
64117 list_for_each_entry_rcu(mod, &modules, list)
64118- if (within_module_core(addr, mod)
64119- || within_module_init(addr, mod))
64120+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64121 return mod;
64122 return NULL;
64123 }
64124@@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
64125 */
64126 struct module *__module_text_address(unsigned long addr)
64127 {
64128- struct module *mod = __module_address(addr);
64129+ struct module *mod;
64130+
64131+#ifdef CONFIG_X86_32
64132+ addr = ktla_ktva(addr);
64133+#endif
64134+
64135+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64136+ return NULL;
64137+
64138+ mod = __module_address(addr);
64139+
64140 if (mod) {
64141 /* Make sure it's within the text section. */
64142- if (!within(addr, mod->module_init, mod->init_text_size)
64143- && !within(addr, mod->module_core, mod->core_text_size))
64144+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64145 mod = NULL;
64146 }
64147 return mod;
64148diff -urNp linux-2.6.32.45/kernel/mutex.c linux-2.6.32.45/kernel/mutex.c
64149--- linux-2.6.32.45/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
64150+++ linux-2.6.32.45/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
64151@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
64152 */
64153
64154 for (;;) {
64155- struct thread_info *owner;
64156+ struct task_struct *owner;
64157
64158 /*
64159 * If we own the BKL, then don't spin. The owner of
64160@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
64161 spin_lock_mutex(&lock->wait_lock, flags);
64162
64163 debug_mutex_lock_common(lock, &waiter);
64164- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64165+ debug_mutex_add_waiter(lock, &waiter, task);
64166
64167 /* add waiting tasks to the end of the waitqueue (FIFO): */
64168 list_add_tail(&waiter.list, &lock->wait_list);
64169@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
64170 * TASK_UNINTERRUPTIBLE case.)
64171 */
64172 if (unlikely(signal_pending_state(state, task))) {
64173- mutex_remove_waiter(lock, &waiter,
64174- task_thread_info(task));
64175+ mutex_remove_waiter(lock, &waiter, task);
64176 mutex_release(&lock->dep_map, 1, ip);
64177 spin_unlock_mutex(&lock->wait_lock, flags);
64178
64179@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
64180 done:
64181 lock_acquired(&lock->dep_map, ip);
64182 /* got the lock - rejoice! */
64183- mutex_remove_waiter(lock, &waiter, current_thread_info());
64184+ mutex_remove_waiter(lock, &waiter, task);
64185 mutex_set_owner(lock);
64186
64187 /* set it to 0 if there are no waiters left: */
64188diff -urNp linux-2.6.32.45/kernel/mutex-debug.c linux-2.6.32.45/kernel/mutex-debug.c
64189--- linux-2.6.32.45/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
64190+++ linux-2.6.32.45/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
64191@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64192 }
64193
64194 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64195- struct thread_info *ti)
64196+ struct task_struct *task)
64197 {
64198 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64199
64200 /* Mark the current thread as blocked on the lock: */
64201- ti->task->blocked_on = waiter;
64202+ task->blocked_on = waiter;
64203 }
64204
64205 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64206- struct thread_info *ti)
64207+ struct task_struct *task)
64208 {
64209 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64210- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64211- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64212- ti->task->blocked_on = NULL;
64213+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64214+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64215+ task->blocked_on = NULL;
64216
64217 list_del_init(&waiter->list);
64218 waiter->task = NULL;
64219@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
64220 return;
64221
64222 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
64223- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
64224+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
64225 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
64226 mutex_clear_owner(lock);
64227 }
64228diff -urNp linux-2.6.32.45/kernel/mutex-debug.h linux-2.6.32.45/kernel/mutex-debug.h
64229--- linux-2.6.32.45/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
64230+++ linux-2.6.32.45/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
64231@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
64232 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64233 extern void debug_mutex_add_waiter(struct mutex *lock,
64234 struct mutex_waiter *waiter,
64235- struct thread_info *ti);
64236+ struct task_struct *task);
64237 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64238- struct thread_info *ti);
64239+ struct task_struct *task);
64240 extern void debug_mutex_unlock(struct mutex *lock);
64241 extern void debug_mutex_init(struct mutex *lock, const char *name,
64242 struct lock_class_key *key);
64243
64244 static inline void mutex_set_owner(struct mutex *lock)
64245 {
64246- lock->owner = current_thread_info();
64247+ lock->owner = current;
64248 }
64249
64250 static inline void mutex_clear_owner(struct mutex *lock)
64251diff -urNp linux-2.6.32.45/kernel/mutex.h linux-2.6.32.45/kernel/mutex.h
64252--- linux-2.6.32.45/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
64253+++ linux-2.6.32.45/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
64254@@ -19,7 +19,7 @@
64255 #ifdef CONFIG_SMP
64256 static inline void mutex_set_owner(struct mutex *lock)
64257 {
64258- lock->owner = current_thread_info();
64259+ lock->owner = current;
64260 }
64261
64262 static inline void mutex_clear_owner(struct mutex *lock)
64263diff -urNp linux-2.6.32.45/kernel/panic.c linux-2.6.32.45/kernel/panic.c
64264--- linux-2.6.32.45/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
64265+++ linux-2.6.32.45/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
64266@@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
64267 const char *board;
64268
64269 printk(KERN_WARNING "------------[ cut here ]------------\n");
64270- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64271+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64272 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64273 if (board)
64274 printk(KERN_WARNING "Hardware name: %s\n", board);
64275@@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64276 */
64277 void __stack_chk_fail(void)
64278 {
64279- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64280+ dump_stack();
64281+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64282 __builtin_return_address(0));
64283 }
64284 EXPORT_SYMBOL(__stack_chk_fail);
64285diff -urNp linux-2.6.32.45/kernel/params.c linux-2.6.32.45/kernel/params.c
64286--- linux-2.6.32.45/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
64287+++ linux-2.6.32.45/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
64288@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
64289 return ret;
64290 }
64291
64292-static struct sysfs_ops module_sysfs_ops = {
64293+static const struct sysfs_ops module_sysfs_ops = {
64294 .show = module_attr_show,
64295 .store = module_attr_store,
64296 };
64297@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
64298 return 0;
64299 }
64300
64301-static struct kset_uevent_ops module_uevent_ops = {
64302+static const struct kset_uevent_ops module_uevent_ops = {
64303 .filter = uevent_filter,
64304 };
64305
64306diff -urNp linux-2.6.32.45/kernel/perf_event.c linux-2.6.32.45/kernel/perf_event.c
64307--- linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:35:30.000000000 -0400
64308+++ linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:34:01.000000000 -0400
64309@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
64310 */
64311 int sysctl_perf_event_sample_rate __read_mostly = 100000;
64312
64313-static atomic64_t perf_event_id;
64314+static atomic64_unchecked_t perf_event_id;
64315
64316 /*
64317 * Lock for (sysadmin-configurable) event reservations:
64318@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
64319 * In order to keep per-task stats reliable we need to flip the event
64320 * values when we flip the contexts.
64321 */
64322- value = atomic64_read(&next_event->count);
64323- value = atomic64_xchg(&event->count, value);
64324- atomic64_set(&next_event->count, value);
64325+ value = atomic64_read_unchecked(&next_event->count);
64326+ value = atomic64_xchg_unchecked(&event->count, value);
64327+ atomic64_set_unchecked(&next_event->count, value);
64328
64329 swap(event->total_time_enabled, next_event->total_time_enabled);
64330 swap(event->total_time_running, next_event->total_time_running);
64331@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
64332 update_event_times(event);
64333 }
64334
64335- return atomic64_read(&event->count);
64336+ return atomic64_read_unchecked(&event->count);
64337 }
64338
64339 /*
64340@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
64341 values[n++] = 1 + leader->nr_siblings;
64342 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64343 values[n++] = leader->total_time_enabled +
64344- atomic64_read(&leader->child_total_time_enabled);
64345+ atomic64_read_unchecked(&leader->child_total_time_enabled);
64346 }
64347 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64348 values[n++] = leader->total_time_running +
64349- atomic64_read(&leader->child_total_time_running);
64350+ atomic64_read_unchecked(&leader->child_total_time_running);
64351 }
64352
64353 size = n * sizeof(u64);
64354@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
64355 values[n++] = perf_event_read_value(event);
64356 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64357 values[n++] = event->total_time_enabled +
64358- atomic64_read(&event->child_total_time_enabled);
64359+ atomic64_read_unchecked(&event->child_total_time_enabled);
64360 }
64361 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64362 values[n++] = event->total_time_running +
64363- atomic64_read(&event->child_total_time_running);
64364+ atomic64_read_unchecked(&event->child_total_time_running);
64365 }
64366 if (read_format & PERF_FORMAT_ID)
64367 values[n++] = primary_event_id(event);
64368@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
64369 static void perf_event_reset(struct perf_event *event)
64370 {
64371 (void)perf_event_read(event);
64372- atomic64_set(&event->count, 0);
64373+ atomic64_set_unchecked(&event->count, 0);
64374 perf_event_update_userpage(event);
64375 }
64376
64377@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
64378 ++userpg->lock;
64379 barrier();
64380 userpg->index = perf_event_index(event);
64381- userpg->offset = atomic64_read(&event->count);
64382+ userpg->offset = atomic64_read_unchecked(&event->count);
64383 if (event->state == PERF_EVENT_STATE_ACTIVE)
64384- userpg->offset -= atomic64_read(&event->hw.prev_count);
64385+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
64386
64387 userpg->time_enabled = event->total_time_enabled +
64388- atomic64_read(&event->child_total_time_enabled);
64389+ atomic64_read_unchecked(&event->child_total_time_enabled);
64390
64391 userpg->time_running = event->total_time_running +
64392- atomic64_read(&event->child_total_time_running);
64393+ atomic64_read_unchecked(&event->child_total_time_running);
64394
64395 barrier();
64396 ++userpg->lock;
64397@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
64398 u64 values[4];
64399 int n = 0;
64400
64401- values[n++] = atomic64_read(&event->count);
64402+ values[n++] = atomic64_read_unchecked(&event->count);
64403 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64404 values[n++] = event->total_time_enabled +
64405- atomic64_read(&event->child_total_time_enabled);
64406+ atomic64_read_unchecked(&event->child_total_time_enabled);
64407 }
64408 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64409 values[n++] = event->total_time_running +
64410- atomic64_read(&event->child_total_time_running);
64411+ atomic64_read_unchecked(&event->child_total_time_running);
64412 }
64413 if (read_format & PERF_FORMAT_ID)
64414 values[n++] = primary_event_id(event);
64415@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
64416 if (leader != event)
64417 leader->pmu->read(leader);
64418
64419- values[n++] = atomic64_read(&leader->count);
64420+ values[n++] = atomic64_read_unchecked(&leader->count);
64421 if (read_format & PERF_FORMAT_ID)
64422 values[n++] = primary_event_id(leader);
64423
64424@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
64425 if (sub != event)
64426 sub->pmu->read(sub);
64427
64428- values[n++] = atomic64_read(&sub->count);
64429+ values[n++] = atomic64_read_unchecked(&sub->count);
64430 if (read_format & PERF_FORMAT_ID)
64431 values[n++] = primary_event_id(sub);
64432
64433@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf
64434 {
64435 struct hw_perf_event *hwc = &event->hw;
64436
64437- atomic64_add(nr, &event->count);
64438+ atomic64_add_unchecked(nr, &event->count);
64439
64440 if (!hwc->sample_period)
64441 return;
64442@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(
64443 u64 now;
64444
64445 now = cpu_clock(cpu);
64446- prev = atomic64_read(&event->hw.prev_count);
64447- atomic64_set(&event->hw.prev_count, now);
64448- atomic64_add(now - prev, &event->count);
64449+ prev = atomic64_read_unchecked(&event->hw.prev_count);
64450+ atomic64_set_unchecked(&event->hw.prev_count, now);
64451+ atomic64_add_unchecked(now - prev, &event->count);
64452 }
64453
64454 static int cpu_clock_perf_event_enable(struct perf_event *event)
64455@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(s
64456 struct hw_perf_event *hwc = &event->hw;
64457 int cpu = raw_smp_processor_id();
64458
64459- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
64460+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
64461 perf_swevent_start_hrtimer(event);
64462
64463 return 0;
64464@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update
64465 u64 prev;
64466 s64 delta;
64467
64468- prev = atomic64_xchg(&event->hw.prev_count, now);
64469+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
64470 delta = now - prev;
64471- atomic64_add(delta, &event->count);
64472+ atomic64_add_unchecked(delta, &event->count);
64473 }
64474
64475 static int task_clock_perf_event_enable(struct perf_event *event)
64476@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(
64477
64478 now = event->ctx->time;
64479
64480- atomic64_set(&hwc->prev_count, now);
64481+ atomic64_set_unchecked(&hwc->prev_count, now);
64482
64483 perf_swevent_start_hrtimer(event);
64484
64485@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr
64486 event->parent = parent_event;
64487
64488 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64489- event->id = atomic64_inc_return(&perf_event_id);
64490+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
64491
64492 event->state = PERF_EVENT_STATE_INACTIVE;
64493
64494@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf
64495 if (child_event->attr.inherit_stat)
64496 perf_event_read_event(child_event, child);
64497
64498- child_val = atomic64_read(&child_event->count);
64499+ child_val = atomic64_read_unchecked(&child_event->count);
64500
64501 /*
64502 * Add back the child's count to the parent's count:
64503 */
64504- atomic64_add(child_val, &parent_event->count);
64505- atomic64_add(child_event->total_time_enabled,
64506+ atomic64_add_unchecked(child_val, &parent_event->count);
64507+ atomic64_add_unchecked(child_event->total_time_enabled,
64508 &parent_event->child_total_time_enabled);
64509- atomic64_add(child_event->total_time_running,
64510+ atomic64_add_unchecked(child_event->total_time_running,
64511 &parent_event->child_total_time_running);
64512
64513 /*
64514diff -urNp linux-2.6.32.45/kernel/pid.c linux-2.6.32.45/kernel/pid.c
64515--- linux-2.6.32.45/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
64516+++ linux-2.6.32.45/kernel/pid.c 2011-07-14 19:15:33.000000000 -0400
64517@@ -33,6 +33,7 @@
64518 #include <linux/rculist.h>
64519 #include <linux/bootmem.h>
64520 #include <linux/hash.h>
64521+#include <linux/security.h>
64522 #include <linux/pid_namespace.h>
64523 #include <linux/init_task.h>
64524 #include <linux/syscalls.h>
64525@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64526
64527 int pid_max = PID_MAX_DEFAULT;
64528
64529-#define RESERVED_PIDS 300
64530+#define RESERVED_PIDS 500
64531
64532 int pid_max_min = RESERVED_PIDS + 1;
64533 int pid_max_max = PID_MAX_LIMIT;
64534@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
64535 */
64536 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64537 {
64538- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64539+ struct task_struct *task;
64540+
64541+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64542+
64543+ if (gr_pid_is_chrooted(task))
64544+ return NULL;
64545+
64546+ return task;
64547 }
64548
64549 struct task_struct *find_task_by_vpid(pid_t vnr)
64550@@ -391,6 +399,13 @@ struct task_struct *find_task_by_vpid(pi
64551 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64552 }
64553
64554+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64555+{
64556+ struct task_struct *task;
64557+
64558+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64559+}
64560+
64561 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64562 {
64563 struct pid *pid;
64564diff -urNp linux-2.6.32.45/kernel/posix-cpu-timers.c linux-2.6.32.45/kernel/posix-cpu-timers.c
64565--- linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
64566+++ linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-08-06 09:33:44.000000000 -0400
64567@@ -6,6 +6,7 @@
64568 #include <linux/posix-timers.h>
64569 #include <linux/errno.h>
64570 #include <linux/math64.h>
64571+#include <linux/security.h>
64572 #include <asm/uaccess.h>
64573 #include <linux/kernel_stat.h>
64574 #include <trace/events/timer.h>
64575@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(st
64576
64577 static __init int init_posix_cpu_timers(void)
64578 {
64579- struct k_clock process = {
64580+ static struct k_clock process = {
64581 .clock_getres = process_cpu_clock_getres,
64582 .clock_get = process_cpu_clock_get,
64583 .clock_set = do_posix_clock_nosettime,
64584@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(
64585 .nsleep = process_cpu_nsleep,
64586 .nsleep_restart = process_cpu_nsleep_restart,
64587 };
64588- struct k_clock thread = {
64589+ static struct k_clock thread = {
64590 .clock_getres = thread_cpu_clock_getres,
64591 .clock_get = thread_cpu_clock_get,
64592 .clock_set = do_posix_clock_nosettime,
64593diff -urNp linux-2.6.32.45/kernel/posix-timers.c linux-2.6.32.45/kernel/posix-timers.c
64594--- linux-2.6.32.45/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
64595+++ linux-2.6.32.45/kernel/posix-timers.c 2011-08-06 09:34:14.000000000 -0400
64596@@ -42,6 +42,7 @@
64597 #include <linux/compiler.h>
64598 #include <linux/idr.h>
64599 #include <linux/posix-timers.h>
64600+#include <linux/grsecurity.h>
64601 #include <linux/syscalls.h>
64602 #include <linux/wait.h>
64603 #include <linux/workqueue.h>
64604@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
64605 * which we beg off on and pass to do_sys_settimeofday().
64606 */
64607
64608-static struct k_clock posix_clocks[MAX_CLOCKS];
64609+static struct k_clock *posix_clocks[MAX_CLOCKS];
64610
64611 /*
64612 * These ones are defined below.
64613@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k
64614 */
64615 #define CLOCK_DISPATCH(clock, call, arglist) \
64616 ((clock) < 0 ? posix_cpu_##call arglist : \
64617- (posix_clocks[clock].call != NULL \
64618- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
64619+ (posix_clocks[clock]->call != NULL \
64620+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
64621
64622 /*
64623 * Default clock hook functions when the struct k_clock passed
64624@@ -172,7 +173,7 @@ static inline int common_clock_getres(co
64625 struct timespec *tp)
64626 {
64627 tp->tv_sec = 0;
64628- tp->tv_nsec = posix_clocks[which_clock].res;
64629+ tp->tv_nsec = posix_clocks[which_clock]->res;
64630 return 0;
64631 }
64632
64633@@ -217,9 +218,11 @@ static inline int invalid_clockid(const
64634 return 0;
64635 if ((unsigned) which_clock >= MAX_CLOCKS)
64636 return 1;
64637- if (posix_clocks[which_clock].clock_getres != NULL)
64638+ if (!posix_clocks[which_clock])
64639 return 0;
64640- if (posix_clocks[which_clock].res != 0)
64641+ if (posix_clocks[which_clock]->clock_getres != NULL)
64642+ return 0;
64643+ if (posix_clocks[which_clock]->res != 0)
64644 return 0;
64645 return 1;
64646 }
64647@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t
64648 */
64649 static __init int init_posix_timers(void)
64650 {
64651- struct k_clock clock_realtime = {
64652+ static struct k_clock clock_realtime = {
64653 .clock_getres = hrtimer_get_res,
64654 };
64655- struct k_clock clock_monotonic = {
64656+ static struct k_clock clock_monotonic = {
64657 .clock_getres = hrtimer_get_res,
64658 .clock_get = posix_ktime_get_ts,
64659 .clock_set = do_posix_clock_nosettime,
64660 };
64661- struct k_clock clock_monotonic_raw = {
64662+ static struct k_clock clock_monotonic_raw = {
64663 .clock_getres = hrtimer_get_res,
64664 .clock_get = posix_get_monotonic_raw,
64665 .clock_set = do_posix_clock_nosettime,
64666 .timer_create = no_timer_create,
64667 .nsleep = no_nsleep,
64668 };
64669- struct k_clock clock_realtime_coarse = {
64670+ static struct k_clock clock_realtime_coarse = {
64671 .clock_getres = posix_get_coarse_res,
64672 .clock_get = posix_get_realtime_coarse,
64673 .clock_set = do_posix_clock_nosettime,
64674 .timer_create = no_timer_create,
64675 .nsleep = no_nsleep,
64676 };
64677- struct k_clock clock_monotonic_coarse = {
64678+ static struct k_clock clock_monotonic_coarse = {
64679 .clock_getres = posix_get_coarse_res,
64680 .clock_get = posix_get_monotonic_coarse,
64681 .clock_set = do_posix_clock_nosettime,
64682@@ -296,6 +299,8 @@ static __init int init_posix_timers(void
64683 .nsleep = no_nsleep,
64684 };
64685
64686+ pax_track_stack();
64687+
64688 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
64689 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
64690 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64691@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_
64692 return;
64693 }
64694
64695- posix_clocks[clock_id] = *new_clock;
64696+ posix_clocks[clock_id] = new_clock;
64697 }
64698 EXPORT_SYMBOL_GPL(register_posix_clock);
64699
64700@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64701 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64702 return -EFAULT;
64703
64704+	/* only the CLOCK_REALTIME clock can be set; all other clocks
64705+	   have their clock_set fptr set to a nosettime dummy function.
64706+	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
64707+	   call common_clock_set, which calls do_sys_settimeofday, which
64708+	   we hook.
64709+	 */
64710+
64711 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
64712 }
64713
64714diff -urNp linux-2.6.32.45/kernel/power/hibernate.c linux-2.6.32.45/kernel/power/hibernate.c
64715--- linux-2.6.32.45/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
64716+++ linux-2.6.32.45/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
64717@@ -48,14 +48,14 @@ enum {
64718
64719 static int hibernation_mode = HIBERNATION_SHUTDOWN;
64720
64721-static struct platform_hibernation_ops *hibernation_ops;
64722+static const struct platform_hibernation_ops *hibernation_ops;
64723
64724 /**
64725 * hibernation_set_ops - set the global hibernate operations
64726 * @ops: the hibernation operations to use in subsequent hibernation transitions
64727 */
64728
64729-void hibernation_set_ops(struct platform_hibernation_ops *ops)
64730+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
64731 {
64732 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
64733 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
64734diff -urNp linux-2.6.32.45/kernel/power/poweroff.c linux-2.6.32.45/kernel/power/poweroff.c
64735--- linux-2.6.32.45/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
64736+++ linux-2.6.32.45/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
64737@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64738 .enable_mask = SYSRQ_ENABLE_BOOT,
64739 };
64740
64741-static int pm_sysrq_init(void)
64742+static int __init pm_sysrq_init(void)
64743 {
64744 register_sysrq_key('o', &sysrq_poweroff_op);
64745 return 0;
64746diff -urNp linux-2.6.32.45/kernel/power/process.c linux-2.6.32.45/kernel/power/process.c
64747--- linux-2.6.32.45/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
64748+++ linux-2.6.32.45/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
64749@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
64750 struct timeval start, end;
64751 u64 elapsed_csecs64;
64752 unsigned int elapsed_csecs;
64753+ bool timedout = false;
64754
64755 do_gettimeofday(&start);
64756
64757 end_time = jiffies + TIMEOUT;
64758 do {
64759 todo = 0;
64760+ if (time_after(jiffies, end_time))
64761+ timedout = true;
64762 read_lock(&tasklist_lock);
64763 do_each_thread(g, p) {
64764 if (frozen(p) || !freezeable(p))
64765@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
64766 * It is "frozen enough". If the task does wake
64767 * up, it will immediately call try_to_freeze.
64768 */
64769- if (!task_is_stopped_or_traced(p) &&
64770- !freezer_should_skip(p))
64771+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64772 todo++;
64773+ if (timedout) {
64774+ printk(KERN_ERR "Task refusing to freeze:\n");
64775+ sched_show_task(p);
64776+ }
64777+ }
64778 } while_each_thread(g, p);
64779 read_unlock(&tasklist_lock);
64780 yield(); /* Yield is okay here */
64781- if (time_after(jiffies, end_time))
64782- break;
64783- } while (todo);
64784+ } while (todo && !timedout);
64785
64786 do_gettimeofday(&end);
64787 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
64788diff -urNp linux-2.6.32.45/kernel/power/suspend.c linux-2.6.32.45/kernel/power/suspend.c
64789--- linux-2.6.32.45/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
64790+++ linux-2.6.32.45/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
64791@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
64792 [PM_SUSPEND_MEM] = "mem",
64793 };
64794
64795-static struct platform_suspend_ops *suspend_ops;
64796+static const struct platform_suspend_ops *suspend_ops;
64797
64798 /**
64799 * suspend_set_ops - Set the global suspend method table.
64800 * @ops: Pointer to ops structure.
64801 */
64802-void suspend_set_ops(struct platform_suspend_ops *ops)
64803+void suspend_set_ops(const struct platform_suspend_ops *ops)
64804 {
64805 mutex_lock(&pm_mutex);
64806 suspend_ops = ops;
64807diff -urNp linux-2.6.32.45/kernel/printk.c linux-2.6.32.45/kernel/printk.c
64808--- linux-2.6.32.45/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
64809+++ linux-2.6.32.45/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
64810@@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
64811 char c;
64812 int error = 0;
64813
64814+#ifdef CONFIG_GRKERNSEC_DMESG
64815+ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
64816+ return -EPERM;
64817+#endif
64818+
64819 error = security_syslog(type);
64820 if (error)
64821 return error;
64822diff -urNp linux-2.6.32.45/kernel/profile.c linux-2.6.32.45/kernel/profile.c
64823--- linux-2.6.32.45/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
64824+++ linux-2.6.32.45/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
64825@@ -39,7 +39,7 @@ struct profile_hit {
64826 /* Oprofile timer tick hook */
64827 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64828
64829-static atomic_t *prof_buffer;
64830+static atomic_unchecked_t *prof_buffer;
64831 static unsigned long prof_len, prof_shift;
64832
64833 int prof_on __read_mostly;
64834@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
64835 hits[i].pc = 0;
64836 continue;
64837 }
64838- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64839+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64840 hits[i].hits = hits[i].pc = 0;
64841 }
64842 }
64843@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
64844 * Add the current hit(s) and flush the write-queue out
64845 * to the global buffer:
64846 */
64847- atomic_add(nr_hits, &prof_buffer[pc]);
64848+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64849 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64850- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64851+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64852 hits[i].pc = hits[i].hits = 0;
64853 }
64854 out:
64855@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
64856 if (prof_on != type || !prof_buffer)
64857 return;
64858 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64859- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64860+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64861 }
64862 #endif /* !CONFIG_SMP */
64863 EXPORT_SYMBOL_GPL(profile_hits);
64864@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64865 return -EFAULT;
64866 buf++; p++; count--; read++;
64867 }
64868- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64869+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64870 if (copy_to_user(buf, (void *)pnt, count))
64871 return -EFAULT;
64872 read += count;
64873@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64874 }
64875 #endif
64876 profile_discard_flip_buffers();
64877- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64878+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64879 return count;
64880 }
64881
64882diff -urNp linux-2.6.32.45/kernel/ptrace.c linux-2.6.32.45/kernel/ptrace.c
64883--- linux-2.6.32.45/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
64884+++ linux-2.6.32.45/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
64885@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
64886 return ret;
64887 }
64888
64889-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64890+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64891+ unsigned int log)
64892 {
64893 const struct cred *cred = current_cred(), *tcred;
64894
64895@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
64896 cred->gid != tcred->egid ||
64897 cred->gid != tcred->sgid ||
64898 cred->gid != tcred->gid) &&
64899- !capable(CAP_SYS_PTRACE)) {
64900+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64901+ (log && !capable(CAP_SYS_PTRACE)))
64902+ ) {
64903 rcu_read_unlock();
64904 return -EPERM;
64905 }
64906@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
64907 smp_rmb();
64908 if (task->mm)
64909 dumpable = get_dumpable(task->mm);
64910- if (!dumpable && !capable(CAP_SYS_PTRACE))
64911+ if (!dumpable &&
64912+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64913+ (log && !capable(CAP_SYS_PTRACE))))
64914 return -EPERM;
64915
64916 return security_ptrace_access_check(task, mode);
64917@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
64918 {
64919 int err;
64920 task_lock(task);
64921- err = __ptrace_may_access(task, mode);
64922+ err = __ptrace_may_access(task, mode, 0);
64923+ task_unlock(task);
64924+ return !err;
64925+}
64926+
64927+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64928+{
64929+ int err;
64930+ task_lock(task);
64931+ err = __ptrace_may_access(task, mode, 1);
64932 task_unlock(task);
64933 return !err;
64934 }
64935@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
64936 goto out;
64937
64938 task_lock(task);
64939- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64940+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64941 task_unlock(task);
64942 if (retval)
64943 goto unlock_creds;
64944@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
64945 goto unlock_tasklist;
64946
64947 task->ptrace = PT_PTRACED;
64948- if (capable(CAP_SYS_PTRACE))
64949+ if (capable_nolog(CAP_SYS_PTRACE))
64950 task->ptrace |= PT_PTRACE_CAP;
64951
64952 __ptrace_link(task, current);
64953@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
64954 {
64955 int copied = 0;
64956
64957+ pax_track_stack();
64958+
64959 while (len > 0) {
64960 char buf[128];
64961 int this_len, retval;
64962@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
64963 {
64964 int copied = 0;
64965
64966+ pax_track_stack();
64967+
64968 while (len > 0) {
64969 char buf[128];
64970 int this_len, retval;
64971@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
64972 int ret = -EIO;
64973 siginfo_t siginfo;
64974
64975+ pax_track_stack();
64976+
64977 switch (request) {
64978 case PTRACE_PEEKTEXT:
64979 case PTRACE_PEEKDATA:
64980@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
64981 ret = ptrace_setoptions(child, data);
64982 break;
64983 case PTRACE_GETEVENTMSG:
64984- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
64985+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
64986 break;
64987
64988 case PTRACE_GETSIGINFO:
64989 ret = ptrace_getsiginfo(child, &siginfo);
64990 if (!ret)
64991- ret = copy_siginfo_to_user((siginfo_t __user *) data,
64992+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
64993 &siginfo);
64994 break;
64995
64996 case PTRACE_SETSIGINFO:
64997- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
64998+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
64999 sizeof siginfo))
65000 ret = -EFAULT;
65001 else
65002@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
65003 goto out;
65004 }
65005
65006+ if (gr_handle_ptrace(child, request)) {
65007+ ret = -EPERM;
65008+ goto out_put_task_struct;
65009+ }
65010+
65011 if (request == PTRACE_ATTACH) {
65012 ret = ptrace_attach(child);
65013 /*
65014 * Some architectures need to do book-keeping after
65015 * a ptrace attach.
65016 */
65017- if (!ret)
65018+ if (!ret) {
65019 arch_ptrace_attach(child);
65020+ gr_audit_ptrace(child);
65021+ }
65022 goto out_put_task_struct;
65023 }
65024
65025@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
65026 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65027 if (copied != sizeof(tmp))
65028 return -EIO;
65029- return put_user(tmp, (unsigned long __user *)data);
65030+ return put_user(tmp, (__force unsigned long __user *)data);
65031 }
65032
65033 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
65034@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
65035 siginfo_t siginfo;
65036 int ret;
65037
65038+ pax_track_stack();
65039+
65040 switch (request) {
65041 case PTRACE_PEEKTEXT:
65042 case PTRACE_PEEKDATA:
65043@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
65044 goto out;
65045 }
65046
65047+ if (gr_handle_ptrace(child, request)) {
65048+ ret = -EPERM;
65049+ goto out_put_task_struct;
65050+ }
65051+
65052 if (request == PTRACE_ATTACH) {
65053 ret = ptrace_attach(child);
65054 /*
65055 * Some architectures need to do book-keeping after
65056 * a ptrace attach.
65057 */
65058- if (!ret)
65059+ if (!ret) {
65060 arch_ptrace_attach(child);
65061+ gr_audit_ptrace(child);
65062+ }
65063 goto out_put_task_struct;
65064 }
65065
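
The kernel/ptrace.c hunks above thread a 'log' flag through __ptrace_may_access() so that an explicit PTRACE_ATTACH is audited via capable() while internal may-access probes use capable_nolog() and stay quiet, and they add gr_handle_ptrace()/gr_audit_ptrace() hooks around attach. A compact userspace sketch of the logged-versus-silent denial pattern; can_ptrace(), cap_check() and cap_check_silent() are hypothetical stand-ins, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for capable(CAP_SYS_PTRACE) / capable_nolog() */
static bool cap_check(void)
{
    printf("audit: CAP_SYS_PTRACE denied\n");   /* logged denial */
    return false;
}

static bool cap_check_silent(void)
{
    return false;                               /* silent denial */
}

/* 'log' mirrors the new __ptrace_may_access() parameter above */
static int can_ptrace(bool same_creds, bool log)
{
    if (same_creds)
        return 0;
    if (log ? cap_check() : cap_check_silent())
        return 0;
    return -1;                                  /* -EPERM in the kernel */
}

int main(void)
{
    can_ptrace(false, true);    /* explicit PTRACE_ATTACH: audited */
    can_ptrace(false, false);   /* internal may-access probe: quiet */
    return 0;
}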
65066diff -urNp linux-2.6.32.45/kernel/rcutorture.c linux-2.6.32.45/kernel/rcutorture.c
65067--- linux-2.6.32.45/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
65068+++ linux-2.6.32.45/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
65069@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
65070 { 0 };
65071 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65072 { 0 };
65073-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65074-static atomic_t n_rcu_torture_alloc;
65075-static atomic_t n_rcu_torture_alloc_fail;
65076-static atomic_t n_rcu_torture_free;
65077-static atomic_t n_rcu_torture_mberror;
65078-static atomic_t n_rcu_torture_error;
65079+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65080+static atomic_unchecked_t n_rcu_torture_alloc;
65081+static atomic_unchecked_t n_rcu_torture_alloc_fail;
65082+static atomic_unchecked_t n_rcu_torture_free;
65083+static atomic_unchecked_t n_rcu_torture_mberror;
65084+static atomic_unchecked_t n_rcu_torture_error;
65085 static long n_rcu_torture_timers;
65086 static struct list_head rcu_torture_removed;
65087 static cpumask_var_t shuffle_tmp_mask;
65088@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
65089
65090 spin_lock_bh(&rcu_torture_lock);
65091 if (list_empty(&rcu_torture_freelist)) {
65092- atomic_inc(&n_rcu_torture_alloc_fail);
65093+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65094 spin_unlock_bh(&rcu_torture_lock);
65095 return NULL;
65096 }
65097- atomic_inc(&n_rcu_torture_alloc);
65098+ atomic_inc_unchecked(&n_rcu_torture_alloc);
65099 p = rcu_torture_freelist.next;
65100 list_del_init(p);
65101 spin_unlock_bh(&rcu_torture_lock);
65102@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
65103 static void
65104 rcu_torture_free(struct rcu_torture *p)
65105 {
65106- atomic_inc(&n_rcu_torture_free);
65107+ atomic_inc_unchecked(&n_rcu_torture_free);
65108 spin_lock_bh(&rcu_torture_lock);
65109 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65110 spin_unlock_bh(&rcu_torture_lock);
65111@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
65112 i = rp->rtort_pipe_count;
65113 if (i > RCU_TORTURE_PIPE_LEN)
65114 i = RCU_TORTURE_PIPE_LEN;
65115- atomic_inc(&rcu_torture_wcount[i]);
65116+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65117 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65118 rp->rtort_mbtest = 0;
65119 rcu_torture_free(rp);
65120@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
65121 i = rp->rtort_pipe_count;
65122 if (i > RCU_TORTURE_PIPE_LEN)
65123 i = RCU_TORTURE_PIPE_LEN;
65124- atomic_inc(&rcu_torture_wcount[i]);
65125+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65126 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65127 rp->rtort_mbtest = 0;
65128 list_del(&rp->rtort_free);
65129@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
65130 i = old_rp->rtort_pipe_count;
65131 if (i > RCU_TORTURE_PIPE_LEN)
65132 i = RCU_TORTURE_PIPE_LEN;
65133- atomic_inc(&rcu_torture_wcount[i]);
65134+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65135 old_rp->rtort_pipe_count++;
65136 cur_ops->deferred_free(old_rp);
65137 }
65138@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
65139 return;
65140 }
65141 if (p->rtort_mbtest == 0)
65142- atomic_inc(&n_rcu_torture_mberror);
65143+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65144 spin_lock(&rand_lock);
65145 cur_ops->read_delay(&rand);
65146 n_rcu_torture_timers++;
65147@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
65148 continue;
65149 }
65150 if (p->rtort_mbtest == 0)
65151- atomic_inc(&n_rcu_torture_mberror);
65152+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65153 cur_ops->read_delay(&rand);
65154 preempt_disable();
65155 pipe_count = p->rtort_pipe_count;
65156@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
65157 rcu_torture_current,
65158 rcu_torture_current_version,
65159 list_empty(&rcu_torture_freelist),
65160- atomic_read(&n_rcu_torture_alloc),
65161- atomic_read(&n_rcu_torture_alloc_fail),
65162- atomic_read(&n_rcu_torture_free),
65163- atomic_read(&n_rcu_torture_mberror),
65164+ atomic_read_unchecked(&n_rcu_torture_alloc),
65165+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65166+ atomic_read_unchecked(&n_rcu_torture_free),
65167+ atomic_read_unchecked(&n_rcu_torture_mberror),
65168 n_rcu_torture_timers);
65169- if (atomic_read(&n_rcu_torture_mberror) != 0)
65170+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
65171 cnt += sprintf(&page[cnt], " !!!");
65172 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65173 if (i > 1) {
65174 cnt += sprintf(&page[cnt], "!!! ");
65175- atomic_inc(&n_rcu_torture_error);
65176+ atomic_inc_unchecked(&n_rcu_torture_error);
65177 WARN_ON_ONCE(1);
65178 }
65179 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65180@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
65181 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65182 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65183 cnt += sprintf(&page[cnt], " %d",
65184- atomic_read(&rcu_torture_wcount[i]));
65185+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65186 }
65187 cnt += sprintf(&page[cnt], "\n");
65188 if (cur_ops->stats)
65189@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
65190
65191 if (cur_ops->cleanup)
65192 cur_ops->cleanup();
65193- if (atomic_read(&n_rcu_torture_error))
65194+ if (atomic_read_unchecked(&n_rcu_torture_error))
65195 rcu_torture_print_module_parms("End of test: FAILURE");
65196 else
65197 rcu_torture_print_module_parms("End of test: SUCCESS");
65198@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
65199
65200 rcu_torture_current = NULL;
65201 rcu_torture_current_version = 0;
65202- atomic_set(&n_rcu_torture_alloc, 0);
65203- atomic_set(&n_rcu_torture_alloc_fail, 0);
65204- atomic_set(&n_rcu_torture_free, 0);
65205- atomic_set(&n_rcu_torture_mberror, 0);
65206- atomic_set(&n_rcu_torture_error, 0);
65207+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65208+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65209+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65210+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65211+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65212 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65213- atomic_set(&rcu_torture_wcount[i], 0);
65214+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65215 for_each_possible_cpu(cpu) {
65216 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65217 per_cpu(rcu_torture_count, cpu)[i] = 0;
65218diff -urNp linux-2.6.32.45/kernel/rcutree.c linux-2.6.32.45/kernel/rcutree.c
65219--- linux-2.6.32.45/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
65220+++ linux-2.6.32.45/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
65221@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
65222 /*
65223 * Do softirq processing for the current CPU.
65224 */
65225-static void rcu_process_callbacks(struct softirq_action *unused)
65226+static void rcu_process_callbacks(void)
65227 {
65228 /*
65229 * Memory references from any prior RCU read-side critical sections
65230diff -urNp linux-2.6.32.45/kernel/rcutree_plugin.h linux-2.6.32.45/kernel/rcutree_plugin.h
65231--- linux-2.6.32.45/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
65232+++ linux-2.6.32.45/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
65233@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
65234 */
65235 void __rcu_read_lock(void)
65236 {
65237- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
65238+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
65239 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
65240 }
65241 EXPORT_SYMBOL_GPL(__rcu_read_lock);
65242@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
65243 struct task_struct *t = current;
65244
65245 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
65246- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
65247+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
65248 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
65249 rcu_read_unlock_special(t);
65250 }
65251diff -urNp linux-2.6.32.45/kernel/relay.c linux-2.6.32.45/kernel/relay.c
65252--- linux-2.6.32.45/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
65253+++ linux-2.6.32.45/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
65254@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
65255 unsigned int flags,
65256 int *nonpad_ret)
65257 {
65258- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
65259+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
65260 struct rchan_buf *rbuf = in->private_data;
65261 unsigned int subbuf_size = rbuf->chan->subbuf_size;
65262 uint64_t pos = (uint64_t) *ppos;
65263@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
65264 .ops = &relay_pipe_buf_ops,
65265 .spd_release = relay_page_release,
65266 };
65267+ ssize_t ret;
65268+
65269+ pax_track_stack();
65270
65271 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65272 return 0;
65273diff -urNp linux-2.6.32.45/kernel/resource.c linux-2.6.32.45/kernel/resource.c
65274--- linux-2.6.32.45/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
65275+++ linux-2.6.32.45/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
65276@@ -132,8 +132,18 @@ static const struct file_operations proc
65277
65278 static int __init ioresources_init(void)
65279 {
65280+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65281+#ifdef CONFIG_GRKERNSEC_PROC_USER
65282+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65283+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65284+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65285+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65286+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65287+#endif
65288+#else
65289 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65290 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65291+#endif
65292 return 0;
65293 }
65294 __initcall(ioresources_init);
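
With GRKERNSEC_PROC_ADD, the ioresources_init() hunk above registers /proc/ioports and /proc/iomem as 0400 (or 0440 with GRKERNSEC_PROC_USERGROUP) instead of world-readable 0444, hiding physical address layout from unprivileged users. A quick way to see which variant a running kernel uses:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    struct stat st;

    if (stat("/proc/ioports", &st) == 0)
        printf("/proc/ioports mode: %04o\n",
               (unsigned int)(st.st_mode & 07777));
    else
        perror("stat /proc/ioports");
    return 0;
}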
65295diff -urNp linux-2.6.32.45/kernel/rtmutex.c linux-2.6.32.45/kernel/rtmutex.c
65296--- linux-2.6.32.45/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
65297+++ linux-2.6.32.45/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
65298@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
65299 */
65300 spin_lock_irqsave(&pendowner->pi_lock, flags);
65301
65302- WARN_ON(!pendowner->pi_blocked_on);
65303+ BUG_ON(!pendowner->pi_blocked_on);
65304 WARN_ON(pendowner->pi_blocked_on != waiter);
65305 WARN_ON(pendowner->pi_blocked_on->lock != lock);
65306
65307diff -urNp linux-2.6.32.45/kernel/rtmutex-tester.c linux-2.6.32.45/kernel/rtmutex-tester.c
65308--- linux-2.6.32.45/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
65309+++ linux-2.6.32.45/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
65310@@ -21,7 +21,7 @@
65311 #define MAX_RT_TEST_MUTEXES 8
65312
65313 static spinlock_t rttest_lock;
65314-static atomic_t rttest_event;
65315+static atomic_unchecked_t rttest_event;
65316
65317 struct test_thread_data {
65318 int opcode;
65319@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
65320
65321 case RTTEST_LOCKCONT:
65322 td->mutexes[td->opdata] = 1;
65323- td->event = atomic_add_return(1, &rttest_event);
65324+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65325 return 0;
65326
65327 case RTTEST_RESET:
65328@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
65329 return 0;
65330
65331 case RTTEST_RESETEVENT:
65332- atomic_set(&rttest_event, 0);
65333+ atomic_set_unchecked(&rttest_event, 0);
65334 return 0;
65335
65336 default:
65337@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
65338 return ret;
65339
65340 td->mutexes[id] = 1;
65341- td->event = atomic_add_return(1, &rttest_event);
65342+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65343 rt_mutex_lock(&mutexes[id]);
65344- td->event = atomic_add_return(1, &rttest_event);
65345+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65346 td->mutexes[id] = 4;
65347 return 0;
65348
65349@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
65350 return ret;
65351
65352 td->mutexes[id] = 1;
65353- td->event = atomic_add_return(1, &rttest_event);
65354+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65355 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65356- td->event = atomic_add_return(1, &rttest_event);
65357+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65358 td->mutexes[id] = ret ? 0 : 4;
65359 return ret ? -EINTR : 0;
65360
65361@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
65362 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65363 return ret;
65364
65365- td->event = atomic_add_return(1, &rttest_event);
65366+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65367 rt_mutex_unlock(&mutexes[id]);
65368- td->event = atomic_add_return(1, &rttest_event);
65369+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65370 td->mutexes[id] = 0;
65371 return 0;
65372
65373@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
65374 break;
65375
65376 td->mutexes[dat] = 2;
65377- td->event = atomic_add_return(1, &rttest_event);
65378+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65379 break;
65380
65381 case RTTEST_LOCKBKL:
65382@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
65383 return;
65384
65385 td->mutexes[dat] = 3;
65386- td->event = atomic_add_return(1, &rttest_event);
65387+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65388 break;
65389
65390 case RTTEST_LOCKNOWAIT:
65391@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
65392 return;
65393
65394 td->mutexes[dat] = 1;
65395- td->event = atomic_add_return(1, &rttest_event);
65396+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65397 return;
65398
65399 case RTTEST_LOCKBKL:
65400diff -urNp linux-2.6.32.45/kernel/sched.c linux-2.6.32.45/kernel/sched.c
65401--- linux-2.6.32.45/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
65402+++ linux-2.6.32.45/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
65403@@ -5043,7 +5043,7 @@ out:
65404 * In CONFIG_NO_HZ case, the idle load balance owner will do the
65405 * rebalancing for all the cpus for whom scheduler ticks are stopped.
65406 */
65407-static void run_rebalance_domains(struct softirq_action *h)
65408+static void run_rebalance_domains(void)
65409 {
65410 int this_cpu = smp_processor_id();
65411 struct rq *this_rq = cpu_rq(this_cpu);
65412@@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
65413 struct rq *rq;
65414 int cpu;
65415
65416+ pax_track_stack();
65417+
65418 need_resched:
65419 preempt_disable();
65420 cpu = smp_processor_id();
65421@@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
65422 * Look out! "owner" is an entirely speculative pointer
65423 * access and not reliable.
65424 */
65425-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
65426+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
65427 {
65428 unsigned int cpu;
65429 struct rq *rq;
65430@@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
65431 * DEBUG_PAGEALLOC could have unmapped it if
65432 * the mutex owner just released it and exited.
65433 */
65434- if (probe_kernel_address(&owner->cpu, cpu))
65435+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
65436 return 0;
65437 #else
65438- cpu = owner->cpu;
65439+ cpu = task_thread_info(owner)->cpu;
65440 #endif
65441
65442 /*
65443@@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
65444 /*
65445 * Is that owner really running on that cpu?
65446 */
65447- if (task_thread_info(rq->curr) != owner || need_resched())
65448+ if (rq->curr != owner || need_resched())
65449 return 0;
65450
65451 cpu_relax();
65452@@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
65453 /* convert nice value [19,-20] to rlimit style value [1,40] */
65454 int nice_rlim = 20 - nice;
65455
65456+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65457+
65458 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
65459 capable(CAP_SYS_NICE));
65460 }
65461@@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65462 if (nice > 19)
65463 nice = 19;
65464
65465- if (increment < 0 && !can_nice(current, nice))
65466+ if (increment < 0 && (!can_nice(current, nice) ||
65467+ gr_handle_chroot_nice()))
65468 return -EPERM;
65469
65470 retval = security_task_setnice(current, nice);
65471@@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
65472 long power;
65473 int weight;
65474
65475- WARN_ON(!sd || !sd->groups);
65476+ BUG_ON(!sd || !sd->groups);
65477
65478 if (cpu != group_first_cpu(sd->groups))
65479 return;
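
In kernel/sched.c above, can_nice() now feeds RLIMIT_NICE into the grsecurity learning engine and sys_nice() additionally refuses priority raises inside a chroot via gr_handle_chroot_nice(). From userspace the combined policy simply surfaces as EPERM; a small illustration:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    errno = 0;
    if (nice(-5) == -1 && errno != 0)
        perror("nice(-5)");     /* EPERM: rlimit, capability or chroot policy */
    else
        printf("now running at nice %d\n", nice(0));
    return 0;
}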
65480diff -urNp linux-2.6.32.45/kernel/signal.c linux-2.6.32.45/kernel/signal.c
65481--- linux-2.6.32.45/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
65482+++ linux-2.6.32.45/kernel/signal.c 2011-08-16 21:15:58.000000000 -0400
65483@@ -41,12 +41,12 @@
65484
65485 static struct kmem_cache *sigqueue_cachep;
65486
65487-static void __user *sig_handler(struct task_struct *t, int sig)
65488+static __sighandler_t sig_handler(struct task_struct *t, int sig)
65489 {
65490 return t->sighand->action[sig - 1].sa.sa_handler;
65491 }
65492
65493-static int sig_handler_ignored(void __user *handler, int sig)
65494+static int sig_handler_ignored(__sighandler_t handler, int sig)
65495 {
65496 /* Is it explicitly or implicitly ignored? */
65497 return handler == SIG_IGN ||
65498@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
65499 static int sig_task_ignored(struct task_struct *t, int sig,
65500 int from_ancestor_ns)
65501 {
65502- void __user *handler;
65503+ __sighandler_t handler;
65504
65505 handler = sig_handler(t, sig);
65506
65507@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
65508 */
65509 user = get_uid(__task_cred(t)->user);
65510 atomic_inc(&user->sigpending);
65511+
65512+ if (!override_rlimit)
65513+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65514 if (override_rlimit ||
65515 atomic_read(&user->sigpending) <=
65516 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
65517@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
65518
65519 int unhandled_signal(struct task_struct *tsk, int sig)
65520 {
65521- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65522+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65523 if (is_global_init(tsk))
65524 return 1;
65525 if (handler != SIG_IGN && handler != SIG_DFL)
65526@@ -627,6 +630,13 @@ static int check_kill_permission(int sig
65527 }
65528 }
65529
65530+ /* allow glibc communication via tgkill to other threads in our
65531+ thread group */
65532+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65533+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65534+ && gr_handle_signal(t, sig))
65535+ return -EPERM;
65536+
65537 return security_task_kill(t, info, sig, 0);
65538 }
65539
65540@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct si
65541 return send_signal(sig, info, p, 1);
65542 }
65543
65544-static int
65545+int
65546 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65547 {
65548 return send_signal(sig, info, t, 0);
65549@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *
65550 unsigned long int flags;
65551 int ret, blocked, ignored;
65552 struct k_sigaction *action;
65553+ int is_unhandled = 0;
65554
65555 spin_lock_irqsave(&t->sighand->siglock, flags);
65556 action = &t->sighand->action[sig-1];
65557@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *
65558 }
65559 if (action->sa.sa_handler == SIG_DFL)
65560 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65561+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65562+ is_unhandled = 1;
65563 ret = specific_send_sig_info(sig, info, t);
65564 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65565
65566+ /* only deal with unhandled signals; Java etc. trigger SIGSEGV during
65567+ normal operation */
65568+ if (is_unhandled) {
65569+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65570+ gr_handle_crash(t, sig);
65571+ }
65572+
65573 return ret;
65574 }
65575
65576@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct
65577 {
65578 int ret = check_kill_permission(sig, info, p);
65579
65580- if (!ret && sig)
65581+ if (!ret && sig) {
65582 ret = do_send_sig_info(sig, info, p, true);
65583+ if (!ret)
65584+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65585+ }
65586
65587 return ret;
65588 }
65589@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
65590 {
65591 siginfo_t info;
65592
65593+ pax_track_stack();
65594+
65595 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65596
65597 memset(&info, 0, sizeof info);
65598@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65599 int error = -ESRCH;
65600
65601 rcu_read_lock();
65602- p = find_task_by_vpid(pid);
65603+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65604+ /* allow glibc communication via tgkill to other threads in our
65605+ thread group */
65606+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65607+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
65608+ p = find_task_by_vpid_unrestricted(pid);
65609+ else
65610+#endif
65611+ p = find_task_by_vpid(pid);
65612 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65613 error = check_kill_permission(sig, info, p);
65614 /*
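
The kernel/signal.c changes above hook signal delivery for grsecurity (gr_handle_signal(), gr_log_signal(), gr_handle_crash()) but deliberately whitelist glibc's internal use of tgkill() with SI_TKILL and SIGRTMIN+1 within one thread group, which the setxid machinery depends on. The sketch below only demonstrates the tgkill() call shape from userspace, using SIGUSR1 rather than glibc's reserved signal:

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static volatile sig_atomic_t got;

static void handler(int sig)
{
    got = sig;
}

int main(void)
{
    pid_t tgid = getpid();
    pid_t tid  = (pid_t)syscall(SYS_gettid);

    signal(SIGUSR1, handler);
    syscall(SYS_tgkill, tgid, tid, SIGUSR1);    /* stays inside our thread group */
    printf("received signal %d\n", (int)got);
    return 0;
}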
65615diff -urNp linux-2.6.32.45/kernel/smp.c linux-2.6.32.45/kernel/smp.c
65616--- linux-2.6.32.45/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
65617+++ linux-2.6.32.45/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
65618@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
65619 }
65620 EXPORT_SYMBOL(smp_call_function);
65621
65622-void ipi_call_lock(void)
65623+void ipi_call_lock(void) __acquires(call_function.lock)
65624 {
65625 spin_lock(&call_function.lock);
65626 }
65627
65628-void ipi_call_unlock(void)
65629+void ipi_call_unlock(void) __releases(call_function.lock)
65630 {
65631 spin_unlock(&call_function.lock);
65632 }
65633
65634-void ipi_call_lock_irq(void)
65635+void ipi_call_lock_irq(void) __acquires(call_function.lock)
65636 {
65637 spin_lock_irq(&call_function.lock);
65638 }
65639
65640-void ipi_call_unlock_irq(void)
65641+void ipi_call_unlock_irq(void) __releases(call_function.lock)
65642 {
65643 spin_unlock_irq(&call_function.lock);
65644 }
65645diff -urNp linux-2.6.32.45/kernel/softirq.c linux-2.6.32.45/kernel/softirq.c
65646--- linux-2.6.32.45/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
65647+++ linux-2.6.32.45/kernel/softirq.c 2011-08-05 20:33:55.000000000 -0400
65648@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65649
65650 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65651
65652-char *softirq_to_name[NR_SOFTIRQS] = {
65653+const char * const softirq_to_name[NR_SOFTIRQS] = {
65654 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65655 "TASKLET", "SCHED", "HRTIMER", "RCU"
65656 };
65657@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
65658
65659 asmlinkage void __do_softirq(void)
65660 {
65661- struct softirq_action *h;
65662+ const struct softirq_action *h;
65663 __u32 pending;
65664 int max_restart = MAX_SOFTIRQ_RESTART;
65665 int cpu;
65666@@ -233,7 +233,7 @@ restart:
65667 kstat_incr_softirqs_this_cpu(h - softirq_vec);
65668
65669 trace_softirq_entry(h, softirq_vec);
65670- h->action(h);
65671+ h->action();
65672 trace_softirq_exit(h, softirq_vec);
65673 if (unlikely(prev_count != preempt_count())) {
65674 printk(KERN_ERR "huh, entered softirq %td %s %p"
65675@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
65676 local_irq_restore(flags);
65677 }
65678
65679-void open_softirq(int nr, void (*action)(struct softirq_action *))
65680+void open_softirq(int nr, void (*action)(void))
65681 {
65682- softirq_vec[nr].action = action;
65683+ pax_open_kernel();
65684+ *(void **)&softirq_vec[nr].action = action;
65685+ pax_close_kernel();
65686 }
65687
65688 /*
65689@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct
65690
65691 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65692
65693-static void tasklet_action(struct softirq_action *a)
65694+static void tasklet_action(void)
65695 {
65696 struct tasklet_struct *list;
65697
65698@@ -454,7 +456,7 @@ static void tasklet_action(struct softir
65699 }
65700 }
65701
65702-static void tasklet_hi_action(struct softirq_action *a)
65703+static void tasklet_hi_action(void)
65704 {
65705 struct tasklet_struct *list;
65706
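
The kernel/softirq.c hunks above drop the unused struct softirq_action argument from handlers and register them through pax_open_kernel()/pax_close_kernel(), so the softirq vector can live in read-only memory except during registration. A userspace analogue of such a read-mostly handler table, using mprotect() in place of the PaX helpers:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

typedef void (*action_t)(void);

static void timer_action(void)
{
    puts("timer softirq ran");
}

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    action_t *vec = mmap(NULL, pagesz, PROT_READ,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (vec == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    mprotect(vec, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() analogue  */
    vec[0] = timer_action;                          /* open_softirq() analogue     */
    mprotect(vec, pagesz, PROT_READ);               /* pax_close_kernel() analogue */

    vec[0]();                                       /* dispatch, like h->action()  */
    return 0;
}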
65707diff -urNp linux-2.6.32.45/kernel/sys.c linux-2.6.32.45/kernel/sys.c
65708--- linux-2.6.32.45/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
65709+++ linux-2.6.32.45/kernel/sys.c 2011-08-11 19:51:54.000000000 -0400
65710@@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
65711 error = -EACCES;
65712 goto out;
65713 }
65714+
65715+ if (gr_handle_chroot_setpriority(p, niceval)) {
65716+ error = -EACCES;
65717+ goto out;
65718+ }
65719+
65720 no_nice = security_task_setnice(p, niceval);
65721 if (no_nice) {
65722 error = no_nice;
65723@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
65724 !(user = find_user(who)))
65725 goto out_unlock; /* No processes for this user */
65726
65727- do_each_thread(g, p)
65728+ do_each_thread(g, p) {
65729 if (__task_cred(p)->uid == who)
65730 error = set_one_prio(p, niceval, error);
65731- while_each_thread(g, p);
65732+ } while_each_thread(g, p);
65733 if (who != cred->uid)
65734 free_uid(user); /* For find_user() */
65735 break;
65736@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
65737 !(user = find_user(who)))
65738 goto out_unlock; /* No processes for this user */
65739
65740- do_each_thread(g, p)
65741+ do_each_thread(g, p) {
65742 if (__task_cred(p)->uid == who) {
65743 niceval = 20 - task_nice(p);
65744 if (niceval > retval)
65745 retval = niceval;
65746 }
65747- while_each_thread(g, p);
65748+ } while_each_thread(g, p);
65749 if (who != cred->uid)
65750 free_uid(user); /* for find_user() */
65751 break;
65752@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65753 goto error;
65754 }
65755
65756+ if (gr_check_group_change(new->gid, new->egid, -1))
65757+ goto error;
65758+
65759 if (rgid != (gid_t) -1 ||
65760 (egid != (gid_t) -1 && egid != old->gid))
65761 new->sgid = new->egid;
65762@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65763 goto error;
65764
65765 retval = -EPERM;
65766+
65767+ if (gr_check_group_change(gid, gid, gid))
65768+ goto error;
65769+
65770 if (capable(CAP_SETGID))
65771 new->gid = new->egid = new->sgid = new->fsgid = gid;
65772 else if (gid == old->gid || gid == old->sgid)
65773@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
65774 if (!new_user)
65775 return -EAGAIN;
65776
65777+ /*
65778+ * We don't fail in case of NPROC limit excess here because too many
65779+ * poorly written programs don't check set*uid() return code, assuming
65780+ * it never fails if called by root. We may still enforce NPROC limit
65781+ * for programs doing set*uid()+execve() by harmlessly deferring the
65782+ * failure to the execve() stage.
65783+ */
65784 if (atomic_read(&new_user->processes) >=
65785 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
65786- new_user != INIT_USER) {
65787- free_uid(new_user);
65788- return -EAGAIN;
65789- }
65790+ new_user != INIT_USER)
65791+ current->flags |= PF_NPROC_EXCEEDED;
65792+ else
65793+ current->flags &= ~PF_NPROC_EXCEEDED;
65794
65795 free_uid(new->user);
65796 new->user = new_user;
65797@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65798 goto error;
65799 }
65800
65801+ if (gr_check_user_change(new->uid, new->euid, -1))
65802+ goto error;
65803+
65804 if (new->uid != old->uid) {
65805 retval = set_user(new);
65806 if (retval < 0)
65807@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65808 goto error;
65809
65810 retval = -EPERM;
65811+
65812+ if (gr_check_crash_uid(uid))
65813+ goto error;
65814+ if (gr_check_user_change(uid, uid, uid))
65815+ goto error;
65816+
65817 if (capable(CAP_SETUID)) {
65818 new->suid = new->uid = uid;
65819 if (uid != old->uid) {
65820@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65821 goto error;
65822 }
65823
65824+ if (gr_check_user_change(ruid, euid, -1))
65825+ goto error;
65826+
65827 if (ruid != (uid_t) -1) {
65828 new->uid = ruid;
65829 if (ruid != old->uid) {
65830@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65831 goto error;
65832 }
65833
65834+ if (gr_check_group_change(rgid, egid, -1))
65835+ goto error;
65836+
65837 if (rgid != (gid_t) -1)
65838 new->gid = rgid;
65839 if (egid != (gid_t) -1)
65840@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65841 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
65842 goto error;
65843
65844+ if (gr_check_user_change(-1, -1, uid))
65845+ goto error;
65846+
65847 if (uid == old->uid || uid == old->euid ||
65848 uid == old->suid || uid == old->fsuid ||
65849 capable(CAP_SETUID)) {
65850@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65851 if (gid == old->gid || gid == old->egid ||
65852 gid == old->sgid || gid == old->fsgid ||
65853 capable(CAP_SETGID)) {
65854+ if (gr_check_group_change(-1, -1, gid))
65855+ goto error;
65856+
65857 if (gid != old_fsgid) {
65858 new->fsgid = gid;
65859 goto change_okay;
65860@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65861 error = get_dumpable(me->mm);
65862 break;
65863 case PR_SET_DUMPABLE:
65864- if (arg2 < 0 || arg2 > 1) {
65865+ if (arg2 > 1) {
65866 error = -EINVAL;
65867 break;
65868 }
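
The kernel/sys.c changes above add grsecurity uid/gid-change and chroot checks, drop the always-false arg2 < 0 test on the unsigned PR_SET_DUMPABLE argument, and make set_user() stop failing set*uid() outright when RLIMIT_NPROC is exceeded, instead setting PF_NPROC_EXCEEDED and deferring the failure to execve(), because too much software never checks the return value. The sketch below shows the check those programs should have had all along (the target uid 65534 is just an example):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    uid_t target = 65534;           /* illustrative: often "nobody" */

    if (setuid(target) != 0) {      /* the check the comment says is so often missing */
        perror("setuid");
        exit(EXIT_FAILURE);         /* never keep running with unexpected privileges */
    }
    printf("now running as uid %d\n", (int)getuid());
    return 0;
}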
65869diff -urNp linux-2.6.32.45/kernel/sysctl.c linux-2.6.32.45/kernel/sysctl.c
65870--- linux-2.6.32.45/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
65871+++ linux-2.6.32.45/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
65872@@ -63,6 +63,13 @@
65873 static int deprecated_sysctl_warning(struct __sysctl_args *args);
65874
65875 #if defined(CONFIG_SYSCTL)
65876+#include <linux/grsecurity.h>
65877+#include <linux/grinternal.h>
65878+
65879+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65880+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65881+ const int op);
65882+extern int gr_handle_chroot_sysctl(const int op);
65883
65884 /* External variables not in a header file. */
65885 extern int C_A_D;
65886@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
65887 static int proc_taint(struct ctl_table *table, int write,
65888 void __user *buffer, size_t *lenp, loff_t *ppos);
65889 #endif
65890+extern ctl_table grsecurity_table[];
65891
65892 static struct ctl_table root_table[];
65893 static struct ctl_table_root sysctl_table_root;
65894@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
65895 int sysctl_legacy_va_layout;
65896 #endif
65897
65898+#ifdef CONFIG_PAX_SOFTMODE
65899+static ctl_table pax_table[] = {
65900+ {
65901+ .ctl_name = CTL_UNNUMBERED,
65902+ .procname = "softmode",
65903+ .data = &pax_softmode,
65904+ .maxlen = sizeof(unsigned int),
65905+ .mode = 0600,
65906+ .proc_handler = &proc_dointvec,
65907+ },
65908+
65909+ { .ctl_name = 0 }
65910+};
65911+#endif
65912+
65913 extern int prove_locking;
65914 extern int lock_stat;
65915
65916@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
65917 #endif
65918
65919 static struct ctl_table kern_table[] = {
65920+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65921+ {
65922+ .ctl_name = CTL_UNNUMBERED,
65923+ .procname = "grsecurity",
65924+ .mode = 0500,
65925+ .child = grsecurity_table,
65926+ },
65927+#endif
65928+
65929+#ifdef CONFIG_PAX_SOFTMODE
65930+ {
65931+ .ctl_name = CTL_UNNUMBERED,
65932+ .procname = "pax",
65933+ .mode = 0500,
65934+ .child = pax_table,
65935+ },
65936+#endif
65937+
65938 {
65939 .ctl_name = CTL_UNNUMBERED,
65940 .procname = "sched_child_runs_first",
65941@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
65942 .data = &modprobe_path,
65943 .maxlen = KMOD_PATH_LEN,
65944 .mode = 0644,
65945- .proc_handler = &proc_dostring,
65946- .strategy = &sysctl_string,
65947+ .proc_handler = &proc_dostring_modpriv,
65948+ .strategy = &sysctl_string_modpriv,
65949 },
65950 {
65951 .ctl_name = CTL_UNNUMBERED,
65952@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
65953 .mode = 0644,
65954 .proc_handler = &proc_dointvec
65955 },
65956+ {
65957+ .procname = "heap_stack_gap",
65958+ .data = &sysctl_heap_stack_gap,
65959+ .maxlen = sizeof(sysctl_heap_stack_gap),
65960+ .mode = 0644,
65961+ .proc_handler = proc_doulongvec_minmax,
65962+ },
65963 #else
65964 {
65965 .ctl_name = CTL_UNNUMBERED,
65966@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
65967 return 0;
65968 }
65969
65970+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
65971+
65972 static int parse_table(int __user *name, int nlen,
65973 void __user *oldval, size_t __user *oldlenp,
65974 void __user *newval, size_t newlen,
65975@@ -1821,7 +1871,7 @@ repeat:
65976 if (n == table->ctl_name) {
65977 int error;
65978 if (table->child) {
65979- if (sysctl_perm(root, table, MAY_EXEC))
65980+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
65981 return -EPERM;
65982 name++;
65983 nlen--;
65984@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
65985 int error;
65986 int mode;
65987
65988+ if (table->parent != NULL && table->parent->procname != NULL &&
65989+ table->procname != NULL &&
65990+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
65991+ return -EACCES;
65992+ if (gr_handle_chroot_sysctl(op))
65993+ return -EACCES;
65994+ error = gr_handle_sysctl(table, op);
65995+ if (error)
65996+ return error;
65997+
65998+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
65999+ if (error)
66000+ return error;
66001+
66002+ if (root->permissions)
66003+ mode = root->permissions(root, current->nsproxy, table);
66004+ else
66005+ mode = table->mode;
66006+
66007+ return test_perm(mode, op);
66008+}
66009+
66010+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
66011+{
66012+ int error;
66013+ int mode;
66014+
66015 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
66016 if (error)
66017 return error;
66018@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
66019 buffer, lenp, ppos);
66020 }
66021
66022+int proc_dostring_modpriv(struct ctl_table *table, int write,
66023+ void __user *buffer, size_t *lenp, loff_t *ppos)
66024+{
66025+ if (write && !capable(CAP_SYS_MODULE))
66026+ return -EPERM;
66027+
66028+ return _proc_do_string(table->data, table->maxlen, write,
66029+ buffer, lenp, ppos);
66030+}
66031+
66032
66033 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
66034 int *valp,
66035@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
66036 vleft = table->maxlen / sizeof(unsigned long);
66037 left = *lenp;
66038
66039- for (; left && vleft--; i++, min++, max++, first=0) {
66040+ for (; left && vleft--; i++, first=0) {
66041 if (write) {
66042 while (left) {
66043 char c;
66044@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
66045 return -ENOSYS;
66046 }
66047
66048+int proc_dostring_modpriv(struct ctl_table *table, int write,
66049+ void __user *buffer, size_t *lenp, loff_t *ppos)
66050+{
66051+ return -ENOSYS;
66052+}
66053+
66054 int proc_dointvec(struct ctl_table *table, int write,
66055 void __user *buffer, size_t *lenp, loff_t *ppos)
66056 {
66057@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
66058 return 1;
66059 }
66060
66061+int sysctl_string_modpriv(struct ctl_table *table,
66062+ void __user *oldval, size_t __user *oldlenp,
66063+ void __user *newval, size_t newlen)
66064+{
66065+ if (newval && newlen && !capable(CAP_SYS_MODULE))
66066+ return -EPERM;
66067+
66068+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
66069+}
66070+
66071 /*
66072 * This function makes sure that all of the integers in the vector
66073 * are between the minimum and maximum values given in the arrays
66074@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
66075 return -ENOSYS;
66076 }
66077
66078+int sysctl_string_modpriv(struct ctl_table *table,
66079+ void __user *oldval, size_t __user *oldlenp,
66080+ void __user *newval, size_t newlen)
66081+{
66082+ return -ENOSYS;
66083+}
66084+
66085 int sysctl_intvec(struct ctl_table *table,
66086 void __user *oldval, size_t __user *oldlenp,
66087 void __user *newval, size_t newlen)
66088@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66089 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66090 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66091 EXPORT_SYMBOL(proc_dostring);
66092+EXPORT_SYMBOL(proc_dostring_modpriv);
66093 EXPORT_SYMBOL(proc_doulongvec_minmax);
66094 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66095 EXPORT_SYMBOL(register_sysctl_table);
66096@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
66097 EXPORT_SYMBOL(sysctl_jiffies);
66098 EXPORT_SYMBOL(sysctl_ms_jiffies);
66099 EXPORT_SYMBOL(sysctl_string);
66100+EXPORT_SYMBOL(sysctl_string_modpriv);
66101 EXPORT_SYMBOL(sysctl_data);
66102 EXPORT_SYMBOL(unregister_sysctl_table);
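
The kernel/sysctl.c hunks above add the kernel.grsecurity and kernel.pax sysctl subtrees, gate writes to kernel.modprobe behind CAP_SYS_MODULE via proc_dostring_modpriv()/sysctl_string_modpriv(), add the vm heap_stack_gap knob, and route binary-sysctl directory traversal through sysctl_perm_nochk() so the grsecurity checks apply at the final node. A trivial consumer of one of the new nodes; the path assumes a CONFIG_PAX_SOFTMODE kernel:

#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/kernel/pax/softmode", "r");
    int softmode;

    if (f == NULL) {
        perror("kernel.pax.softmode (built without CONFIG_PAX_SOFTMODE?)");
        return 1;
    }
    if (fscanf(f, "%d", &softmode) == 1)
        printf("PaX softmode: %d\n", softmode);
    fclose(f);
    return 0;
}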
66103diff -urNp linux-2.6.32.45/kernel/sysctl_check.c linux-2.6.32.45/kernel/sysctl_check.c
66104--- linux-2.6.32.45/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
66105+++ linux-2.6.32.45/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
66106@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
66107 } else {
66108 if ((table->strategy == sysctl_data) ||
66109 (table->strategy == sysctl_string) ||
66110+ (table->strategy == sysctl_string_modpriv) ||
66111 (table->strategy == sysctl_intvec) ||
66112 (table->strategy == sysctl_jiffies) ||
66113 (table->strategy == sysctl_ms_jiffies) ||
66114 (table->proc_handler == proc_dostring) ||
66115+ (table->proc_handler == proc_dostring_modpriv) ||
66116 (table->proc_handler == proc_dointvec) ||
66117 (table->proc_handler == proc_dointvec_minmax) ||
66118 (table->proc_handler == proc_dointvec_jiffies) ||
66119diff -urNp linux-2.6.32.45/kernel/taskstats.c linux-2.6.32.45/kernel/taskstats.c
66120--- linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
66121+++ linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
66122@@ -26,9 +26,12 @@
66123 #include <linux/cgroup.h>
66124 #include <linux/fs.h>
66125 #include <linux/file.h>
66126+#include <linux/grsecurity.h>
66127 #include <net/genetlink.h>
66128 #include <asm/atomic.h>
66129
66130+extern int gr_is_taskstats_denied(int pid);
66131+
66132 /*
66133 * Maximum length of a cpumask that can be specified in
66134 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66135@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
66136 size_t size;
66137 cpumask_var_t mask;
66138
66139+ if (gr_is_taskstats_denied(current->pid))
66140+ return -EACCES;
66141+
66142 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
66143 return -ENOMEM;
66144
66145diff -urNp linux-2.6.32.45/kernel/time/tick-broadcast.c linux-2.6.32.45/kernel/time/tick-broadcast.c
66146--- linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
66147+++ linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
66148@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
66149 * then clear the broadcast bit.
66150 */
66151 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66152- int cpu = smp_processor_id();
66153+ cpu = smp_processor_id();
66154
66155 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66156 tick_broadcast_clear_oneshot(cpu);
66157diff -urNp linux-2.6.32.45/kernel/time/timekeeping.c linux-2.6.32.45/kernel/time/timekeeping.c
66158--- linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
66159+++ linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
66160@@ -14,6 +14,7 @@
66161 #include <linux/init.h>
66162 #include <linux/mm.h>
66163 #include <linux/sched.h>
66164+#include <linux/grsecurity.h>
66165 #include <linux/sysdev.h>
66166 #include <linux/clocksource.h>
66167 #include <linux/jiffies.h>
66168@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
66169 */
66170 struct timespec ts = xtime;
66171 timespec_add_ns(&ts, nsec);
66172- ACCESS_ONCE(xtime_cache) = ts;
66173+ ACCESS_ONCE_RW(xtime_cache) = ts;
66174 }
66175
66176 /* must hold xtime_lock */
66177@@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
66178 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66179 return -EINVAL;
66180
66181+ gr_log_timechange();
66182+
66183 write_seqlock_irqsave(&xtime_lock, flags);
66184
66185 timekeeping_forward_now();
66186diff -urNp linux-2.6.32.45/kernel/time/timer_list.c linux-2.6.32.45/kernel/time/timer_list.c
66187--- linux-2.6.32.45/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
66188+++ linux-2.6.32.45/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
66189@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66190
66191 static void print_name_offset(struct seq_file *m, void *sym)
66192 {
66193+#ifdef CONFIG_GRKERNSEC_HIDESYM
66194+ SEQ_printf(m, "<%p>", NULL);
66195+#else
66196 char symname[KSYM_NAME_LEN];
66197
66198 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66199 SEQ_printf(m, "<%p>", sym);
66200 else
66201 SEQ_printf(m, "%s", symname);
66202+#endif
66203 }
66204
66205 static void
66206@@ -112,7 +116,11 @@ next_one:
66207 static void
66208 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66209 {
66210+#ifdef CONFIG_GRKERNSEC_HIDESYM
66211+ SEQ_printf(m, " .base: %p\n", NULL);
66212+#else
66213 SEQ_printf(m, " .base: %p\n", base);
66214+#endif
66215 SEQ_printf(m, " .index: %d\n",
66216 base->index);
66217 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66218@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
66219 {
66220 struct proc_dir_entry *pe;
66221
66222+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66223+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66224+#else
66225 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66226+#endif
66227 if (!pe)
66228 return -ENOMEM;
66229 return 0;
66230diff -urNp linux-2.6.32.45/kernel/time/timer_stats.c linux-2.6.32.45/kernel/time/timer_stats.c
66231--- linux-2.6.32.45/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
66232+++ linux-2.6.32.45/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
66233@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66234 static unsigned long nr_entries;
66235 static struct entry entries[MAX_ENTRIES];
66236
66237-static atomic_t overflow_count;
66238+static atomic_unchecked_t overflow_count;
66239
66240 /*
66241 * The entries are in a hash-table, for fast lookup:
66242@@ -140,7 +140,7 @@ static void reset_entries(void)
66243 nr_entries = 0;
66244 memset(entries, 0, sizeof(entries));
66245 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66246- atomic_set(&overflow_count, 0);
66247+ atomic_set_unchecked(&overflow_count, 0);
66248 }
66249
66250 static struct entry *alloc_entry(void)
66251@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66252 if (likely(entry))
66253 entry->count++;
66254 else
66255- atomic_inc(&overflow_count);
66256+ atomic_inc_unchecked(&overflow_count);
66257
66258 out_unlock:
66259 spin_unlock_irqrestore(lock, flags);
66260@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66261
66262 static void print_name_offset(struct seq_file *m, unsigned long addr)
66263 {
66264+#ifdef CONFIG_GRKERNSEC_HIDESYM
66265+ seq_printf(m, "<%p>", NULL);
66266+#else
66267 char symname[KSYM_NAME_LEN];
66268
66269 if (lookup_symbol_name(addr, symname) < 0)
66270 seq_printf(m, "<%p>", (void *)addr);
66271 else
66272 seq_printf(m, "%s", symname);
66273+#endif
66274 }
66275
66276 static int tstats_show(struct seq_file *m, void *v)
66277@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66278
66279 seq_puts(m, "Timer Stats Version: v0.2\n");
66280 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66281- if (atomic_read(&overflow_count))
66282+ if (atomic_read_unchecked(&overflow_count))
66283 seq_printf(m, "Overflow: %d entries\n",
66284- atomic_read(&overflow_count));
66285+ atomic_read_unchecked(&overflow_count));
66286
66287 for (i = 0; i < nr_entries; i++) {
66288 entry = entries + i;
66289@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
66290 {
66291 struct proc_dir_entry *pe;
66292
66293+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66294+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66295+#else
66296 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66297+#endif
66298 if (!pe)
66299 return -ENOMEM;
66300 return 0;
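
Both timer_list.c and timer_stats.c above gain GRKERNSEC_HIDESYM variants of print_name_offset() that emit a NULL pointer instead of resolving a kernel symbol, and their /proc files are registered with tighter modes under GRKERNSEC_PROC_ADD. A stand-alone userspace rendering of the redaction idea (HIDESYM here is just a local macro standing in for the config option):

#include <stdio.h>

#define HIDESYM 1   /* stand-in for CONFIG_GRKERNSEC_HIDESYM */

static void print_name_offset(void *sym)
{
#if HIDESYM
    (void)sym;
    printf("<%p>\n", (void *)NULL);   /* redacted: no symbol, no address */
#else
    printf("<%p>\n", sym);            /* would leak a kernel/text address */
#endif
}

int main(void)
{
    int local;

    print_name_offset(&local);
    return 0;
}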
66301diff -urNp linux-2.6.32.45/kernel/time.c linux-2.6.32.45/kernel/time.c
66302--- linux-2.6.32.45/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
66303+++ linux-2.6.32.45/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
66304@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
66305 return error;
66306
66307 if (tz) {
66308+ /* do_settimeofday(), called below, does the logging, so don't
66309+ log twice */
66310+ if (!tv)
66311+ gr_log_timechange();
66312+
66313 /* SMP safe, global irq locking makes it work. */
66314 sys_tz = *tz;
66315 update_vsyscall_tz();
66316@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
66317 * Avoid unnecessary multiplications/divisions in the
66318 * two most common HZ cases:
66319 */
66320-unsigned int inline jiffies_to_msecs(const unsigned long j)
66321+inline unsigned int jiffies_to_msecs(const unsigned long j)
66322 {
66323 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
66324 return (MSEC_PER_SEC / HZ) * j;
66325@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
66326 }
66327 EXPORT_SYMBOL(jiffies_to_msecs);
66328
66329-unsigned int inline jiffies_to_usecs(const unsigned long j)
66330+inline unsigned int jiffies_to_usecs(const unsigned long j)
66331 {
66332 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
66333 return (USEC_PER_SEC / HZ) * j;
66334diff -urNp linux-2.6.32.45/kernel/timer.c linux-2.6.32.45/kernel/timer.c
66335--- linux-2.6.32.45/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
66336+++ linux-2.6.32.45/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
66337@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
66338 /*
66339 * This function runs timers and the timer-tq in bottom half context.
66340 */
66341-static void run_timer_softirq(struct softirq_action *h)
66342+static void run_timer_softirq(void)
66343 {
66344 struct tvec_base *base = __get_cpu_var(tvec_bases);
66345
66346diff -urNp linux-2.6.32.45/kernel/trace/blktrace.c linux-2.6.32.45/kernel/trace/blktrace.c
66347--- linux-2.6.32.45/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
66348+++ linux-2.6.32.45/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
66349@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
66350 struct blk_trace *bt = filp->private_data;
66351 char buf[16];
66352
66353- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66354+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66355
66356 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66357 }
66358@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
66359 return 1;
66360
66361 bt = buf->chan->private_data;
66362- atomic_inc(&bt->dropped);
66363+ atomic_inc_unchecked(&bt->dropped);
66364 return 0;
66365 }
66366
66367@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
66368
66369 bt->dir = dir;
66370 bt->dev = dev;
66371- atomic_set(&bt->dropped, 0);
66372+ atomic_set_unchecked(&bt->dropped, 0);
66373
66374 ret = -EIO;
66375 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66376diff -urNp linux-2.6.32.45/kernel/trace/ftrace.c linux-2.6.32.45/kernel/trace/ftrace.c
66377--- linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
66378+++ linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
66379@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
66380
66381 ip = rec->ip;
66382
66383+ ret = ftrace_arch_code_modify_prepare();
66384+ FTRACE_WARN_ON(ret);
66385+ if (ret)
66386+ return 0;
66387+
66388 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66389+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66390 if (ret) {
66391 ftrace_bug(ret, ip);
66392 rec->flags |= FTRACE_FL_FAILED;
66393- return 0;
66394 }
66395- return 1;
66396+ return ret ? 0 : 1;
66397 }
66398
66399 /*
66400diff -urNp linux-2.6.32.45/kernel/trace/ring_buffer.c linux-2.6.32.45/kernel/trace/ring_buffer.c
66401--- linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
66402+++ linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
66403@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
66404 * the reader page). But if the next page is a header page,
66405 * its flags will be non zero.
66406 */
66407-static int inline
66408+static inline int
66409 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
66410 struct buffer_page *page, struct list_head *list)
66411 {
66412diff -urNp linux-2.6.32.45/kernel/trace/trace.c linux-2.6.32.45/kernel/trace/trace.c
66413--- linux-2.6.32.45/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
66414+++ linux-2.6.32.45/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
66415@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
66416 size_t rem;
66417 unsigned int i;
66418
66419+ pax_track_stack();
66420+
66421 /* copy the tracer to avoid using a global lock all around */
66422 mutex_lock(&trace_types_lock);
66423 if (unlikely(old_tracer != current_trace && current_trace)) {
66424@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
66425 int entries, size, i;
66426 size_t ret;
66427
66428+ pax_track_stack();
66429+
66430 if (*ppos & (PAGE_SIZE - 1)) {
66431 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
66432 return -EINVAL;
66433@@ -3816,10 +3820,9 @@ static const struct file_operations trac
66434 };
66435 #endif
66436
66437-static struct dentry *d_tracer;
66438-
66439 struct dentry *tracing_init_dentry(void)
66440 {
66441+ static struct dentry *d_tracer;
66442 static int once;
66443
66444 if (d_tracer)
66445@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
66446 return d_tracer;
66447 }
66448
66449-static struct dentry *d_percpu;
66450-
66451 struct dentry *tracing_dentry_percpu(void)
66452 {
66453+ static struct dentry *d_percpu;
66454 static int once;
66455 struct dentry *d_tracer;
66456
66457diff -urNp linux-2.6.32.45/kernel/trace/trace_events.c linux-2.6.32.45/kernel/trace/trace_events.c
66458--- linux-2.6.32.45/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
66459+++ linux-2.6.32.45/kernel/trace/trace_events.c 2011-08-05 20:33:55.000000000 -0400
66460@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list
66461 * Modules must own their file_operations to keep up with
66462 * reference counting.
66463 */
66464+
66465 struct ftrace_module_file_ops {
66466 struct list_head list;
66467 struct module *mod;
66468- struct file_operations id;
66469- struct file_operations enable;
66470- struct file_operations format;
66471- struct file_operations filter;
66472 };
66473
66474 static void remove_subsystem_dir(const char *name)
66475@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod
66476
66477 file_ops->mod = mod;
66478
66479- file_ops->id = ftrace_event_id_fops;
66480- file_ops->id.owner = mod;
66481-
66482- file_ops->enable = ftrace_enable_fops;
66483- file_ops->enable.owner = mod;
66484-
66485- file_ops->filter = ftrace_event_filter_fops;
66486- file_ops->filter.owner = mod;
66487-
66488- file_ops->format = ftrace_event_format_fops;
66489- file_ops->format.owner = mod;
66490+ pax_open_kernel();
66491+ *(void **)&mod->trace_id.owner = mod;
66492+ *(void **)&mod->trace_enable.owner = mod;
66493+ *(void **)&mod->trace_filter.owner = mod;
66494+ *(void **)&mod->trace_format.owner = mod;
66495+ pax_close_kernel();
66496
66497 list_add(&file_ops->list, &ftrace_module_file_list);
66498
66499@@ -1063,8 +1055,8 @@ static void trace_module_add_events(stru
66500 call->mod = mod;
66501 list_add(&call->list, &ftrace_events);
66502 event_create_dir(call, d_events,
66503- &file_ops->id, &file_ops->enable,
66504- &file_ops->filter, &file_ops->format);
66505+ &mod->trace_id, &mod->trace_enable,
66506+ &mod->trace_filter, &mod->trace_format);
66507 }
66508 }
66509
66510diff -urNp linux-2.6.32.45/kernel/trace/trace_mmiotrace.c linux-2.6.32.45/kernel/trace/trace_mmiotrace.c
66511--- linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
66512+++ linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
66513@@ -23,7 +23,7 @@ struct header_iter {
66514 static struct trace_array *mmio_trace_array;
66515 static bool overrun_detected;
66516 static unsigned long prev_overruns;
66517-static atomic_t dropped_count;
66518+static atomic_unchecked_t dropped_count;
66519
66520 static void mmio_reset_data(struct trace_array *tr)
66521 {
66522@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
66523
66524 static unsigned long count_overruns(struct trace_iterator *iter)
66525 {
66526- unsigned long cnt = atomic_xchg(&dropped_count, 0);
66527+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66528 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66529
66530 if (over > prev_overruns)
66531@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
66532 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66533 sizeof(*entry), 0, pc);
66534 if (!event) {
66535- atomic_inc(&dropped_count);
66536+ atomic_inc_unchecked(&dropped_count);
66537 return;
66538 }
66539 entry = ring_buffer_event_data(event);
66540@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
66541 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66542 sizeof(*entry), 0, pc);
66543 if (!event) {
66544- atomic_inc(&dropped_count);
66545+ atomic_inc_unchecked(&dropped_count);
66546 return;
66547 }
66548 entry = ring_buffer_event_data(event);
66549diff -urNp linux-2.6.32.45/kernel/trace/trace_output.c linux-2.6.32.45/kernel/trace/trace_output.c
66550--- linux-2.6.32.45/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
66551+++ linux-2.6.32.45/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
66552@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
66553 return 0;
66554 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66555 if (!IS_ERR(p)) {
66556- p = mangle_path(s->buffer + s->len, p, "\n");
66557+ p = mangle_path(s->buffer + s->len, p, "\n\\");
66558 if (p) {
66559 s->len = p - s->buffer;
66560 return 1;
66561diff -urNp linux-2.6.32.45/kernel/trace/trace_stack.c linux-2.6.32.45/kernel/trace/trace_stack.c
66562--- linux-2.6.32.45/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
66563+++ linux-2.6.32.45/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
66564@@ -50,7 +50,7 @@ static inline void check_stack(void)
66565 return;
66566
66567 /* we do not handle interrupt stacks yet */
66568- if (!object_is_on_stack(&this_size))
66569+ if (!object_starts_on_stack(&this_size))
66570 return;
66571
66572 local_irq_save(flags);
66573diff -urNp linux-2.6.32.45/kernel/trace/trace_workqueue.c linux-2.6.32.45/kernel/trace/trace_workqueue.c
66574--- linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
66575+++ linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
66576@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
66577 int cpu;
66578 pid_t pid;
66579 /* Can be inserted from interrupt or user context, need to be atomic */
66580- atomic_t inserted;
66581+ atomic_unchecked_t inserted;
66582 /*
66583 * Don't need to be atomic, works are serialized in a single workqueue thread
66584 * on a single CPU.
66585@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
66586 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66587 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66588 if (node->pid == wq_thread->pid) {
66589- atomic_inc(&node->inserted);
66590+ atomic_inc_unchecked(&node->inserted);
66591 goto found;
66592 }
66593 }
66594@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
66595 tsk = get_pid_task(pid, PIDTYPE_PID);
66596 if (tsk) {
66597 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66598- atomic_read(&cws->inserted), cws->executed,
66599+ atomic_read_unchecked(&cws->inserted), cws->executed,
66600 tsk->comm);
66601 put_task_struct(tsk);
66602 }
66603diff -urNp linux-2.6.32.45/kernel/user.c linux-2.6.32.45/kernel/user.c
66604--- linux-2.6.32.45/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
66605+++ linux-2.6.32.45/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
66606@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
66607 spin_lock_irq(&uidhash_lock);
66608 up = uid_hash_find(uid, hashent);
66609 if (up) {
66610+ put_user_ns(ns);
66611 key_put(new->uid_keyring);
66612 key_put(new->session_keyring);
66613 kmem_cache_free(uid_cachep, new);
66614diff -urNp linux-2.6.32.45/lib/bug.c linux-2.6.32.45/lib/bug.c
66615--- linux-2.6.32.45/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
66616+++ linux-2.6.32.45/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
66617@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
66618 return BUG_TRAP_TYPE_NONE;
66619
66620 bug = find_bug(bugaddr);
66621+ if (!bug)
66622+ return BUG_TRAP_TYPE_NONE;
66623
66624 printk(KERN_EMERG "------------[ cut here ]------------\n");
66625
66626diff -urNp linux-2.6.32.45/lib/debugobjects.c linux-2.6.32.45/lib/debugobjects.c
66627--- linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
66628+++ linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
66629@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
66630 if (limit > 4)
66631 return;
66632
66633- is_on_stack = object_is_on_stack(addr);
66634+ is_on_stack = object_starts_on_stack(addr);
66635 if (is_on_stack == onstack)
66636 return;
66637
66638diff -urNp linux-2.6.32.45/lib/dma-debug.c linux-2.6.32.45/lib/dma-debug.c
66639--- linux-2.6.32.45/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
66640+++ linux-2.6.32.45/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
66641@@ -861,7 +861,7 @@ out:
66642
66643 static void check_for_stack(struct device *dev, void *addr)
66644 {
66645- if (object_is_on_stack(addr))
66646+ if (object_starts_on_stack(addr))
66647 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66648 "stack [addr=%p]\n", addr);
66649 }
66650diff -urNp linux-2.6.32.45/lib/idr.c linux-2.6.32.45/lib/idr.c
66651--- linux-2.6.32.45/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
66652+++ linux-2.6.32.45/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
66653@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
66654 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
66655
66656 /* if already at the top layer, we need to grow */
66657- if (id >= 1 << (idp->layers * IDR_BITS)) {
66658+ if (id >= (1 << (idp->layers * IDR_BITS))) {
66659 *starting_id = id;
66660 return IDR_NEED_TO_GROW;
66661 }
66662diff -urNp linux-2.6.32.45/lib/inflate.c linux-2.6.32.45/lib/inflate.c
66663--- linux-2.6.32.45/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
66664+++ linux-2.6.32.45/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
66665@@ -266,7 +266,7 @@ static void free(void *where)
66666 malloc_ptr = free_mem_ptr;
66667 }
66668 #else
66669-#define malloc(a) kmalloc(a, GFP_KERNEL)
66670+#define malloc(a) kmalloc((a), GFP_KERNEL)
66671 #define free(a) kfree(a)
66672 #endif
66673
66674diff -urNp linux-2.6.32.45/lib/Kconfig.debug linux-2.6.32.45/lib/Kconfig.debug
66675--- linux-2.6.32.45/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
66676+++ linux-2.6.32.45/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
66677@@ -905,7 +905,7 @@ config LATENCYTOP
66678 select STACKTRACE
66679 select SCHEDSTATS
66680 select SCHED_DEBUG
66681- depends on HAVE_LATENCYTOP_SUPPORT
66682+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
66683 help
66684 Enable this option if you want to use the LatencyTOP tool
66685 to find out which userspace is blocking on what kernel operations.
66686diff -urNp linux-2.6.32.45/lib/kobject.c linux-2.6.32.45/lib/kobject.c
66687--- linux-2.6.32.45/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
66688+++ linux-2.6.32.45/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
66689@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
66690 return ret;
66691 }
66692
66693-struct sysfs_ops kobj_sysfs_ops = {
66694+const struct sysfs_ops kobj_sysfs_ops = {
66695 .show = kobj_attr_show,
66696 .store = kobj_attr_store,
66697 };
66698@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
66699 * If the kset was not able to be created, NULL will be returned.
66700 */
66701 static struct kset *kset_create(const char *name,
66702- struct kset_uevent_ops *uevent_ops,
66703+ const struct kset_uevent_ops *uevent_ops,
66704 struct kobject *parent_kobj)
66705 {
66706 struct kset *kset;
66707@@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
66708 * If the kset was not able to be created, NULL will be returned.
66709 */
66710 struct kset *kset_create_and_add(const char *name,
66711- struct kset_uevent_ops *uevent_ops,
66712+ const struct kset_uevent_ops *uevent_ops,
66713 struct kobject *parent_kobj)
66714 {
66715 struct kset *kset;
66716diff -urNp linux-2.6.32.45/lib/kobject_uevent.c linux-2.6.32.45/lib/kobject_uevent.c
66717--- linux-2.6.32.45/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
66718+++ linux-2.6.32.45/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
66719@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
66720 const char *subsystem;
66721 struct kobject *top_kobj;
66722 struct kset *kset;
66723- struct kset_uevent_ops *uevent_ops;
66724+ const struct kset_uevent_ops *uevent_ops;
66725 u64 seq;
66726 int i = 0;
66727 int retval = 0;
66728diff -urNp linux-2.6.32.45/lib/kref.c linux-2.6.32.45/lib/kref.c
66729--- linux-2.6.32.45/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
66730+++ linux-2.6.32.45/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
66731@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
66732 */
66733 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66734 {
66735- WARN_ON(release == NULL);
66736+ BUG_ON(release == NULL);
66737 WARN_ON(release == (void (*)(struct kref *))kfree);
66738
66739 if (atomic_dec_and_test(&kref->refcount)) {
66740diff -urNp linux-2.6.32.45/lib/parser.c linux-2.6.32.45/lib/parser.c
66741--- linux-2.6.32.45/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
66742+++ linux-2.6.32.45/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
66743@@ -126,7 +126,7 @@ static int match_number(substring_t *s,
66744 char *buf;
66745 int ret;
66746
66747- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
66748+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
66749 if (!buf)
66750 return -ENOMEM;
66751 memcpy(buf, s->from, s->to - s->from);
66752diff -urNp linux-2.6.32.45/lib/radix-tree.c linux-2.6.32.45/lib/radix-tree.c
66753--- linux-2.6.32.45/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
66754+++ linux-2.6.32.45/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
66755@@ -81,7 +81,7 @@ struct radix_tree_preload {
66756 int nr;
66757 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66758 };
66759-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66760+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66761
66762 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
66763 {
66764diff -urNp linux-2.6.32.45/lib/random32.c linux-2.6.32.45/lib/random32.c
66765--- linux-2.6.32.45/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
66766+++ linux-2.6.32.45/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
66767@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
66768 */
66769 static inline u32 __seed(u32 x, u32 m)
66770 {
66771- return (x < m) ? x + m : x;
66772+ return (x <= m) ? x + m + 1 : x;
66773 }
66774
66775 /**
66776diff -urNp linux-2.6.32.45/lib/vsprintf.c linux-2.6.32.45/lib/vsprintf.c
66777--- linux-2.6.32.45/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
66778+++ linux-2.6.32.45/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
66779@@ -16,6 +16,9 @@
66780 * - scnprintf and vscnprintf
66781 */
66782
66783+#ifdef CONFIG_GRKERNSEC_HIDESYM
66784+#define __INCLUDED_BY_HIDESYM 1
66785+#endif
66786 #include <stdarg.h>
66787 #include <linux/module.h>
66788 #include <linux/types.h>
66789@@ -546,12 +549,12 @@ static char *number(char *buf, char *end
66790 return buf;
66791 }
66792
66793-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
66794+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
66795 {
66796 int len, i;
66797
66798 if ((unsigned long)s < PAGE_SIZE)
66799- s = "<NULL>";
66800+ s = "(null)";
66801
66802 len = strnlen(s, spec.precision);
66803
66804@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
66805 unsigned long value = (unsigned long) ptr;
66806 #ifdef CONFIG_KALLSYMS
66807 char sym[KSYM_SYMBOL_LEN];
66808- if (ext != 'f' && ext != 's')
66809+ if (ext != 'f' && ext != 's' && ext != 'a')
66810 sprint_symbol(sym, value);
66811 else
66812 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66813@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
66814 * - 'f' For simple symbolic function names without offset
66815 * - 'S' For symbolic direct pointers with offset
66816 * - 's' For symbolic direct pointers without offset
66817+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66818+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66819 * - 'R' For a struct resource pointer, it prints the range of
66820 * addresses (not the name nor the flags)
66821 * - 'M' For a 6-byte MAC address, it prints the address in the
66822@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
66823 struct printf_spec spec)
66824 {
66825 if (!ptr)
66826- return string(buf, end, "(null)", spec);
66827+ return string(buf, end, "(nil)", spec);
66828
66829 switch (*fmt) {
66830 case 'F':
66831@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
66832 case 's':
66833 /* Fallthrough */
66834 case 'S':
66835+#ifdef CONFIG_GRKERNSEC_HIDESYM
66836+ break;
66837+#else
66838+ return symbol_string(buf, end, ptr, spec, *fmt);
66839+#endif
66840+ case 'a':
66841+ /* Fallthrough */
66842+ case 'A':
66843 return symbol_string(buf, end, ptr, spec, *fmt);
66844 case 'R':
66845 return resource_string(buf, end, ptr, spec);
66846@@ -1445,7 +1458,7 @@ do { \
66847 size_t len;
66848 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
66849 || (unsigned long)save_str < PAGE_SIZE)
66850- save_str = "<NULL>";
66851+ save_str = "(null)";
66852 len = strlen(save_str);
66853 if (str + len + 1 < end)
66854 memcpy(str, save_str, len + 1);
66855@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
66856 typeof(type) value; \
66857 if (sizeof(type) == 8) { \
66858 args = PTR_ALIGN(args, sizeof(u32)); \
66859- *(u32 *)&value = *(u32 *)args; \
66860- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66861+ *(u32 *)&value = *(const u32 *)args; \
66862+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66863 } else { \
66864 args = PTR_ALIGN(args, sizeof(type)); \
66865- value = *(typeof(type) *)args; \
66866+ value = *(const typeof(type) *)args; \
66867 } \
66868 args += sizeof(type); \
66869 value; \
66870@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
66871 const char *str_arg = args;
66872 size_t len = strlen(str_arg);
66873 args += len + 1;
66874- str = string(str, end, (char *)str_arg, spec);
66875+ str = string(str, end, str_arg, spec);
66876 break;
66877 }
66878
66879diff -urNp linux-2.6.32.45/localversion-grsec linux-2.6.32.45/localversion-grsec
66880--- linux-2.6.32.45/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66881+++ linux-2.6.32.45/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
66882@@ -0,0 +1 @@
66883+-grsec
66884diff -urNp linux-2.6.32.45/Makefile linux-2.6.32.45/Makefile
66885--- linux-2.6.32.45/Makefile 2011-08-16 20:37:25.000000000 -0400
66886+++ linux-2.6.32.45/Makefile 2011-08-16 20:42:28.000000000 -0400
66887@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66888
66889 HOSTCC = gcc
66890 HOSTCXX = g++
66891-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66892-HOSTCXXFLAGS = -O2
66893+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66894+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66895+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
66896
66897 # Decide whether to build built-in, modular, or both.
66898 # Normally, just do built-in.
66899@@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
66900 KBUILD_CPPFLAGS := -D__KERNEL__
66901
66902 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
66903+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
66904 -fno-strict-aliasing -fno-common \
66905 -Werror-implicit-function-declaration \
66906 -Wno-format-security \
66907 -fno-delete-null-pointer-checks
66908+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
66909 KBUILD_AFLAGS := -D__ASSEMBLY__
66910
66911 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
66912@@ -376,8 +379,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
66913 # Rules shared between *config targets and build targets
66914
66915 # Basic helpers built in scripts/
66916-PHONY += scripts_basic
66917-scripts_basic:
66918+PHONY += scripts_basic gcc-plugins
66919+scripts_basic: gcc-plugins
66920 $(Q)$(MAKE) $(build)=scripts/basic
66921
66922 # To avoid any implicit rule to kick in, define an empty command.
66923@@ -403,7 +406,7 @@ endif
66924 # of make so .config is not included in this case either (for *config).
66925
66926 no-dot-config-targets := clean mrproper distclean \
66927- cscope TAGS tags help %docs check% \
66928+ cscope gtags TAGS tags help %docs check% \
66929 include/linux/version.h headers_% \
66930 kernelrelease kernelversion
66931
66932@@ -526,6 +529,25 @@ else
66933 KBUILD_CFLAGS += -O2
66934 endif
66935
66936+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
66937+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
66938+ifdef CONFIG_PAX_MEMORY_STACKLEAK
66939+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
66940+endif
66941+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
66942+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
66943+gcc-plugins:
66944+ $(Q)$(MAKE) $(build)=tools/gcc
66945+else
66946+gcc-plugins:
66947+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
66948+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
66949+else
66950+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
66951+endif
66952+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
66953+endif
66954+
66955 include $(srctree)/arch/$(SRCARCH)/Makefile
66956
66957 ifneq ($(CONFIG_FRAME_WARN),0)
66958@@ -644,7 +666,7 @@ export mod_strip_cmd
66959
66960
66961 ifeq ($(KBUILD_EXTMOD),)
66962-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
66963+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
66964
66965 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
66966 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
66967@@ -970,7 +992,7 @@ ifneq ($(KBUILD_SRC),)
66968 endif
66969
66970 # prepare2 creates a makefile if using a separate output directory
66971-prepare2: prepare3 outputmakefile
66972+prepare2: prepare3 outputmakefile gcc-plugins
66973
66974 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
66975 include/asm include/config/auto.conf
66976@@ -1198,7 +1220,7 @@ MRPROPER_FILES += .config .config.old in
66977 include/linux/autoconf.h include/linux/version.h \
66978 include/linux/utsrelease.h \
66979 include/linux/bounds.h include/asm*/asm-offsets.h \
66980- Module.symvers Module.markers tags TAGS cscope*
66981+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
66982
66983 # clean - Delete most, but leave enough to build external modules
66984 #
66985@@ -1289,6 +1311,7 @@ help:
66986 @echo ' modules_prepare - Set up for building external modules'
66987 @echo ' tags/TAGS - Generate tags file for editors'
66988 @echo ' cscope - Generate cscope index'
66989+ @echo ' gtags - Generate GNU GLOBAL index'
66990 @echo ' kernelrelease - Output the release version string'
66991 @echo ' kernelversion - Output the version stored in Makefile'
66992 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
66993@@ -1421,7 +1444,7 @@ clean: $(clean-dirs)
66994 $(call cmd,rmdirs)
66995 $(call cmd,rmfiles)
66996 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
66997- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
66998+ \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
66999 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
67000 -o -name '*.gcno' \) -type f -print | xargs rm -f
67001
67002@@ -1445,7 +1468,7 @@ endif # KBUILD_EXTMOD
67003 quiet_cmd_tags = GEN $@
67004 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
67005
67006-tags TAGS cscope: FORCE
67007+tags TAGS cscope gtags: FORCE
67008 $(call cmd,tags)
67009
67010 # Scripts to check various things for consistency
67011diff -urNp linux-2.6.32.45/mm/backing-dev.c linux-2.6.32.45/mm/backing-dev.c
67012--- linux-2.6.32.45/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
67013+++ linux-2.6.32.45/mm/backing-dev.c 2011-08-11 19:48:17.000000000 -0400
67014@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing
67015 list_add_tail_rcu(&wb->list, &bdi->wb_list);
67016 spin_unlock(&bdi->wb_lock);
67017
67018- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
67019+ tsk->flags |= PF_SWAPWRITE;
67020 set_freezable();
67021
67022 /*
67023@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
67024 * Add the default flusher task that gets created for any bdi
67025 * that has dirty data pending writeout
67026 */
67027-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67028+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67029 {
67030 if (!bdi_cap_writeback_dirty(bdi))
67031 return;
67032diff -urNp linux-2.6.32.45/mm/filemap.c linux-2.6.32.45/mm/filemap.c
67033--- linux-2.6.32.45/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
67034+++ linux-2.6.32.45/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
67035@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
67036 struct address_space *mapping = file->f_mapping;
67037
67038 if (!mapping->a_ops->readpage)
67039- return -ENOEXEC;
67040+ return -ENODEV;
67041 file_accessed(file);
67042 vma->vm_ops = &generic_file_vm_ops;
67043 vma->vm_flags |= VM_CAN_NONLINEAR;
67044@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
67045 *pos = i_size_read(inode);
67046
67047 if (limit != RLIM_INFINITY) {
67048+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67049 if (*pos >= limit) {
67050 send_sig(SIGXFSZ, current, 0);
67051 return -EFBIG;
67052diff -urNp linux-2.6.32.45/mm/fremap.c linux-2.6.32.45/mm/fremap.c
67053--- linux-2.6.32.45/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
67054+++ linux-2.6.32.45/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
67055@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67056 retry:
67057 vma = find_vma(mm, start);
67058
67059+#ifdef CONFIG_PAX_SEGMEXEC
67060+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67061+ goto out;
67062+#endif
67063+
67064 /*
67065 * Make sure the vma is shared, that it supports prefaulting,
67066 * and that the remapped range is valid and fully within
67067@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67068 /*
67069 * drop PG_Mlocked flag for over-mapped range
67070 */
67071- unsigned int saved_flags = vma->vm_flags;
67072+ unsigned long saved_flags = vma->vm_flags;
67073 munlock_vma_pages_range(vma, start, start + size);
67074 vma->vm_flags = saved_flags;
67075 }
67076diff -urNp linux-2.6.32.45/mm/highmem.c linux-2.6.32.45/mm/highmem.c
67077--- linux-2.6.32.45/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
67078+++ linux-2.6.32.45/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
67079@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
67080 * So no dangers, even with speculative execution.
67081 */
67082 page = pte_page(pkmap_page_table[i]);
67083+ pax_open_kernel();
67084 pte_clear(&init_mm, (unsigned long)page_address(page),
67085 &pkmap_page_table[i]);
67086-
67087+ pax_close_kernel();
67088 set_page_address(page, NULL);
67089 need_flush = 1;
67090 }
67091@@ -177,9 +178,11 @@ start:
67092 }
67093 }
67094 vaddr = PKMAP_ADDR(last_pkmap_nr);
67095+
67096+ pax_open_kernel();
67097 set_pte_at(&init_mm, vaddr,
67098 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67099-
67100+ pax_close_kernel();
67101 pkmap_count[last_pkmap_nr] = 1;
67102 set_page_address(page, (void *)vaddr);
67103
67104diff -urNp linux-2.6.32.45/mm/hugetlb.c linux-2.6.32.45/mm/hugetlb.c
67105--- linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
67106+++ linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
67107@@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
67108 return 1;
67109 }
67110
67111+#ifdef CONFIG_PAX_SEGMEXEC
67112+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67113+{
67114+ struct mm_struct *mm = vma->vm_mm;
67115+ struct vm_area_struct *vma_m;
67116+ unsigned long address_m;
67117+ pte_t *ptep_m;
67118+
67119+ vma_m = pax_find_mirror_vma(vma);
67120+ if (!vma_m)
67121+ return;
67122+
67123+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67124+ address_m = address + SEGMEXEC_TASK_SIZE;
67125+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67126+ get_page(page_m);
67127+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67128+}
67129+#endif
67130+
67131 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
67132 unsigned long address, pte_t *ptep, pte_t pte,
67133 struct page *pagecache_page)
67134@@ -2004,6 +2024,11 @@ retry_avoidcopy:
67135 huge_ptep_clear_flush(vma, address, ptep);
67136 set_huge_pte_at(mm, address, ptep,
67137 make_huge_pte(vma, new_page, 1));
67138+
67139+#ifdef CONFIG_PAX_SEGMEXEC
67140+ pax_mirror_huge_pte(vma, address, new_page);
67141+#endif
67142+
67143 /* Make the old page be freed below */
67144 new_page = old_page;
67145 }
67146@@ -2135,6 +2160,10 @@ retry:
67147 && (vma->vm_flags & VM_SHARED)));
67148 set_huge_pte_at(mm, address, ptep, new_pte);
67149
67150+#ifdef CONFIG_PAX_SEGMEXEC
67151+ pax_mirror_huge_pte(vma, address, page);
67152+#endif
67153+
67154 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67155 /* Optimization, do the COW without a second fault */
67156 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67157@@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
67158 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67159 struct hstate *h = hstate_vma(vma);
67160
67161+#ifdef CONFIG_PAX_SEGMEXEC
67162+ struct vm_area_struct *vma_m;
67163+
67164+ vma_m = pax_find_mirror_vma(vma);
67165+ if (vma_m) {
67166+ unsigned long address_m;
67167+
67168+ if (vma->vm_start > vma_m->vm_start) {
67169+ address_m = address;
67170+ address -= SEGMEXEC_TASK_SIZE;
67171+ vma = vma_m;
67172+ h = hstate_vma(vma);
67173+ } else
67174+ address_m = address + SEGMEXEC_TASK_SIZE;
67175+
67176+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67177+ return VM_FAULT_OOM;
67178+ address_m &= HPAGE_MASK;
67179+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67180+ }
67181+#endif
67182+
67183 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67184 if (!ptep)
67185 return VM_FAULT_OOM;
67186diff -urNp linux-2.6.32.45/mm/internal.h linux-2.6.32.45/mm/internal.h
67187--- linux-2.6.32.45/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
67188+++ linux-2.6.32.45/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
67189@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
67190 * in mm/page_alloc.c
67191 */
67192 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67193+extern void free_compound_page(struct page *page);
67194 extern void prep_compound_page(struct page *page, unsigned long order);
67195
67196
67197diff -urNp linux-2.6.32.45/mm/Kconfig linux-2.6.32.45/mm/Kconfig
67198--- linux-2.6.32.45/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
67199+++ linux-2.6.32.45/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
67200@@ -228,7 +228,7 @@ config KSM
67201 config DEFAULT_MMAP_MIN_ADDR
67202 int "Low address space to protect from user allocation"
67203 depends on MMU
67204- default 4096
67205+ default 65536
67206 help
67207 This is the portion of low virtual memory which should be protected
67208 from userspace allocation. Keeping a user from writing to low pages
67209diff -urNp linux-2.6.32.45/mm/kmemleak.c linux-2.6.32.45/mm/kmemleak.c
67210--- linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
67211+++ linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
67212@@ -358,7 +358,7 @@ static void print_unreferenced(struct se
67213
67214 for (i = 0; i < object->trace_len; i++) {
67215 void *ptr = (void *)object->trace[i];
67216- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67217+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67218 }
67219 }
67220
67221diff -urNp linux-2.6.32.45/mm/maccess.c linux-2.6.32.45/mm/maccess.c
67222--- linux-2.6.32.45/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
67223+++ linux-2.6.32.45/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
67224@@ -14,7 +14,7 @@
67225 * Safely read from address @src to the buffer at @dst. If a kernel fault
67226 * happens, handle that and return -EFAULT.
67227 */
67228-long probe_kernel_read(void *dst, void *src, size_t size)
67229+long probe_kernel_read(void *dst, const void *src, size_t size)
67230 {
67231 long ret;
67232 mm_segment_t old_fs = get_fs();
67233@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
67234 * Safely write to address @dst from the buffer at @src. If a kernel fault
67235 * happens, handle that and return -EFAULT.
67236 */
67237-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
67238+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
67239 {
67240 long ret;
67241 mm_segment_t old_fs = get_fs();
67242diff -urNp linux-2.6.32.45/mm/madvise.c linux-2.6.32.45/mm/madvise.c
67243--- linux-2.6.32.45/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
67244+++ linux-2.6.32.45/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
67245@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
67246 pgoff_t pgoff;
67247 unsigned long new_flags = vma->vm_flags;
67248
67249+#ifdef CONFIG_PAX_SEGMEXEC
67250+ struct vm_area_struct *vma_m;
67251+#endif
67252+
67253 switch (behavior) {
67254 case MADV_NORMAL:
67255 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67256@@ -103,6 +107,13 @@ success:
67257 /*
67258 * vm_flags is protected by the mmap_sem held in write mode.
67259 */
67260+
67261+#ifdef CONFIG_PAX_SEGMEXEC
67262+ vma_m = pax_find_mirror_vma(vma);
67263+ if (vma_m)
67264+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67265+#endif
67266+
67267 vma->vm_flags = new_flags;
67268
67269 out:
67270@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
67271 struct vm_area_struct ** prev,
67272 unsigned long start, unsigned long end)
67273 {
67274+
67275+#ifdef CONFIG_PAX_SEGMEXEC
67276+ struct vm_area_struct *vma_m;
67277+#endif
67278+
67279 *prev = vma;
67280 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67281 return -EINVAL;
67282@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
67283 zap_page_range(vma, start, end - start, &details);
67284 } else
67285 zap_page_range(vma, start, end - start, NULL);
67286+
67287+#ifdef CONFIG_PAX_SEGMEXEC
67288+ vma_m = pax_find_mirror_vma(vma);
67289+ if (vma_m) {
67290+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67291+ struct zap_details details = {
67292+ .nonlinear_vma = vma_m,
67293+ .last_index = ULONG_MAX,
67294+ };
67295+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67296+ } else
67297+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67298+ }
67299+#endif
67300+
67301 return 0;
67302 }
67303
67304@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67305 if (end < start)
67306 goto out;
67307
67308+#ifdef CONFIG_PAX_SEGMEXEC
67309+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67310+ if (end > SEGMEXEC_TASK_SIZE)
67311+ goto out;
67312+ } else
67313+#endif
67314+
67315+ if (end > TASK_SIZE)
67316+ goto out;
67317+
67318 error = 0;
67319 if (end == start)
67320 goto out;
67321diff -urNp linux-2.6.32.45/mm/memory.c linux-2.6.32.45/mm/memory.c
67322--- linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
67323+++ linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
67324@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
67325 return;
67326
67327 pmd = pmd_offset(pud, start);
67328+
67329+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67330 pud_clear(pud);
67331 pmd_free_tlb(tlb, pmd, start);
67332+#endif
67333+
67334 }
67335
67336 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67337@@ -219,9 +223,12 @@ static inline void free_pud_range(struct
67338 if (end - 1 > ceiling - 1)
67339 return;
67340
67341+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67342 pud = pud_offset(pgd, start);
67343 pgd_clear(pgd);
67344 pud_free_tlb(tlb, pud, start);
67345+#endif
67346+
67347 }
67348
67349 /*
67350@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
67351 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67352 i = 0;
67353
67354- do {
67355+ while (nr_pages) {
67356 struct vm_area_struct *vma;
67357
67358- vma = find_extend_vma(mm, start);
67359+ vma = find_vma(mm, start);
67360 if (!vma && in_gate_area(tsk, start)) {
67361 unsigned long pg = start & PAGE_MASK;
67362 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
67363@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
67364 continue;
67365 }
67366
67367- if (!vma ||
67368+ if (!vma || start < vma->vm_start ||
67369 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67370 !(vm_flags & vma->vm_flags))
67371 return i ? : -EFAULT;
67372@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
67373 start += PAGE_SIZE;
67374 nr_pages--;
67375 } while (nr_pages && start < vma->vm_end);
67376- } while (nr_pages);
67377+ }
67378 return i;
67379 }
67380
67381@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
67382 page_add_file_rmap(page);
67383 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67384
67385+#ifdef CONFIG_PAX_SEGMEXEC
67386+ pax_mirror_file_pte(vma, addr, page, ptl);
67387+#endif
67388+
67389 retval = 0;
67390 pte_unmap_unlock(pte, ptl);
67391 return retval;
67392@@ -1560,10 +1571,22 @@ out:
67393 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67394 struct page *page)
67395 {
67396+
67397+#ifdef CONFIG_PAX_SEGMEXEC
67398+ struct vm_area_struct *vma_m;
67399+#endif
67400+
67401 if (addr < vma->vm_start || addr >= vma->vm_end)
67402 return -EFAULT;
67403 if (!page_count(page))
67404 return -EINVAL;
67405+
67406+#ifdef CONFIG_PAX_SEGMEXEC
67407+ vma_m = pax_find_mirror_vma(vma);
67408+ if (vma_m)
67409+ vma_m->vm_flags |= VM_INSERTPAGE;
67410+#endif
67411+
67412 vma->vm_flags |= VM_INSERTPAGE;
67413 return insert_page(vma, addr, page, vma->vm_page_prot);
67414 }
67415@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
67416 unsigned long pfn)
67417 {
67418 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67419+ BUG_ON(vma->vm_mirror);
67420
67421 if (addr < vma->vm_start || addr >= vma->vm_end)
67422 return -EFAULT;
67423@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
67424 copy_user_highpage(dst, src, va, vma);
67425 }
67426
67427+#ifdef CONFIG_PAX_SEGMEXEC
67428+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67429+{
67430+ struct mm_struct *mm = vma->vm_mm;
67431+ spinlock_t *ptl;
67432+ pte_t *pte, entry;
67433+
67434+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67435+ entry = *pte;
67436+ if (!pte_present(entry)) {
67437+ if (!pte_none(entry)) {
67438+ BUG_ON(pte_file(entry));
67439+ free_swap_and_cache(pte_to_swp_entry(entry));
67440+ pte_clear_not_present_full(mm, address, pte, 0);
67441+ }
67442+ } else {
67443+ struct page *page;
67444+
67445+ flush_cache_page(vma, address, pte_pfn(entry));
67446+ entry = ptep_clear_flush(vma, address, pte);
67447+ BUG_ON(pte_dirty(entry));
67448+ page = vm_normal_page(vma, address, entry);
67449+ if (page) {
67450+ update_hiwater_rss(mm);
67451+ if (PageAnon(page))
67452+ dec_mm_counter(mm, anon_rss);
67453+ else
67454+ dec_mm_counter(mm, file_rss);
67455+ page_remove_rmap(page);
67456+ page_cache_release(page);
67457+ }
67458+ }
67459+ pte_unmap_unlock(pte, ptl);
67460+}
67461+
67462+/* PaX: if vma is mirrored, synchronize the mirror's PTE
67463+ *
67464+ * the ptl of the lower mapped page is held on entry and is not released on exit
67465+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67466+ */
67467+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67468+{
67469+ struct mm_struct *mm = vma->vm_mm;
67470+ unsigned long address_m;
67471+ spinlock_t *ptl_m;
67472+ struct vm_area_struct *vma_m;
67473+ pmd_t *pmd_m;
67474+ pte_t *pte_m, entry_m;
67475+
67476+ BUG_ON(!page_m || !PageAnon(page_m));
67477+
67478+ vma_m = pax_find_mirror_vma(vma);
67479+ if (!vma_m)
67480+ return;
67481+
67482+ BUG_ON(!PageLocked(page_m));
67483+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67484+ address_m = address + SEGMEXEC_TASK_SIZE;
67485+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67486+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67487+ ptl_m = pte_lockptr(mm, pmd_m);
67488+ if (ptl != ptl_m) {
67489+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67490+ if (!pte_none(*pte_m))
67491+ goto out;
67492+ }
67493+
67494+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67495+ page_cache_get(page_m);
67496+ page_add_anon_rmap(page_m, vma_m, address_m);
67497+ inc_mm_counter(mm, anon_rss);
67498+ set_pte_at(mm, address_m, pte_m, entry_m);
67499+ update_mmu_cache(vma_m, address_m, entry_m);
67500+out:
67501+ if (ptl != ptl_m)
67502+ spin_unlock(ptl_m);
67503+ pte_unmap_nested(pte_m);
67504+ unlock_page(page_m);
67505+}
67506+
67507+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67508+{
67509+ struct mm_struct *mm = vma->vm_mm;
67510+ unsigned long address_m;
67511+ spinlock_t *ptl_m;
67512+ struct vm_area_struct *vma_m;
67513+ pmd_t *pmd_m;
67514+ pte_t *pte_m, entry_m;
67515+
67516+ BUG_ON(!page_m || PageAnon(page_m));
67517+
67518+ vma_m = pax_find_mirror_vma(vma);
67519+ if (!vma_m)
67520+ return;
67521+
67522+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67523+ address_m = address + SEGMEXEC_TASK_SIZE;
67524+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67525+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67526+ ptl_m = pte_lockptr(mm, pmd_m);
67527+ if (ptl != ptl_m) {
67528+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67529+ if (!pte_none(*pte_m))
67530+ goto out;
67531+ }
67532+
67533+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67534+ page_cache_get(page_m);
67535+ page_add_file_rmap(page_m);
67536+ inc_mm_counter(mm, file_rss);
67537+ set_pte_at(mm, address_m, pte_m, entry_m);
67538+ update_mmu_cache(vma_m, address_m, entry_m);
67539+out:
67540+ if (ptl != ptl_m)
67541+ spin_unlock(ptl_m);
67542+ pte_unmap_nested(pte_m);
67543+}
67544+
67545+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67546+{
67547+ struct mm_struct *mm = vma->vm_mm;
67548+ unsigned long address_m;
67549+ spinlock_t *ptl_m;
67550+ struct vm_area_struct *vma_m;
67551+ pmd_t *pmd_m;
67552+ pte_t *pte_m, entry_m;
67553+
67554+ vma_m = pax_find_mirror_vma(vma);
67555+ if (!vma_m)
67556+ return;
67557+
67558+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67559+ address_m = address + SEGMEXEC_TASK_SIZE;
67560+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67561+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67562+ ptl_m = pte_lockptr(mm, pmd_m);
67563+ if (ptl != ptl_m) {
67564+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67565+ if (!pte_none(*pte_m))
67566+ goto out;
67567+ }
67568+
67569+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67570+ set_pte_at(mm, address_m, pte_m, entry_m);
67571+out:
67572+ if (ptl != ptl_m)
67573+ spin_unlock(ptl_m);
67574+ pte_unmap_nested(pte_m);
67575+}
67576+
67577+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67578+{
67579+ struct page *page_m;
67580+ pte_t entry;
67581+
67582+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67583+ goto out;
67584+
67585+ entry = *pte;
67586+ page_m = vm_normal_page(vma, address, entry);
67587+ if (!page_m)
67588+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67589+ else if (PageAnon(page_m)) {
67590+ if (pax_find_mirror_vma(vma)) {
67591+ pte_unmap_unlock(pte, ptl);
67592+ lock_page(page_m);
67593+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67594+ if (pte_same(entry, *pte))
67595+ pax_mirror_anon_pte(vma, address, page_m, ptl);
67596+ else
67597+ unlock_page(page_m);
67598+ }
67599+ } else
67600+ pax_mirror_file_pte(vma, address, page_m, ptl);
67601+
67602+out:
67603+ pte_unmap_unlock(pte, ptl);
67604+}
67605+#endif
67606+
67607 /*
67608 * This routine handles present pages, when users try to write
67609 * to a shared page. It is done by copying the page to a new address
67610@@ -2156,6 +2360,12 @@ gotten:
67611 */
67612 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67613 if (likely(pte_same(*page_table, orig_pte))) {
67614+
67615+#ifdef CONFIG_PAX_SEGMEXEC
67616+ if (pax_find_mirror_vma(vma))
67617+ BUG_ON(!trylock_page(new_page));
67618+#endif
67619+
67620 if (old_page) {
67621 if (!PageAnon(old_page)) {
67622 dec_mm_counter(mm, file_rss);
67623@@ -2207,6 +2417,10 @@ gotten:
67624 page_remove_rmap(old_page);
67625 }
67626
67627+#ifdef CONFIG_PAX_SEGMEXEC
67628+ pax_mirror_anon_pte(vma, address, new_page, ptl);
67629+#endif
67630+
67631 /* Free the old page.. */
67632 new_page = old_page;
67633 ret |= VM_FAULT_WRITE;
67634@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
67635 swap_free(entry);
67636 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67637 try_to_free_swap(page);
67638+
67639+#ifdef CONFIG_PAX_SEGMEXEC
67640+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67641+#endif
67642+
67643 unlock_page(page);
67644
67645 if (flags & FAULT_FLAG_WRITE) {
67646@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
67647
67648 /* No need to invalidate - it was non-present before */
67649 update_mmu_cache(vma, address, pte);
67650+
67651+#ifdef CONFIG_PAX_SEGMEXEC
67652+ pax_mirror_anon_pte(vma, address, page, ptl);
67653+#endif
67654+
67655 unlock:
67656 pte_unmap_unlock(page_table, ptl);
67657 out:
67658@@ -2632,40 +2856,6 @@ out_release:
67659 }
67660
67661 /*
67662- * This is like a special single-page "expand_{down|up}wards()",
67663- * except we must first make sure that 'address{-|+}PAGE_SIZE'
67664- * doesn't hit another vma.
67665- */
67666-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67667-{
67668- address &= PAGE_MASK;
67669- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67670- struct vm_area_struct *prev = vma->vm_prev;
67671-
67672- /*
67673- * Is there a mapping abutting this one below?
67674- *
67675- * That's only ok if it's the same stack mapping
67676- * that has gotten split..
67677- */
67678- if (prev && prev->vm_end == address)
67679- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67680-
67681- expand_stack(vma, address - PAGE_SIZE);
67682- }
67683- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67684- struct vm_area_struct *next = vma->vm_next;
67685-
67686- /* As VM_GROWSDOWN but s/below/above/ */
67687- if (next && next->vm_start == address + PAGE_SIZE)
67688- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67689-
67690- expand_upwards(vma, address + PAGE_SIZE);
67691- }
67692- return 0;
67693-}
67694-
67695-/*
67696 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67697 * but allow concurrent faults), and pte mapped but not yet locked.
67698 * We return with mmap_sem still held, but pte unmapped and unlocked.
67699@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
67700 unsigned long address, pte_t *page_table, pmd_t *pmd,
67701 unsigned int flags)
67702 {
67703- struct page *page;
67704+ struct page *page = NULL;
67705 spinlock_t *ptl;
67706 pte_t entry;
67707
67708- pte_unmap(page_table);
67709-
67710- /* Check if we need to add a guard page to the stack */
67711- if (check_stack_guard_page(vma, address) < 0)
67712- return VM_FAULT_SIGBUS;
67713-
67714- /* Use the zero-page for reads */
67715 if (!(flags & FAULT_FLAG_WRITE)) {
67716 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67717 vma->vm_page_prot));
67718- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67719+ ptl = pte_lockptr(mm, pmd);
67720+ spin_lock(ptl);
67721 if (!pte_none(*page_table))
67722 goto unlock;
67723 goto setpte;
67724 }
67725
67726 /* Allocate our own private page. */
67727+ pte_unmap(page_table);
67728+
67729 if (unlikely(anon_vma_prepare(vma)))
67730 goto oom;
67731 page = alloc_zeroed_user_highpage_movable(vma, address);
67732@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
67733 if (!pte_none(*page_table))
67734 goto release;
67735
67736+#ifdef CONFIG_PAX_SEGMEXEC
67737+ if (pax_find_mirror_vma(vma))
67738+ BUG_ON(!trylock_page(page));
67739+#endif
67740+
67741 inc_mm_counter(mm, anon_rss);
67742 page_add_new_anon_rmap(page, vma, address);
67743 setpte:
67744@@ -2720,6 +2911,12 @@ setpte:
67745
67746 /* No need to invalidate - it was non-present before */
67747 update_mmu_cache(vma, address, entry);
67748+
67749+#ifdef CONFIG_PAX_SEGMEXEC
67750+ if (page)
67751+ pax_mirror_anon_pte(vma, address, page, ptl);
67752+#endif
67753+
67754 unlock:
67755 pte_unmap_unlock(page_table, ptl);
67756 return 0;
67757@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
67758 */
67759 /* Only go through if we didn't race with anybody else... */
67760 if (likely(pte_same(*page_table, orig_pte))) {
67761+
67762+#ifdef CONFIG_PAX_SEGMEXEC
67763+ if (anon && pax_find_mirror_vma(vma))
67764+ BUG_ON(!trylock_page(page));
67765+#endif
67766+
67767 flush_icache_page(vma, page);
67768 entry = mk_pte(page, vma->vm_page_prot);
67769 if (flags & FAULT_FLAG_WRITE)
67770@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
67771
67772 /* no need to invalidate: a not-present page won't be cached */
67773 update_mmu_cache(vma, address, entry);
67774+
67775+#ifdef CONFIG_PAX_SEGMEXEC
67776+ if (anon)
67777+ pax_mirror_anon_pte(vma, address, page, ptl);
67778+ else
67779+ pax_mirror_file_pte(vma, address, page, ptl);
67780+#endif
67781+
67782 } else {
67783 if (charged)
67784 mem_cgroup_uncharge_page(page);
67785@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
67786 if (flags & FAULT_FLAG_WRITE)
67787 flush_tlb_page(vma, address);
67788 }
67789+
67790+#ifdef CONFIG_PAX_SEGMEXEC
67791+ pax_mirror_pte(vma, address, pte, pmd, ptl);
67792+ return 0;
67793+#endif
67794+
67795 unlock:
67796 pte_unmap_unlock(pte, ptl);
67797 return 0;
67798@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
67799 pmd_t *pmd;
67800 pte_t *pte;
67801
67802+#ifdef CONFIG_PAX_SEGMEXEC
67803+ struct vm_area_struct *vma_m;
67804+#endif
67805+
67806 __set_current_state(TASK_RUNNING);
67807
67808 count_vm_event(PGFAULT);
67809@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
67810 if (unlikely(is_vm_hugetlb_page(vma)))
67811 return hugetlb_fault(mm, vma, address, flags);
67812
67813+#ifdef CONFIG_PAX_SEGMEXEC
67814+ vma_m = pax_find_mirror_vma(vma);
67815+ if (vma_m) {
67816+ unsigned long address_m;
67817+ pgd_t *pgd_m;
67818+ pud_t *pud_m;
67819+ pmd_t *pmd_m;
67820+
67821+ if (vma->vm_start > vma_m->vm_start) {
67822+ address_m = address;
67823+ address -= SEGMEXEC_TASK_SIZE;
67824+ vma = vma_m;
67825+ } else
67826+ address_m = address + SEGMEXEC_TASK_SIZE;
67827+
67828+ pgd_m = pgd_offset(mm, address_m);
67829+ pud_m = pud_alloc(mm, pgd_m, address_m);
67830+ if (!pud_m)
67831+ return VM_FAULT_OOM;
67832+ pmd_m = pmd_alloc(mm, pud_m, address_m);
67833+ if (!pmd_m)
67834+ return VM_FAULT_OOM;
67835+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
67836+ return VM_FAULT_OOM;
67837+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67838+ }
67839+#endif
67840+
67841 pgd = pgd_offset(mm, address);
67842 pud = pud_alloc(mm, pgd, address);
67843 if (!pud)
67844@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
67845 gate_vma.vm_start = FIXADDR_USER_START;
67846 gate_vma.vm_end = FIXADDR_USER_END;
67847 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67848- gate_vma.vm_page_prot = __P101;
67849+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67850 /*
67851 * Make sure the vDSO gets into every core dump.
67852 * Dumping its contents makes post-mortem fully interpretable later
67853diff -urNp linux-2.6.32.45/mm/memory-failure.c linux-2.6.32.45/mm/memory-failure.c
67854--- linux-2.6.32.45/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
67855+++ linux-2.6.32.45/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
67856@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
67857
67858 int sysctl_memory_failure_recovery __read_mostly = 1;
67859
67860-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67861+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67862
67863 /*
67864 * Send all the processes who have the page mapped an ``action optional''
67865@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
67866 return 0;
67867 }
67868
67869- atomic_long_add(1, &mce_bad_pages);
67870+ atomic_long_add_unchecked(1, &mce_bad_pages);
67871
67872 /*
67873 * We need/can do nothing about count=0 pages.
67874diff -urNp linux-2.6.32.45/mm/mempolicy.c linux-2.6.32.45/mm/mempolicy.c
67875--- linux-2.6.32.45/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
67876+++ linux-2.6.32.45/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
67877@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
67878 struct vm_area_struct *next;
67879 int err;
67880
67881+#ifdef CONFIG_PAX_SEGMEXEC
67882+ struct vm_area_struct *vma_m;
67883+#endif
67884+
67885 err = 0;
67886 for (; vma && vma->vm_start < end; vma = next) {
67887 next = vma->vm_next;
67888@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
67889 err = policy_vma(vma, new);
67890 if (err)
67891 break;
67892+
67893+#ifdef CONFIG_PAX_SEGMEXEC
67894+ vma_m = pax_find_mirror_vma(vma);
67895+ if (vma_m) {
67896+ err = policy_vma(vma_m, new);
67897+ if (err)
67898+ break;
67899+ }
67900+#endif
67901+
67902 }
67903 return err;
67904 }
67905@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
67906
67907 if (end < start)
67908 return -EINVAL;
67909+
67910+#ifdef CONFIG_PAX_SEGMEXEC
67911+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67912+ if (end > SEGMEXEC_TASK_SIZE)
67913+ return -EINVAL;
67914+ } else
67915+#endif
67916+
67917+ if (end > TASK_SIZE)
67918+ return -EINVAL;
67919+
67920 if (end == start)
67921 return 0;
67922
67923@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67924 if (!mm)
67925 return -EINVAL;
67926
67927+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67928+ if (mm != current->mm &&
67929+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67930+ err = -EPERM;
67931+ goto out;
67932+ }
67933+#endif
67934+
67935 /*
67936 * Check if this process has the right to modify the specified
67937 * process. The right exists if the process has administrative
67938@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67939 rcu_read_lock();
67940 tcred = __task_cred(task);
67941 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67942- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67943- !capable(CAP_SYS_NICE)) {
67944+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67945 rcu_read_unlock();
67946 err = -EPERM;
67947 goto out;
67948@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
67949
67950 if (file) {
67951 seq_printf(m, " file=");
67952- seq_path(m, &file->f_path, "\n\t= ");
67953+ seq_path(m, &file->f_path, "\n\t\\= ");
67954 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67955 seq_printf(m, " heap");
67956 } else if (vma->vm_start <= mm->start_stack &&
67957diff -urNp linux-2.6.32.45/mm/migrate.c linux-2.6.32.45/mm/migrate.c
67958--- linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
67959+++ linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
67960@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
67961 unsigned long chunk_start;
67962 int err;
67963
67964+ pax_track_stack();
67965+
67966 task_nodes = cpuset_mems_allowed(task);
67967
67968 err = -ENOMEM;
67969@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67970 if (!mm)
67971 return -EINVAL;
67972
67973+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67974+ if (mm != current->mm &&
67975+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67976+ err = -EPERM;
67977+ goto out;
67978+ }
67979+#endif
67980+
67981 /*
67982 * Check if this process has the right to modify the specified
67983 * process. The right exists if the process has administrative
67984@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67985 rcu_read_lock();
67986 tcred = __task_cred(task);
67987 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67988- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67989- !capable(CAP_SYS_NICE)) {
67990+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67991 rcu_read_unlock();
67992 err = -EPERM;
67993 goto out;
67994diff -urNp linux-2.6.32.45/mm/mlock.c linux-2.6.32.45/mm/mlock.c
67995--- linux-2.6.32.45/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
67996+++ linux-2.6.32.45/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
67997@@ -13,6 +13,7 @@
67998 #include <linux/pagemap.h>
67999 #include <linux/mempolicy.h>
68000 #include <linux/syscalls.h>
68001+#include <linux/security.h>
68002 #include <linux/sched.h>
68003 #include <linux/module.h>
68004 #include <linux/rmap.h>
68005@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
68006 }
68007 }
68008
68009-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
68010-{
68011- return (vma->vm_flags & VM_GROWSDOWN) &&
68012- (vma->vm_start == addr) &&
68013- !vma_stack_continue(vma->vm_prev, addr);
68014-}
68015-
68016 /**
68017 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
68018 * @vma: target vma
68019@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
68020 if (vma->vm_flags & VM_WRITE)
68021 gup_flags |= FOLL_WRITE;
68022
68023- /* We don't try to access the guard page of a stack vma */
68024- if (stack_guard_page(vma, start)) {
68025- addr += PAGE_SIZE;
68026- nr_pages--;
68027- }
68028-
68029 while (nr_pages > 0) {
68030 int i;
68031
68032@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
68033 {
68034 unsigned long nstart, end, tmp;
68035 struct vm_area_struct * vma, * prev;
68036- int error;
68037+ int error = -EINVAL;
68038
68039 len = PAGE_ALIGN(len);
68040 end = start + len;
68041@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
68042 return -EINVAL;
68043 if (end == start)
68044 return 0;
68045+ if (end > TASK_SIZE)
68046+ return -EINVAL;
68047+
68048 vma = find_vma_prev(current->mm, start, &prev);
68049 if (!vma || vma->vm_start > start)
68050 return -ENOMEM;
68051@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
68052 for (nstart = start ; ; ) {
68053 unsigned int newflags;
68054
68055+#ifdef CONFIG_PAX_SEGMEXEC
68056+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68057+ break;
68058+#endif
68059+
68060 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68061
68062 newflags = vma->vm_flags | VM_LOCKED;
68063@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
68064 lock_limit >>= PAGE_SHIFT;
68065
68066 /* check against resource limits */
68067+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68068 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68069 error = do_mlock(start, len, 1);
68070 up_write(&current->mm->mmap_sem);
68071@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
68072 static int do_mlockall(int flags)
68073 {
68074 struct vm_area_struct * vma, * prev = NULL;
68075- unsigned int def_flags = 0;
68076
68077 if (flags & MCL_FUTURE)
68078- def_flags = VM_LOCKED;
68079- current->mm->def_flags = def_flags;
68080+ current->mm->def_flags |= VM_LOCKED;
68081+ else
68082+ current->mm->def_flags &= ~VM_LOCKED;
68083 if (flags == MCL_FUTURE)
68084 goto out;
68085
68086 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68087- unsigned int newflags;
68088+ unsigned long newflags;
68089+
68090+#ifdef CONFIG_PAX_SEGMEXEC
68091+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68092+ break;
68093+#endif
68094
68095+ BUG_ON(vma->vm_end > TASK_SIZE);
68096 newflags = vma->vm_flags | VM_LOCKED;
68097 if (!(flags & MCL_CURRENT))
68098 newflags &= ~VM_LOCKED;
68099@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68100 lock_limit >>= PAGE_SHIFT;
68101
68102 ret = -ENOMEM;
68103+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68104 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68105 capable(CAP_IPC_LOCK))
68106 ret = do_mlockall(flags);
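
A minimal sketch, in plain C with made-up flag values, of why the do_mlockall() hunk switches from assigning mm->def_flags to OR-ing/AND-ing only VM_LOCKED: the old assignment silently dropped every other default flag.

#include <assert.h>
#include <stdio.h>

#define VM_LOCKED   0x00002000UL
#define VM_OTHERDEF 0x00000100UL   /* stand-in for some pre-existing default flag */
#define MCL_CURRENT 1
#define MCL_FUTURE  2

static unsigned long old_update(unsigned long def_flags, int flags)
{
    unsigned long v = 0;            /* old code started from scratch... */
    (void)def_flags;                /* ...ignoring the current defaults */
    if (flags & MCL_FUTURE)
        v = VM_LOCKED;
    return v;
}

static unsigned long new_update(unsigned long def_flags, int flags)
{
    if (flags & MCL_FUTURE)
        def_flags |= VM_LOCKED;     /* set only the bit we own */
    else
        def_flags &= ~VM_LOCKED;    /* and clear only that bit */
    return def_flags;
}

int main(void)
{
    unsigned long def = VM_OTHERDEF;

    assert((old_update(def, MCL_CURRENT) & VM_OTHERDEF) == 0);           /* lost */
    assert((new_update(def, MCL_CURRENT) & VM_OTHERDEF) == VM_OTHERDEF); /* kept */
    assert(new_update(def, MCL_FUTURE) == (VM_OTHERDEF | VM_LOCKED));
    printf("def_flags preserved: 0x%lx\n", new_update(def, MCL_FUTURE));
    return 0;
}
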
68107diff -urNp linux-2.6.32.45/mm/mmap.c linux-2.6.32.45/mm/mmap.c
68108--- linux-2.6.32.45/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
68109+++ linux-2.6.32.45/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
68110@@ -45,6 +45,16 @@
68111 #define arch_rebalance_pgtables(addr, len) (addr)
68112 #endif
68113
68114+static inline void verify_mm_writelocked(struct mm_struct *mm)
68115+{
68116+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68117+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68118+ up_read(&mm->mmap_sem);
68119+ BUG();
68120+ }
68121+#endif
68122+}
68123+
68124 static void unmap_region(struct mm_struct *mm,
68125 struct vm_area_struct *vma, struct vm_area_struct *prev,
68126 unsigned long start, unsigned long end);
68127@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
68128 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68129 *
68130 */
68131-pgprot_t protection_map[16] = {
68132+pgprot_t protection_map[16] __read_only = {
68133 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68134 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68135 };
68136
68137 pgprot_t vm_get_page_prot(unsigned long vm_flags)
68138 {
68139- return __pgprot(pgprot_val(protection_map[vm_flags &
68140+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68141 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68142 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68143+
68144+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68145+ if (!nx_enabled &&
68146+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68147+ (vm_flags & (VM_READ | VM_WRITE)))
68148+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68149+#endif
68150+
68151+ return prot;
68152 }
68153 EXPORT_SYMBOL(vm_get_page_prot);
68154
68155 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
68156 int sysctl_overcommit_ratio = 50; /* default is 50% */
68157 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68158+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68159 struct percpu_counter vm_committed_as;
68160
68161 /*
68162@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
68163 struct vm_area_struct *next = vma->vm_next;
68164
68165 might_sleep();
68166+ BUG_ON(vma->vm_mirror);
68167 if (vma->vm_ops && vma->vm_ops->close)
68168 vma->vm_ops->close(vma);
68169 if (vma->vm_file) {
68170@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68171 * not page aligned -Ram Gupta
68172 */
68173 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
68174+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68175 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68176 (mm->end_data - mm->start_data) > rlim)
68177 goto out;
68178@@ -704,6 +726,12 @@ static int
68179 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68180 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68181 {
68182+
68183+#ifdef CONFIG_PAX_SEGMEXEC
68184+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68185+ return 0;
68186+#endif
68187+
68188 if (is_mergeable_vma(vma, file, vm_flags) &&
68189 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68190 if (vma->vm_pgoff == vm_pgoff)
68191@@ -723,6 +751,12 @@ static int
68192 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68193 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68194 {
68195+
68196+#ifdef CONFIG_PAX_SEGMEXEC
68197+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68198+ return 0;
68199+#endif
68200+
68201 if (is_mergeable_vma(vma, file, vm_flags) &&
68202 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68203 pgoff_t vm_pglen;
68204@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
68205 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68206 struct vm_area_struct *prev, unsigned long addr,
68207 unsigned long end, unsigned long vm_flags,
68208- struct anon_vma *anon_vma, struct file *file,
68209+ struct anon_vma *anon_vma, struct file *file,
68210 pgoff_t pgoff, struct mempolicy *policy)
68211 {
68212 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68213 struct vm_area_struct *area, *next;
68214
68215+#ifdef CONFIG_PAX_SEGMEXEC
68216+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68217+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68218+
68219+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68220+#endif
68221+
68222 /*
68223 * We later require that vma->vm_flags == vm_flags,
68224 * so this tests vma->vm_flags & VM_SPECIAL, too.
68225@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
68226 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68227 next = next->vm_next;
68228
68229+#ifdef CONFIG_PAX_SEGMEXEC
68230+ if (prev)
68231+ prev_m = pax_find_mirror_vma(prev);
68232+ if (area)
68233+ area_m = pax_find_mirror_vma(area);
68234+ if (next)
68235+ next_m = pax_find_mirror_vma(next);
68236+#endif
68237+
68238 /*
68239 * Can it merge with the predecessor?
68240 */
68241@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
68242 /* cases 1, 6 */
68243 vma_adjust(prev, prev->vm_start,
68244 next->vm_end, prev->vm_pgoff, NULL);
68245- } else /* cases 2, 5, 7 */
68246+
68247+#ifdef CONFIG_PAX_SEGMEXEC
68248+ if (prev_m)
68249+ vma_adjust(prev_m, prev_m->vm_start,
68250+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68251+#endif
68252+
68253+ } else { /* cases 2, 5, 7 */
68254 vma_adjust(prev, prev->vm_start,
68255 end, prev->vm_pgoff, NULL);
68256+
68257+#ifdef CONFIG_PAX_SEGMEXEC
68258+ if (prev_m)
68259+ vma_adjust(prev_m, prev_m->vm_start,
68260+ end_m, prev_m->vm_pgoff, NULL);
68261+#endif
68262+
68263+ }
68264 return prev;
68265 }
68266
68267@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
68268 mpol_equal(policy, vma_policy(next)) &&
68269 can_vma_merge_before(next, vm_flags,
68270 anon_vma, file, pgoff+pglen)) {
68271- if (prev && addr < prev->vm_end) /* case 4 */
68272+ if (prev && addr < prev->vm_end) { /* case 4 */
68273 vma_adjust(prev, prev->vm_start,
68274 addr, prev->vm_pgoff, NULL);
68275- else /* cases 3, 8 */
68276+
68277+#ifdef CONFIG_PAX_SEGMEXEC
68278+ if (prev_m)
68279+ vma_adjust(prev_m, prev_m->vm_start,
68280+ addr_m, prev_m->vm_pgoff, NULL);
68281+#endif
68282+
68283+ } else { /* cases 3, 8 */
68284 vma_adjust(area, addr, next->vm_end,
68285 next->vm_pgoff - pglen, NULL);
68286+
68287+#ifdef CONFIG_PAX_SEGMEXEC
68288+ if (area_m)
68289+ vma_adjust(area_m, addr_m, next_m->vm_end,
68290+ next_m->vm_pgoff - pglen, NULL);
68291+#endif
68292+
68293+ }
68294 return area;
68295 }
68296
68297@@ -898,14 +978,11 @@ none:
68298 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68299 struct file *file, long pages)
68300 {
68301- const unsigned long stack_flags
68302- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68303-
68304 if (file) {
68305 mm->shared_vm += pages;
68306 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68307 mm->exec_vm += pages;
68308- } else if (flags & stack_flags)
68309+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68310 mm->stack_vm += pages;
68311 if (flags & (VM_RESERVED|VM_IO))
68312 mm->reserved_vm += pages;
68313@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
68314 * (the exception is when the underlying filesystem is noexec
68315 * mounted, in which case we dont add PROT_EXEC.)
68316 */
68317- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68318+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68319 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68320 prot |= PROT_EXEC;
68321
68322@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
68323 /* Obtain the address to map to. we verify (or select) it and ensure
68324 * that it represents a valid section of the address space.
68325 */
68326- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68327+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68328 if (addr & ~PAGE_MASK)
68329 return addr;
68330
68331@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
68332 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68333 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68334
68335+#ifdef CONFIG_PAX_MPROTECT
68336+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68337+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68338+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68339+ gr_log_rwxmmap(file);
68340+
68341+#ifdef CONFIG_PAX_EMUPLT
68342+ vm_flags &= ~VM_EXEC;
68343+#else
68344+ return -EPERM;
68345+#endif
68346+
68347+ }
68348+
68349+ if (!(vm_flags & VM_EXEC))
68350+ vm_flags &= ~VM_MAYEXEC;
68351+#else
68352+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68353+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68354+#endif
68355+ else
68356+ vm_flags &= ~VM_MAYWRITE;
68357+ }
68358+#endif
68359+
68360+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68361+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68362+ vm_flags &= ~VM_PAGEEXEC;
68363+#endif
68364+
68365 if (flags & MAP_LOCKED)
68366 if (!can_do_mlock())
68367 return -EPERM;
68368@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
68369 locked += mm->locked_vm;
68370 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
68371 lock_limit >>= PAGE_SHIFT;
68372+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68373 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68374 return -EAGAIN;
68375 }
68376@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
68377 if (error)
68378 return error;
68379
68380+ if (!gr_acl_handle_mmap(file, prot))
68381+ return -EACCES;
68382+
68383 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68384 }
68385 EXPORT_SYMBOL(do_mmap_pgoff);
68386@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
68387 */
68388 int vma_wants_writenotify(struct vm_area_struct *vma)
68389 {
68390- unsigned int vm_flags = vma->vm_flags;
68391+ unsigned long vm_flags = vma->vm_flags;
68392
68393 /* If it was private or non-writable, the write bit is already clear */
68394- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68395+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68396 return 0;
68397
68398 /* The backer wishes to know when pages are first written to? */
68399@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
68400 unsigned long charged = 0;
68401 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68402
68403+#ifdef CONFIG_PAX_SEGMEXEC
68404+ struct vm_area_struct *vma_m = NULL;
68405+#endif
68406+
68407+ /*
68408+ * mm->mmap_sem is required to protect against another thread
68409+ * changing the mappings in case we sleep.
68410+ */
68411+ verify_mm_writelocked(mm);
68412+
68413 /* Clear old maps */
68414 error = -ENOMEM;
68415-munmap_back:
68416 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68417 if (vma && vma->vm_start < addr + len) {
68418 if (do_munmap(mm, addr, len))
68419 return -ENOMEM;
68420- goto munmap_back;
68421+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68422+ BUG_ON(vma && vma->vm_start < addr + len);
68423 }
68424
68425 /* Check against address space limit. */
68426@@ -1173,6 +1294,16 @@ munmap_back:
68427 goto unacct_error;
68428 }
68429
68430+#ifdef CONFIG_PAX_SEGMEXEC
68431+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68432+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68433+ if (!vma_m) {
68434+ error = -ENOMEM;
68435+ goto free_vma;
68436+ }
68437+ }
68438+#endif
68439+
68440 vma->vm_mm = mm;
68441 vma->vm_start = addr;
68442 vma->vm_end = addr + len;
68443@@ -1195,6 +1326,19 @@ munmap_back:
68444 error = file->f_op->mmap(file, vma);
68445 if (error)
68446 goto unmap_and_free_vma;
68447+
68448+#ifdef CONFIG_PAX_SEGMEXEC
68449+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68450+ added_exe_file_vma(mm);
68451+#endif
68452+
68453+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68454+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68455+ vma->vm_flags |= VM_PAGEEXEC;
68456+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68457+ }
68458+#endif
68459+
68460 if (vm_flags & VM_EXECUTABLE)
68461 added_exe_file_vma(mm);
68462
68463@@ -1218,6 +1362,11 @@ munmap_back:
68464 vma_link(mm, vma, prev, rb_link, rb_parent);
68465 file = vma->vm_file;
68466
68467+#ifdef CONFIG_PAX_SEGMEXEC
68468+ if (vma_m)
68469+ pax_mirror_vma(vma_m, vma);
68470+#endif
68471+
68472 /* Once vma denies write, undo our temporary denial count */
68473 if (correct_wcount)
68474 atomic_inc(&inode->i_writecount);
68475@@ -1226,6 +1375,7 @@ out:
68476
68477 mm->total_vm += len >> PAGE_SHIFT;
68478 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68479+ track_exec_limit(mm, addr, addr + len, vm_flags);
68480 if (vm_flags & VM_LOCKED) {
68481 /*
68482 * makes pages present; downgrades, drops, reacquires mmap_sem
68483@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
68484 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68485 charged = 0;
68486 free_vma:
68487+
68488+#ifdef CONFIG_PAX_SEGMEXEC
68489+ if (vma_m)
68490+ kmem_cache_free(vm_area_cachep, vma_m);
68491+#endif
68492+
68493 kmem_cache_free(vm_area_cachep, vma);
68494 unacct_error:
68495 if (charged)
68496@@ -1255,6 +1411,44 @@ unacct_error:
68497 return error;
68498 }
68499
68500+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68501+{
68502+ if (!vma) {
68503+#ifdef CONFIG_STACK_GROWSUP
68504+ if (addr > sysctl_heap_stack_gap)
68505+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68506+ else
68507+ vma = find_vma(current->mm, 0);
68508+ if (vma && (vma->vm_flags & VM_GROWSUP))
68509+ return false;
68510+#endif
68511+ return true;
68512+ }
68513+
68514+ if (addr + len > vma->vm_start)
68515+ return false;
68516+
68517+ if (vma->vm_flags & VM_GROWSDOWN)
68518+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68519+#ifdef CONFIG_STACK_GROWSUP
68520+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68521+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68522+#endif
68523+
68524+ return true;
68525+}
68526+
68527+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68528+{
68529+ if (vma->vm_start < len)
68530+ return -ENOMEM;
68531+ if (!(vma->vm_flags & VM_GROWSDOWN))
68532+ return vma->vm_start - len;
68533+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
68534+ return vma->vm_start - len - sysctl_heap_stack_gap;
68535+ return -ENOMEM;
68536+}
68537+
68538 /* Get an address range which is currently unmapped.
68539 * For shmat() with addr=0.
68540 *
68541@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
68542 if (flags & MAP_FIXED)
68543 return addr;
68544
68545+#ifdef CONFIG_PAX_RANDMMAP
68546+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68547+#endif
68548+
68549 if (addr) {
68550 addr = PAGE_ALIGN(addr);
68551- vma = find_vma(mm, addr);
68552- if (TASK_SIZE - len >= addr &&
68553- (!vma || addr + len <= vma->vm_start))
68554- return addr;
68555+ if (TASK_SIZE - len >= addr) {
68556+ vma = find_vma(mm, addr);
68557+ if (check_heap_stack_gap(vma, addr, len))
68558+ return addr;
68559+ }
68560 }
68561 if (len > mm->cached_hole_size) {
68562- start_addr = addr = mm->free_area_cache;
68563+ start_addr = addr = mm->free_area_cache;
68564 } else {
68565- start_addr = addr = TASK_UNMAPPED_BASE;
68566- mm->cached_hole_size = 0;
68567+ start_addr = addr = mm->mmap_base;
68568+ mm->cached_hole_size = 0;
68569 }
68570
68571 full_search:
68572@@ -1303,34 +1502,40 @@ full_search:
68573 * Start a new search - just in case we missed
68574 * some holes.
68575 */
68576- if (start_addr != TASK_UNMAPPED_BASE) {
68577- addr = TASK_UNMAPPED_BASE;
68578- start_addr = addr;
68579+ if (start_addr != mm->mmap_base) {
68580+ start_addr = addr = mm->mmap_base;
68581 mm->cached_hole_size = 0;
68582 goto full_search;
68583 }
68584 return -ENOMEM;
68585 }
68586- if (!vma || addr + len <= vma->vm_start) {
68587- /*
68588- * Remember the place where we stopped the search:
68589- */
68590- mm->free_area_cache = addr + len;
68591- return addr;
68592- }
68593+ if (check_heap_stack_gap(vma, addr, len))
68594+ break;
68595 if (addr + mm->cached_hole_size < vma->vm_start)
68596 mm->cached_hole_size = vma->vm_start - addr;
68597 addr = vma->vm_end;
68598 }
68599+
68600+ /*
68601+ * Remember the place where we stopped the search:
68602+ */
68603+ mm->free_area_cache = addr + len;
68604+ return addr;
68605 }
68606 #endif
68607
68608 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68609 {
68610+
68611+#ifdef CONFIG_PAX_SEGMEXEC
68612+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68613+ return;
68614+#endif
68615+
68616 /*
68617 * Is this a new hole at the lowest possible address?
68618 */
68619- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68620+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68621 mm->free_area_cache = addr;
68622 mm->cached_hole_size = ~0UL;
68623 }
68624@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
68625 {
68626 struct vm_area_struct *vma;
68627 struct mm_struct *mm = current->mm;
68628- unsigned long addr = addr0;
68629+ unsigned long base = mm->mmap_base, addr = addr0;
68630
68631 /* requested length too big for entire address space */
68632 if (len > TASK_SIZE)
68633@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
68634 if (flags & MAP_FIXED)
68635 return addr;
68636
68637+#ifdef CONFIG_PAX_RANDMMAP
68638+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68639+#endif
68640+
68641 /* requesting a specific address */
68642 if (addr) {
68643 addr = PAGE_ALIGN(addr);
68644- vma = find_vma(mm, addr);
68645- if (TASK_SIZE - len >= addr &&
68646- (!vma || addr + len <= vma->vm_start))
68647- return addr;
68648+ if (TASK_SIZE - len >= addr) {
68649+ vma = find_vma(mm, addr);
68650+ if (check_heap_stack_gap(vma, addr, len))
68651+ return addr;
68652+ }
68653 }
68654
68655 /* check if free_area_cache is useful for us */
68656@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
68657 /* make sure it can fit in the remaining address space */
68658 if (addr > len) {
68659 vma = find_vma(mm, addr-len);
68660- if (!vma || addr <= vma->vm_start)
68661+ if (check_heap_stack_gap(vma, addr - len, len))
68662 /* remember the address as a hint for next time */
68663 return (mm->free_area_cache = addr-len);
68664 }
68665@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
68666 * return with success:
68667 */
68668 vma = find_vma(mm, addr);
68669- if (!vma || addr+len <= vma->vm_start)
68670+ if (check_heap_stack_gap(vma, addr, len))
68671 /* remember the address as a hint for next time */
68672 return (mm->free_area_cache = addr);
68673
68674@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
68675 mm->cached_hole_size = vma->vm_start - addr;
68676
68677 /* try just below the current vma->vm_start */
68678- addr = vma->vm_start-len;
68679- } while (len < vma->vm_start);
68680+ addr = skip_heap_stack_gap(vma, len);
68681+ } while (!IS_ERR_VALUE(addr));
68682
68683 bottomup:
68684 /*
68685@@ -1414,13 +1624,21 @@ bottomup:
68686 * can happen with large stack limits and large mmap()
68687 * allocations.
68688 */
68689+ mm->mmap_base = TASK_UNMAPPED_BASE;
68690+
68691+#ifdef CONFIG_PAX_RANDMMAP
68692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
68693+ mm->mmap_base += mm->delta_mmap;
68694+#endif
68695+
68696+ mm->free_area_cache = mm->mmap_base;
68697 mm->cached_hole_size = ~0UL;
68698- mm->free_area_cache = TASK_UNMAPPED_BASE;
68699 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68700 /*
68701 * Restore the topdown base:
68702 */
68703- mm->free_area_cache = mm->mmap_base;
68704+ mm->mmap_base = base;
68705+ mm->free_area_cache = base;
68706 mm->cached_hole_size = ~0UL;
68707
68708 return addr;
68709@@ -1429,6 +1647,12 @@ bottomup:
68710
68711 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68712 {
68713+
68714+#ifdef CONFIG_PAX_SEGMEXEC
68715+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68716+ return;
68717+#endif
68718+
68719 /*
68720 * Is this a new hole at the highest possible address?
68721 */
68722@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
68723 mm->free_area_cache = addr;
68724
68725 /* dont allow allocations above current base */
68726- if (mm->free_area_cache > mm->mmap_base)
68727+ if (mm->free_area_cache > mm->mmap_base) {
68728 mm->free_area_cache = mm->mmap_base;
68729+ mm->cached_hole_size = ~0UL;
68730+ }
68731 }
68732
68733 unsigned long
68734@@ -1545,6 +1771,27 @@ out:
68735 return prev ? prev->vm_next : vma;
68736 }
68737
68738+#ifdef CONFIG_PAX_SEGMEXEC
68739+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68740+{
68741+ struct vm_area_struct *vma_m;
68742+
68743+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68744+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68745+ BUG_ON(vma->vm_mirror);
68746+ return NULL;
68747+ }
68748+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68749+ vma_m = vma->vm_mirror;
68750+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68751+ BUG_ON(vma->vm_file != vma_m->vm_file);
68752+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68753+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
68754+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68755+ return vma_m;
68756+}
68757+#endif
68758+
68759 /*
68760 * Verify that the stack growth is acceptable and
68761 * update accounting. This is shared with both the
68762@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
68763 return -ENOMEM;
68764
68765 /* Stack limit test */
68766+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
68767 if (size > rlim[RLIMIT_STACK].rlim_cur)
68768 return -ENOMEM;
68769
68770@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
68771 unsigned long limit;
68772 locked = mm->locked_vm + grow;
68773 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
68774+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68775 if (locked > limit && !capable(CAP_IPC_LOCK))
68776 return -ENOMEM;
68777 }
68778@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
68779 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68780 * vma is the last one with address > vma->vm_end. Have to extend vma.
68781 */
68782+#ifndef CONFIG_IA64
68783+static
68784+#endif
68785 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68786 {
68787 int error;
68788+ bool locknext;
68789
68790 if (!(vma->vm_flags & VM_GROWSUP))
68791 return -EFAULT;
68792
68793+ /* Also guard against wrapping around to address 0. */
68794+ if (address < PAGE_ALIGN(address+1))
68795+ address = PAGE_ALIGN(address+1);
68796+ else
68797+ return -ENOMEM;
68798+
68799 /*
68800 * We must make sure the anon_vma is allocated
68801 * so that the anon_vma locking is not a noop.
68802 */
68803 if (unlikely(anon_vma_prepare(vma)))
68804 return -ENOMEM;
68805+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68806+ if (locknext && anon_vma_prepare(vma->vm_next))
68807+ return -ENOMEM;
68808 anon_vma_lock(vma);
68809+ if (locknext)
68810+ anon_vma_lock(vma->vm_next);
68811
68812 /*
68813 * vma->vm_start/vm_end cannot change under us because the caller
68814 * is required to hold the mmap_sem in read mode. We need the
68815- * anon_vma lock to serialize against concurrent expand_stacks.
68816- * Also guard against wrapping around to address 0.
68817+ * anon_vma locks to serialize against concurrent expand_stacks
68818+ * and expand_upwards.
68819 */
68820- if (address < PAGE_ALIGN(address+4))
68821- address = PAGE_ALIGN(address+4);
68822- else {
68823- anon_vma_unlock(vma);
68824- return -ENOMEM;
68825- }
68826 error = 0;
68827
68828 /* Somebody else might have raced and expanded it already */
68829- if (address > vma->vm_end) {
68830+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68831+ error = -ENOMEM;
68832+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68833 unsigned long size, grow;
68834
68835 size = address - vma->vm_start;
68836@@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
68837 if (!error)
68838 vma->vm_end = address;
68839 }
68840+ if (locknext)
68841+ anon_vma_unlock(vma->vm_next);
68842 anon_vma_unlock(vma);
68843 return error;
68844 }
68845@@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
68846 unsigned long address)
68847 {
68848 int error;
68849+ bool lockprev = false;
68850+ struct vm_area_struct *prev;
68851
68852 /*
68853 * We must make sure the anon_vma is allocated
68854@@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
68855 if (error)
68856 return error;
68857
68858+ prev = vma->vm_prev;
68859+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68860+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68861+#endif
68862+ if (lockprev && anon_vma_prepare(prev))
68863+ return -ENOMEM;
68864+ if (lockprev)
68865+ anon_vma_lock(prev);
68866+
68867 anon_vma_lock(vma);
68868
68869 /*
68870@@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
68871 */
68872
68873 /* Somebody else might have raced and expanded it already */
68874- if (address < vma->vm_start) {
68875+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68876+ error = -ENOMEM;
68877+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68878 unsigned long size, grow;
68879
68880+#ifdef CONFIG_PAX_SEGMEXEC
68881+ struct vm_area_struct *vma_m;
68882+
68883+ vma_m = pax_find_mirror_vma(vma);
68884+#endif
68885+
68886 size = vma->vm_end - address;
68887 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68888
68889@@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
68890 if (!error) {
68891 vma->vm_start = address;
68892 vma->vm_pgoff -= grow;
68893+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68894+
68895+#ifdef CONFIG_PAX_SEGMEXEC
68896+ if (vma_m) {
68897+ vma_m->vm_start -= grow << PAGE_SHIFT;
68898+ vma_m->vm_pgoff -= grow;
68899+ }
68900+#endif
68901+
68902 }
68903 }
68904 anon_vma_unlock(vma);
68905+ if (lockprev)
68906+ anon_vma_unlock(prev);
68907 return error;
68908 }
68909
68910@@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
68911 do {
68912 long nrpages = vma_pages(vma);
68913
68914+#ifdef CONFIG_PAX_SEGMEXEC
68915+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
68916+ vma = remove_vma(vma);
68917+ continue;
68918+ }
68919+#endif
68920+
68921 mm->total_vm -= nrpages;
68922 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
68923 vma = remove_vma(vma);
68924@@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
68925 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
68926 vma->vm_prev = NULL;
68927 do {
68928+
68929+#ifdef CONFIG_PAX_SEGMEXEC
68930+ if (vma->vm_mirror) {
68931+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
68932+ vma->vm_mirror->vm_mirror = NULL;
68933+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
68934+ vma->vm_mirror = NULL;
68935+ }
68936+#endif
68937+
68938 rb_erase(&vma->vm_rb, &mm->mm_rb);
68939 mm->map_count--;
68940 tail_vma = vma;
68941@@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
68942 struct mempolicy *pol;
68943 struct vm_area_struct *new;
68944
68945+#ifdef CONFIG_PAX_SEGMEXEC
68946+ struct vm_area_struct *vma_m, *new_m = NULL;
68947+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
68948+#endif
68949+
68950 if (is_vm_hugetlb_page(vma) && (addr &
68951 ~(huge_page_mask(hstate_vma(vma)))))
68952 return -EINVAL;
68953
68954+#ifdef CONFIG_PAX_SEGMEXEC
68955+ vma_m = pax_find_mirror_vma(vma);
68956+
68957+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68958+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
68959+ if (mm->map_count >= sysctl_max_map_count-1)
68960+ return -ENOMEM;
68961+ } else
68962+#endif
68963+
68964 if (mm->map_count >= sysctl_max_map_count)
68965 return -ENOMEM;
68966
68967@@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
68968 if (!new)
68969 return -ENOMEM;
68970
68971+#ifdef CONFIG_PAX_SEGMEXEC
68972+ if (vma_m) {
68973+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68974+ if (!new_m) {
68975+ kmem_cache_free(vm_area_cachep, new);
68976+ return -ENOMEM;
68977+ }
68978+ }
68979+#endif
68980+
68981 /* most fields are the same, copy all, and then fixup */
68982 *new = *vma;
68983
68984@@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
68985 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
68986 }
68987
68988+#ifdef CONFIG_PAX_SEGMEXEC
68989+ if (vma_m) {
68990+ *new_m = *vma_m;
68991+ new_m->vm_mirror = new;
68992+ new->vm_mirror = new_m;
68993+
68994+ if (new_below)
68995+ new_m->vm_end = addr_m;
68996+ else {
68997+ new_m->vm_start = addr_m;
68998+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
68999+ }
69000+ }
69001+#endif
69002+
69003 pol = mpol_dup(vma_policy(vma));
69004 if (IS_ERR(pol)) {
69005+
69006+#ifdef CONFIG_PAX_SEGMEXEC
69007+ if (new_m)
69008+ kmem_cache_free(vm_area_cachep, new_m);
69009+#endif
69010+
69011 kmem_cache_free(vm_area_cachep, new);
69012 return PTR_ERR(pol);
69013 }
69014@@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
69015 else
69016 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69017
69018+#ifdef CONFIG_PAX_SEGMEXEC
69019+ if (vma_m) {
69020+ mpol_get(pol);
69021+ vma_set_policy(new_m, pol);
69022+
69023+ if (new_m->vm_file) {
69024+ get_file(new_m->vm_file);
69025+ if (vma_m->vm_flags & VM_EXECUTABLE)
69026+ added_exe_file_vma(mm);
69027+ }
69028+
69029+ if (new_m->vm_ops && new_m->vm_ops->open)
69030+ new_m->vm_ops->open(new_m);
69031+
69032+ if (new_below)
69033+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69034+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69035+ else
69036+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69037+ }
69038+#endif
69039+
69040 return 0;
69041 }
69042
69043@@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
69044 * work. This now handles partial unmappings.
69045 * Jeremy Fitzhardinge <jeremy@goop.org>
69046 */
69047+#ifdef CONFIG_PAX_SEGMEXEC
69048+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69049+{
69050+ int ret = __do_munmap(mm, start, len);
69051+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69052+ return ret;
69053+
69054+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69055+}
69056+
69057+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69058+#else
69059 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69060+#endif
69061 {
69062 unsigned long end;
69063 struct vm_area_struct *vma, *prev, *last;
69064
69065+ /*
69066+ * mm->mmap_sem is required to protect against another thread
69067+ * changing the mappings in case we sleep.
69068+ */
69069+ verify_mm_writelocked(mm);
69070+
69071 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69072 return -EINVAL;
69073
69074@@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
69075 /* Fix up all other VM information */
69076 remove_vma_list(mm, vma);
69077
69078+ track_exec_limit(mm, start, end, 0UL);
69079+
69080 return 0;
69081 }
69082
69083@@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
69084
69085 profile_munmap(addr);
69086
69087+#ifdef CONFIG_PAX_SEGMEXEC
69088+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69089+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69090+ return -EINVAL;
69091+#endif
69092+
69093 down_write(&mm->mmap_sem);
69094 ret = do_munmap(mm, addr, len);
69095 up_write(&mm->mmap_sem);
69096 return ret;
69097 }
69098
69099-static inline void verify_mm_writelocked(struct mm_struct *mm)
69100-{
69101-#ifdef CONFIG_DEBUG_VM
69102- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69103- WARN_ON(1);
69104- up_read(&mm->mmap_sem);
69105- }
69106-#endif
69107-}
69108-
69109 /*
69110 * this is really a simplified "do_mmap". it only handles
69111 * anonymous maps. eventually we may be able to do some
69112@@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
69113 struct rb_node ** rb_link, * rb_parent;
69114 pgoff_t pgoff = addr >> PAGE_SHIFT;
69115 int error;
69116+ unsigned long charged;
69117
69118 len = PAGE_ALIGN(len);
69119 if (!len)
69120@@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
69121
69122 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69123
69124+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69125+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69126+ flags &= ~VM_EXEC;
69127+
69128+#ifdef CONFIG_PAX_MPROTECT
69129+ if (mm->pax_flags & MF_PAX_MPROTECT)
69130+ flags &= ~VM_MAYEXEC;
69131+#endif
69132+
69133+ }
69134+#endif
69135+
69136 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69137 if (error & ~PAGE_MASK)
69138 return error;
69139
69140+ charged = len >> PAGE_SHIFT;
69141+
69142 /*
69143 * mlock MCL_FUTURE?
69144 */
69145 if (mm->def_flags & VM_LOCKED) {
69146 unsigned long locked, lock_limit;
69147- locked = len >> PAGE_SHIFT;
69148+ locked = charged;
69149 locked += mm->locked_vm;
69150 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
69151 lock_limit >>= PAGE_SHIFT;
69152@@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
69153 /*
69154 * Clear old maps. this also does some error checking for us
69155 */
69156- munmap_back:
69157 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69158 if (vma && vma->vm_start < addr + len) {
69159 if (do_munmap(mm, addr, len))
69160 return -ENOMEM;
69161- goto munmap_back;
69162+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69163+ BUG_ON(vma && vma->vm_start < addr + len);
69164 }
69165
69166 /* Check against address space limits *after* clearing old maps... */
69167- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69168+ if (!may_expand_vm(mm, charged))
69169 return -ENOMEM;
69170
69171 if (mm->map_count > sysctl_max_map_count)
69172 return -ENOMEM;
69173
69174- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69175+ if (security_vm_enough_memory(charged))
69176 return -ENOMEM;
69177
69178 /* Can we just expand an old private anonymous mapping? */
69179@@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
69180 */
69181 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69182 if (!vma) {
69183- vm_unacct_memory(len >> PAGE_SHIFT);
69184+ vm_unacct_memory(charged);
69185 return -ENOMEM;
69186 }
69187
69188@@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
69189 vma->vm_page_prot = vm_get_page_prot(flags);
69190 vma_link(mm, vma, prev, rb_link, rb_parent);
69191 out:
69192- mm->total_vm += len >> PAGE_SHIFT;
69193+ mm->total_vm += charged;
69194 if (flags & VM_LOCKED) {
69195 if (!mlock_vma_pages_range(vma, addr, addr + len))
69196- mm->locked_vm += (len >> PAGE_SHIFT);
69197+ mm->locked_vm += charged;
69198 }
69199+ track_exec_limit(mm, addr, addr + len, flags);
69200 return addr;
69201 }
69202
69203@@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
69204 * Walk the list again, actually closing and freeing it,
69205 * with preemption enabled, without holding any MM locks.
69206 */
69207- while (vma)
69208+ while (vma) {
69209+ vma->vm_mirror = NULL;
69210 vma = remove_vma(vma);
69211+ }
69212
69213 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69214 }
69215@@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
69216 struct vm_area_struct * __vma, * prev;
69217 struct rb_node ** rb_link, * rb_parent;
69218
69219+#ifdef CONFIG_PAX_SEGMEXEC
69220+ struct vm_area_struct *vma_m = NULL;
69221+#endif
69222+
69223 /*
69224 * The vm_pgoff of a purely anonymous vma should be irrelevant
69225 * until its first write fault, when page's anon_vma and index
69226@@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
69227 if ((vma->vm_flags & VM_ACCOUNT) &&
69228 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69229 return -ENOMEM;
69230+
69231+#ifdef CONFIG_PAX_SEGMEXEC
69232+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69233+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69234+ if (!vma_m)
69235+ return -ENOMEM;
69236+ }
69237+#endif
69238+
69239 vma_link(mm, vma, prev, rb_link, rb_parent);
69240+
69241+#ifdef CONFIG_PAX_SEGMEXEC
69242+ if (vma_m)
69243+ pax_mirror_vma(vma_m, vma);
69244+#endif
69245+
69246 return 0;
69247 }
69248
69249@@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
69250 struct rb_node **rb_link, *rb_parent;
69251 struct mempolicy *pol;
69252
69253+ BUG_ON(vma->vm_mirror);
69254+
69255 /*
69256 * If anonymous vma has not yet been faulted, update new pgoff
69257 * to match new location, to increase its chance of merging.
69258@@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
69259 return new_vma;
69260 }
69261
69262+#ifdef CONFIG_PAX_SEGMEXEC
69263+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69264+{
69265+ struct vm_area_struct *prev_m;
69266+ struct rb_node **rb_link_m, *rb_parent_m;
69267+ struct mempolicy *pol_m;
69268+
69269+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69270+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69271+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69272+ *vma_m = *vma;
69273+ pol_m = vma_policy(vma_m);
69274+ mpol_get(pol_m);
69275+ vma_set_policy(vma_m, pol_m);
69276+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69277+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69278+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69279+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69280+ if (vma_m->vm_file)
69281+ get_file(vma_m->vm_file);
69282+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69283+ vma_m->vm_ops->open(vma_m);
69284+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69285+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69286+ vma_m->vm_mirror = vma;
69287+ vma->vm_mirror = vma_m;
69288+}
69289+#endif
69290+
69291 /*
69292 * Return true if the calling process may expand its vm space by the passed
69293 * number of pages
69294@@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
69295 unsigned long lim;
69296
69297 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
69298-
69299+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69300 if (cur + npages > lim)
69301 return 0;
69302 return 1;
69303@@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
69304 vma->vm_start = addr;
69305 vma->vm_end = addr + len;
69306
69307+#ifdef CONFIG_PAX_MPROTECT
69308+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69309+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69310+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69311+ return -EPERM;
69312+ if (!(vm_flags & VM_EXEC))
69313+ vm_flags &= ~VM_MAYEXEC;
69314+#else
69315+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69316+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69317+#endif
69318+ else
69319+ vm_flags &= ~VM_MAYWRITE;
69320+ }
69321+#endif
69322+
69323 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69324 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69325
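
A compact userspace model of the heap/stack gap logic (check_heap_stack_gap / skip_heap_stack_gap) that the mmap.c hunks thread through the get_unmapped_area paths. The vma layout, the fixed GAP value and the omission of the STACK_GROWSUP cases are simplifications for illustration, not the kernel's behaviour.

#include <assert.h>
#include <stdbool.h>

#define VM_GROWSDOWN 0x0100UL
#define GAP (64UL * 1024UL)               /* models sysctl_heap_stack_gap = 64k */

struct vma { unsigned long start, end, flags; };

/* candidate [addr, addr+len) is acceptable if it neither overlaps the next
 * vma nor lands closer than GAP below a grows-down (stack) vma */
static bool check_heap_stack_gap(const struct vma *next, unsigned long addr,
                                 unsigned long len)
{
    if (!next)
        return true;                              /* nothing above us */
    if (addr + len > next->start)
        return false;                             /* plain overlap */
    if (next->flags & VM_GROWSDOWN)
        return GAP <= next->start - addr - len;   /* keep the gap below a stack */
    return true;
}

/* if the candidate is rejected, back off below the vma, skipping the gap */
static unsigned long skip_heap_stack_gap(const struct vma *v, unsigned long len)
{
    if (v->start < len)
        return (unsigned long)-1;                 /* would underflow: give up */
    if (!(v->flags & VM_GROWSDOWN))
        return v->start - len;
    if (GAP <= v->start - len)
        return v->start - len - GAP;
    return (unsigned long)-1;
}

int main(void)
{
    struct vma stack = { .start = 0x7ffff000UL, .end = 0x80000000UL, .flags = VM_GROWSDOWN };

    /* a mapping ending flush against the stack start is rejected... */
    assert(!check_heap_stack_gap(&stack, stack.start - 0x1000, 0x1000));
    /* ...but one leaving a full gap is fine */
    assert(check_heap_stack_gap(&stack, stack.start - GAP - 0x1000, 0x1000));
    /* and the retry helper proposes an address that satisfies the check */
    unsigned long retry = skip_heap_stack_gap(&stack, 0x1000);
    assert(check_heap_stack_gap(&stack, retry, 0x1000));
    return 0;
}

The same predicate is what replaces the open-coded "!vma || addr + len <= vma->vm_start" tests in the bottom-up and top-down search loops above.
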
69326diff -urNp linux-2.6.32.45/mm/mprotect.c linux-2.6.32.45/mm/mprotect.c
69327--- linux-2.6.32.45/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
69328+++ linux-2.6.32.45/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
69329@@ -24,10 +24,16 @@
69330 #include <linux/mmu_notifier.h>
69331 #include <linux/migrate.h>
69332 #include <linux/perf_event.h>
69333+
69334+#ifdef CONFIG_PAX_MPROTECT
69335+#include <linux/elf.h>
69336+#endif
69337+
69338 #include <asm/uaccess.h>
69339 #include <asm/pgtable.h>
69340 #include <asm/cacheflush.h>
69341 #include <asm/tlbflush.h>
69342+#include <asm/mmu_context.h>
69343
69344 #ifndef pgprot_modify
69345 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69346@@ -132,6 +138,48 @@ static void change_protection(struct vm_
69347 flush_tlb_range(vma, start, end);
69348 }
69349
69350+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69351+/* called while holding the mmap semaphore for writing, except for stack expansion */
69352+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69353+{
69354+ unsigned long oldlimit, newlimit = 0UL;
69355+
69356+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
69357+ return;
69358+
69359+ spin_lock(&mm->page_table_lock);
69360+ oldlimit = mm->context.user_cs_limit;
69361+ if ((prot & VM_EXEC) && oldlimit < end)
69362+ /* USER_CS limit moved up */
69363+ newlimit = end;
69364+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69365+ /* USER_CS limit moved down */
69366+ newlimit = start;
69367+
69368+ if (newlimit) {
69369+ mm->context.user_cs_limit = newlimit;
69370+
69371+#ifdef CONFIG_SMP
69372+ wmb();
69373+ cpus_clear(mm->context.cpu_user_cs_mask);
69374+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69375+#endif
69376+
69377+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69378+ }
69379+ spin_unlock(&mm->page_table_lock);
69380+ if (newlimit == end) {
69381+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69382+
69383+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69384+ if (is_vm_hugetlb_page(vma))
69385+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69386+ else
69387+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69388+ }
69389+}
69390+#endif
69391+
69392 int
69393 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69394 unsigned long start, unsigned long end, unsigned long newflags)
69395@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
69396 int error;
69397 int dirty_accountable = 0;
69398
69399+#ifdef CONFIG_PAX_SEGMEXEC
69400+ struct vm_area_struct *vma_m = NULL;
69401+ unsigned long start_m, end_m;
69402+
69403+ start_m = start + SEGMEXEC_TASK_SIZE;
69404+ end_m = end + SEGMEXEC_TASK_SIZE;
69405+#endif
69406+
69407 if (newflags == oldflags) {
69408 *pprev = vma;
69409 return 0;
69410 }
69411
69412+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69413+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69414+
69415+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69416+ return -ENOMEM;
69417+
69418+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69419+ return -ENOMEM;
69420+ }
69421+
69422 /*
69423 * If we make a private mapping writable we increase our commit;
69424 * but (without finer accounting) cannot reduce our commit if we
69425@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
69426 }
69427 }
69428
69429+#ifdef CONFIG_PAX_SEGMEXEC
69430+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69431+ if (start != vma->vm_start) {
69432+ error = split_vma(mm, vma, start, 1);
69433+ if (error)
69434+ goto fail;
69435+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69436+ *pprev = (*pprev)->vm_next;
69437+ }
69438+
69439+ if (end != vma->vm_end) {
69440+ error = split_vma(mm, vma, end, 0);
69441+ if (error)
69442+ goto fail;
69443+ }
69444+
69445+ if (pax_find_mirror_vma(vma)) {
69446+ error = __do_munmap(mm, start_m, end_m - start_m);
69447+ if (error)
69448+ goto fail;
69449+ } else {
69450+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69451+ if (!vma_m) {
69452+ error = -ENOMEM;
69453+ goto fail;
69454+ }
69455+ vma->vm_flags = newflags;
69456+ pax_mirror_vma(vma_m, vma);
69457+ }
69458+ }
69459+#endif
69460+
69461 /*
69462 * First try to merge with previous and/or next vma.
69463 */
69464@@ -195,9 +293,21 @@ success:
69465 * vm_flags and vm_page_prot are protected by the mmap_sem
69466 * held in write mode.
69467 */
69468+
69469+#ifdef CONFIG_PAX_SEGMEXEC
69470+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69471+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69472+#endif
69473+
69474 vma->vm_flags = newflags;
69475+
69476+#ifdef CONFIG_PAX_MPROTECT
69477+ if (mm->binfmt && mm->binfmt->handle_mprotect)
69478+ mm->binfmt->handle_mprotect(vma, newflags);
69479+#endif
69480+
69481 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69482- vm_get_page_prot(newflags));
69483+ vm_get_page_prot(vma->vm_flags));
69484
69485 if (vma_wants_writenotify(vma)) {
69486 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69487@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69488 end = start + len;
69489 if (end <= start)
69490 return -ENOMEM;
69491+
69492+#ifdef CONFIG_PAX_SEGMEXEC
69493+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69494+ if (end > SEGMEXEC_TASK_SIZE)
69495+ return -EINVAL;
69496+ } else
69497+#endif
69498+
69499+ if (end > TASK_SIZE)
69500+ return -EINVAL;
69501+
69502 if (!arch_validate_prot(prot))
69503 return -EINVAL;
69504
69505@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69506 /*
69507 * Does the application expect PROT_READ to imply PROT_EXEC:
69508 */
69509- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69510+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69511 prot |= PROT_EXEC;
69512
69513 vm_flags = calc_vm_prot_bits(prot);
69514@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69515 if (start > vma->vm_start)
69516 prev = vma;
69517
69518+#ifdef CONFIG_PAX_MPROTECT
69519+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69520+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
69521+#endif
69522+
69523 for (nstart = start ; ; ) {
69524 unsigned long newflags;
69525
69526@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69527
69528 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69529 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69530+ if (prot & (PROT_WRITE | PROT_EXEC))
69531+ gr_log_rwxmprotect(vma->vm_file);
69532+
69533+ error = -EACCES;
69534+ goto out;
69535+ }
69536+
69537+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69538 error = -EACCES;
69539 goto out;
69540 }
69541@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69542 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69543 if (error)
69544 goto out;
69545+
69546+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
69547+
69548 nstart = tmp;
69549
69550 if (nstart < prev->vm_end)
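
A toy model of the direction in which track_exec_limit() (added in the mprotect.c hunk above and called from the mmap/mremap hunks) is meant to move the per-mm executable limit on x86-32 PAGEEXEC: up when an executable range ends beyond it, down when exec rights are dropped on a range straddling it. All names and addresses here are illustrative.

#include <assert.h>

#define VM_EXEC 0x4UL

static unsigned long track_exec_limit(unsigned long oldlimit,
                                      unsigned long start, unsigned long end,
                                      unsigned long prot)
{
    if ((prot & VM_EXEC) && oldlimit < end)
        return end;                      /* limit moves up to cover new exec */
    if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
        return start;                    /* limit moves down past removed exec */
    return oldlimit;                     /* otherwise unchanged */
}

int main(void)
{
    unsigned long limit = 0x08050000UL;                          /* e.g. end of text */

    limit = track_exec_limit(limit, 0x40000000UL, 0x40001000UL, VM_EXEC);
    assert(limit == 0x40001000UL);       /* mprotect(..., PROT_EXEC) raised it   */

    limit = track_exec_limit(limit, 0x40000000UL, 0x40001000UL, 0);
    assert(limit == 0x40000000UL);       /* dropping exec lowered it again       */
    return 0;
}
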
69551diff -urNp linux-2.6.32.45/mm/mremap.c linux-2.6.32.45/mm/mremap.c
69552--- linux-2.6.32.45/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
69553+++ linux-2.6.32.45/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
69554@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
69555 continue;
69556 pte = ptep_clear_flush(vma, old_addr, old_pte);
69557 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69558+
69559+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69560+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69561+ pte = pte_exprotect(pte);
69562+#endif
69563+
69564 set_pte_at(mm, new_addr, new_pte, pte);
69565 }
69566
69567@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
69568 if (is_vm_hugetlb_page(vma))
69569 goto Einval;
69570
69571+#ifdef CONFIG_PAX_SEGMEXEC
69572+ if (pax_find_mirror_vma(vma))
69573+ goto Einval;
69574+#endif
69575+
69576 /* We can't remap across vm area boundaries */
69577 if (old_len > vma->vm_end - addr)
69578 goto Efault;
69579@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
69580 unsigned long ret = -EINVAL;
69581 unsigned long charged = 0;
69582 unsigned long map_flags;
69583+ unsigned long pax_task_size = TASK_SIZE;
69584
69585 if (new_addr & ~PAGE_MASK)
69586 goto out;
69587
69588- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69589+#ifdef CONFIG_PAX_SEGMEXEC
69590+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69591+ pax_task_size = SEGMEXEC_TASK_SIZE;
69592+#endif
69593+
69594+ pax_task_size -= PAGE_SIZE;
69595+
69596+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69597 goto out;
69598
69599 /* Check if the location we're moving into overlaps the
69600 * old location at all, and fail if it does.
69601 */
69602- if ((new_addr <= addr) && (new_addr+new_len) > addr)
69603- goto out;
69604-
69605- if ((addr <= new_addr) && (addr+old_len) > new_addr)
69606+ if (addr + old_len > new_addr && new_addr + new_len > addr)
69607 goto out;
69608
69609 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69610@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
69611 struct vm_area_struct *vma;
69612 unsigned long ret = -EINVAL;
69613 unsigned long charged = 0;
69614+ unsigned long pax_task_size = TASK_SIZE;
69615
69616 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69617 goto out;
69618@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
69619 if (!new_len)
69620 goto out;
69621
69622+#ifdef CONFIG_PAX_SEGMEXEC
69623+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69624+ pax_task_size = SEGMEXEC_TASK_SIZE;
69625+#endif
69626+
69627+ pax_task_size -= PAGE_SIZE;
69628+
69629+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69630+ old_len > pax_task_size || addr > pax_task_size-old_len)
69631+ goto out;
69632+
69633 if (flags & MREMAP_FIXED) {
69634 if (flags & MREMAP_MAYMOVE)
69635 ret = mremap_to(addr, old_len, new_addr, new_len);
69636@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
69637 addr + new_len);
69638 }
69639 ret = addr;
69640+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69641 goto out;
69642 }
69643 }
69644@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
69645 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69646 if (ret)
69647 goto out;
69648+
69649+ map_flags = vma->vm_flags;
69650 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69651+ if (!(ret & ~PAGE_MASK)) {
69652+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69653+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69654+ }
69655 }
69656 out:
69657 if (ret & ~PAGE_MASK)
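
The rewritten overlap test in mremap_to() is the standard half-open interval intersection check; a small sketch (arbitrary values) showing it agrees with the two separate conditions it replaces.

#include <assert.h>
#include <stdbool.h>

static bool overlaps_new(unsigned long addr, unsigned long old_len,
                         unsigned long new_addr, unsigned long new_len)
{
    /* [addr, addr+old_len) intersects [new_addr, new_addr+new_len) */
    return addr + old_len > new_addr && new_addr + new_len > addr;
}

static bool overlaps_old(unsigned long addr, unsigned long old_len,
                         unsigned long new_addr, unsigned long new_len)
{
    if (new_addr <= addr && new_addr + new_len > addr)
        return true;
    if (addr <= new_addr && addr + old_len > new_addr)
        return true;
    return false;
}

int main(void)
{
    unsigned long cases[][4] = {
        { 0x1000, 0x3000, 0x2000, 0x1000 },  /* new inside old       */
        { 0x1000, 0x1000, 0x2000, 0x1000 },  /* adjacent, no overlap */
        { 0x2000, 0x1000, 0x1000, 0x3000 },  /* old inside new       */
        { 0x5000, 0x1000, 0x1000, 0x1000 },  /* disjoint             */
    };
    for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
        assert(overlaps_new(cases[i][0], cases[i][1], cases[i][2], cases[i][3]) ==
               overlaps_old(cases[i][0], cases[i][1], cases[i][2], cases[i][3]));
    return 0;
}
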
69658diff -urNp linux-2.6.32.45/mm/nommu.c linux-2.6.32.45/mm/nommu.c
69659--- linux-2.6.32.45/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
69660+++ linux-2.6.32.45/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
69661@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69662 int sysctl_overcommit_ratio = 50; /* default is 50% */
69663 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69664 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69665-int heap_stack_gap = 0;
69666
69667 atomic_long_t mmap_pages_allocated;
69668
69669@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
69670 EXPORT_SYMBOL(find_vma);
69671
69672 /*
69673- * find a VMA
69674- * - we don't extend stack VMAs under NOMMU conditions
69675- */
69676-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69677-{
69678- return find_vma(mm, addr);
69679-}
69680-
69681-/*
69682 * expand a stack to a given address
69683 * - not supported under NOMMU conditions
69684 */
69685diff -urNp linux-2.6.32.45/mm/page_alloc.c linux-2.6.32.45/mm/page_alloc.c
69686--- linux-2.6.32.45/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
69687+++ linux-2.6.32.45/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
69688@@ -289,7 +289,7 @@ out:
69689 * This usage means that zero-order pages may not be compound.
69690 */
69691
69692-static void free_compound_page(struct page *page)
69693+void free_compound_page(struct page *page)
69694 {
69695 __free_pages_ok(page, compound_order(page));
69696 }
69697@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
69698 int bad = 0;
69699 int wasMlocked = __TestClearPageMlocked(page);
69700
69701+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69702+ unsigned long index = 1UL << order;
69703+#endif
69704+
69705 kmemcheck_free_shadow(page, order);
69706
69707 for (i = 0 ; i < (1 << order) ; ++i)
69708@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
69709 debug_check_no_obj_freed(page_address(page),
69710 PAGE_SIZE << order);
69711 }
69712+
69713+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69714+ for (; index; --index)
69715+ sanitize_highpage(page + index - 1);
69716+#endif
69717+
69718 arch_free_page(page, order);
69719 kernel_map_pages(page, 1 << order, 0);
69720
69721@@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
69722 arch_alloc_page(page, order);
69723 kernel_map_pages(page, 1 << order, 1);
69724
69725+#ifndef CONFIG_PAX_MEMORY_SANITIZE
69726 if (gfp_flags & __GFP_ZERO)
69727 prep_zero_page(page, order, gfp_flags);
69728+#endif
69729
69730 if (order && (gfp_flags & __GFP_COMP))
69731 prep_compound_page(page, order);
69732@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
69733 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
69734 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
69735 }
69736+
69737+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69738+ sanitize_highpage(page);
69739+#endif
69740+
69741 arch_free_page(page, 0);
69742 kernel_map_pages(page, 1, 0);
69743
69744@@ -2179,6 +2196,8 @@ void show_free_areas(void)
69745 int cpu;
69746 struct zone *zone;
69747
69748+ pax_track_stack();
69749+
69750 for_each_populated_zone(zone) {
69751 show_node(zone);
69752 printk("%s per-cpu:\n", zone->name);
69753@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
69754 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
69755 }
69756 #else
69757-static void inline setup_usemap(struct pglist_data *pgdat,
69758+static inline void setup_usemap(struct pglist_data *pgdat,
69759 struct zone *zone, unsigned long zonesize) {}
69760 #endif /* CONFIG_SPARSEMEM */
69761
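
A toy free list illustrating the idea behind the CONFIG_PAX_MEMORY_SANITIZE hunks above: pages are scrubbed when they are freed, so the separate __GFP_ZERO zeroing pass at allocation time can be skipped. The page size, pool and scrub value are made up for illustration.

#include <assert.h>
#include <string.h>

#define PAGE_SIZE 4096
#define NPAGES    4

static unsigned char pool[NPAGES][PAGE_SIZE];
static int free_list[NPAGES];
static int nfree;

static void init_pool(void)
{
    for (int i = 0; i < NPAGES; i++)
        free_list[nfree++] = i;
}

static unsigned char *alloc_page_toy(void)
{
    assert(nfree > 0);
    return pool[free_list[--nfree]];    /* no zeroing here: free already did it */
}

static void free_page_toy(unsigned char *page)
{
    memset(page, 0, PAGE_SIZE);         /* models sanitize_highpage() on free */
    assert(nfree < NPAGES);
    free_list[nfree++] = (int)((page - &pool[0][0]) / PAGE_SIZE);
}

int main(void)
{
    init_pool();
    unsigned char *p = alloc_page_toy();
    memset(p, 0xaa, PAGE_SIZE);         /* pretend it held sensitive data */
    free_page_toy(p);

    unsigned char *q = alloc_page_toy();
    for (int i = 0; i < PAGE_SIZE; i++)
        assert(q[i] == 0);              /* old contents are gone by construction */
    return 0;
}
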
69762diff -urNp linux-2.6.32.45/mm/percpu.c linux-2.6.32.45/mm/percpu.c
69763--- linux-2.6.32.45/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
69764+++ linux-2.6.32.45/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
69765@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
69766 static unsigned int pcpu_last_unit_cpu __read_mostly;
69767
69768 /* the address of the first chunk which starts with the kernel static area */
69769-void *pcpu_base_addr __read_mostly;
69770+void *pcpu_base_addr __read_only;
69771 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69772
69773 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69774diff -urNp linux-2.6.32.45/mm/rmap.c linux-2.6.32.45/mm/rmap.c
69775--- linux-2.6.32.45/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
69776+++ linux-2.6.32.45/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
69777@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
69778 /* page_table_lock to protect against threads */
69779 spin_lock(&mm->page_table_lock);
69780 if (likely(!vma->anon_vma)) {
69781+
69782+#ifdef CONFIG_PAX_SEGMEXEC
69783+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
69784+
69785+ if (vma_m) {
69786+ BUG_ON(vma_m->anon_vma);
69787+ vma_m->anon_vma = anon_vma;
69788+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
69789+ }
69790+#endif
69791+
69792 vma->anon_vma = anon_vma;
69793 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
69794 allocated = NULL;
69795diff -urNp linux-2.6.32.45/mm/shmem.c linux-2.6.32.45/mm/shmem.c
69796--- linux-2.6.32.45/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
69797+++ linux-2.6.32.45/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
69798@@ -31,7 +31,7 @@
69799 #include <linux/swap.h>
69800 #include <linux/ima.h>
69801
69802-static struct vfsmount *shm_mnt;
69803+struct vfsmount *shm_mnt;
69804
69805 #ifdef CONFIG_SHMEM
69806 /*
69807@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
69808 goto unlock;
69809 }
69810 entry = shmem_swp_entry(info, index, NULL);
69811+ if (!entry)
69812+ goto unlock;
69813 if (entry->val) {
69814 /*
69815 * The more uptodate page coming down from a stacked
69816@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
69817 struct vm_area_struct pvma;
69818 struct page *page;
69819
69820+ pax_track_stack();
69821+
69822 spol = mpol_cond_copy(&mpol,
69823 mpol_shared_policy_lookup(&info->policy, idx));
69824
69825@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
69826
69827 info = SHMEM_I(inode);
69828 inode->i_size = len-1;
69829- if (len <= (char *)inode - (char *)info) {
69830+ if (len <= (char *)inode - (char *)info && len <= 64) {
69831 /* do it inline */
69832 memcpy(info, symname, len);
69833 inode->i_op = &shmem_symlink_inline_operations;
69834@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
69835 int err = -ENOMEM;
69836
69837 /* Round up to L1_CACHE_BYTES to resist false sharing */
69838- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
69839- L1_CACHE_BYTES), GFP_KERNEL);
69840+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
69841 if (!sbinfo)
69842 return -ENOMEM;
69843
69844diff -urNp linux-2.6.32.45/mm/slab.c linux-2.6.32.45/mm/slab.c
69845--- linux-2.6.32.45/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
69846+++ linux-2.6.32.45/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
69847@@ -174,7 +174,7 @@
69848
69849 /* Legal flag mask for kmem_cache_create(). */
69850 #if DEBUG
69851-# define CREATE_MASK (SLAB_RED_ZONE | \
69852+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
69853 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
69854 SLAB_CACHE_DMA | \
69855 SLAB_STORE_USER | \
69856@@ -182,7 +182,7 @@
69857 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69858 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
69859 #else
69860-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
69861+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
69862 SLAB_CACHE_DMA | \
69863 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
69864 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69865@@ -308,7 +308,7 @@ struct kmem_list3 {
69866 * Need this for bootstrapping a per node allocator.
69867 */
69868 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
69869-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
69870+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
69871 #define CACHE_CACHE 0
69872 #define SIZE_AC MAX_NUMNODES
69873 #define SIZE_L3 (2 * MAX_NUMNODES)
69874@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
69875 if ((x)->max_freeable < i) \
69876 (x)->max_freeable = i; \
69877 } while (0)
69878-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
69879-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
69880-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
69881-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
69882+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
69883+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
69884+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
69885+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
69886 #else
69887 #define STATS_INC_ACTIVE(x) do { } while (0)
69888 #define STATS_DEC_ACTIVE(x) do { } while (0)
69889@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
69890 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
69891 */
69892 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
69893- const struct slab *slab, void *obj)
69894+ const struct slab *slab, const void *obj)
69895 {
69896 u32 offset = (obj - slab->s_mem);
69897 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
69898@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
69899 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
69900 sizes[INDEX_AC].cs_size,
69901 ARCH_KMALLOC_MINALIGN,
69902- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69903+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69904 NULL);
69905
69906 if (INDEX_AC != INDEX_L3) {
69907@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
69908 kmem_cache_create(names[INDEX_L3].name,
69909 sizes[INDEX_L3].cs_size,
69910 ARCH_KMALLOC_MINALIGN,
69911- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69912+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69913 NULL);
69914 }
69915
69916@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
69917 sizes->cs_cachep = kmem_cache_create(names->name,
69918 sizes->cs_size,
69919 ARCH_KMALLOC_MINALIGN,
69920- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69921+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69922 NULL);
69923 }
69924 #ifdef CONFIG_ZONE_DMA
69925@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
69926 }
69927 /* cpu stats */
69928 {
69929- unsigned long allochit = atomic_read(&cachep->allochit);
69930- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
69931- unsigned long freehit = atomic_read(&cachep->freehit);
69932- unsigned long freemiss = atomic_read(&cachep->freemiss);
69933+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
69934+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
69935+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
69936+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
69937
69938 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
69939 allochit, allocmiss, freehit, freemiss);
69940@@ -4471,15 +4471,66 @@ static const struct file_operations proc
69941
69942 static int __init slab_proc_init(void)
69943 {
69944- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
69945+ mode_t gr_mode = S_IRUGO;
69946+
69947+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69948+ gr_mode = S_IRUSR;
69949+#endif
69950+
69951+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
69952 #ifdef CONFIG_DEBUG_SLAB_LEAK
69953- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
69954+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
69955 #endif
69956 return 0;
69957 }
69958 module_init(slab_proc_init);
69959 #endif
69960
69961+void check_object_size(const void *ptr, unsigned long n, bool to)
69962+{
69963+
69964+#ifdef CONFIG_PAX_USERCOPY
69965+ struct page *page;
69966+ struct kmem_cache *cachep = NULL;
69967+ struct slab *slabp;
69968+ unsigned int objnr;
69969+ unsigned long offset;
69970+
69971+ if (!n)
69972+ return;
69973+
69974+ if (ZERO_OR_NULL_PTR(ptr))
69975+ goto report;
69976+
69977+ if (!virt_addr_valid(ptr))
69978+ return;
69979+
69980+ page = virt_to_head_page(ptr);
69981+
69982+ if (!PageSlab(page)) {
69983+ if (object_is_on_stack(ptr, n) == -1)
69984+ goto report;
69985+ return;
69986+ }
69987+
69988+ cachep = page_get_cache(page);
69989+ if (!(cachep->flags & SLAB_USERCOPY))
69990+ goto report;
69991+
69992+ slabp = page_get_slab(page);
69993+ objnr = obj_to_index(cachep, slabp, ptr);
69994+ BUG_ON(objnr >= cachep->num);
69995+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
69996+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
69997+ return;
69998+
69999+report:
70000+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
70001+#endif
70002+
70003+}
70004+EXPORT_SYMBOL(check_object_size);
70005+
70006 /**
70007 * ksize - get the actual amount of memory allocated for a given object
70008 * @objp: Pointer to the object
70009diff -urNp linux-2.6.32.45/mm/slob.c linux-2.6.32.45/mm/slob.c
70010--- linux-2.6.32.45/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
70011+++ linux-2.6.32.45/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
70012@@ -29,7 +29,7 @@
70013 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70014 * alloc_pages() directly, allocating compound pages so the page order
70015 * does not have to be separately tracked, and also stores the exact
70016- * allocation size in page->private so that it can be used to accurately
70017+ * allocation size in slob_page->size so that it can be used to accurately
70018 * provide ksize(). These objects are detected in kfree() because slob_page()
70019 * is false for them.
70020 *
70021@@ -58,6 +58,7 @@
70022 */
70023
70024 #include <linux/kernel.h>
70025+#include <linux/sched.h>
70026 #include <linux/slab.h>
70027 #include <linux/mm.h>
70028 #include <linux/swap.h> /* struct reclaim_state */
70029@@ -100,7 +101,8 @@ struct slob_page {
70030 unsigned long flags; /* mandatory */
70031 atomic_t _count; /* mandatory */
70032 slobidx_t units; /* free units left in page */
70033- unsigned long pad[2];
70034+ unsigned long pad[1];
70035+ unsigned long size; /* size when >=PAGE_SIZE */
70036 slob_t *free; /* first free slob_t in page */
70037 struct list_head list; /* linked list of free pages */
70038 };
70039@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
70040 */
70041 static inline int is_slob_page(struct slob_page *sp)
70042 {
70043- return PageSlab((struct page *)sp);
70044+ return PageSlab((struct page *)sp) && !sp->size;
70045 }
70046
70047 static inline void set_slob_page(struct slob_page *sp)
70048@@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
70049
70050 static inline struct slob_page *slob_page(const void *addr)
70051 {
70052- return (struct slob_page *)virt_to_page(addr);
70053+ return (struct slob_page *)virt_to_head_page(addr);
70054 }
70055
70056 /*
70057@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
70058 /*
70059 * Return the size of a slob block.
70060 */
70061-static slobidx_t slob_units(slob_t *s)
70062+static slobidx_t slob_units(const slob_t *s)
70063 {
70064 if (s->units > 0)
70065 return s->units;
70066@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
70067 /*
70068 * Return the next free slob block pointer after this one.
70069 */
70070-static slob_t *slob_next(slob_t *s)
70071+static slob_t *slob_next(const slob_t *s)
70072 {
70073 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70074 slobidx_t next;
70075@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
70076 /*
70077 * Returns true if s is the last free block in its page.
70078 */
70079-static int slob_last(slob_t *s)
70080+static int slob_last(const slob_t *s)
70081 {
70082 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70083 }
70084@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
70085 if (!page)
70086 return NULL;
70087
70088+ set_slob_page(page);
70089 return page_address(page);
70090 }
70091
70092@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
70093 if (!b)
70094 return NULL;
70095 sp = slob_page(b);
70096- set_slob_page(sp);
70097
70098 spin_lock_irqsave(&slob_lock, flags);
70099 sp->units = SLOB_UNITS(PAGE_SIZE);
70100 sp->free = b;
70101+ sp->size = 0;
70102 INIT_LIST_HEAD(&sp->list);
70103 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70104 set_slob_page_free(sp, slob_list);
70105@@ -475,10 +478,9 @@ out:
70106 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
70107 #endif
70108
70109-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70110+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70111 {
70112- unsigned int *m;
70113- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70114+ slob_t *m;
70115 void *ret;
70116
70117 lockdep_trace_alloc(gfp);
70118@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
70119
70120 if (!m)
70121 return NULL;
70122- *m = size;
70123+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70124+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70125+ m[0].units = size;
70126+ m[1].units = align;
70127 ret = (void *)m + align;
70128
70129 trace_kmalloc_node(_RET_IP_, ret,
70130@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
70131
70132 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
70133 if (ret) {
70134- struct page *page;
70135- page = virt_to_page(ret);
70136- page->private = size;
70137+ struct slob_page *sp;
70138+ sp = slob_page(ret);
70139+ sp->size = size;
70140 }
70141
70142 trace_kmalloc_node(_RET_IP_, ret,
70143 size, PAGE_SIZE << order, gfp, node);
70144 }
70145
70146- kmemleak_alloc(ret, size, 1, gfp);
70147+ return ret;
70148+}
70149+
70150+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70151+{
70152+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70153+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70154+
70155+ if (!ZERO_OR_NULL_PTR(ret))
70156+ kmemleak_alloc(ret, size, 1, gfp);
70157 return ret;
70158 }
70159 EXPORT_SYMBOL(__kmalloc_node);
70160@@ -528,13 +542,88 @@ void kfree(const void *block)
70161 sp = slob_page(block);
70162 if (is_slob_page(sp)) {
70163 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70164- unsigned int *m = (unsigned int *)(block - align);
70165- slob_free(m, *m + align);
70166- } else
70167+ slob_t *m = (slob_t *)(block - align);
70168+ slob_free(m, m[0].units + align);
70169+ } else {
70170+ clear_slob_page(sp);
70171+ free_slob_page(sp);
70172+ sp->size = 0;
70173 put_page(&sp->page);
70174+ }
70175 }
70176 EXPORT_SYMBOL(kfree);
70177
70178+void check_object_size(const void *ptr, unsigned long n, bool to)
70179+{
70180+
70181+#ifdef CONFIG_PAX_USERCOPY
70182+ struct slob_page *sp;
70183+ const slob_t *free;
70184+ const void *base;
70185+ unsigned long flags;
70186+
70187+ if (!n)
70188+ return;
70189+
70190+ if (ZERO_OR_NULL_PTR(ptr))
70191+ goto report;
70192+
70193+ if (!virt_addr_valid(ptr))
70194+ return;
70195+
70196+ sp = slob_page(ptr);
70197+ if (!PageSlab((struct page*)sp)) {
70198+ if (object_is_on_stack(ptr, n) == -1)
70199+ goto report;
70200+ return;
70201+ }
70202+
70203+ if (sp->size) {
70204+ base = page_address(&sp->page);
70205+ if (base <= ptr && n <= sp->size - (ptr - base))
70206+ return;
70207+ goto report;
70208+ }
70209+
70210+ /* some tricky double walking to find the chunk */
70211+ spin_lock_irqsave(&slob_lock, flags);
70212+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70213+ free = sp->free;
70214+
70215+ while (!slob_last(free) && (void *)free <= ptr) {
70216+ base = free + slob_units(free);
70217+ free = slob_next(free);
70218+ }
70219+
70220+ while (base < (void *)free) {
70221+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70222+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70223+ int offset;
70224+
70225+ if (ptr < base + align)
70226+ break;
70227+
70228+ offset = ptr - base - align;
70229+ if (offset >= m) {
70230+ base += size;
70231+ continue;
70232+ }
70233+
70234+ if (n > m - offset)
70235+ break;
70236+
70237+ spin_unlock_irqrestore(&slob_lock, flags);
70238+ return;
70239+ }
70240+
70241+ spin_unlock_irqrestore(&slob_lock, flags);
70242+report:
70243+ pax_report_usercopy(ptr, n, to, NULL);
70244+#endif
70245+
70246+}
70247+EXPORT_SYMBOL(check_object_size);
70248+
70249 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70250 size_t ksize(const void *block)
70251 {
70252@@ -547,10 +636,10 @@ size_t ksize(const void *block)
70253 sp = slob_page(block);
70254 if (is_slob_page(sp)) {
70255 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70256- unsigned int *m = (unsigned int *)(block - align);
70257- return SLOB_UNITS(*m) * SLOB_UNIT;
70258+ slob_t *m = (slob_t *)(block - align);
70259+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70260 } else
70261- return sp->page.private;
70262+ return sp->size;
70263 }
70264 EXPORT_SYMBOL(ksize);
70265
70266@@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
70267 {
70268 struct kmem_cache *c;
70269
70270+#ifdef CONFIG_PAX_USERCOPY
70271+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70272+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70273+#else
70274 c = slob_alloc(sizeof(struct kmem_cache),
70275 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70276+#endif
70277
70278 if (c) {
70279 c->name = name;
70280@@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
70281 {
70282 void *b;
70283
70284+#ifdef CONFIG_PAX_USERCOPY
70285+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70286+#else
70287 if (c->size < PAGE_SIZE) {
70288 b = slob_alloc(c->size, flags, c->align, node);
70289 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70290 SLOB_UNITS(c->size) * SLOB_UNIT,
70291 flags, node);
70292 } else {
70293+ struct slob_page *sp;
70294+
70295 b = slob_new_pages(flags, get_order(c->size), node);
70296+ sp = slob_page(b);
70297+ sp->size = c->size;
70298 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70299 PAGE_SIZE << get_order(c->size),
70300 flags, node);
70301 }
70302+#endif
70303
70304 if (c->ctor)
70305 c->ctor(b);
70306@@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70307
70308 static void __kmem_cache_free(void *b, int size)
70309 {
70310- if (size < PAGE_SIZE)
70311+ struct slob_page *sp = slob_page(b);
70312+
70313+ if (is_slob_page(sp))
70314 slob_free(b, size);
70315- else
70316+ else {
70317+ clear_slob_page(sp);
70318+ free_slob_page(sp);
70319+ sp->size = 0;
70320 slob_free_pages(b, get_order(size));
70321+ }
70322 }
70323
70324 static void kmem_rcu_free(struct rcu_head *head)
70325@@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
70326
70327 void kmem_cache_free(struct kmem_cache *c, void *b)
70328 {
70329+ int size = c->size;
70330+
70331+#ifdef CONFIG_PAX_USERCOPY
70332+ if (size + c->align < PAGE_SIZE) {
70333+ size += c->align;
70334+ b -= c->align;
70335+ }
70336+#endif
70337+
70338 kmemleak_free_recursive(b, c->flags);
70339 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70340 struct slob_rcu *slob_rcu;
70341- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70342+ slob_rcu = b + (size - sizeof(struct slob_rcu));
70343 INIT_RCU_HEAD(&slob_rcu->head);
70344- slob_rcu->size = c->size;
70345+ slob_rcu->size = size;
70346 call_rcu(&slob_rcu->head, kmem_rcu_free);
70347 } else {
70348- __kmem_cache_free(b, c->size);
70349+ __kmem_cache_free(b, size);
70350 }
70351
70352+#ifdef CONFIG_PAX_USERCOPY
70353+ trace_kfree(_RET_IP_, b);
70354+#else
70355 trace_kmem_cache_free(_RET_IP_, b);
70356+#endif
70357+
70358 }
70359 EXPORT_SYMBOL(kmem_cache_free);
70360
70361diff -urNp linux-2.6.32.45/mm/slub.c linux-2.6.32.45/mm/slub.c
70362--- linux-2.6.32.45/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
70363+++ linux-2.6.32.45/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
70364@@ -410,7 +410,7 @@ static void print_track(const char *s, s
70365 if (!t->addr)
70366 return;
70367
70368- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70369+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70370 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70371 }
70372
70373@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
70374
70375 page = virt_to_head_page(x);
70376
70377+ BUG_ON(!PageSlab(page));
70378+
70379 slab_free(s, page, x, _RET_IP_);
70380
70381 trace_kmem_cache_free(_RET_IP_, x);
70382@@ -1937,7 +1939,7 @@ static int slub_min_objects;
70383 * Merge control. If this is set then no merging of slab caches will occur.
70384 * (Could be removed. This was introduced to pacify the merge skeptics.)
70385 */
70386-static int slub_nomerge;
70387+static int slub_nomerge = 1;
70388
70389 /*
70390 * Calculate the order of allocation given an slab object size.
70391@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
70392 * list to avoid pounding the page allocator excessively.
70393 */
70394 set_min_partial(s, ilog2(s->size));
70395- s->refcount = 1;
70396+ atomic_set(&s->refcount, 1);
70397 #ifdef CONFIG_NUMA
70398 s->remote_node_defrag_ratio = 1000;
70399 #endif
70400@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
70401 void kmem_cache_destroy(struct kmem_cache *s)
70402 {
70403 down_write(&slub_lock);
70404- s->refcount--;
70405- if (!s->refcount) {
70406+ if (atomic_dec_and_test(&s->refcount)) {
70407 list_del(&s->list);
70408 up_write(&slub_lock);
70409 if (kmem_cache_close(s)) {
70410@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
70411 __setup("slub_nomerge", setup_slub_nomerge);
70412
70413 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
70414- const char *name, int size, gfp_t gfp_flags)
70415+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
70416 {
70417- unsigned int flags = 0;
70418-
70419 if (gfp_flags & SLUB_DMA)
70420- flags = SLAB_CACHE_DMA;
70421+ flags |= SLAB_CACHE_DMA;
70422
70423 /*
70424 * This function is called with IRQs disabled during early-boot on
70425@@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
70426 EXPORT_SYMBOL(__kmalloc_node);
70427 #endif
70428
70429+void check_object_size(const void *ptr, unsigned long n, bool to)
70430+{
70431+
70432+#ifdef CONFIG_PAX_USERCOPY
70433+ struct page *page;
70434+ struct kmem_cache *s = NULL;
70435+ unsigned long offset;
70436+
70437+ if (!n)
70438+ return;
70439+
70440+ if (ZERO_OR_NULL_PTR(ptr))
70441+ goto report;
70442+
70443+ if (!virt_addr_valid(ptr))
70444+ return;
70445+
70446+ page = get_object_page(ptr);
70447+
70448+ if (!page) {
70449+ if (object_is_on_stack(ptr, n) == -1)
70450+ goto report;
70451+ return;
70452+ }
70453+
70454+ s = page->slab;
70455+ if (!(s->flags & SLAB_USERCOPY))
70456+ goto report;
70457+
70458+ offset = (ptr - page_address(page)) % s->size;
70459+ if (offset <= s->objsize && n <= s->objsize - offset)
70460+ return;
70461+
70462+report:
70463+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70464+#endif
70465+
70466+}
70467+EXPORT_SYMBOL(check_object_size);
70468+
70469 size_t ksize(const void *object)
70470 {
70471 struct page *page;
70472@@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
70473 * kmem_cache_open for slab_state == DOWN.
70474 */
70475 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
70476- sizeof(struct kmem_cache_node), GFP_NOWAIT);
70477- kmalloc_caches[0].refcount = -1;
70478+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
70479+ atomic_set(&kmalloc_caches[0].refcount, -1);
70480 caches++;
70481
70482 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
70483@@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
70484 /* Caches that are not of the two-to-the-power-of size */
70485 if (KMALLOC_MIN_SIZE <= 32) {
70486 create_kmalloc_cache(&kmalloc_caches[1],
70487- "kmalloc-96", 96, GFP_NOWAIT);
70488+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
70489 caches++;
70490 }
70491 if (KMALLOC_MIN_SIZE <= 64) {
70492 create_kmalloc_cache(&kmalloc_caches[2],
70493- "kmalloc-192", 192, GFP_NOWAIT);
70494+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
70495 caches++;
70496 }
70497
70498 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70499 create_kmalloc_cache(&kmalloc_caches[i],
70500- "kmalloc", 1 << i, GFP_NOWAIT);
70501+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
70502 caches++;
70503 }
70504
70505@@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
70506 /*
70507 * We may have set a slab to be unmergeable during bootstrap.
70508 */
70509- if (s->refcount < 0)
70510+ if (atomic_read(&s->refcount) < 0)
70511 return 1;
70512
70513 return 0;
70514@@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
70515 if (s) {
70516 int cpu;
70517
70518- s->refcount++;
70519+ atomic_inc(&s->refcount);
70520 /*
70521 * Adjust the object sizes so that we clear
70522 * the complete object on kzalloc.
70523@@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
70524
70525 if (sysfs_slab_alias(s, name)) {
70526 down_write(&slub_lock);
70527- s->refcount--;
70528+ atomic_dec(&s->refcount);
70529 up_write(&slub_lock);
70530 goto err;
70531 }
70532@@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
70533
70534 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70535 {
70536- return sprintf(buf, "%d\n", s->refcount - 1);
70537+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70538 }
70539 SLAB_ATTR_RO(aliases);
70540
70541@@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
70542 kfree(s);
70543 }
70544
70545-static struct sysfs_ops slab_sysfs_ops = {
70546+static const struct sysfs_ops slab_sysfs_ops = {
70547 .show = slab_attr_show,
70548 .store = slab_attr_store,
70549 };
70550@@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
70551 return 0;
70552 }
70553
70554-static struct kset_uevent_ops slab_uevent_ops = {
70555+static const struct kset_uevent_ops slab_uevent_ops = {
70556 .filter = uevent_filter,
70557 };
70558
70559@@ -4785,7 +4824,13 @@ static const struct file_operations proc
70560
70561 static int __init slab_proc_init(void)
70562 {
70563- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70564+ mode_t gr_mode = S_IRUGO;
70565+
70566+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70567+ gr_mode = S_IRUSR;
70568+#endif
70569+
70570+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70571 return 0;
70572 }
70573 module_init(slab_proc_init);
70574diff -urNp linux-2.6.32.45/mm/swap.c linux-2.6.32.45/mm/swap.c
70575--- linux-2.6.32.45/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
70576+++ linux-2.6.32.45/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
70577@@ -30,6 +30,7 @@
70578 #include <linux/notifier.h>
70579 #include <linux/backing-dev.h>
70580 #include <linux/memcontrol.h>
70581+#include <linux/hugetlb.h>
70582
70583 #include "internal.h"
70584
70585@@ -65,6 +66,8 @@ static void put_compound_page(struct pag
70586 compound_page_dtor *dtor;
70587
70588 dtor = get_compound_page_dtor(page);
70589+ if (!PageHuge(page))
70590+ BUG_ON(dtor != free_compound_page);
70591 (*dtor)(page);
70592 }
70593 }
70594diff -urNp linux-2.6.32.45/mm/util.c linux-2.6.32.45/mm/util.c
70595--- linux-2.6.32.45/mm/util.c 2011-03-27 14:31:47.000000000 -0400
70596+++ linux-2.6.32.45/mm/util.c 2011-04-17 15:56:46.000000000 -0400
70597@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
70598 void arch_pick_mmap_layout(struct mm_struct *mm)
70599 {
70600 mm->mmap_base = TASK_UNMAPPED_BASE;
70601+
70602+#ifdef CONFIG_PAX_RANDMMAP
70603+ if (mm->pax_flags & MF_PAX_RANDMMAP)
70604+ mm->mmap_base += mm->delta_mmap;
70605+#endif
70606+
70607 mm->get_unmapped_area = arch_get_unmapped_area;
70608 mm->unmap_area = arch_unmap_area;
70609 }
70610diff -urNp linux-2.6.32.45/mm/vmalloc.c linux-2.6.32.45/mm/vmalloc.c
70611--- linux-2.6.32.45/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
70612+++ linux-2.6.32.45/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
70613@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70614
70615 pte = pte_offset_kernel(pmd, addr);
70616 do {
70617- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70618- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70619+
70620+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70621+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70622+ BUG_ON(!pte_exec(*pte));
70623+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70624+ continue;
70625+ }
70626+#endif
70627+
70628+ {
70629+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70630+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70631+ }
70632 } while (pte++, addr += PAGE_SIZE, addr != end);
70633 }
70634
70635@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70636 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70637 {
70638 pte_t *pte;
70639+ int ret = -ENOMEM;
70640
70641 /*
70642 * nr is a running index into the array which helps higher level
70643@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
70644 pte = pte_alloc_kernel(pmd, addr);
70645 if (!pte)
70646 return -ENOMEM;
70647+
70648+ pax_open_kernel();
70649 do {
70650 struct page *page = pages[*nr];
70651
70652- if (WARN_ON(!pte_none(*pte)))
70653- return -EBUSY;
70654- if (WARN_ON(!page))
70655- return -ENOMEM;
70656+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70657+ if (!(pgprot_val(prot) & _PAGE_NX))
70658+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
70659+ else
70660+#endif
70661+
70662+ if (WARN_ON(!pte_none(*pte))) {
70663+ ret = -EBUSY;
70664+ goto out;
70665+ }
70666+ if (WARN_ON(!page)) {
70667+ ret = -ENOMEM;
70668+ goto out;
70669+ }
70670 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70671 (*nr)++;
70672 } while (pte++, addr += PAGE_SIZE, addr != end);
70673- return 0;
70674+ ret = 0;
70675+out:
70676+ pax_close_kernel();
70677+ return ret;
70678 }
70679
70680 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70681@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
70682 * and fall back on vmalloc() if that fails. Others
70683 * just put it in the vmalloc space.
70684 */
70685-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70686+#ifdef CONFIG_MODULES
70687+#ifdef MODULES_VADDR
70688 unsigned long addr = (unsigned long)x;
70689 if (addr >= MODULES_VADDR && addr < MODULES_END)
70690 return 1;
70691 #endif
70692+
70693+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70694+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70695+ return 1;
70696+#endif
70697+
70698+#endif
70699+
70700 return is_vmalloc_addr(x);
70701 }
70702
70703@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
70704
70705 if (!pgd_none(*pgd)) {
70706 pud_t *pud = pud_offset(pgd, addr);
70707+#ifdef CONFIG_X86
70708+ if (!pud_large(*pud))
70709+#endif
70710 if (!pud_none(*pud)) {
70711 pmd_t *pmd = pmd_offset(pud, addr);
70712+#ifdef CONFIG_X86
70713+ if (!pmd_large(*pmd))
70714+#endif
70715 if (!pmd_none(*pmd)) {
70716 pte_t *ptep, pte;
70717
70718@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
70719 struct rb_node *tmp;
70720
70721 while (*p) {
70722- struct vmap_area *tmp;
70723+ struct vmap_area *varea;
70724
70725 parent = *p;
70726- tmp = rb_entry(parent, struct vmap_area, rb_node);
70727- if (va->va_start < tmp->va_end)
70728+ varea = rb_entry(parent, struct vmap_area, rb_node);
70729+ if (va->va_start < varea->va_end)
70730 p = &(*p)->rb_left;
70731- else if (va->va_end > tmp->va_start)
70732+ else if (va->va_end > varea->va_start)
70733 p = &(*p)->rb_right;
70734 else
70735 BUG();
70736@@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
70737 struct vm_struct *area;
70738
70739 BUG_ON(in_interrupt());
70740+
70741+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70742+ if (flags & VM_KERNEXEC) {
70743+ if (start != VMALLOC_START || end != VMALLOC_END)
70744+ return NULL;
70745+ start = (unsigned long)MODULES_EXEC_VADDR;
70746+ end = (unsigned long)MODULES_EXEC_END;
70747+ }
70748+#endif
70749+
70750 if (flags & VM_IOREMAP) {
70751 int bit = fls(size);
70752
70753@@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
70754 if (count > totalram_pages)
70755 return NULL;
70756
70757+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70758+ if (!(pgprot_val(prot) & _PAGE_NX))
70759+ flags |= VM_KERNEXEC;
70760+#endif
70761+
70762 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
70763 __builtin_return_address(0));
70764 if (!area)
70765@@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
70766 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
70767 return NULL;
70768
70769+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70770+ if (!(pgprot_val(prot) & _PAGE_NX))
70771+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
70772+ node, gfp_mask, caller);
70773+ else
70774+#endif
70775+
70776 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
70777 VMALLOC_END, node, gfp_mask, caller);
70778
70779@@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
70780 return addr;
70781 }
70782
70783+#undef __vmalloc
70784 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
70785 {
70786 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
70787@@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
70788 * For tight control over page level allocator and protection flags
70789 * use __vmalloc() instead.
70790 */
70791+#undef vmalloc
70792 void *vmalloc(unsigned long size)
70793 {
70794 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70795@@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
70796 * The resulting memory area is zeroed so it can be mapped to userspace
70797 * without leaking data.
70798 */
70799+#undef vmalloc_user
70800 void *vmalloc_user(unsigned long size)
70801 {
70802 struct vm_struct *area;
70803@@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
70804 * For tight control over page level allocator and protection flags
70805 * use __vmalloc() instead.
70806 */
70807+#undef vmalloc_node
70808 void *vmalloc_node(unsigned long size, int node)
70809 {
70810 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70811@@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
70812 * For tight control over page level allocator and protection flags
70813 * use __vmalloc() instead.
70814 */
70815-
70816+#undef vmalloc_exec
70817 void *vmalloc_exec(unsigned long size)
70818 {
70819- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
70820+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
70821 -1, __builtin_return_address(0));
70822 }
70823
70824@@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
70825 * Allocate enough 32bit PA addressable pages to cover @size from the
70826 * page level allocator and map them into contiguous kernel virtual space.
70827 */
70828+#undef vmalloc_32
70829 void *vmalloc_32(unsigned long size)
70830 {
70831 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
70832@@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
70833 * The resulting memory area is 32bit addressable and zeroed so it can be
70834 * mapped to userspace without leaking data.
70835 */
70836+#undef vmalloc_32_user
70837 void *vmalloc_32_user(unsigned long size)
70838 {
70839 struct vm_struct *area;
70840@@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
70841 unsigned long uaddr = vma->vm_start;
70842 unsigned long usize = vma->vm_end - vma->vm_start;
70843
70844+ BUG_ON(vma->vm_mirror);
70845+
70846 if ((PAGE_SIZE-1) & (unsigned long)addr)
70847 return -EINVAL;
70848
70849diff -urNp linux-2.6.32.45/mm/vmstat.c linux-2.6.32.45/mm/vmstat.c
70850--- linux-2.6.32.45/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
70851+++ linux-2.6.32.45/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
70852@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
70853 *
70854 * vm_stat contains the global counters
70855 */
70856-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70857+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70858 EXPORT_SYMBOL(vm_stat);
70859
70860 #ifdef CONFIG_SMP
70861@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
70862 v = p->vm_stat_diff[i];
70863 p->vm_stat_diff[i] = 0;
70864 local_irq_restore(flags);
70865- atomic_long_add(v, &zone->vm_stat[i]);
70866+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
70867 global_diff[i] += v;
70868 #ifdef CONFIG_NUMA
70869 /* 3 seconds idle till flush */
70870@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
70871
70872 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
70873 if (global_diff[i])
70874- atomic_long_add(global_diff[i], &vm_stat[i]);
70875+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
70876 }
70877
70878 #endif
70879@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
70880 start_cpu_timer(cpu);
70881 #endif
70882 #ifdef CONFIG_PROC_FS
70883- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
70884- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
70885- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
70886- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
70887+ {
70888+ mode_t gr_mode = S_IRUGO;
70889+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70890+ gr_mode = S_IRUSR;
70891+#endif
70892+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
70893+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
70894+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70895+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
70896+#else
70897+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
70898+#endif
70899+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
70900+ }
70901 #endif
70902 return 0;
70903 }
70904diff -urNp linux-2.6.32.45/net/8021q/vlan.c linux-2.6.32.45/net/8021q/vlan.c
70905--- linux-2.6.32.45/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
70906+++ linux-2.6.32.45/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
70907@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
70908 err = -EPERM;
70909 if (!capable(CAP_NET_ADMIN))
70910 break;
70911- if ((args.u.name_type >= 0) &&
70912- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
70913+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
70914 struct vlan_net *vn;
70915
70916 vn = net_generic(net, vlan_net_id);
70917diff -urNp linux-2.6.32.45/net/atm/atm_misc.c linux-2.6.32.45/net/atm/atm_misc.c
70918--- linux-2.6.32.45/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
70919+++ linux-2.6.32.45/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
70920@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
70921 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
70922 return 1;
70923 atm_return(vcc,truesize);
70924- atomic_inc(&vcc->stats->rx_drop);
70925+ atomic_inc_unchecked(&vcc->stats->rx_drop);
70926 return 0;
70927 }
70928
70929@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
70930 }
70931 }
70932 atm_return(vcc,guess);
70933- atomic_inc(&vcc->stats->rx_drop);
70934+ atomic_inc_unchecked(&vcc->stats->rx_drop);
70935 return NULL;
70936 }
70937
70938@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
70939
70940 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70941 {
70942-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
70943+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
70944 __SONET_ITEMS
70945 #undef __HANDLE_ITEM
70946 }
70947@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
70948
70949 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70950 {
70951-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
70952+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
70953 __SONET_ITEMS
70954 #undef __HANDLE_ITEM
70955 }
70956diff -urNp linux-2.6.32.45/net/atm/lec.h linux-2.6.32.45/net/atm/lec.h
70957--- linux-2.6.32.45/net/atm/lec.h 2011-03-27 14:31:47.000000000 -0400
70958+++ linux-2.6.32.45/net/atm/lec.h 2011-08-05 20:33:55.000000000 -0400
70959@@ -48,7 +48,7 @@ struct lane2_ops {
70960 const u8 *tlvs, u32 sizeoftlvs);
70961 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
70962 const u8 *tlvs, u32 sizeoftlvs);
70963-};
70964+} __no_const;
70965
70966 /*
70967 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
70968diff -urNp linux-2.6.32.45/net/atm/mpc.c linux-2.6.32.45/net/atm/mpc.c
70969--- linux-2.6.32.45/net/atm/mpc.c 2011-03-27 14:31:47.000000000 -0400
70970+++ linux-2.6.32.45/net/atm/mpc.c 2011-08-05 20:33:55.000000000 -0400
70971@@ -291,8 +291,8 @@ static void start_mpc(struct mpoa_client
70972 printk("mpoa: (%s) start_mpc not starting\n", dev->name);
70973 else {
70974 mpc->old_ops = dev->netdev_ops;
70975- mpc->new_ops = *mpc->old_ops;
70976- mpc->new_ops.ndo_start_xmit = mpc_send_packet;
70977+ memcpy((void *)&mpc->new_ops, mpc->old_ops, sizeof(mpc->new_ops));
70978+ *(void **)&mpc->new_ops.ndo_start_xmit = mpc_send_packet;
70979 dev->netdev_ops = &mpc->new_ops;
70980 }
70981 }
70982diff -urNp linux-2.6.32.45/net/atm/mpoa_caches.c linux-2.6.32.45/net/atm/mpoa_caches.c
70983--- linux-2.6.32.45/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
70984+++ linux-2.6.32.45/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
70985@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
70986 struct timeval now;
70987 struct k_message msg;
70988
70989+ pax_track_stack();
70990+
70991 do_gettimeofday(&now);
70992
70993 write_lock_irq(&client->egress_lock);
70994diff -urNp linux-2.6.32.45/net/atm/proc.c linux-2.6.32.45/net/atm/proc.c
70995--- linux-2.6.32.45/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
70996+++ linux-2.6.32.45/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
70997@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
70998 const struct k_atm_aal_stats *stats)
70999 {
71000 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71001- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
71002- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
71003- atomic_read(&stats->rx_drop));
71004+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71005+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71006+ atomic_read_unchecked(&stats->rx_drop));
71007 }
71008
71009 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71010@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
71011 {
71012 struct sock *sk = sk_atm(vcc);
71013
71014+#ifdef CONFIG_GRKERNSEC_HIDESYM
71015+ seq_printf(seq, "%p ", NULL);
71016+#else
71017 seq_printf(seq, "%p ", vcc);
71018+#endif
71019+
71020 if (!vcc->dev)
71021 seq_printf(seq, "Unassigned ");
71022 else
71023@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
71024 {
71025 if (!vcc->dev)
71026 seq_printf(seq, sizeof(void *) == 4 ?
71027+#ifdef CONFIG_GRKERNSEC_HIDESYM
71028+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
71029+#else
71030 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
71031+#endif
71032 else
71033 seq_printf(seq, "%3d %3d %5d ",
71034 vcc->dev->number, vcc->vpi, vcc->vci);
71035diff -urNp linux-2.6.32.45/net/atm/resources.c linux-2.6.32.45/net/atm/resources.c
71036--- linux-2.6.32.45/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
71037+++ linux-2.6.32.45/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
71038@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
71039 static void copy_aal_stats(struct k_atm_aal_stats *from,
71040 struct atm_aal_stats *to)
71041 {
71042-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71043+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71044 __AAL_STAT_ITEMS
71045 #undef __HANDLE_ITEM
71046 }
71047@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
71048 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71049 struct atm_aal_stats *to)
71050 {
71051-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71052+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71053 __AAL_STAT_ITEMS
71054 #undef __HANDLE_ITEM
71055 }
71056diff -urNp linux-2.6.32.45/net/bluetooth/l2cap.c linux-2.6.32.45/net/bluetooth/l2cap.c
71057--- linux-2.6.32.45/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
71058+++ linux-2.6.32.45/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
71059@@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
71060 err = -ENOTCONN;
71061 break;
71062 }
71063-
71064+ memset(&cinfo, 0, sizeof(cinfo));
71065 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
71066 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
71067
71068@@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
71069
71070 /* Reject if config buffer is too small. */
71071 len = cmd_len - sizeof(*req);
71072- if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71073+ if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71074 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
71075 l2cap_build_conf_rsp(sk, rsp,
71076 L2CAP_CONF_REJECT, flags), rsp);
71077diff -urNp linux-2.6.32.45/net/bluetooth/rfcomm/sock.c linux-2.6.32.45/net/bluetooth/rfcomm/sock.c
71078--- linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
71079+++ linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
71080@@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
71081
71082 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
71083
71084+ memset(&cinfo, 0, sizeof(cinfo));
71085 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
71086 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
71087
71088diff -urNp linux-2.6.32.45/net/bridge/br_private.h linux-2.6.32.45/net/bridge/br_private.h
71089--- linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:35:30.000000000 -0400
71090+++ linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:34:01.000000000 -0400
71091@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event,
71092
71093 #ifdef CONFIG_SYSFS
71094 /* br_sysfs_if.c */
71095-extern struct sysfs_ops brport_sysfs_ops;
71096+extern const struct sysfs_ops brport_sysfs_ops;
71097 extern int br_sysfs_addif(struct net_bridge_port *p);
71098
71099 /* br_sysfs_br.c */
71100diff -urNp linux-2.6.32.45/net/bridge/br_stp_if.c linux-2.6.32.45/net/bridge/br_stp_if.c
71101--- linux-2.6.32.45/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
71102+++ linux-2.6.32.45/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
71103@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
71104 char *envp[] = { NULL };
71105
71106 if (br->stp_enabled == BR_USER_STP) {
71107- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
71108+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
71109 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
71110 br->dev->name, r);
71111
71112diff -urNp linux-2.6.32.45/net/bridge/br_sysfs_if.c linux-2.6.32.45/net/bridge/br_sysfs_if.c
71113--- linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
71114+++ linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
71115@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
71116 return ret;
71117 }
71118
71119-struct sysfs_ops brport_sysfs_ops = {
71120+const struct sysfs_ops brport_sysfs_ops = {
71121 .show = brport_show,
71122 .store = brport_store,
71123 };
71124diff -urNp linux-2.6.32.45/net/bridge/netfilter/ebtables.c linux-2.6.32.45/net/bridge/netfilter/ebtables.c
71125--- linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
71126+++ linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
71127@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
71128 unsigned int entries_size, nentries;
71129 char *entries;
71130
71131+ pax_track_stack();
71132+
71133 if (cmd == EBT_SO_GET_ENTRIES) {
71134 entries_size = t->private->entries_size;
71135 nentries = t->private->nentries;
71136diff -urNp linux-2.6.32.45/net/can/bcm.c linux-2.6.32.45/net/can/bcm.c
71137--- linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
71138+++ linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
71139@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
71140 struct bcm_sock *bo = bcm_sk(sk);
71141 struct bcm_op *op;
71142
71143+#ifdef CONFIG_GRKERNSEC_HIDESYM
71144+ seq_printf(m, ">>> socket %p", NULL);
71145+ seq_printf(m, " / sk %p", NULL);
71146+ seq_printf(m, " / bo %p", NULL);
71147+#else
71148 seq_printf(m, ">>> socket %p", sk->sk_socket);
71149 seq_printf(m, " / sk %p", sk);
71150 seq_printf(m, " / bo %p", bo);
71151+#endif
71152 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
71153 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
71154 seq_printf(m, " <<<\n");
71155diff -urNp linux-2.6.32.45/net/core/dev.c linux-2.6.32.45/net/core/dev.c
71156--- linux-2.6.32.45/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
71157+++ linux-2.6.32.45/net/core/dev.c 2011-08-05 20:33:55.000000000 -0400
71158@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
71159 if (no_module && capable(CAP_NET_ADMIN))
71160 no_module = request_module("netdev-%s", name);
71161 if (no_module && capable(CAP_SYS_MODULE)) {
71162+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71163+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
71164+#else
71165 if (!request_module("%s", name))
71166 pr_err("Loading kernel module for a network device "
71167 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71168 "instead\n", name);
71169+#endif
71170 }
71171 }
71172 EXPORT_SYMBOL(dev_load);
71173@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct
71174
71175 struct dev_gso_cb {
71176 void (*destructor)(struct sk_buff *skb);
71177-};
71178+} __no_const;
71179
71180 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71181
71182@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
71183 }
71184 EXPORT_SYMBOL(netif_rx_ni);
71185
71186-static void net_tx_action(struct softirq_action *h)
71187+static void net_tx_action(void)
71188 {
71189 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71190
71191@@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
71192 EXPORT_SYMBOL(netif_napi_del);
71193
71194
71195-static void net_rx_action(struct softirq_action *h)
71196+static void net_rx_action(void)
71197 {
71198 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
71199 unsigned long time_limit = jiffies + 2;
71200diff -urNp linux-2.6.32.45/net/core/flow.c linux-2.6.32.45/net/core/flow.c
71201--- linux-2.6.32.45/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
71202+++ linux-2.6.32.45/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
71203@@ -35,11 +35,11 @@ struct flow_cache_entry {
71204 atomic_t *object_ref;
71205 };
71206
71207-atomic_t flow_cache_genid = ATOMIC_INIT(0);
71208+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71209
71210 static u32 flow_hash_shift;
71211 #define flow_hash_size (1 << flow_hash_shift)
71212-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
71213+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
71214
71215 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
71216
71217@@ -52,7 +52,7 @@ struct flow_percpu_info {
71218 u32 hash_rnd;
71219 int count;
71220 };
71221-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
71222+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
71223
71224 #define flow_hash_rnd_recalc(cpu) \
71225 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
71226@@ -69,7 +69,7 @@ struct flow_flush_info {
71227 atomic_t cpuleft;
71228 struct completion completion;
71229 };
71230-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
71231+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
71232
71233 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
71234
71235@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
71236 if (fle->family == family &&
71237 fle->dir == dir &&
71238 flow_key_compare(key, &fle->key) == 0) {
71239- if (fle->genid == atomic_read(&flow_cache_genid)) {
71240+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
71241 void *ret = fle->object;
71242
71243 if (ret)
71244@@ -228,7 +228,7 @@ nocache:
71245 err = resolver(net, key, family, dir, &obj, &obj_ref);
71246
71247 if (fle && !err) {
71248- fle->genid = atomic_read(&flow_cache_genid);
71249+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
71250
71251 if (fle->object)
71252 atomic_dec(fle->object_ref);
71253@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
71254
71255 fle = flow_table(cpu)[i];
71256 for (; fle; fle = fle->next) {
71257- unsigned genid = atomic_read(&flow_cache_genid);
71258+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
71259
71260 if (!fle->object || fle->genid == genid)
71261 continue;
71262diff -urNp linux-2.6.32.45/net/core/rtnetlink.c linux-2.6.32.45/net/core/rtnetlink.c
71263--- linux-2.6.32.45/net/core/rtnetlink.c 2011-03-27 14:31:47.000000000 -0400
71264+++ linux-2.6.32.45/net/core/rtnetlink.c 2011-08-05 20:33:55.000000000 -0400
71265@@ -57,7 +57,7 @@ struct rtnl_link
71266 {
71267 rtnl_doit_func doit;
71268 rtnl_dumpit_func dumpit;
71269-};
71270+} __no_const;
71271
71272 static DEFINE_MUTEX(rtnl_mutex);
71273
71274diff -urNp linux-2.6.32.45/net/core/secure_seq.c linux-2.6.32.45/net/core/secure_seq.c
71275--- linux-2.6.32.45/net/core/secure_seq.c 2011-08-16 20:37:25.000000000 -0400
71276+++ linux-2.6.32.45/net/core/secure_seq.c 2011-08-07 19:48:09.000000000 -0400
71277@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be3
71278 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
71279
71280 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
71281- __be16 dport)
71282+ __be16 dport)
71283 {
71284 u32 secret[MD5_MESSAGE_BYTES / 4];
71285 u32 hash[MD5_DIGEST_WORDS];
71286@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __b
71287 secret[i] = net_secret[i];
71288
71289 md5_transform(hash, secret);
71290-
71291 return hash[0];
71292 }
71293 #endif
71294diff -urNp linux-2.6.32.45/net/core/skbuff.c linux-2.6.32.45/net/core/skbuff.c
71295--- linux-2.6.32.45/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
71296+++ linux-2.6.32.45/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
71297@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
71298 struct sk_buff *frag_iter;
71299 struct sock *sk = skb->sk;
71300
71301+ pax_track_stack();
71302+
71303 /*
71304 * __skb_splice_bits() only fails if the output has no room left,
71305 * so no point in going over the frag_list for the error case.
71306diff -urNp linux-2.6.32.45/net/core/sock.c linux-2.6.32.45/net/core/sock.c
71307--- linux-2.6.32.45/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
71308+++ linux-2.6.32.45/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
71309@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
71310 break;
71311
71312 case SO_PEERCRED:
71313+ {
71314+ struct ucred peercred;
71315 if (len > sizeof(sk->sk_peercred))
71316 len = sizeof(sk->sk_peercred);
71317- if (copy_to_user(optval, &sk->sk_peercred, len))
71318+ peercred = sk->sk_peercred;
71319+ if (copy_to_user(optval, &peercred, len))
71320 return -EFAULT;
71321 goto lenout;
71322+ }
71323
71324 case SO_PEERNAME:
71325 {
71326@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
71327 */
71328 smp_wmb();
71329 atomic_set(&sk->sk_refcnt, 1);
71330- atomic_set(&sk->sk_drops, 0);
71331+ atomic_set_unchecked(&sk->sk_drops, 0);
71332 }
71333 EXPORT_SYMBOL(sock_init_data);
71334
71335diff -urNp linux-2.6.32.45/net/decnet/sysctl_net_decnet.c linux-2.6.32.45/net/decnet/sysctl_net_decnet.c
71336--- linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
71337+++ linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
71338@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
71339
71340 if (len > *lenp) len = *lenp;
71341
71342- if (copy_to_user(buffer, addr, len))
71343+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
71344 return -EFAULT;
71345
71346 *lenp = len;
71347@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
71348
71349 if (len > *lenp) len = *lenp;
71350
71351- if (copy_to_user(buffer, devname, len))
71352+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
71353 return -EFAULT;
71354
71355 *lenp = len;
71356diff -urNp linux-2.6.32.45/net/econet/Kconfig linux-2.6.32.45/net/econet/Kconfig
71357--- linux-2.6.32.45/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
71358+++ linux-2.6.32.45/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
71359@@ -4,7 +4,7 @@
71360
71361 config ECONET
71362 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71363- depends on EXPERIMENTAL && INET
71364+ depends on EXPERIMENTAL && INET && BROKEN
71365 ---help---
71366 Econet is a fairly old and slow networking protocol mainly used by
71367 Acorn computers to access file and print servers. It uses native
71368diff -urNp linux-2.6.32.45/net/ieee802154/dgram.c linux-2.6.32.45/net/ieee802154/dgram.c
71369--- linux-2.6.32.45/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
71370+++ linux-2.6.32.45/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
71371@@ -318,7 +318,7 @@ out:
71372 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
71373 {
71374 if (sock_queue_rcv_skb(sk, skb) < 0) {
71375- atomic_inc(&sk->sk_drops);
71376+ atomic_inc_unchecked(&sk->sk_drops);
71377 kfree_skb(skb);
71378 return NET_RX_DROP;
71379 }
71380diff -urNp linux-2.6.32.45/net/ieee802154/raw.c linux-2.6.32.45/net/ieee802154/raw.c
71381--- linux-2.6.32.45/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
71382+++ linux-2.6.32.45/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
71383@@ -206,7 +206,7 @@ out:
71384 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
71385 {
71386 if (sock_queue_rcv_skb(sk, skb) < 0) {
71387- atomic_inc(&sk->sk_drops);
71388+ atomic_inc_unchecked(&sk->sk_drops);
71389 kfree_skb(skb);
71390 return NET_RX_DROP;
71391 }
71392diff -urNp linux-2.6.32.45/net/ipv4/inet_diag.c linux-2.6.32.45/net/ipv4/inet_diag.c
71393--- linux-2.6.32.45/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
71394+++ linux-2.6.32.45/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
71395@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
71396 r->idiag_retrans = 0;
71397
71398 r->id.idiag_if = sk->sk_bound_dev_if;
71399+#ifdef CONFIG_GRKERNSEC_HIDESYM
71400+ r->id.idiag_cookie[0] = 0;
71401+ r->id.idiag_cookie[1] = 0;
71402+#else
71403 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71404 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71405+#endif
71406
71407 r->id.idiag_sport = inet->sport;
71408 r->id.idiag_dport = inet->dport;
71409@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
71410 r->idiag_family = tw->tw_family;
71411 r->idiag_retrans = 0;
71412 r->id.idiag_if = tw->tw_bound_dev_if;
71413+
71414+#ifdef CONFIG_GRKERNSEC_HIDESYM
71415+ r->id.idiag_cookie[0] = 0;
71416+ r->id.idiag_cookie[1] = 0;
71417+#else
71418 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71419 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71420+#endif
71421+
71422 r->id.idiag_sport = tw->tw_sport;
71423 r->id.idiag_dport = tw->tw_dport;
71424 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71425@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
71426 if (sk == NULL)
71427 goto unlock;
71428
71429+#ifndef CONFIG_GRKERNSEC_HIDESYM
71430 err = -ESTALE;
71431 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71432 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71433 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71434 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71435 goto out;
71436+#endif
71437
71438 err = -ENOMEM;
71439 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71440@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
71441 r->idiag_retrans = req->retrans;
71442
71443 r->id.idiag_if = sk->sk_bound_dev_if;
71444+
71445+#ifdef CONFIG_GRKERNSEC_HIDESYM
71446+ r->id.idiag_cookie[0] = 0;
71447+ r->id.idiag_cookie[1] = 0;
71448+#else
71449 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71450 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71451+#endif
71452
71453 tmo = req->expires - jiffies;
71454 if (tmo < 0)
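Under CONFIG_GRKERNSEC_HIDESYM the inet_diag hunks above stop deriving the userspace-visible cookie from the kernel object's address and report zero instead, since the original encoding leaks kernel pointer values. A sketch of that compile-time switch; HIDE_KERNEL_POINTERS, struct diag_id and fill_cookie() are illustrative names, not the real config option or kernel types.

#include <stdint.h>
#include <stdio.h>

#define HIDE_KERNEL_POINTERS 1

struct diag_id { uint32_t cookie[2]; };

static void fill_cookie(struct diag_id *id, const void *obj)
{
#if HIDE_KERNEL_POINTERS
    (void)obj;                  /* never encode the address */
    id->cookie[0] = 0;
    id->cookie[1] = 0;
#else
    uintptr_t addr = (uintptr_t)obj;
    id->cookie[0] = (uint32_t)addr;
    id->cookie[1] = (uint32_t)((addr >> 31) >> 1);  /* high half, as in the original */
#endif
}

int main(void)
{
    int dummy;
    struct diag_id id;

    fill_cookie(&id, &dummy);
    printf("cookie = %08x:%08x\n", id.cookie[0], id.cookie[1]);
    return 0;
}

The matching #ifndef around the -ESTALE cookie comparison is the other half of the change: once the cookie is always zero, validating it against the pointer would reject every lookup.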
71455diff -urNp linux-2.6.32.45/net/ipv4/inet_hashtables.c linux-2.6.32.45/net/ipv4/inet_hashtables.c
71456--- linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71457+++ linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:42:30.000000000 -0400
71458@@ -18,12 +18,15 @@
71459 #include <linux/sched.h>
71460 #include <linux/slab.h>
71461 #include <linux/wait.h>
71462+#include <linux/security.h>
71463
71464 #include <net/inet_connection_sock.h>
71465 #include <net/inet_hashtables.h>
71466 #include <net/secure_seq.h>
71467 #include <net/ip.h>
71468
71469+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
71470+
71471 /*
71472 * Allocate and initialize a new local port bind bucket.
71473 * The bindhash mutex for snum's hash chain must be held here.
71474@@ -491,6 +494,8 @@ ok:
71475 }
71476 spin_unlock(&head->lock);
71477
71478+ gr_update_task_in_ip_table(current, inet_sk(sk));
71479+
71480 if (tw) {
71481 inet_twsk_deschedule(tw, death_row);
71482 inet_twsk_put(tw);
71483diff -urNp linux-2.6.32.45/net/ipv4/inetpeer.c linux-2.6.32.45/net/ipv4/inetpeer.c
71484--- linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-16 20:37:25.000000000 -0400
71485+++ linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-07 19:48:09.000000000 -0400
71486@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 da
71487 struct inet_peer *p, *n;
71488 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
71489
71490+ pax_track_stack();
71491+
71492 /* Look up for the address quickly. */
71493 read_lock_bh(&peer_pool_lock);
71494 p = lookup(daddr, NULL);
71495@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 da
71496 return NULL;
71497 n->v4daddr = daddr;
71498 atomic_set(&n->refcnt, 1);
71499- atomic_set(&n->rid, 0);
71500+ atomic_set_unchecked(&n->rid, 0);
71501 n->ip_id_count = secure_ip_id(daddr);
71502 n->tcp_ts_stamp = 0;
71503
71504diff -urNp linux-2.6.32.45/net/ipv4/ip_fragment.c linux-2.6.32.45/net/ipv4/ip_fragment.c
71505--- linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
71506+++ linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
71507@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
71508 return 0;
71509
71510 start = qp->rid;
71511- end = atomic_inc_return(&peer->rid);
71512+ end = atomic_inc_return_unchecked(&peer->rid);
71513 qp->rid = end;
71514
71515 rc = qp->q.fragments && (end - start) > max;
71516diff -urNp linux-2.6.32.45/net/ipv4/ip_sockglue.c linux-2.6.32.45/net/ipv4/ip_sockglue.c
71517--- linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71518+++ linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71519@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
71520 int val;
71521 int len;
71522
71523+ pax_track_stack();
71524+
71525 if (level != SOL_IP)
71526 return -EOPNOTSUPP;
71527
71528diff -urNp linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c
71529--- linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
71530+++ linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
71531@@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
71532 private = &tmp;
71533 }
71534 #endif
71535+ memset(&info, 0, sizeof(info));
71536 info.valid_hooks = t->valid_hooks;
71537 memcpy(info.hook_entry, private->hook_entry,
71538 sizeof(info.hook_entry));
71539diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c
71540--- linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
71541+++ linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
71542@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
71543 private = &tmp;
71544 }
71545 #endif
71546+ memset(&info, 0, sizeof(info));
71547 info.valid_hooks = t->valid_hooks;
71548 memcpy(info.hook_entry, private->hook_entry,
71549 sizeof(info.hook_entry));
71550diff -urNp linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c
71551--- linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
71552+++ linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
71553@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
71554
71555 *len = 0;
71556
71557- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
71558+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
71559 if (*octets == NULL) {
71560 if (net_ratelimit())
71561 printk("OOM in bsalg (%d)\n", __LINE__);
71562diff -urNp linux-2.6.32.45/net/ipv4/raw.c linux-2.6.32.45/net/ipv4/raw.c
71563--- linux-2.6.32.45/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
71564+++ linux-2.6.32.45/net/ipv4/raw.c 2011-08-14 11:46:51.000000000 -0400
71565@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
71566 /* Charge it to the socket. */
71567
71568 if (sock_queue_rcv_skb(sk, skb) < 0) {
71569- atomic_inc(&sk->sk_drops);
71570+ atomic_inc_unchecked(&sk->sk_drops);
71571 kfree_skb(skb);
71572 return NET_RX_DROP;
71573 }
71574@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
71575 int raw_rcv(struct sock *sk, struct sk_buff *skb)
71576 {
71577 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
71578- atomic_inc(&sk->sk_drops);
71579+ atomic_inc_unchecked(&sk->sk_drops);
71580 kfree_skb(skb);
71581 return NET_RX_DROP;
71582 }
71583@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
71584
71585 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
71586 {
71587+ struct icmp_filter filter;
71588+
71589+ if (optlen < 0)
71590+ return -EINVAL;
71591 if (optlen > sizeof(struct icmp_filter))
71592 optlen = sizeof(struct icmp_filter);
71593- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
71594+ if (copy_from_user(&filter, optval, optlen))
71595 return -EFAULT;
71596+ raw_sk(sk)->filter = filter;
71597+
71598 return 0;
71599 }
71600
71601 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
71602 {
71603 int len, ret = -EFAULT;
71604+ struct icmp_filter filter;
71605
71606 if (get_user(len, optlen))
71607 goto out;
71608@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
71609 if (len > sizeof(struct icmp_filter))
71610 len = sizeof(struct icmp_filter);
71611 ret = -EFAULT;
71612- if (put_user(len, optlen) ||
71613- copy_to_user(optval, &raw_sk(sk)->filter, len))
71614+ filter = raw_sk(sk)->filter;
71615+ if (put_user(len, optlen) || len > sizeof filter ||
71616+ copy_to_user(optval, &filter, len))
71617 goto out;
71618 ret = 0;
71619 out: return ret;
71620@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
71621 sk_wmem_alloc_get(sp),
71622 sk_rmem_alloc_get(sp),
71623 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71624- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
71625+ atomic_read(&sp->sk_refcnt),
71626+#ifdef CONFIG_GRKERNSEC_HIDESYM
71627+ NULL,
71628+#else
71629+ sp,
71630+#endif
71631+ atomic_read_unchecked(&sp->sk_drops));
71632 }
71633
71634 static int raw_seq_show(struct seq_file *seq, void *v)
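The raw_seticmpfilter()/raw_geticmpfilter() hunks above reject a negative option length, clamp it to the size of struct icmp_filter, and move the copies through a stack-local filter before touching the socket's own copy. A userspace sketch of the setsockopt side, with memcpy() standing in for copy_from_user() and all names illustrative:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct icmp_filter_like { unsigned int data; };
struct raw_sock_like    { struct icmp_filter_like filter; };

static int set_icmp_filter(struct raw_sock_like *rs, const void *optval, int optlen)
{
    struct icmp_filter_like filter = { 0 };

    if (optlen < 0)
        return -EINVAL;                      /* a negative length must never reach the copy */
    if ((size_t)optlen > sizeof(filter))
        optlen = sizeof(filter);
    memcpy(&filter, optval, (size_t)optlen); /* kernel code would use copy_from_user() */
    rs->filter = filter;                     /* commit only after the bounded copy */
    return 0;
}

int main(void)
{
    struct raw_sock_like rs = { { 0 } };
    struct icmp_filter_like in = { 0xdeadbeef };
    int rc = set_icmp_filter(&rs, &in, (int)sizeof(in));

    printf("set: %d, filter=%#x\n", rc, rs.filter.data);
    printf("rejected: %d\n", set_icmp_filter(&rs, &in, -1));
    return 0;
}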
71635diff -urNp linux-2.6.32.45/net/ipv4/route.c linux-2.6.32.45/net/ipv4/route.c
71636--- linux-2.6.32.45/net/ipv4/route.c 2011-08-16 20:37:25.000000000 -0400
71637+++ linux-2.6.32.45/net/ipv4/route.c 2011-08-07 19:48:09.000000000 -0400
71638@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be3
71639
71640 static inline int rt_genid(struct net *net)
71641 {
71642- return atomic_read(&net->ipv4.rt_genid);
71643+ return atomic_read_unchecked(&net->ipv4.rt_genid);
71644 }
71645
71646 #ifdef CONFIG_PROC_FS
71647@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct n
71648 unsigned char shuffle;
71649
71650 get_random_bytes(&shuffle, sizeof(shuffle));
71651- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
71652+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
71653 }
71654
71655 /*
71656@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_oper
71657
71658 static __net_init int rt_secret_timer_init(struct net *net)
71659 {
71660- atomic_set(&net->ipv4.rt_genid,
71661+ atomic_set_unchecked(&net->ipv4.rt_genid,
71662 (int) ((num_physpages ^ (num_physpages>>8)) ^
71663 (jiffies ^ (jiffies >> 7))));
71664
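The route.c hunks above switch net->ipv4.rt_genid to the *_unchecked atomic variants: it is a generation counter that is expected to wrap as it is bumped by random amounts, so it is exempted from PaX's reference-counter overflow detection. A sketch of the same idea with C11 atomics (used here as a self-contained stand-in for the kernel atomics), where unsigned wraparound is well defined and intentional:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint rt_genid;          /* generation, not a refcount: wrap is fine */

static unsigned int read_genid(void)
{
    return atomic_load(&rt_genid);
}

static void invalidate_cache(unsigned int shuffle)
{
    /* bump by a non-zero, caller-chosen amount; modular arithmetic is the point */
    atomic_fetch_add(&rt_genid, shuffle + 1u);
}

int main(void)
{
    atomic_store(&rt_genid, 0xffffffffu);  /* force a wrap on the next bump */
    invalidate_cache(41);
    printf("genid after wrap: %u\n", read_genid());
    return 0;
}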
71665diff -urNp linux-2.6.32.45/net/ipv4/tcp.c linux-2.6.32.45/net/ipv4/tcp.c
71666--- linux-2.6.32.45/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
71667+++ linux-2.6.32.45/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
71668@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
71669 int val;
71670 int err = 0;
71671
71672+ pax_track_stack();
71673+
71674 /* This is a string value all the others are int's */
71675 if (optname == TCP_CONGESTION) {
71676 char name[TCP_CA_NAME_MAX];
71677@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
71678 struct tcp_sock *tp = tcp_sk(sk);
71679 int val, len;
71680
71681+ pax_track_stack();
71682+
71683 if (get_user(len, optlen))
71684 return -EFAULT;
71685
71686diff -urNp linux-2.6.32.45/net/ipv4/tcp_ipv4.c linux-2.6.32.45/net/ipv4/tcp_ipv4.c
71687--- linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-16 20:37:25.000000000 -0400
71688+++ linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-07 19:48:09.000000000 -0400
71689@@ -85,6 +85,9 @@
71690 int sysctl_tcp_tw_reuse __read_mostly;
71691 int sysctl_tcp_low_latency __read_mostly;
71692
71693+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71694+extern int grsec_enable_blackhole;
71695+#endif
71696
71697 #ifdef CONFIG_TCP_MD5SIG
71698 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
71699@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
71700 return 0;
71701
71702 reset:
71703+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71704+ if (!grsec_enable_blackhole)
71705+#endif
71706 tcp_v4_send_reset(rsk, skb);
71707 discard:
71708 kfree_skb(skb);
71709@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
71710 TCP_SKB_CB(skb)->sacked = 0;
71711
71712 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
71713- if (!sk)
71714+ if (!sk) {
71715+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71716+ ret = 1;
71717+#endif
71718 goto no_tcp_socket;
71719+ }
71720
71721 process:
71722- if (sk->sk_state == TCP_TIME_WAIT)
71723+ if (sk->sk_state == TCP_TIME_WAIT) {
71724+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71725+ ret = 2;
71726+#endif
71727 goto do_time_wait;
71728+ }
71729
71730 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
71731 goto discard_and_relse;
71732@@ -1651,6 +1665,10 @@ no_tcp_socket:
71733 bad_packet:
71734 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
71735 } else {
71736+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71737+ if (!grsec_enable_blackhole || (ret == 1 &&
71738+ (skb->dev->flags & IFF_LOOPBACK)))
71739+#endif
71740 tcp_v4_send_reset(NULL, skb);
71741 }
71742
71743@@ -2195,14 +2213,14 @@ int tcp_proc_register(struct net *net, s
71744 int rc = 0;
71745 struct proc_dir_entry *p;
71746
71747- afinfo->seq_fops.open = tcp_seq_open;
71748- afinfo->seq_fops.read = seq_read;
71749- afinfo->seq_fops.llseek = seq_lseek;
71750- afinfo->seq_fops.release = seq_release_net;
71751-
71752- afinfo->seq_ops.start = tcp_seq_start;
71753- afinfo->seq_ops.next = tcp_seq_next;
71754- afinfo->seq_ops.stop = tcp_seq_stop;
71755+ *(void **)&afinfo->seq_fops.open = tcp_seq_open;
71756+ *(void **)&afinfo->seq_fops.read = seq_read;
71757+ *(void **)&afinfo->seq_fops.llseek = seq_lseek;
71758+ *(void **)&afinfo->seq_fops.release = seq_release_net;
71759+
71760+ *(void **)&afinfo->seq_ops.start = tcp_seq_start;
71761+ *(void **)&afinfo->seq_ops.next = tcp_seq_next;
71762+ *(void **)&afinfo->seq_ops.stop = tcp_seq_stop;
71763
71764 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
71765 &afinfo->seq_fops, afinfo);
71766@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk
71767 0, /* non standard timer */
71768 0, /* open_requests have no inode */
71769 atomic_read(&sk->sk_refcnt),
71770+#ifdef CONFIG_GRKERNSEC_HIDESYM
71771+ NULL,
71772+#else
71773 req,
71774+#endif
71775 len);
71776 }
71777
71778@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *s
71779 sock_i_uid(sk),
71780 icsk->icsk_probes_out,
71781 sock_i_ino(sk),
71782- atomic_read(&sk->sk_refcnt), sk,
71783+ atomic_read(&sk->sk_refcnt),
71784+#ifdef CONFIG_GRKERNSEC_HIDESYM
71785+ NULL,
71786+#else
71787+ sk,
71788+#endif
71789 jiffies_to_clock_t(icsk->icsk_rto),
71790 jiffies_to_clock_t(icsk->icsk_ack.ato),
71791 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
71792@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct in
71793 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
71794 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
71795 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
71796- atomic_read(&tw->tw_refcnt), tw, len);
71797+ atomic_read(&tw->tw_refcnt),
71798+#ifdef CONFIG_GRKERNSEC_HIDESYM
71799+ NULL,
71800+#else
71801+ tw,
71802+#endif
71803+ len);
71804 }
71805
71806 #define TMPSZ 150
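The tcp_v4_rcv()/tcp_v4_do_rcv() hunks above gate tcp_v4_send_reset() on the blackhole setting: when no socket matched, a policy flag decides between answering with a RST and staying silent, with an exception for loopback so local diagnostics keep working. A small sketch of that shape; blackhole_enabled, struct pkt and send_reset() are illustrative stand-ins, not kernel symbols (the real flag is the sysctl-backed grsec_enable_blackhole).

#include <stdbool.h>
#include <stdio.h>

static int blackhole_enabled = 1;     /* would be a sysctl in the real patch */

struct pkt { bool from_loopback; int dport; };

static void send_reset(const struct pkt *p)
{
    printf("RST -> probe on port %d\n", p->dport);
}

static void handle_unmatched(const struct pkt *p)
{
    /* matched no socket: either advertise the closed port or drop silently */
    if (!blackhole_enabled || p->from_loopback)
        send_reset(p);
    else
        printf("dropped probe on port %d silently\n", p->dport);
}

int main(void)
{
    struct pkt remote = { false, 22222 };
    struct pkt local  = { true,  22222 };

    handle_unmatched(&remote);   /* silent */
    handle_unmatched(&local);    /* loopback still gets the RST */
    return 0;
}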
71807diff -urNp linux-2.6.32.45/net/ipv4/tcp_minisocks.c linux-2.6.32.45/net/ipv4/tcp_minisocks.c
71808--- linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
71809+++ linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
71810@@ -26,6 +26,10 @@
71811 #include <net/inet_common.h>
71812 #include <net/xfrm.h>
71813
71814+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71815+extern int grsec_enable_blackhole;
71816+#endif
71817+
71818 #ifdef CONFIG_SYSCTL
71819 #define SYNC_INIT 0 /* let the user enable it */
71820 #else
71821@@ -672,6 +676,10 @@ listen_overflow:
71822
71823 embryonic_reset:
71824 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
71825+
71826+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71827+ if (!grsec_enable_blackhole)
71828+#endif
71829 if (!(flg & TCP_FLAG_RST))
71830 req->rsk_ops->send_reset(sk, skb);
71831
71832diff -urNp linux-2.6.32.45/net/ipv4/tcp_output.c linux-2.6.32.45/net/ipv4/tcp_output.c
71833--- linux-2.6.32.45/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
71834+++ linux-2.6.32.45/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
71835@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
71836 __u8 *md5_hash_location;
71837 int mss;
71838
71839+ pax_track_stack();
71840+
71841 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
71842 if (skb == NULL)
71843 return NULL;
71844diff -urNp linux-2.6.32.45/net/ipv4/tcp_probe.c linux-2.6.32.45/net/ipv4/tcp_probe.c
71845--- linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
71846+++ linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
71847@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
71848 if (cnt + width >= len)
71849 break;
71850
71851- if (copy_to_user(buf + cnt, tbuf, width))
71852+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
71853 return -EFAULT;
71854 cnt += width;
71855 }
71856diff -urNp linux-2.6.32.45/net/ipv4/tcp_timer.c linux-2.6.32.45/net/ipv4/tcp_timer.c
71857--- linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
71858+++ linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
71859@@ -21,6 +21,10 @@
71860 #include <linux/module.h>
71861 #include <net/tcp.h>
71862
71863+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71864+extern int grsec_lastack_retries;
71865+#endif
71866+
71867 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
71868 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
71869 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
71870@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
71871 }
71872 }
71873
71874+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71875+ if ((sk->sk_state == TCP_LAST_ACK) &&
71876+ (grsec_lastack_retries > 0) &&
71877+ (grsec_lastack_retries < retry_until))
71878+ retry_until = grsec_lastack_retries;
71879+#endif
71880+
71881 if (retransmits_timed_out(sk, retry_until)) {
71882 /* Has it gone just too far? */
71883 tcp_write_err(sk);
71884diff -urNp linux-2.6.32.45/net/ipv4/udp.c linux-2.6.32.45/net/ipv4/udp.c
71885--- linux-2.6.32.45/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
71886+++ linux-2.6.32.45/net/ipv4/udp.c 2011-08-05 20:33:55.000000000 -0400
71887@@ -86,6 +86,7 @@
71888 #include <linux/types.h>
71889 #include <linux/fcntl.h>
71890 #include <linux/module.h>
71891+#include <linux/security.h>
71892 #include <linux/socket.h>
71893 #include <linux/sockios.h>
71894 #include <linux/igmp.h>
71895@@ -106,6 +107,10 @@
71896 #include <net/xfrm.h>
71897 #include "udp_impl.h"
71898
71899+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71900+extern int grsec_enable_blackhole;
71901+#endif
71902+
71903 struct udp_table udp_table;
71904 EXPORT_SYMBOL(udp_table);
71905
71906@@ -371,6 +376,9 @@ found:
71907 return s;
71908 }
71909
71910+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
71911+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
71912+
71913 /*
71914 * This routine is called by the ICMP module when it gets some
71915 * sort of error condition. If err < 0 then the socket should
71916@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
71917 dport = usin->sin_port;
71918 if (dport == 0)
71919 return -EINVAL;
71920+
71921+ err = gr_search_udp_sendmsg(sk, usin);
71922+ if (err)
71923+ return err;
71924 } else {
71925 if (sk->sk_state != TCP_ESTABLISHED)
71926 return -EDESTADDRREQ;
71927+
71928+ err = gr_search_udp_sendmsg(sk, NULL);
71929+ if (err)
71930+ return err;
71931+
71932 daddr = inet->daddr;
71933 dport = inet->dport;
71934 /* Open fast path for connected socket.
71935@@ -945,6 +962,10 @@ try_again:
71936 if (!skb)
71937 goto out;
71938
71939+ err = gr_search_udp_recvmsg(sk, skb);
71940+ if (err)
71941+ goto out_free;
71942+
71943 ulen = skb->len - sizeof(struct udphdr);
71944 copied = len;
71945 if (copied > ulen)
71946@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
71947 if (rc == -ENOMEM) {
71948 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
71949 is_udplite);
71950- atomic_inc(&sk->sk_drops);
71951+ atomic_inc_unchecked(&sk->sk_drops);
71952 }
71953 goto drop;
71954 }
71955@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
71956 goto csum_error;
71957
71958 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
71959+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71960+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
71961+#endif
71962 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
71963
71964 /*
71965@@ -1719,14 +1743,14 @@ int udp_proc_register(struct net *net, s
71966 struct proc_dir_entry *p;
71967 int rc = 0;
71968
71969- afinfo->seq_fops.open = udp_seq_open;
71970- afinfo->seq_fops.read = seq_read;
71971- afinfo->seq_fops.llseek = seq_lseek;
71972- afinfo->seq_fops.release = seq_release_net;
71973-
71974- afinfo->seq_ops.start = udp_seq_start;
71975- afinfo->seq_ops.next = udp_seq_next;
71976- afinfo->seq_ops.stop = udp_seq_stop;
71977+ *(void **)&afinfo->seq_fops.open = udp_seq_open;
71978+ *(void **)&afinfo->seq_fops.read = seq_read;
71979+ *(void **)&afinfo->seq_fops.llseek = seq_lseek;
71980+ *(void **)&afinfo->seq_fops.release = seq_release_net;
71981+
71982+ *(void **)&afinfo->seq_ops.start = udp_seq_start;
71983+ *(void **)&afinfo->seq_ops.next = udp_seq_next;
71984+ *(void **)&afinfo->seq_ops.stop = udp_seq_stop;
71985
71986 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
71987 &afinfo->seq_fops, afinfo);
71988@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
71989 sk_wmem_alloc_get(sp),
71990 sk_rmem_alloc_get(sp),
71991 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71992- atomic_read(&sp->sk_refcnt), sp,
71993- atomic_read(&sp->sk_drops), len);
71994+ atomic_read(&sp->sk_refcnt),
71995+#ifdef CONFIG_GRKERNSEC_HIDESYM
71996+ NULL,
71997+#else
71998+ sp,
71999+#endif
72000+ atomic_read_unchecked(&sp->sk_drops), len);
72001 }
72002
72003 int udp4_seq_show(struct seq_file *seq, void *v)
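The udp_sendmsg()/udp_recvmsg() hunks above insert an early call to the gr_search_udp_* hooks and return their error before any normal work is done. A sketch of that early policy-hook pattern; policy_check_send() is a hypothetical stand-in for gr_search_udp_sendmsg(), whose real implementation consults grsecurity's socket policy.

#include <errno.h>
#include <stdio.h>

struct msg { int dport; int len; };

/* deny-list style check standing in for the policy lookup */
static int policy_check_send(const struct msg *m)
{
    return (m->dport == 7) ? -EACCES : 0;
}

static int udp_send(const struct msg *m)
{
    int err = policy_check_send(m);   /* hook runs before any state is touched */
    if (err)
        return err;                   /* propagate the policy decision as-is */

    printf("sent %d bytes to port %d\n", m->len, m->dport);
    return m->len;
}

int main(void)
{
    struct msg ok = { 53, 48 }, blocked = { 7, 48 };

    printf("ok: %d\n", udp_send(&ok));
    printf("blocked: %d\n", udp_send(&blocked));
    return 0;
}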
72004diff -urNp linux-2.6.32.45/net/ipv6/inet6_connection_sock.c linux-2.6.32.45/net/ipv6/inet6_connection_sock.c
72005--- linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
72006+++ linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
72007@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
72008 #ifdef CONFIG_XFRM
72009 {
72010 struct rt6_info *rt = (struct rt6_info *)dst;
72011- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72012+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72013 }
72014 #endif
72015 }
72016@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
72017 #ifdef CONFIG_XFRM
72018 if (dst) {
72019 struct rt6_info *rt = (struct rt6_info *)dst;
72020- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72021+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72022 sk->sk_dst_cache = NULL;
72023 dst_release(dst);
72024 dst = NULL;
72025diff -urNp linux-2.6.32.45/net/ipv6/inet6_hashtables.c linux-2.6.32.45/net/ipv6/inet6_hashtables.c
72026--- linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-16 20:37:25.000000000 -0400
72027+++ linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-07 19:48:09.000000000 -0400
72028@@ -119,7 +119,7 @@ out:
72029 }
72030 EXPORT_SYMBOL(__inet6_lookup_established);
72031
72032-static int inline compute_score(struct sock *sk, struct net *net,
72033+static inline int compute_score(struct sock *sk, struct net *net,
72034 const unsigned short hnum,
72035 const struct in6_addr *daddr,
72036 const int dif)
72037diff -urNp linux-2.6.32.45/net/ipv6/ipv6_sockglue.c linux-2.6.32.45/net/ipv6/ipv6_sockglue.c
72038--- linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
72039+++ linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
72040@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
72041 int val, valbool;
72042 int retv = -ENOPROTOOPT;
72043
72044+ pax_track_stack();
72045+
72046 if (optval == NULL)
72047 val=0;
72048 else {
72049@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
72050 int len;
72051 int val;
72052
72053+ pax_track_stack();
72054+
72055 if (ip6_mroute_opt(optname))
72056 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72057
72058diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c
72059--- linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
72060+++ linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
72061@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
72062 private = &tmp;
72063 }
72064 #endif
72065+ memset(&info, 0, sizeof(info));
72066 info.valid_hooks = t->valid_hooks;
72067 memcpy(info.hook_entry, private->hook_entry,
72068 sizeof(info.hook_entry));
72069diff -urNp linux-2.6.32.45/net/ipv6/raw.c linux-2.6.32.45/net/ipv6/raw.c
72070--- linux-2.6.32.45/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
72071+++ linux-2.6.32.45/net/ipv6/raw.c 2011-08-14 11:48:20.000000000 -0400
72072@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
72073 {
72074 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
72075 skb_checksum_complete(skb)) {
72076- atomic_inc(&sk->sk_drops);
72077+ atomic_inc_unchecked(&sk->sk_drops);
72078 kfree_skb(skb);
72079 return NET_RX_DROP;
72080 }
72081
72082 /* Charge it to the socket. */
72083 if (sock_queue_rcv_skb(sk,skb)<0) {
72084- atomic_inc(&sk->sk_drops);
72085+ atomic_inc_unchecked(&sk->sk_drops);
72086 kfree_skb(skb);
72087 return NET_RX_DROP;
72088 }
72089@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72090 struct raw6_sock *rp = raw6_sk(sk);
72091
72092 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72093- atomic_inc(&sk->sk_drops);
72094+ atomic_inc_unchecked(&sk->sk_drops);
72095 kfree_skb(skb);
72096 return NET_RX_DROP;
72097 }
72098@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72099
72100 if (inet->hdrincl) {
72101 if (skb_checksum_complete(skb)) {
72102- atomic_inc(&sk->sk_drops);
72103+ atomic_inc_unchecked(&sk->sk_drops);
72104 kfree_skb(skb);
72105 return NET_RX_DROP;
72106 }
72107@@ -518,7 +518,7 @@ csum_copy_err:
72108 as some normal condition.
72109 */
72110 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
72111- atomic_inc(&sk->sk_drops);
72112+ atomic_inc_unchecked(&sk->sk_drops);
72113 goto out;
72114 }
72115
72116@@ -600,7 +600,7 @@ out:
72117 return err;
72118 }
72119
72120-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72121+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72122 struct flowi *fl, struct rt6_info *rt,
72123 unsigned int flags)
72124 {
72125@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
72126 u16 proto;
72127 int err;
72128
72129+ pax_track_stack();
72130+
72131 /* Rough check on arithmetic overflow,
72132 better check is made in ip6_append_data().
72133 */
72134@@ -916,12 +918,17 @@ do_confirm:
72135 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72136 char __user *optval, int optlen)
72137 {
72138+ struct icmp6_filter filter;
72139+
72140 switch (optname) {
72141 case ICMPV6_FILTER:
72142+ if (optlen < 0)
72143+ return -EINVAL;
72144 if (optlen > sizeof(struct icmp6_filter))
72145 optlen = sizeof(struct icmp6_filter);
72146- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72147+ if (copy_from_user(&filter, optval, optlen))
72148 return -EFAULT;
72149+ raw6_sk(sk)->filter = filter;
72150 return 0;
72151 default:
72152 return -ENOPROTOOPT;
72153@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct so
72154 char __user *optval, int __user *optlen)
72155 {
72156 int len;
72157+ struct icmp6_filter filter;
72158
72159 switch (optname) {
72160 case ICMPV6_FILTER:
72161@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
72162 len = sizeof(struct icmp6_filter);
72163 if (put_user(len, optlen))
72164 return -EFAULT;
72165- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72166+ filter = raw6_sk(sk)->filter;
72167+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
72168 return -EFAULT;
72169 return 0;
72170 default:
72171@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
72172 0, 0L, 0,
72173 sock_i_uid(sp), 0,
72174 sock_i_ino(sp),
72175- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72176+ atomic_read(&sp->sk_refcnt),
72177+#ifdef CONFIG_GRKERNSEC_HIDESYM
72178+ NULL,
72179+#else
72180+ sp,
72181+#endif
72182+ atomic_read_unchecked(&sp->sk_drops));
72183 }
72184
72185 static int raw6_seq_show(struct seq_file *seq, void *v)
72186diff -urNp linux-2.6.32.45/net/ipv6/tcp_ipv6.c linux-2.6.32.45/net/ipv6/tcp_ipv6.c
72187--- linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-16 20:37:25.000000000 -0400
72188+++ linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-07 19:48:09.000000000 -0400
72189@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72190 }
72191 #endif
72192
72193+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72194+extern int grsec_enable_blackhole;
72195+#endif
72196+
72197 static void tcp_v6_hash(struct sock *sk)
72198 {
72199 if (sk->sk_state != TCP_CLOSE) {
72200@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72201 return 0;
72202
72203 reset:
72204+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72205+ if (!grsec_enable_blackhole)
72206+#endif
72207 tcp_v6_send_reset(sk, skb);
72208 discard:
72209 if (opt_skb)
72210@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72211 TCP_SKB_CB(skb)->sacked = 0;
72212
72213 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72214- if (!sk)
72215+ if (!sk) {
72216+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72217+ ret = 1;
72218+#endif
72219 goto no_tcp_socket;
72220+ }
72221
72222 process:
72223- if (sk->sk_state == TCP_TIME_WAIT)
72224+ if (sk->sk_state == TCP_TIME_WAIT) {
72225+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72226+ ret = 2;
72227+#endif
72228 goto do_time_wait;
72229+ }
72230
72231 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
72232 goto discard_and_relse;
72233@@ -1701,6 +1716,10 @@ no_tcp_socket:
72234 bad_packet:
72235 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72236 } else {
72237+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72238+ if (!grsec_enable_blackhole || (ret == 1 &&
72239+ (skb->dev->flags & IFF_LOOPBACK)))
72240+#endif
72241 tcp_v6_send_reset(NULL, skb);
72242 }
72243
72244@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file
72245 uid,
72246 0, /* non standard timer */
72247 0, /* open_requests have no inode */
72248- 0, req);
72249+ 0,
72250+#ifdef CONFIG_GRKERNSEC_HIDESYM
72251+ NULL
72252+#else
72253+ req
72254+#endif
72255+ );
72256 }
72257
72258 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72259@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_fil
72260 sock_i_uid(sp),
72261 icsk->icsk_probes_out,
72262 sock_i_ino(sp),
72263- atomic_read(&sp->sk_refcnt), sp,
72264+ atomic_read(&sp->sk_refcnt),
72265+#ifdef CONFIG_GRKERNSEC_HIDESYM
72266+ NULL,
72267+#else
72268+ sp,
72269+#endif
72270 jiffies_to_clock_t(icsk->icsk_rto),
72271 jiffies_to_clock_t(icsk->icsk_ack.ato),
72272 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72273@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct se
72274 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72275 tw->tw_substate, 0, 0,
72276 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72277- atomic_read(&tw->tw_refcnt), tw);
72278+ atomic_read(&tw->tw_refcnt),
72279+#ifdef CONFIG_GRKERNSEC_HIDESYM
72280+ NULL
72281+#else
72282+ tw
72283+#endif
72284+ );
72285 }
72286
72287 static int tcp6_seq_show(struct seq_file *seq, void *v)
72288diff -urNp linux-2.6.32.45/net/ipv6/udp.c linux-2.6.32.45/net/ipv6/udp.c
72289--- linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
72290+++ linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
72291@@ -49,6 +49,10 @@
72292 #include <linux/seq_file.h>
72293 #include "udp_impl.h"
72294
72295+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72296+extern int grsec_enable_blackhole;
72297+#endif
72298+
72299 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72300 {
72301 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72302@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72303 if (rc == -ENOMEM) {
72304 UDP6_INC_STATS_BH(sock_net(sk),
72305 UDP_MIB_RCVBUFERRORS, is_udplite);
72306- atomic_inc(&sk->sk_drops);
72307+ atomic_inc_unchecked(&sk->sk_drops);
72308 }
72309 goto drop;
72310 }
72311@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72312 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72313 proto == IPPROTO_UDPLITE);
72314
72315+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72316+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72317+#endif
72318 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
72319
72320 kfree_skb(skb);
72321@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
72322 0, 0L, 0,
72323 sock_i_uid(sp), 0,
72324 sock_i_ino(sp),
72325- atomic_read(&sp->sk_refcnt), sp,
72326- atomic_read(&sp->sk_drops));
72327+ atomic_read(&sp->sk_refcnt),
72328+#ifdef CONFIG_GRKERNSEC_HIDESYM
72329+ NULL,
72330+#else
72331+ sp,
72332+#endif
72333+ atomic_read_unchecked(&sp->sk_drops));
72334 }
72335
72336 int udp6_seq_show(struct seq_file *seq, void *v)
72337diff -urNp linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c
72338--- linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
72339+++ linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
72340@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
72341 add_wait_queue(&self->open_wait, &wait);
72342
72343 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72344- __FILE__,__LINE__, tty->driver->name, self->open_count );
72345+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72346
72347 /* As far as I can see, we protect open_count - Jean II */
72348 spin_lock_irqsave(&self->spinlock, flags);
72349 if (!tty_hung_up_p(filp)) {
72350 extra_count = 1;
72351- self->open_count--;
72352+ local_dec(&self->open_count);
72353 }
72354 spin_unlock_irqrestore(&self->spinlock, flags);
72355- self->blocked_open++;
72356+ local_inc(&self->blocked_open);
72357
72358 while (1) {
72359 if (tty->termios->c_cflag & CBAUD) {
72360@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
72361 }
72362
72363 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72364- __FILE__,__LINE__, tty->driver->name, self->open_count );
72365+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72366
72367 schedule();
72368 }
72369@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
72370 if (extra_count) {
72371 /* ++ is not atomic, so this should be protected - Jean II */
72372 spin_lock_irqsave(&self->spinlock, flags);
72373- self->open_count++;
72374+ local_inc(&self->open_count);
72375 spin_unlock_irqrestore(&self->spinlock, flags);
72376 }
72377- self->blocked_open--;
72378+ local_dec(&self->blocked_open);
72379
72380 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72381- __FILE__,__LINE__, tty->driver->name, self->open_count);
72382+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72383
72384 if (!retval)
72385 self->flags |= ASYNC_NORMAL_ACTIVE;
72386@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
72387 }
72388 /* ++ is not atomic, so this should be protected - Jean II */
72389 spin_lock_irqsave(&self->spinlock, flags);
72390- self->open_count++;
72391+ local_inc(&self->open_count);
72392
72393 tty->driver_data = self;
72394 self->tty = tty;
72395 spin_unlock_irqrestore(&self->spinlock, flags);
72396
72397 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72398- self->line, self->open_count);
72399+ self->line, local_read(&self->open_count));
72400
72401 /* Not really used by us, but lets do it anyway */
72402 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72403@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
72404 return;
72405 }
72406
72407- if ((tty->count == 1) && (self->open_count != 1)) {
72408+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72409 /*
72410 * Uh, oh. tty->count is 1, which means that the tty
72411 * structure will be freed. state->count should always
72412@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
72413 */
72414 IRDA_DEBUG(0, "%s(), bad serial port count; "
72415 "tty->count is 1, state->count is %d\n", __func__ ,
72416- self->open_count);
72417- self->open_count = 1;
72418+ local_read(&self->open_count));
72419+ local_set(&self->open_count, 1);
72420 }
72421
72422- if (--self->open_count < 0) {
72423+ if (local_dec_return(&self->open_count) < 0) {
72424 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72425- __func__, self->line, self->open_count);
72426- self->open_count = 0;
72427+ __func__, self->line, local_read(&self->open_count));
72428+ local_set(&self->open_count, 0);
72429 }
72430- if (self->open_count) {
72431+ if (local_read(&self->open_count)) {
72432 spin_unlock_irqrestore(&self->spinlock, flags);
72433
72434 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72435@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
72436 tty->closing = 0;
72437 self->tty = NULL;
72438
72439- if (self->blocked_open) {
72440+ if (local_read(&self->blocked_open)) {
72441 if (self->close_delay)
72442 schedule_timeout_interruptible(self->close_delay);
72443 wake_up_interruptible(&self->open_wait);
72444@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
72445 spin_lock_irqsave(&self->spinlock, flags);
72446 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72447 self->tty = NULL;
72448- self->open_count = 0;
72449+ local_set(&self->open_count, 0);
72450 spin_unlock_irqrestore(&self->spinlock, flags);
72451
72452 wake_up_interruptible(&self->open_wait);
72453@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
72454 seq_putc(m, '\n');
72455
72456 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72457- seq_printf(m, "Open count: %d\n", self->open_count);
72458+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72459 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72460 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72461
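The ircomm_tty hunks above replace the plain int open_count/blocked_open counters and their ++/-- updates with local_t and local_inc()/local_dec()/local_read(), so each update is a single read-modify-write primitive instead of a non-atomic increment. A sketch of the same discipline using C11 atomics in place of the kernel's local_t (an assumption made so the example is self-contained):

#include <stdatomic.h>
#include <stdio.h>

struct tty_port_like {
    atomic_int open_count;    /* was: int open_count; updated with ++/-- */
    atomic_int blocked_open;
};

static void port_open(struct tty_port_like *p)
{
    atomic_fetch_add(&p->open_count, 1);          /* local_inc() analogue */
}

static int port_close(struct tty_port_like *p)
{
    /* local_dec_return() analogue: decrement and inspect the new value once */
    int remaining = atomic_fetch_sub(&p->open_count, 1) - 1;
    if (remaining < 0) {
        fprintf(stderr, "bad port count: %d\n", remaining);
        atomic_store(&p->open_count, 0);          /* local_set(..., 0) analogue */
        remaining = 0;
    }
    return remaining;
}

int main(void)
{
    struct tty_port_like port = { 0, 0 };

    port_open(&port);
    port_open(&port);
    printf("after close: %d open\n", port_close(&port));
    printf("after close: %d open\n", port_close(&port));
    return 0;
}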
72462diff -urNp linux-2.6.32.45/net/iucv/af_iucv.c linux-2.6.32.45/net/iucv/af_iucv.c
72463--- linux-2.6.32.45/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
72464+++ linux-2.6.32.45/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
72465@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
72466
72467 write_lock_bh(&iucv_sk_list.lock);
72468
72469- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
72470+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72471 while (__iucv_get_sock_by_name(name)) {
72472 sprintf(name, "%08x",
72473- atomic_inc_return(&iucv_sk_list.autobind_name));
72474+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72475 }
72476
72477 write_unlock_bh(&iucv_sk_list.lock);
72478diff -urNp linux-2.6.32.45/net/key/af_key.c linux-2.6.32.45/net/key/af_key.c
72479--- linux-2.6.32.45/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
72480+++ linux-2.6.32.45/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
72481@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
72482 struct xfrm_migrate m[XFRM_MAX_DEPTH];
72483 struct xfrm_kmaddress k;
72484
72485+ pax_track_stack();
72486+
72487 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
72488 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
72489 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
72490@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
72491 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
72492 else
72493 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
72494+#ifdef CONFIG_GRKERNSEC_HIDESYM
72495+ NULL,
72496+#else
72497 s,
72498+#endif
72499 atomic_read(&s->sk_refcnt),
72500 sk_rmem_alloc_get(s),
72501 sk_wmem_alloc_get(s),
72502diff -urNp linux-2.6.32.45/net/lapb/lapb_iface.c linux-2.6.32.45/net/lapb/lapb_iface.c
72503--- linux-2.6.32.45/net/lapb/lapb_iface.c 2011-03-27 14:31:47.000000000 -0400
72504+++ linux-2.6.32.45/net/lapb/lapb_iface.c 2011-08-05 20:33:55.000000000 -0400
72505@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev
72506 goto out;
72507
72508 lapb->dev = dev;
72509- lapb->callbacks = *callbacks;
72510+ lapb->callbacks = callbacks;
72511
72512 __lapb_insert_cb(lapb);
72513
72514@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device
72515
72516 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
72517 {
72518- if (lapb->callbacks.connect_confirmation)
72519- lapb->callbacks.connect_confirmation(lapb->dev, reason);
72520+ if (lapb->callbacks->connect_confirmation)
72521+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
72522 }
72523
72524 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
72525 {
72526- if (lapb->callbacks.connect_indication)
72527- lapb->callbacks.connect_indication(lapb->dev, reason);
72528+ if (lapb->callbacks->connect_indication)
72529+ lapb->callbacks->connect_indication(lapb->dev, reason);
72530 }
72531
72532 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
72533 {
72534- if (lapb->callbacks.disconnect_confirmation)
72535- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
72536+ if (lapb->callbacks->disconnect_confirmation)
72537+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
72538 }
72539
72540 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
72541 {
72542- if (lapb->callbacks.disconnect_indication)
72543- lapb->callbacks.disconnect_indication(lapb->dev, reason);
72544+ if (lapb->callbacks->disconnect_indication)
72545+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
72546 }
72547
72548 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
72549 {
72550- if (lapb->callbacks.data_indication)
72551- return lapb->callbacks.data_indication(lapb->dev, skb);
72552+ if (lapb->callbacks->data_indication)
72553+ return lapb->callbacks->data_indication(lapb->dev, skb);
72554
72555 kfree_skb(skb);
72556 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
72557@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *l
72558 {
72559 int used = 0;
72560
72561- if (lapb->callbacks.data_transmit) {
72562- lapb->callbacks.data_transmit(lapb->dev, skb);
72563+ if (lapb->callbacks->data_transmit) {
72564+ lapb->callbacks->data_transmit(lapb->dev, skb);
72565 used = 1;
72566 }
72567
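The lapb_iface.c hunks above stop copying the caller's callback table into the control block and keep a pointer to it instead, so the table of function pointers can be declared const, shared by every link, and placed in read-only memory rather than duplicated into a writable per-instance copy. A sketch of that shape; struct link_ops, link_register() and the rest are illustrative names.

#include <stdio.h>

struct link_ops {
    void (*connected)(int reason);
    void (*data)(const char *buf);
};

struct link_cb {
    const struct link_ops *ops;   /* was: struct link_ops ops; (a copy) */
};

static void on_connected(int reason) { printf("connected, reason %d\n", reason); }
static void on_data(const char *buf) { printf("data: %s\n", buf); }

/* the table itself can now be const and shared by every link */
static const struct link_ops my_ops = { on_connected, on_data };

static void link_register(struct link_cb *cb, const struct link_ops *ops)
{
    cb->ops = ops;                /* keep the pointer, don't duplicate the table */
}

static void link_deliver(struct link_cb *cb, const char *buf)
{
    if (cb->ops->data)            /* NULL-checked dispatch, as in the original */
        cb->ops->data(buf);
}

int main(void)
{
    struct link_cb cb;

    link_register(&cb, &my_ops);
    cb.ops->connected(0);
    link_deliver(&cb, "hello");
    return 0;
}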
72568diff -urNp linux-2.6.32.45/net/mac80211/cfg.c linux-2.6.32.45/net/mac80211/cfg.c
72569--- linux-2.6.32.45/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
72570+++ linux-2.6.32.45/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
72571@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
72572 return err;
72573 }
72574
72575-struct cfg80211_ops mac80211_config_ops = {
72576+const struct cfg80211_ops mac80211_config_ops = {
72577 .add_virtual_intf = ieee80211_add_iface,
72578 .del_virtual_intf = ieee80211_del_iface,
72579 .change_virtual_intf = ieee80211_change_iface,
72580diff -urNp linux-2.6.32.45/net/mac80211/cfg.h linux-2.6.32.45/net/mac80211/cfg.h
72581--- linux-2.6.32.45/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
72582+++ linux-2.6.32.45/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
72583@@ -4,6 +4,6 @@
72584 #ifndef __CFG_H
72585 #define __CFG_H
72586
72587-extern struct cfg80211_ops mac80211_config_ops;
72588+extern const struct cfg80211_ops mac80211_config_ops;
72589
72590 #endif /* __CFG_H */
72591diff -urNp linux-2.6.32.45/net/mac80211/debugfs_key.c linux-2.6.32.45/net/mac80211/debugfs_key.c
72592--- linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
72593+++ linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
72594@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
72595 size_t count, loff_t *ppos)
72596 {
72597 struct ieee80211_key *key = file->private_data;
72598- int i, res, bufsize = 2 * key->conf.keylen + 2;
72599+ int i, bufsize = 2 * key->conf.keylen + 2;
72600 char *buf = kmalloc(bufsize, GFP_KERNEL);
72601 char *p = buf;
72602+ ssize_t res;
72603+
72604+ if (buf == NULL)
72605+ return -ENOMEM;
72606
72607 for (i = 0; i < key->conf.keylen; i++)
72608 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
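The debugfs_key.c hunk above adds the missing NULL check after the buffer allocation and widens the byte count to ssize_t. A small sketch of the same hardening with malloc() standing in for kmalloc() and a plain long standing in for ssize_t; names are illustrative.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static long format_key_hex(const unsigned char *key, size_t keylen, char **out)
{
    size_t bufsize = 2 * keylen + 2;       /* two hex digits per byte + "\n\0" */
    char *buf = malloc(bufsize);
    char *p = buf;

    if (buf == NULL)                       /* the check the original lacked */
        return -ENOMEM;

    for (size_t i = 0; i < keylen; i++)
        p += snprintf(p, bufsize - (size_t)(p - buf), "%02x", key[i]);
    *p = '\0';

    *out = buf;
    return (long)(p - buf);                /* a signed, wide return type */
}

int main(void)
{
    const unsigned char key[4] = { 0xde, 0xad, 0xbe, 0xef };
    char *hex;
    long n = format_key_hex(key, sizeof(key), &hex);

    if (n >= 0) {
        printf("%ld bytes: %s\n", n, hex);
        free(hex);
    }
    return 0;
}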
72609diff -urNp linux-2.6.32.45/net/mac80211/debugfs_sta.c linux-2.6.32.45/net/mac80211/debugfs_sta.c
72610--- linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
72611+++ linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
72612@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
72613 int i;
72614 struct sta_info *sta = file->private_data;
72615
72616+ pax_track_stack();
72617+
72618 spin_lock_bh(&sta->lock);
72619 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
72620 sta->ampdu_mlme.dialog_token_allocator + 1);
72621diff -urNp linux-2.6.32.45/net/mac80211/ieee80211_i.h linux-2.6.32.45/net/mac80211/ieee80211_i.h
72622--- linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
72623+++ linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
72624@@ -25,6 +25,7 @@
72625 #include <linux/etherdevice.h>
72626 #include <net/cfg80211.h>
72627 #include <net/mac80211.h>
72628+#include <asm/local.h>
72629 #include "key.h"
72630 #include "sta_info.h"
72631
72632@@ -635,7 +636,7 @@ struct ieee80211_local {
72633 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
72634 spinlock_t queue_stop_reason_lock;
72635
72636- int open_count;
72637+ local_t open_count;
72638 int monitors, cooked_mntrs;
72639 /* number of interfaces with corresponding FIF_ flags */
72640 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
72641diff -urNp linux-2.6.32.45/net/mac80211/iface.c linux-2.6.32.45/net/mac80211/iface.c
72642--- linux-2.6.32.45/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
72643+++ linux-2.6.32.45/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
72644@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
72645 break;
72646 }
72647
72648- if (local->open_count == 0) {
72649+ if (local_read(&local->open_count) == 0) {
72650 res = drv_start(local);
72651 if (res)
72652 goto err_del_bss;
72653@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
72654 * Validate the MAC address for this device.
72655 */
72656 if (!is_valid_ether_addr(dev->dev_addr)) {
72657- if (!local->open_count)
72658+ if (!local_read(&local->open_count))
72659 drv_stop(local);
72660 return -EADDRNOTAVAIL;
72661 }
72662@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
72663
72664 hw_reconf_flags |= __ieee80211_recalc_idle(local);
72665
72666- local->open_count++;
72667+ local_inc(&local->open_count);
72668 if (hw_reconf_flags) {
72669 ieee80211_hw_config(local, hw_reconf_flags);
72670 /*
72671@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
72672 err_del_interface:
72673 drv_remove_interface(local, &conf);
72674 err_stop:
72675- if (!local->open_count)
72676+ if (!local_read(&local->open_count))
72677 drv_stop(local);
72678 err_del_bss:
72679 sdata->bss = NULL;
72680@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
72681 WARN_ON(!list_empty(&sdata->u.ap.vlans));
72682 }
72683
72684- local->open_count--;
72685+ local_dec(&local->open_count);
72686
72687 switch (sdata->vif.type) {
72688 case NL80211_IFTYPE_AP_VLAN:
72689@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
72690
72691 ieee80211_recalc_ps(local, -1);
72692
72693- if (local->open_count == 0) {
72694+ if (local_read(&local->open_count) == 0) {
72695 ieee80211_clear_tx_pending(local);
72696 ieee80211_stop_device(local);
72697
72698diff -urNp linux-2.6.32.45/net/mac80211/main.c linux-2.6.32.45/net/mac80211/main.c
72699--- linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
72700+++ linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
72701@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
72702 local->hw.conf.power_level = power;
72703 }
72704
72705- if (changed && local->open_count) {
72706+ if (changed && local_read(&local->open_count)) {
72707 ret = drv_config(local, changed);
72708 /*
72709 * Goal:
72710diff -urNp linux-2.6.32.45/net/mac80211/mlme.c linux-2.6.32.45/net/mac80211/mlme.c
72711--- linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:35:30.000000000 -0400
72712+++ linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:34:01.000000000 -0400
72713@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
72714 bool have_higher_than_11mbit = false, newsta = false;
72715 u16 ap_ht_cap_flags;
72716
72717+ pax_track_stack();
72718+
72719 /*
72720 * AssocResp and ReassocResp have identical structure, so process both
72721 * of them in this function.
72722diff -urNp linux-2.6.32.45/net/mac80211/pm.c linux-2.6.32.45/net/mac80211/pm.c
72723--- linux-2.6.32.45/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
72724+++ linux-2.6.32.45/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
72725@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
72726 }
72727
72728 /* stop hardware - this must stop RX */
72729- if (local->open_count)
72730+ if (local_read(&local->open_count))
72731 ieee80211_stop_device(local);
72732
72733 local->suspended = true;
72734diff -urNp linux-2.6.32.45/net/mac80211/rate.c linux-2.6.32.45/net/mac80211/rate.c
72735--- linux-2.6.32.45/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
72736+++ linux-2.6.32.45/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
72737@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
72738 struct rate_control_ref *ref, *old;
72739
72740 ASSERT_RTNL();
72741- if (local->open_count)
72742+ if (local_read(&local->open_count))
72743 return -EBUSY;
72744
72745 ref = rate_control_alloc(name, local);
72746diff -urNp linux-2.6.32.45/net/mac80211/tx.c linux-2.6.32.45/net/mac80211/tx.c
72747--- linux-2.6.32.45/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
72748+++ linux-2.6.32.45/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
72749@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
72750 return cpu_to_le16(dur);
72751 }
72752
72753-static int inline is_ieee80211_device(struct ieee80211_local *local,
72754+static inline int is_ieee80211_device(struct ieee80211_local *local,
72755 struct net_device *dev)
72756 {
72757 return local == wdev_priv(dev->ieee80211_ptr);
72758diff -urNp linux-2.6.32.45/net/mac80211/util.c linux-2.6.32.45/net/mac80211/util.c
72759--- linux-2.6.32.45/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
72760+++ linux-2.6.32.45/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
72761@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
72762 local->resuming = true;
72763
72764 /* restart hardware */
72765- if (local->open_count) {
72766+ if (local_read(&local->open_count)) {
72767 /*
72768 * Upon resume hardware can sometimes be goofy due to
72769 * various platform / driver / bus issues, so restarting
72770diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c
72771--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
72772+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
72773@@ -564,7 +564,7 @@ static const struct file_operations ip_v
72774 .open = ip_vs_app_open,
72775 .read = seq_read,
72776 .llseek = seq_lseek,
72777- .release = seq_release,
72778+ .release = seq_release_net,
72779 };
72780 #endif
72781
72782diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c
72783--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
72784+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
72785@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
72786 /* if the connection is not template and is created
72787 * by sync, preserve the activity flag.
72788 */
72789- cp->flags |= atomic_read(&dest->conn_flags) &
72790+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
72791 (~IP_VS_CONN_F_INACTIVE);
72792 else
72793- cp->flags |= atomic_read(&dest->conn_flags);
72794+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
72795 cp->dest = dest;
72796
72797 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
72798@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
72799 atomic_set(&cp->refcnt, 1);
72800
72801 atomic_set(&cp->n_control, 0);
72802- atomic_set(&cp->in_pkts, 0);
72803+ atomic_set_unchecked(&cp->in_pkts, 0);
72804
72805 atomic_inc(&ip_vs_conn_count);
72806 if (flags & IP_VS_CONN_F_NO_CPORT)
72807@@ -871,7 +871,7 @@ static const struct file_operations ip_v
72808 .open = ip_vs_conn_open,
72809 .read = seq_read,
72810 .llseek = seq_lseek,
72811- .release = seq_release,
72812+ .release = seq_release_net,
72813 };
72814
72815 static const char *ip_vs_origin_name(unsigned flags)
72816@@ -934,7 +934,7 @@ static const struct file_operations ip_v
72817 .open = ip_vs_conn_sync_open,
72818 .read = seq_read,
72819 .llseek = seq_lseek,
72820- .release = seq_release,
72821+ .release = seq_release_net,
72822 };
72823
72824 #endif
72825@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
72826
72827 /* Don't drop the entry if its number of incoming packets is not
72828 located in [0, 8] */
72829- i = atomic_read(&cp->in_pkts);
72830+ i = atomic_read_unchecked(&cp->in_pkts);
72831 if (i > 8 || i < 0) return 0;
72832
72833 if (!todrop_rate[i]) return 0;
72834diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c
72835--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
72836+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
72837@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
72838 ret = cp->packet_xmit(skb, cp, pp);
72839 /* do not touch skb anymore */
72840
72841- atomic_inc(&cp->in_pkts);
72842+ atomic_inc_unchecked(&cp->in_pkts);
72843 ip_vs_conn_put(cp);
72844 return ret;
72845 }
72846@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
72847 * Sync connection if it is about to close to
72848 * encourage the standby servers to update the connections timeout
72849 */
72850- pkts = atomic_add_return(1, &cp->in_pkts);
72851+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
72852 if (af == AF_INET &&
72853 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
72854 (((cp->protocol != IPPROTO_TCP ||
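The in_pkts and conn_flags conversions above, like the global_seq one in nfnetlink_log.c further down, follow one pattern: counters and flag words that are not reference counts, and that may legitimately wrap, are moved to atomic_unchecked_t and the *_unchecked accessors so the PAX_REFCOUNT overflow detection introduced elsewhere in this patch does not trigger on them. A usage-level sketch (the _unchecked type and accessors only exist with this patch applied; the names below are illustrative):

#include <asm/atomic.h>

/* A packet counter that is allowed to wrap: kept out of the
 * refcount overflow checks by using the _unchecked variants. */
static atomic_unchecked_t example_in_pkts;

static void example_on_packet(void)
{
	atomic_inc_unchecked(&example_in_pkts);
}

static int example_droppable(void)
{
	int pkts = atomic_read_unchecked(&example_in_pkts);

	/* same [0, 8] window as todrop_entry() above */
	return pkts >= 0 && pkts <= 8;
}

/* A true reference count stays on plain atomic_t so that an
 * overflow (a potential use-after-free) is still caught. */
static atomic_t example_refcnt = ATOMIC_INIT(1);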
72855diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c
72856--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
72857+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
72858@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
72859 ip_vs_rs_hash(dest);
72860 write_unlock_bh(&__ip_vs_rs_lock);
72861 }
72862- atomic_set(&dest->conn_flags, conn_flags);
72863+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
72864
72865 /* bind the service */
72866 if (!dest->svc) {
72867@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
72868 " %-7s %-6d %-10d %-10d\n",
72869 &dest->addr.in6,
72870 ntohs(dest->port),
72871- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72872+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72873 atomic_read(&dest->weight),
72874 atomic_read(&dest->activeconns),
72875 atomic_read(&dest->inactconns));
72876@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
72877 "%-7s %-6d %-10d %-10d\n",
72878 ntohl(dest->addr.ip),
72879 ntohs(dest->port),
72880- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72881+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72882 atomic_read(&dest->weight),
72883 atomic_read(&dest->activeconns),
72884 atomic_read(&dest->inactconns));
72885@@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
72886 .open = ip_vs_info_open,
72887 .read = seq_read,
72888 .llseek = seq_lseek,
72889- .release = seq_release_private,
72890+ .release = seq_release_net,
72891 };
72892
72893 #endif
72894@@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
72895 .open = ip_vs_stats_seq_open,
72896 .read = seq_read,
72897 .llseek = seq_lseek,
72898- .release = single_release,
72899+ .release = single_release_net,
72900 };
72901
72902 #endif
72903@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
72904
72905 entry.addr = dest->addr.ip;
72906 entry.port = dest->port;
72907- entry.conn_flags = atomic_read(&dest->conn_flags);
72908+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
72909 entry.weight = atomic_read(&dest->weight);
72910 entry.u_threshold = dest->u_threshold;
72911 entry.l_threshold = dest->l_threshold;
72912@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
72913 unsigned char arg[128];
72914 int ret = 0;
72915
72916+ pax_track_stack();
72917+
72918 if (!capable(CAP_NET_ADMIN))
72919 return -EPERM;
72920
72921@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
72922 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
72923
72924 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
72925- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72926+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72927 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
72928 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
72929 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
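The pax_track_stack() call added to do_ip_vs_get_ctl() above is the PAX_MEMORY_STACKLEAK pattern applied throughout this patch: functions with a large on-stack buffer (here the 128-byte arg array) record how deep their frame reaches so the used portion of the kernel stack can be sanitized on the way back to userland. A sketch of the pattern (pax_track_stack() is declared and implemented by other parts of this patch, not by mainline):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Provided elsewhere in this patch (PAX_MEMORY_STACKLEAK). */
void pax_track_stack(void);

static int example_ctl_handler(void __user *user, unsigned int len)
{
	unsigned char arg[128];		/* large local buffer */

	pax_track_stack();		/* record this frame's stack depth */

	if (len > sizeof(arg))
		return -EINVAL;
	if (copy_from_user(arg, user, len))
		return -EFAULT;

	/* ... act on the copied argument block ... */
	return 0;
}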
72930diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c
72931--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
72932+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
72933@@ -438,7 +438,7 @@ static void ip_vs_process_message(const
72934
72935 if (opt)
72936 memcpy(&cp->in_seq, opt, sizeof(*opt));
72937- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72938+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72939 cp->state = state;
72940 cp->old_state = cp->state;
72941 /*
72942diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c
72943--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
72944+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
72945@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
72946 else
72947 rc = NF_ACCEPT;
72948 /* do not touch skb anymore */
72949- atomic_inc(&cp->in_pkts);
72950+ atomic_inc_unchecked(&cp->in_pkts);
72951 goto out;
72952 }
72953
72954@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
72955 else
72956 rc = NF_ACCEPT;
72957 /* do not touch skb anymore */
72958- atomic_inc(&cp->in_pkts);
72959+ atomic_inc_unchecked(&cp->in_pkts);
72960 goto out;
72961 }
72962
72963diff -urNp linux-2.6.32.45/net/netfilter/Kconfig linux-2.6.32.45/net/netfilter/Kconfig
72964--- linux-2.6.32.45/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
72965+++ linux-2.6.32.45/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
72966@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
72967
72968 To compile it as a module, choose M here. If unsure, say N.
72969
72970+config NETFILTER_XT_MATCH_GRADM
72971+ tristate '"gradm" match support'
72972+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
72973+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
72974+ ---help---
72975+	  The gradm match allows matching on whether grsecurity RBAC is enabled.
72976+	  It is useful when iptables rules are applied early during boot to
72977+ prevent connections to the machine (except from a trusted host)
72978+ while the RBAC system is disabled.
72979+
72980 config NETFILTER_XT_MATCH_HASHLIMIT
72981 tristate '"hashlimit" match support'
72982 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
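To make the help text above concrete: the intended use is a default-deny ruleset loaded early in boot that only lets a trusted host in until the RBAC policy has been enabled with gradm. The kernel side added by this patch only exposes an enabled/disabled test; the option syntax below comes from the separate libxt_gradm userspace extension shipped with gradm and is an assumption here (check that extension's help output for the exact spelling):

# Early in boot, before "gradm -E" has loaded the RBAC policy:
iptables -A INPUT -s 203.0.113.10 -j ACCEPT      # trusted admin host
iptables -A INPUT -m gradm --disabled -j DROP    # everyone else, while RBAC is off

Once the policy is enabled, the second rule stops matching and ordinary filtering applies.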
72983diff -urNp linux-2.6.32.45/net/netfilter/Makefile linux-2.6.32.45/net/netfilter/Makefile
72984--- linux-2.6.32.45/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
72985+++ linux-2.6.32.45/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
72986@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
72987 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
72988 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
72989 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
72990+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
72991 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
72992 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
72993 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
72994diff -urNp linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c
72995--- linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
72996+++ linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
72997@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
72998 static int
72999 ctnetlink_parse_tuple(const struct nlattr * const cda[],
73000 struct nf_conntrack_tuple *tuple,
73001- enum ctattr_tuple type, u_int8_t l3num)
73002+ enum ctattr_type type, u_int8_t l3num)
73003 {
73004 struct nlattr *tb[CTA_TUPLE_MAX+1];
73005 int err;
73006diff -urNp linux-2.6.32.45/net/netfilter/nfnetlink_log.c linux-2.6.32.45/net/netfilter/nfnetlink_log.c
73007--- linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
73008+++ linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
73009@@ -68,7 +68,7 @@ struct nfulnl_instance {
73010 };
73011
73012 static DEFINE_RWLOCK(instances_lock);
73013-static atomic_t global_seq;
73014+static atomic_unchecked_t global_seq;
73015
73016 #define INSTANCE_BUCKETS 16
73017 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73018@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
73019 /* global sequence number */
73020 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73021 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73022- htonl(atomic_inc_return(&global_seq)));
73023+ htonl(atomic_inc_return_unchecked(&global_seq)));
73024
73025 if (data_len) {
73026 struct nlattr *nla;
73027diff -urNp linux-2.6.32.45/net/netfilter/xt_gradm.c linux-2.6.32.45/net/netfilter/xt_gradm.c
73028--- linux-2.6.32.45/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73029+++ linux-2.6.32.45/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
73030@@ -0,0 +1,51 @@
73031+/*
73032+ * gradm match for netfilter
73033