test/grsecurity-2.2.2-2.6.32.45-201108241901.patch
1diff -urNp linux-2.6.32.45/arch/alpha/include/asm/elf.h linux-2.6.32.45/arch/alpha/include/asm/elf.h
2--- linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3+++ linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
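
The ASLR constants added above set how many random bits PaX mixes into the mmap and stack bases on alpha: 14 bits for 32-bit personalities, 28 (mmap) and 19 (stack) bits otherwise. Assuming the generic PaX code applies the delta in page-sized units (PAGE_SHIFT is 13 on alpha, i.e. 8 KB pages), that gives roughly a 2 TB mmap and 4 GB stack randomisation range for 64-bit tasks. A back-of-the-envelope check, illustrative only and not part of the patch:

/* Rough randomisation ranges implied by PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN
 * above, assuming the random delta is shifted left by PAGE_SHIFT (13 on
 * alpha).  Illustrative arithmetic only. */
#include <stdio.h>

int main(void)
{
    const unsigned page_shift = 13;                   /* alpha: 8 KB pages  */
    const unsigned mmap_bits  = 28, stack_bits = 19;  /* 64-bit personality */

    printf("mmap randomisation range:  %llu GB\n",
           (1ULL << (mmap_bits + page_shift)) >> 30);   /* 2048 GB */
    printf("stack randomisation range: %llu MB\n",
           (1ULL << (stack_bits + page_shift)) >> 20);  /* 4096 MB */
    return 0;
}
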
18diff -urNp linux-2.6.32.45/arch/alpha/include/asm/pgtable.h linux-2.6.32.45/arch/alpha/include/asm/pgtable.h
19--- linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20+++ linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
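
The pgtable.h hunk above only defines non-executable variants of the shared/copy/read-only protections (by setting _PAGE_FOE, the fault-on-execute bit); the enforcement is the PAGEEXEC fault handling added to arch/alpha/mm/fault.c further down. As a user-space illustration of what those protections mean in practice (a hedged sketch, not part of the patch): on a PAGEEXEC-enforcing kernel, jumping into an anonymous mapping created without PROT_EXEC is expected to fault rather than execute.

/* Hypothetical probe: execute from a PROT_READ|PROT_WRITE mapping.  Under the
 * PAGEEXEC protections above, the indirect call should take an instruction
 * fetch fault (and the task be killed by the fault handler added below); on a
 * non-enforcing kernel it may still crash, but only because the bytes are not
 * a valid routine. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    unsigned char code[16] = { 0 };   /* arbitrary bytes, not a real function */
    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    memcpy(p, code, sizeof(code));
    ((void (*)(void))p)();            /* expected: fatal signal under PAGEEXEC */
    puts("data page executed: PAGEEXEC not enforced");
    return 0;
}
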
39diff -urNp linux-2.6.32.45/arch/alpha/kernel/module.c linux-2.6.32.45/arch/alpha/kernel/module.c
40--- linux-2.6.32.45/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41+++ linux-2.6.32.45/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-2.6.32.45/arch/alpha/kernel/osf_sys.c linux-2.6.32.45/arch/alpha/kernel/osf_sys.c
52--- linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-08-09 18:35:28.000000000 -0400
53+++ linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
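
Both osf_sys.c hunks above swap the open-coded `!vma || addr + len <= vma->vm_start` test for check_heap_stack_gap(), a helper this patch adds to the generic mm code; the second hunk also starts the fallback search at the (randomised) mm->mmap_base instead of the fixed TASK_UNMAPPED_BASE. Conceptually, the helper rejects candidate ranges that would end right under a downward-growing stack VMA, keeping a guard gap. A simplified sketch follows; the real helper lives elsewhere in the patch and uses a sysctl-tunable gap size, so the 64 KB constant here is purely illustrative.

/* Simplified sketch of the gap check, not the patch's actual implementation. */
#include <linux/mm.h>

static int sketch_check_heap_stack_gap(const struct vm_area_struct *vma,
                                       unsigned long addr, unsigned long len)
{
    if (!vma)                                /* nothing above the candidate  */
        return 1;
    if (addr + len > vma->vm_start)          /* overlaps the next VMA        */
        return 0;
    if (vma->vm_flags & VM_GROWSDOWN)        /* keep a gap below a stack VMA */
        return addr + len + (64UL << 10) <= vma->vm_start;
    return 1;
}
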
85diff -urNp linux-2.6.32.45/arch/alpha/mm/fault.c linux-2.6.32.45/arch/alpha/mm/fault.c
86--- linux-2.6.32.45/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87+++ linux-2.6.32.45/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
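
The PLT-emulation code above repeatedly turns the 16-bit (or, for branches, 21-bit) displacement field of an instruction into a sign-extended 64-bit offset with the pattern `(x ^ sign_bit) + sign_bit` after first forcing the upper bits to ones. That is a variant of the standard branch-free sign-extension idiom `(x ^ m) - m`, where m is the sign bit of the field width. A standalone demonstration of the idiom, illustrative and not part of the patch:

/* Sign-extend the low 16 bits of an instruction word, as the PLT emulation
 * above does for ldah/lda displacements (the patch uses the equivalent
 * OR-the-high-bits-then-"^ +" form).  Illustrative only. */
#include <stdio.h>
#include <stdint.h>

static int64_t sext16(uint32_t insn)
{
    uint64_t x = insn & 0xFFFFu;             /* 16-bit displacement field */
    return (int64_t)((x ^ 0x8000u) - 0x8000u);
}

int main(void)
{
    printf("%lld\n", (long long)sext16(0x277B0001u));  /* disp 0x0001 ->  1 */
    printf("%lld\n", (long long)sext16(0xA77BFFFEu));  /* disp 0xFFFE -> -2 */
    return 0;
}
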
244diff -urNp linux-2.6.32.45/arch/arm/include/asm/elf.h linux-2.6.32.45/arch/arm/include/asm/elf.h
245--- linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246+++ linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263diff -urNp linux-2.6.32.45/arch/arm/include/asm/kmap_types.h linux-2.6.32.45/arch/arm/include/asm/kmap_types.h
264--- linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265+++ linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266@@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270+ KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274diff -urNp linux-2.6.32.45/arch/arm/include/asm/uaccess.h linux-2.6.32.45/arch/arm/include/asm/uaccess.h
275--- linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276+++ linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
277@@ -22,6 +22,8 @@
278 #define VERIFY_READ 0
279 #define VERIFY_WRITE 1
280
281+extern void check_object_size(const void *ptr, unsigned long n, bool to);
282+
283 /*
284 * The exception table consists of pairs of addresses: the first is the
285 * address of an instruction that is allowed to fault, and the second is
286@@ -387,8 +389,23 @@ do { \
287
288
289 #ifdef CONFIG_MMU
290-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
291-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
292+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
293+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
294+
295+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
296+{
297+ if (!__builtin_constant_p(n))
298+ check_object_size(to, n, false);
299+ return ___copy_from_user(to, from, n);
300+}
301+
302+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
303+{
304+ if (!__builtin_constant_p(n))
305+ check_object_size(from, n, true);
306+ return ___copy_to_user(to, from, n);
307+}
308+
309 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
310 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
311 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
312@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
313
314 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
315 {
316+ if ((long)n < 0)
317+ return n;
318+
319 if (access_ok(VERIFY_READ, from, n))
320 n = __copy_from_user(to, from, n);
321 else /* security hole - plug it */
322@@ -412,6 +432,9 @@ static inline unsigned long __must_check
323
324 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
325 {
326+ if ((long)n < 0)
327+ return n;
328+
329 if (access_ok(VERIFY_WRITE, to, n))
330 n = __copy_to_user(to, from, n);
331 return n;
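
The uaccess.h changes above add two guards around the renamed ___copy_{from,to}_user primitives: a check_object_size() call, which is the slab/stack object-bounds check this patch wires up elsewhere, and an early `(long)n < 0` test in copy_from_user()/copy_to_user() so that a negative length, silently converted to a huge unsigned size, is reported back as "n bytes not copied" instead of being handed to the copy routine. A user-space analogue of the length guard, illustrative only and not part of the patch:

/* Analogue of the "(long)n < 0" guard: a caller's sign bug turns into a huge
 * size_t, which the guard refuses before any copying happens. */
#include <stdio.h>
#include <string.h>

static size_t guarded_copy(void *dst, const void *src, size_t n)
{
    if ((long)n < 0)          /* e.g. n == (size_t)-4 == 0xFFFF...FFFC */
        return n;             /* "n bytes not copied", as in the patch */
    memcpy(dst, src, n);
    return 0;
}

int main(void)
{
    char dst[16], src[16] = "hello";
    int broken_len = -4;      /* typical sign error in a caller */

    size_t left = guarded_copy(dst, src, (size_t)broken_len);
    printf("refused: %zu bytes left uncopied\n", left);
    return 0;
}
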
332diff -urNp linux-2.6.32.45/arch/arm/kernel/armksyms.c linux-2.6.32.45/arch/arm/kernel/armksyms.c
333--- linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
334+++ linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
335@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
336 #ifdef CONFIG_MMU
337 EXPORT_SYMBOL(copy_page);
338
339-EXPORT_SYMBOL(__copy_from_user);
340-EXPORT_SYMBOL(__copy_to_user);
341+EXPORT_SYMBOL(___copy_from_user);
342+EXPORT_SYMBOL(___copy_to_user);
343 EXPORT_SYMBOL(__clear_user);
344
345 EXPORT_SYMBOL(__get_user_1);
346diff -urNp linux-2.6.32.45/arch/arm/kernel/kgdb.c linux-2.6.32.45/arch/arm/kernel/kgdb.c
347--- linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
348+++ linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
349@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
350 * and we handle the normal undef case within the do_undefinstr
351 * handler.
352 */
353-struct kgdb_arch arch_kgdb_ops = {
354+const struct kgdb_arch arch_kgdb_ops = {
355 #ifndef __ARMEB__
356 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
357 #else /* ! __ARMEB__ */
358diff -urNp linux-2.6.32.45/arch/arm/kernel/traps.c linux-2.6.32.45/arch/arm/kernel/traps.c
359--- linux-2.6.32.45/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
360+++ linux-2.6.32.45/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
361@@ -247,6 +247,8 @@ static void __die(const char *str, int e
362
363 DEFINE_SPINLOCK(die_lock);
364
365+extern void gr_handle_kernel_exploit(void);
366+
367 /*
368 * This function is protected against re-entrancy.
369 */
370@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
371 if (panic_on_oops)
372 panic("Fatal exception");
373
374+ gr_handle_kernel_exploit();
375+
376 do_exit(SIGSEGV);
377 }
378
379diff -urNp linux-2.6.32.45/arch/arm/lib/copy_from_user.S linux-2.6.32.45/arch/arm/lib/copy_from_user.S
380--- linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
381+++ linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
382@@ -16,7 +16,7 @@
383 /*
384 * Prototype:
385 *
386- * size_t __copy_from_user(void *to, const void *from, size_t n)
387+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
388 *
389 * Purpose:
390 *
391@@ -84,11 +84,11 @@
392
393 .text
394
395-ENTRY(__copy_from_user)
396+ENTRY(___copy_from_user)
397
398 #include "copy_template.S"
399
400-ENDPROC(__copy_from_user)
401+ENDPROC(___copy_from_user)
402
403 .section .fixup,"ax"
404 .align 0
405diff -urNp linux-2.6.32.45/arch/arm/lib/copy_to_user.S linux-2.6.32.45/arch/arm/lib/copy_to_user.S
406--- linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
407+++ linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
408@@ -16,7 +16,7 @@
409 /*
410 * Prototype:
411 *
412- * size_t __copy_to_user(void *to, const void *from, size_t n)
413+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
414 *
415 * Purpose:
416 *
417@@ -88,11 +88,11 @@
418 .text
419
420 ENTRY(__copy_to_user_std)
421-WEAK(__copy_to_user)
422+WEAK(___copy_to_user)
423
424 #include "copy_template.S"
425
426-ENDPROC(__copy_to_user)
427+ENDPROC(___copy_to_user)
428
429 .section .fixup,"ax"
430 .align 0
431diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess.S linux-2.6.32.45/arch/arm/lib/uaccess.S
432--- linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
433+++ linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
434@@ -19,7 +19,7 @@
435
436 #define PAGE_SHIFT 12
437
438-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
439+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
440 * Purpose : copy a block to user memory from kernel memory
441 * Params : to - user memory
442 * : from - kernel memory
443@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
444 sub r2, r2, ip
445 b .Lc2u_dest_aligned
446
447-ENTRY(__copy_to_user)
448+ENTRY(___copy_to_user)
449 stmfd sp!, {r2, r4 - r7, lr}
450 cmp r2, #4
451 blt .Lc2u_not_enough
452@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
453 ldrgtb r3, [r1], #0
454 USER( strgtbt r3, [r0], #1) @ May fault
455 b .Lc2u_finished
456-ENDPROC(__copy_to_user)
457+ENDPROC(___copy_to_user)
458
459 .section .fixup,"ax"
460 .align 0
461 9001: ldmfd sp!, {r0, r4 - r7, pc}
462 .previous
463
464-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
465+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
466 * Purpose : copy a block from user memory to kernel memory
467 * Params : to - kernel memory
468 * : from - user memory
469@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
470 sub r2, r2, ip
471 b .Lcfu_dest_aligned
472
473-ENTRY(__copy_from_user)
474+ENTRY(___copy_from_user)
475 stmfd sp!, {r0, r2, r4 - r7, lr}
476 cmp r2, #4
477 blt .Lcfu_not_enough
478@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
479 USER( ldrgtbt r3, [r1], #1) @ May fault
480 strgtb r3, [r0], #1
481 b .Lcfu_finished
482-ENDPROC(__copy_from_user)
483+ENDPROC(___copy_from_user)
484
485 .section .fixup,"ax"
486 .align 0
487diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c
488--- linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
489+++ linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
490@@ -97,7 +97,7 @@ out:
491 }
492
493 unsigned long
494-__copy_to_user(void __user *to, const void *from, unsigned long n)
495+___copy_to_user(void __user *to, const void *from, unsigned long n)
496 {
497 /*
498 * This test is stubbed out of the main function above to keep
499diff -urNp linux-2.6.32.45/arch/arm/mach-at91/pm.c linux-2.6.32.45/arch/arm/mach-at91/pm.c
500--- linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
501+++ linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
502@@ -348,7 +348,7 @@ static void at91_pm_end(void)
503 }
504
505
506-static struct platform_suspend_ops at91_pm_ops ={
507+static const struct platform_suspend_ops at91_pm_ops ={
508 .valid = at91_pm_valid_state,
509 .begin = at91_pm_begin,
510 .enter = at91_pm_enter,
511diff -urNp linux-2.6.32.45/arch/arm/mach-omap1/pm.c linux-2.6.32.45/arch/arm/mach-omap1/pm.c
512--- linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
513+++ linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
514@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
515
516
517
518-static struct platform_suspend_ops omap_pm_ops ={
519+static const struct platform_suspend_ops omap_pm_ops ={
520 .prepare = omap_pm_prepare,
521 .enter = omap_pm_enter,
522 .finish = omap_pm_finish,
523diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c
524--- linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
525+++ linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
526@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
527 enable_hlt();
528 }
529
530-static struct platform_suspend_ops omap_pm_ops = {
531+static const struct platform_suspend_ops omap_pm_ops = {
532 .prepare = omap2_pm_prepare,
533 .enter = omap2_pm_enter,
534 .finish = omap2_pm_finish,
535diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c
536--- linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
537+++ linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
538@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
539 return;
540 }
541
542-static struct platform_suspend_ops omap_pm_ops = {
543+static const struct platform_suspend_ops omap_pm_ops = {
544 .begin = omap3_pm_begin,
545 .end = omap3_pm_end,
546 .prepare = omap3_pm_prepare,
547diff -urNp linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c
548--- linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
549+++ linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
550@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
551 (state == PM_SUSPEND_MEM);
552 }
553
554-static struct platform_suspend_ops pnx4008_pm_ops = {
555+static const struct platform_suspend_ops pnx4008_pm_ops = {
556 .enter = pnx4008_pm_enter,
557 .valid = pnx4008_pm_valid,
558 };
559diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/pm.c linux-2.6.32.45/arch/arm/mach-pxa/pm.c
560--- linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
561+++ linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
562@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
563 pxa_cpu_pm_fns->finish();
564 }
565
566-static struct platform_suspend_ops pxa_pm_ops = {
567+static const struct platform_suspend_ops pxa_pm_ops = {
568 .valid = pxa_pm_valid,
569 .enter = pxa_pm_enter,
570 .prepare = pxa_pm_prepare,
571diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c
572--- linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
573+++ linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
574@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
575 }
576
577 #ifdef CONFIG_PM
578-static struct platform_suspend_ops sharpsl_pm_ops = {
579+static const struct platform_suspend_ops sharpsl_pm_ops = {
580 .prepare = pxa_pm_prepare,
581 .finish = pxa_pm_finish,
582 .enter = corgi_pxa_pm_enter,
583diff -urNp linux-2.6.32.45/arch/arm/mach-sa1100/pm.c linux-2.6.32.45/arch/arm/mach-sa1100/pm.c
584--- linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
585+++ linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
586@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
587 return virt_to_phys(sp);
588 }
589
590-static struct platform_suspend_ops sa11x0_pm_ops = {
591+static const struct platform_suspend_ops sa11x0_pm_ops = {
592 .enter = sa11x0_pm_enter,
593 .valid = suspend_valid_only_mem,
594 };
595diff -urNp linux-2.6.32.45/arch/arm/mm/fault.c linux-2.6.32.45/arch/arm/mm/fault.c
596--- linux-2.6.32.45/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
597+++ linux-2.6.32.45/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
598@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
599 }
600 #endif
601
602+#ifdef CONFIG_PAX_PAGEEXEC
603+ if (fsr & FSR_LNX_PF) {
604+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
605+ do_group_exit(SIGKILL);
606+ }
607+#endif
608+
609 tsk->thread.address = addr;
610 tsk->thread.error_code = fsr;
611 tsk->thread.trap_no = 14;
612@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
613 }
614 #endif /* CONFIG_MMU */
615
616+#ifdef CONFIG_PAX_PAGEEXEC
617+void pax_report_insns(void *pc, void *sp)
618+{
619+ long i;
620+
621+ printk(KERN_ERR "PAX: bytes at PC: ");
622+ for (i = 0; i < 20; i++) {
623+ unsigned char c;
624+ if (get_user(c, (__force unsigned char __user *)pc+i))
625+ printk(KERN_CONT "?? ");
626+ else
627+ printk(KERN_CONT "%02x ", c);
628+ }
629+ printk("\n");
630+
631+ printk(KERN_ERR "PAX: bytes at SP-4: ");
632+ for (i = -1; i < 20; i++) {
633+ unsigned long c;
634+ if (get_user(c, (__force unsigned long __user *)sp+i))
635+ printk(KERN_CONT "???????? ");
636+ else
637+ printk(KERN_CONT "%08lx ", c);
638+ }
639+ printk("\n");
640+}
641+#endif
642+
643 /*
644 * First Level Translation Fault Handler
645 *
646diff -urNp linux-2.6.32.45/arch/arm/mm/mmap.c linux-2.6.32.45/arch/arm/mm/mmap.c
647--- linux-2.6.32.45/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
648+++ linux-2.6.32.45/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
649@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
650 if (len > TASK_SIZE)
651 return -ENOMEM;
652
653+#ifdef CONFIG_PAX_RANDMMAP
654+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
655+#endif
656+
657 if (addr) {
658 if (do_align)
659 addr = COLOUR_ALIGN(addr, pgoff);
660@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
661 addr = PAGE_ALIGN(addr);
662
663 vma = find_vma(mm, addr);
664- if (TASK_SIZE - len >= addr &&
665- (!vma || addr + len <= vma->vm_start))
666+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
667 return addr;
668 }
669 if (len > mm->cached_hole_size) {
670- start_addr = addr = mm->free_area_cache;
671+ start_addr = addr = mm->free_area_cache;
672 } else {
673- start_addr = addr = TASK_UNMAPPED_BASE;
674- mm->cached_hole_size = 0;
675+ start_addr = addr = mm->mmap_base;
676+ mm->cached_hole_size = 0;
677 }
678
679 full_search:
680@@ -94,14 +97,14 @@ full_search:
681 * Start a new search - just in case we missed
682 * some holes.
683 */
684- if (start_addr != TASK_UNMAPPED_BASE) {
685- start_addr = addr = TASK_UNMAPPED_BASE;
686+ if (start_addr != mm->mmap_base) {
687+ start_addr = addr = mm->mmap_base;
688 mm->cached_hole_size = 0;
689 goto full_search;
690 }
691 return -ENOMEM;
692 }
693- if (!vma || addr + len <= vma->vm_start) {
694+ if (check_heap_stack_gap(vma, addr, len)) {
695 /*
696 * Remember the place where we stopped the search:
697 */
698diff -urNp linux-2.6.32.45/arch/arm/plat-s3c/pm.c linux-2.6.32.45/arch/arm/plat-s3c/pm.c
699--- linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
700+++ linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
701@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
702 s3c_pm_check_cleanup();
703 }
704
705-static struct platform_suspend_ops s3c_pm_ops = {
706+static const struct platform_suspend_ops s3c_pm_ops = {
707 .enter = s3c_pm_enter,
708 .prepare = s3c_pm_prepare,
709 .finish = s3c_pm_finish,
710diff -urNp linux-2.6.32.45/arch/avr32/include/asm/elf.h linux-2.6.32.45/arch/avr32/include/asm/elf.h
711--- linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
712+++ linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
713@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719
720+#ifdef CONFIG_PAX_ASLR
721+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
722+
723+#define PAX_DELTA_MMAP_LEN 15
724+#define PAX_DELTA_STACK_LEN 15
725+#endif
726
727 /* This yields a mask that user programs can use to figure out what
728 instruction set this CPU supports. This could be done in user space,
729diff -urNp linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h
730--- linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
731+++ linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
732@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
733 D(11) KM_IRQ1,
734 D(12) KM_SOFTIRQ0,
735 D(13) KM_SOFTIRQ1,
736-D(14) KM_TYPE_NR
737+D(14) KM_CLEARPAGE,
738+D(15) KM_TYPE_NR
739 };
740
741 #undef D
742diff -urNp linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c
743--- linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
744+++ linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
745@@ -176,7 +176,7 @@ out:
746 return 0;
747 }
748
749-static struct platform_suspend_ops avr32_pm_ops = {
750+static const struct platform_suspend_ops avr32_pm_ops = {
751 .valid = avr32_pm_valid_state,
752 .enter = avr32_pm_enter,
753 };
754diff -urNp linux-2.6.32.45/arch/avr32/mm/fault.c linux-2.6.32.45/arch/avr32/mm/fault.c
755--- linux-2.6.32.45/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
756+++ linux-2.6.32.45/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
757@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
758
759 int exception_trace = 1;
760
761+#ifdef CONFIG_PAX_PAGEEXEC
762+void pax_report_insns(void *pc, void *sp)
763+{
764+ unsigned long i;
765+
766+ printk(KERN_ERR "PAX: bytes at PC: ");
767+ for (i = 0; i < 20; i++) {
768+ unsigned char c;
769+ if (get_user(c, (unsigned char *)pc+i))
770+ printk(KERN_CONT "???????? ");
771+ else
772+ printk(KERN_CONT "%02x ", c);
773+ }
774+ printk("\n");
775+}
776+#endif
777+
778 /*
779 * This routine handles page faults. It determines the address and the
780 * problem, and then passes it off to one of the appropriate routines.
781@@ -157,6 +174,16 @@ bad_area:
782 up_read(&mm->mmap_sem);
783
784 if (user_mode(regs)) {
785+
786+#ifdef CONFIG_PAX_PAGEEXEC
787+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
788+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
789+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
790+ do_group_exit(SIGKILL);
791+ }
792+ }
793+#endif
794+
795 if (exception_trace && printk_ratelimit())
796 printk("%s%s[%d]: segfault at %08lx pc %08lx "
797 "sp %08lx ecr %lu\n",
798diff -urNp linux-2.6.32.45/arch/blackfin/kernel/kgdb.c linux-2.6.32.45/arch/blackfin/kernel/kgdb.c
799--- linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
800+++ linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
801@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
802 return -1; /* this means that we do not want to exit from the handler */
803 }
804
805-struct kgdb_arch arch_kgdb_ops = {
806+const struct kgdb_arch arch_kgdb_ops = {
807 .gdb_bpt_instr = {0xa1},
808 #ifdef CONFIG_SMP
809 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
810diff -urNp linux-2.6.32.45/arch/blackfin/mach-common/pm.c linux-2.6.32.45/arch/blackfin/mach-common/pm.c
811--- linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
812+++ linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
813@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
814 return 0;
815 }
816
817-struct platform_suspend_ops bfin_pm_ops = {
818+const struct platform_suspend_ops bfin_pm_ops = {
819 .enter = bfin_pm_enter,
820 .valid = bfin_pm_valid,
821 };
822diff -urNp linux-2.6.32.45/arch/frv/include/asm/kmap_types.h linux-2.6.32.45/arch/frv/include/asm/kmap_types.h
823--- linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
824+++ linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
825@@ -23,6 +23,7 @@ enum km_type {
826 KM_IRQ1,
827 KM_SOFTIRQ0,
828 KM_SOFTIRQ1,
829+ KM_CLEARPAGE,
830 KM_TYPE_NR
831 };
832
833diff -urNp linux-2.6.32.45/arch/frv/mm/elf-fdpic.c linux-2.6.32.45/arch/frv/mm/elf-fdpic.c
834--- linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
835+++ linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
836@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
837 if (addr) {
838 addr = PAGE_ALIGN(addr);
839 vma = find_vma(current->mm, addr);
840- if (TASK_SIZE - len >= addr &&
841- (!vma || addr + len <= vma->vm_start))
842+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
843 goto success;
844 }
845
846@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
847 for (; vma; vma = vma->vm_next) {
848 if (addr > limit)
849 break;
850- if (addr + len <= vma->vm_start)
851+ if (check_heap_stack_gap(vma, addr, len))
852 goto success;
853 addr = vma->vm_end;
854 }
855@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
856 for (; vma; vma = vma->vm_next) {
857 if (addr > limit)
858 break;
859- if (addr + len <= vma->vm_start)
860+ if (check_heap_stack_gap(vma, addr, len))
861 goto success;
862 addr = vma->vm_end;
863 }
864diff -urNp linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c
865--- linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
866+++ linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
867@@ -17,7 +17,7 @@
868 #include <linux/swiotlb.h>
869 #include <asm/machvec.h>
870
871-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
872+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
873
874 /* swiotlb declarations & definitions: */
875 extern int swiotlb_late_init_with_default_size (size_t size);
876@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
877 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
878 }
879
880-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
881+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
882 {
883 if (use_swiotlb(dev))
884 return &swiotlb_dma_ops;
885diff -urNp linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c
886--- linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
887+++ linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
888@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
889 },
890 };
891
892-extern struct dma_map_ops swiotlb_dma_ops;
893+extern const struct dma_map_ops swiotlb_dma_ops;
894
895 static int __init
896 sba_init(void)
897@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
898
899 __setup("sbapagesize=",sba_page_override);
900
901-struct dma_map_ops sba_dma_ops = {
902+const struct dma_map_ops sba_dma_ops = {
903 .alloc_coherent = sba_alloc_coherent,
904 .free_coherent = sba_free_coherent,
905 .map_page = sba_map_page,
906diff -urNp linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c
907--- linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
908+++ linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
909@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
910
911 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
912
913+#ifdef CONFIG_PAX_ASLR
914+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
915+
916+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
917+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
918+#endif
919+
920 /* Ugly but avoids duplication */
921 #include "../../../fs/binfmt_elf.c"
922
923diff -urNp linux-2.6.32.45/arch/ia64/ia32/ia32priv.h linux-2.6.32.45/arch/ia64/ia32/ia32priv.h
924--- linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
925+++ linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
926@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
927 #define ELF_DATA ELFDATA2LSB
928 #define ELF_ARCH EM_386
929
930-#define IA32_STACK_TOP IA32_PAGE_OFFSET
931+#ifdef CONFIG_PAX_RANDUSTACK
932+#define __IA32_DELTA_STACK (current->mm->delta_stack)
933+#else
934+#define __IA32_DELTA_STACK 0UL
935+#endif
936+
937+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
938+
939 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
940 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
941
942diff -urNp linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h
943--- linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
944+++ linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
945@@ -12,7 +12,7 @@
946
947 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
948
949-extern struct dma_map_ops *dma_ops;
950+extern const struct dma_map_ops *dma_ops;
951 extern struct ia64_machine_vector ia64_mv;
952 extern void set_iommu_machvec(void);
953
954@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
955 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
956 dma_addr_t *daddr, gfp_t gfp)
957 {
958- struct dma_map_ops *ops = platform_dma_get_ops(dev);
959+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
960 void *caddr;
961
962 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
963@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
964 static inline void dma_free_coherent(struct device *dev, size_t size,
965 void *caddr, dma_addr_t daddr)
966 {
967- struct dma_map_ops *ops = platform_dma_get_ops(dev);
968+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
969 debug_dma_free_coherent(dev, size, caddr, daddr);
970 ops->free_coherent(dev, size, caddr, daddr);
971 }
972@@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
973
974 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
975 {
976- struct dma_map_ops *ops = platform_dma_get_ops(dev);
977+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
978 return ops->mapping_error(dev, daddr);
979 }
980
981 static inline int dma_supported(struct device *dev, u64 mask)
982 {
983- struct dma_map_ops *ops = platform_dma_get_ops(dev);
984+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
985 return ops->dma_supported(dev, mask);
986 }
987
988diff -urNp linux-2.6.32.45/arch/ia64/include/asm/elf.h linux-2.6.32.45/arch/ia64/include/asm/elf.h
989--- linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
990+++ linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
991@@ -43,6 +43,13 @@
992 */
993 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
994
995+#ifdef CONFIG_PAX_ASLR
996+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
997+
998+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
999+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1000+#endif
1001+
1002 #define PT_IA_64_UNWIND 0x70000001
1003
1004 /* IA-64 relocations: */
1005diff -urNp linux-2.6.32.45/arch/ia64/include/asm/machvec.h linux-2.6.32.45/arch/ia64/include/asm/machvec.h
1006--- linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1007+++ linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1008@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1009 /* DMA-mapping interface: */
1010 typedef void ia64_mv_dma_init (void);
1011 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1012-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1013+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1014
1015 /*
1016 * WARNING: The legacy I/O space is _architected_. Platforms are
1017@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1018 # endif /* CONFIG_IA64_GENERIC */
1019
1020 extern void swiotlb_dma_init(void);
1021-extern struct dma_map_ops *dma_get_ops(struct device *);
1022+extern const struct dma_map_ops *dma_get_ops(struct device *);
1023
1024 /*
1025 * Define default versions so we can extend machvec for new platforms without having
1026diff -urNp linux-2.6.32.45/arch/ia64/include/asm/pgtable.h linux-2.6.32.45/arch/ia64/include/asm/pgtable.h
1027--- linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1028+++ linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1029@@ -12,7 +12,7 @@
1030 * David Mosberger-Tang <davidm@hpl.hp.com>
1031 */
1032
1033-
1034+#include <linux/const.h>
1035 #include <asm/mman.h>
1036 #include <asm/page.h>
1037 #include <asm/processor.h>
1038@@ -143,6 +143,17 @@
1039 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1040 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1041 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1042+
1043+#ifdef CONFIG_PAX_PAGEEXEC
1044+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1045+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1046+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1047+#else
1048+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1049+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1050+# define PAGE_COPY_NOEXEC PAGE_COPY
1051+#endif
1052+
1053 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1054 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1055 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1056diff -urNp linux-2.6.32.45/arch/ia64/include/asm/spinlock.h linux-2.6.32.45/arch/ia64/include/asm/spinlock.h
1057--- linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1058+++ linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1059@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1060 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1061
1062 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1063- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1064+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1065 }
1066
1067 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1068diff -urNp linux-2.6.32.45/arch/ia64/include/asm/uaccess.h linux-2.6.32.45/arch/ia64/include/asm/uaccess.h
1069--- linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1070+++ linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1071@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1072 const void *__cu_from = (from); \
1073 long __cu_len = (n); \
1074 \
1075- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1076+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1077 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1078 __cu_len; \
1079 })
1080@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1081 long __cu_len = (n); \
1082 \
1083 __chk_user_ptr(__cu_from); \
1084- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1085+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1086 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1087 __cu_len; \
1088 })
1089diff -urNp linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c
1090--- linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1091+++ linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1092@@ -3,7 +3,7 @@
1093 /* Set this to 1 if there is a HW IOMMU in the system */
1094 int iommu_detected __read_mostly;
1095
1096-struct dma_map_ops *dma_ops;
1097+const struct dma_map_ops *dma_ops;
1098 EXPORT_SYMBOL(dma_ops);
1099
1100 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1101@@ -16,7 +16,7 @@ static int __init dma_init(void)
1102 }
1103 fs_initcall(dma_init);
1104
1105-struct dma_map_ops *dma_get_ops(struct device *dev)
1106+const struct dma_map_ops *dma_get_ops(struct device *dev)
1107 {
1108 return dma_ops;
1109 }
1110diff -urNp linux-2.6.32.45/arch/ia64/kernel/module.c linux-2.6.32.45/arch/ia64/kernel/module.c
1111--- linux-2.6.32.45/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1112+++ linux-2.6.32.45/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1113@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1114 void
1115 module_free (struct module *mod, void *module_region)
1116 {
1117- if (mod && mod->arch.init_unw_table &&
1118- module_region == mod->module_init) {
1119+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1120 unw_remove_unwind_table(mod->arch.init_unw_table);
1121 mod->arch.init_unw_table = NULL;
1122 }
1123@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1124 }
1125
1126 static inline int
1127+in_init_rx (const struct module *mod, uint64_t addr)
1128+{
1129+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1130+}
1131+
1132+static inline int
1133+in_init_rw (const struct module *mod, uint64_t addr)
1134+{
1135+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1136+}
1137+
1138+static inline int
1139 in_init (const struct module *mod, uint64_t addr)
1140 {
1141- return addr - (uint64_t) mod->module_init < mod->init_size;
1142+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1143+}
1144+
1145+static inline int
1146+in_core_rx (const struct module *mod, uint64_t addr)
1147+{
1148+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1149+}
1150+
1151+static inline int
1152+in_core_rw (const struct module *mod, uint64_t addr)
1153+{
1154+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1155 }
1156
1157 static inline int
1158 in_core (const struct module *mod, uint64_t addr)
1159 {
1160- return addr - (uint64_t) mod->module_core < mod->core_size;
1161+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1162 }
1163
1164 static inline int
1165@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1166 break;
1167
1168 case RV_BDREL:
1169- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1170+ if (in_init_rx(mod, val))
1171+ val -= (uint64_t) mod->module_init_rx;
1172+ else if (in_init_rw(mod, val))
1173+ val -= (uint64_t) mod->module_init_rw;
1174+ else if (in_core_rx(mod, val))
1175+ val -= (uint64_t) mod->module_core_rx;
1176+ else if (in_core_rw(mod, val))
1177+ val -= (uint64_t) mod->module_core_rw;
1178 break;
1179
1180 case RV_LTV:
1181@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1182 * addresses have been selected...
1183 */
1184 uint64_t gp;
1185- if (mod->core_size > MAX_LTOFF)
1186+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1187 /*
1188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1189 * at the end of the module.
1190 */
1191- gp = mod->core_size - MAX_LTOFF / 2;
1192+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1193 else
1194- gp = mod->core_size / 2;
1195- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1196+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1197+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1198 mod->arch.gp = gp;
1199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1200 }
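
The module.c changes above follow from the module_core_rx/module_core_rw split (read-only-executable vs. writable module memory) that this patch introduces in the generic module loader. All of the new in_init_rx()/in_init_rw()/in_core_rx()/in_core_rw() helpers use the same single-comparison range test: because the subtraction is unsigned, `addr - base < size` covers both `addr >= base` and `addr < base + size` at once, since an address below base wraps around to a huge value and fails the comparison. A standalone illustration, not part of the patch:

/* The unsigned-wraparound range test used by the helpers above, isolated. */
#include <assert.h>
#include <stdint.h>

static int in_range(uint64_t addr, uint64_t base, uint64_t size)
{
    return addr - base < size;
}

int main(void)
{
    const uint64_t base = 0xa000000100000000ULL, size = 0x4000; /* made-up values */

    assert( in_range(base,            base, size));  /* first byte        */
    assert( in_range(base + size - 1, base, size));  /* last byte         */
    assert(!in_range(base + size,     base, size));  /* one past the end  */
    assert(!in_range(base - 1,        base, size));  /* below base: wraps */
    return 0;
}
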
1201diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-dma.c linux-2.6.32.45/arch/ia64/kernel/pci-dma.c
1202--- linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1203+++ linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1204@@ -43,7 +43,7 @@ struct device fallback_dev = {
1205 .dma_mask = &fallback_dev.coherent_dma_mask,
1206 };
1207
1208-extern struct dma_map_ops intel_dma_ops;
1209+extern const struct dma_map_ops intel_dma_ops;
1210
1211 static int __init pci_iommu_init(void)
1212 {
1213@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1214 }
1215 EXPORT_SYMBOL(iommu_dma_supported);
1216
1217+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1218+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1219+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1220+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1221+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1222+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1223+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1224+
1225+static const struct dma_map_ops intel_iommu_dma_ops = {
1226+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1227+ .alloc_coherent = intel_alloc_coherent,
1228+ .free_coherent = intel_free_coherent,
1229+ .map_sg = intel_map_sg,
1230+ .unmap_sg = intel_unmap_sg,
1231+ .map_page = intel_map_page,
1232+ .unmap_page = intel_unmap_page,
1233+ .mapping_error = intel_mapping_error,
1234+
1235+ .sync_single_for_cpu = machvec_dma_sync_single,
1236+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1237+ .sync_single_for_device = machvec_dma_sync_single,
1238+ .sync_sg_for_device = machvec_dma_sync_sg,
1239+ .dma_supported = iommu_dma_supported,
1240+};
1241+
1242 void __init pci_iommu_alloc(void)
1243 {
1244- dma_ops = &intel_dma_ops;
1245-
1246- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1247- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1248- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1249- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1250- dma_ops->dma_supported = iommu_dma_supported;
1251+ dma_ops = &intel_iommu_dma_ops;
1252
1253 /*
1254 * The order of these functions is important for
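
The pci-dma.c hunk above shows the pattern this patch applies to many operations tables: instead of declaring a writable struct dma_map_ops and overwriting several of its function pointers at boot, a second, fully initialised const table (intel_iommu_dma_ops) is defined and the global ops pointer is switched to it, so the structure itself can be placed in read-only memory. A miniature of the same transformation with made-up names, illustrative only and not kernel code:

/* Before: a writable ops table patched at init time.
 * After:  one fully-populated const table and a pointer switch. */
#include <stdio.h>

struct ops {
    void (*setup)(void);
    void (*sync)(void);
};

static void setup_hw(void)     { puts("hw setup");     }
static void sync_generic(void) { puts("generic sync"); }

static const struct ops hw_ops = {    /* can live in .rodata */
    .setup = setup_hw,
    .sync  = sync_generic,
};

static const struct ops *ops;          /* selected once at init */

int main(void)
{
    ops = &hw_ops;                      /* was: hw_ops.sync = sync_generic; etc. */
    ops->setup();
    ops->sync();
    return 0;
}
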
1255diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c
1256--- linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1257+++ linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1258@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1259 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1260 }
1261
1262-struct dma_map_ops swiotlb_dma_ops = {
1263+const struct dma_map_ops swiotlb_dma_ops = {
1264 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1265 .free_coherent = swiotlb_free_coherent,
1266 .map_page = swiotlb_map_page,
1267diff -urNp linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c
1268--- linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1269+++ linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1270@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1271 if (REGION_NUMBER(addr) == RGN_HPAGE)
1272 addr = 0;
1273 #endif
1274+
1275+#ifdef CONFIG_PAX_RANDMMAP
1276+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1277+ addr = mm->free_area_cache;
1278+ else
1279+#endif
1280+
1281 if (!addr)
1282 addr = mm->free_area_cache;
1283
1284@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1285 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1286 /* At this point: (!vma || addr < vma->vm_end). */
1287 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1288- if (start_addr != TASK_UNMAPPED_BASE) {
1289+ if (start_addr != mm->mmap_base) {
1290 /* Start a new search --- just in case we missed some holes. */
1291- addr = TASK_UNMAPPED_BASE;
1292+ addr = mm->mmap_base;
1293 goto full_search;
1294 }
1295 return -ENOMEM;
1296 }
1297- if (!vma || addr + len <= vma->vm_start) {
1298+ if (check_heap_stack_gap(vma, addr, len)) {
1299 /* Remember the address where we stopped this search: */
1300 mm->free_area_cache = addr + len;
1301 return addr;
1302diff -urNp linux-2.6.32.45/arch/ia64/kernel/topology.c linux-2.6.32.45/arch/ia64/kernel/topology.c
1303--- linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1304+++ linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1305@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1306 return ret;
1307 }
1308
1309-static struct sysfs_ops cache_sysfs_ops = {
1310+static const struct sysfs_ops cache_sysfs_ops = {
1311 .show = cache_show
1312 };
1313
1314diff -urNp linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S
1315--- linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1316+++ linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1317@@ -190,7 +190,7 @@ SECTIONS
1318 /* Per-cpu data: */
1319 . = ALIGN(PERCPU_PAGE_SIZE);
1320 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1321- __phys_per_cpu_start = __per_cpu_load;
1322+ __phys_per_cpu_start = per_cpu_load;
1323 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1324 * into percpu page size
1325 */
1326diff -urNp linux-2.6.32.45/arch/ia64/mm/fault.c linux-2.6.32.45/arch/ia64/mm/fault.c
1327--- linux-2.6.32.45/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1328+++ linux-2.6.32.45/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1329@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1330 return pte_present(pte);
1331 }
1332
1333+#ifdef CONFIG_PAX_PAGEEXEC
1334+void pax_report_insns(void *pc, void *sp)
1335+{
1336+ unsigned long i;
1337+
1338+ printk(KERN_ERR "PAX: bytes at PC: ");
1339+ for (i = 0; i < 8; i++) {
1340+ unsigned int c;
1341+ if (get_user(c, (unsigned int *)pc+i))
1342+ printk(KERN_CONT "???????? ");
1343+ else
1344+ printk(KERN_CONT "%08x ", c);
1345+ }
1346+ printk("\n");
1347+}
1348+#endif
1349+
1350 void __kprobes
1351 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1352 {
1353@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1354 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1355 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1356
1357- if ((vma->vm_flags & mask) != mask)
1358+ if ((vma->vm_flags & mask) != mask) {
1359+
1360+#ifdef CONFIG_PAX_PAGEEXEC
1361+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1362+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1363+ goto bad_area;
1364+
1365+ up_read(&mm->mmap_sem);
1366+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1367+ do_group_exit(SIGKILL);
1368+ }
1369+#endif
1370+
1371 goto bad_area;
1372
1373+ }
1374+
1375 survive:
1376 /*
1377 * If for any reason at all we couldn't handle the fault, make
1378diff -urNp linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c
1379--- linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1380+++ linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1381@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1382 /* At this point: (!vmm || addr < vmm->vm_end). */
1383 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1384 return -ENOMEM;
1385- if (!vmm || (addr + len) <= vmm->vm_start)
1386+ if (check_heap_stack_gap(vmm, addr, len))
1387 return addr;
1388 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1389 }
1390diff -urNp linux-2.6.32.45/arch/ia64/mm/init.c linux-2.6.32.45/arch/ia64/mm/init.c
1391--- linux-2.6.32.45/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1392+++ linux-2.6.32.45/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1393@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1394 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1395 vma->vm_end = vma->vm_start + PAGE_SIZE;
1396 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1397+
1398+#ifdef CONFIG_PAX_PAGEEXEC
1399+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1400+ vma->vm_flags &= ~VM_EXEC;
1401+
1402+#ifdef CONFIG_PAX_MPROTECT
1403+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1404+ vma->vm_flags &= ~VM_MAYEXEC;
1405+#endif
1406+
1407+ }
1408+#endif
1409+
1410 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1411 down_write(&current->mm->mmap_sem);
1412 if (insert_vm_struct(current->mm, vma)) {
1413diff -urNp linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c
1414--- linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1415+++ linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1416@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1417 return ret;
1418 }
1419
1420-static struct dma_map_ops sn_dma_ops = {
1421+static const struct dma_map_ops sn_dma_ops = {
1422 .alloc_coherent = sn_dma_alloc_coherent,
1423 .free_coherent = sn_dma_free_coherent,
1424 .map_page = sn_dma_map_page,
1425diff -urNp linux-2.6.32.45/arch/m32r/lib/usercopy.c linux-2.6.32.45/arch/m32r/lib/usercopy.c
1426--- linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1427+++ linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1428@@ -14,6 +14,9 @@
1429 unsigned long
1430 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1431 {
1432+ if ((long)n < 0)
1433+ return n;
1434+
1435 prefetch(from);
1436 if (access_ok(VERIFY_WRITE, to, n))
1437 __copy_user(to,from,n);
1438@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1439 unsigned long
1440 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1441 {
1442+ if ((long)n < 0)
1443+ return n;
1444+
1445 prefetchw(to);
1446 if (access_ok(VERIFY_READ, from, n))
1447 __copy_user_zeroing(to,from,n);
1448diff -urNp linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c
1449--- linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1450+++ linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1451@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1452
1453 }
1454
1455-static struct platform_suspend_ops db1x_pm_ops = {
1456+static const struct platform_suspend_ops db1x_pm_ops = {
1457 .valid = suspend_valid_only_mem,
1458 .begin = db1x_pm_begin,
1459 .enter = db1x_pm_enter,
1460diff -urNp linux-2.6.32.45/arch/mips/include/asm/elf.h linux-2.6.32.45/arch/mips/include/asm/elf.h
1461--- linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1462+++ linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1463@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1464 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1465 #endif
1466
1467+#ifdef CONFIG_PAX_ASLR
1468+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1469+
1470+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1471+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1472+#endif
1473+
1474 #endif /* _ASM_ELF_H */
1475diff -urNp linux-2.6.32.45/arch/mips/include/asm/page.h linux-2.6.32.45/arch/mips/include/asm/page.h
1476--- linux-2.6.32.45/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1477+++ linux-2.6.32.45/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1478@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1479 #ifdef CONFIG_CPU_MIPS32
1480 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1481 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1482- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1483+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1484 #else
1485 typedef struct { unsigned long long pte; } pte_t;
1486 #define pte_val(x) ((x).pte)
1487diff -urNp linux-2.6.32.45/arch/mips/include/asm/reboot.h linux-2.6.32.45/arch/mips/include/asm/reboot.h
1488--- linux-2.6.32.45/arch/mips/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
1489+++ linux-2.6.32.45/arch/mips/include/asm/reboot.h 2011-08-21 17:35:02.000000000 -0400
1490@@ -9,7 +9,7 @@
1491 #ifndef _ASM_REBOOT_H
1492 #define _ASM_REBOOT_H
1493
1494-extern void (*_machine_restart)(char *command);
1495-extern void (*_machine_halt)(void);
1496+extern void (*__noreturn _machine_restart)(char *command);
1497+extern void (*__noreturn _machine_halt)(void);
1498
1499 #endif /* _ASM_REBOOT_H */
1500diff -urNp linux-2.6.32.45/arch/mips/include/asm/system.h linux-2.6.32.45/arch/mips/include/asm/system.h
1501--- linux-2.6.32.45/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1502+++ linux-2.6.32.45/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1503@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1504 */
1505 #define __ARCH_WANT_UNLOCKED_CTXSW
1506
1507-extern unsigned long arch_align_stack(unsigned long sp);
1508+#define arch_align_stack(x) ((x) & ~0xfUL)
1509
1510 #endif /* _ASM_SYSTEM_H */
1511diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c
1512--- linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1513+++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1514@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1515 #undef ELF_ET_DYN_BASE
1516 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1517
1518+#ifdef CONFIG_PAX_ASLR
1519+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1520+
1521+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1522+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1523+#endif
1524+
1525 #include <asm/processor.h>
1526 #include <linux/module.h>
1527 #include <linux/elfcore.h>
1528diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c
1529--- linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1530+++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1531@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1532 #undef ELF_ET_DYN_BASE
1533 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1534
1535+#ifdef CONFIG_PAX_ASLR
1536+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1537+
1538+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1539+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1540+#endif
1541+
1542 #include <asm/processor.h>
1543
1544 /*
1545diff -urNp linux-2.6.32.45/arch/mips/kernel/kgdb.c linux-2.6.32.45/arch/mips/kernel/kgdb.c
1546--- linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1547+++ linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1548@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1549 return -1;
1550 }
1551
1552+/* cannot be const */
1553 struct kgdb_arch arch_kgdb_ops;
1554
1555 /*
1556diff -urNp linux-2.6.32.45/arch/mips/kernel/process.c linux-2.6.32.45/arch/mips/kernel/process.c
1557--- linux-2.6.32.45/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1558+++ linux-2.6.32.45/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1559@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1560 out:
1561 return pc;
1562 }
1563-
1564-/*
1565- * Don't forget that the stack pointer must be aligned on a 8 bytes
1566- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1567- */
1568-unsigned long arch_align_stack(unsigned long sp)
1569-{
1570- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1571- sp -= get_random_int() & ~PAGE_MASK;
1572-
1573- return sp & ALMASK;
1574-}
1575diff -urNp linux-2.6.32.45/arch/mips/kernel/reset.c linux-2.6.32.45/arch/mips/kernel/reset.c
1576--- linux-2.6.32.45/arch/mips/kernel/reset.c 2011-03-27 14:31:47.000000000 -0400
1577+++ linux-2.6.32.45/arch/mips/kernel/reset.c 2011-08-21 17:35:26.000000000 -0400
1578@@ -19,8 +19,8 @@
1579 * So handle all using function pointers to machine specific
1580 * functions.
1581 */
1582-void (*_machine_restart)(char *command);
1583-void (*_machine_halt)(void);
1584+void (*__noreturn _machine_restart)(char *command);
1585+void (*__noreturn _machine_halt)(void);
1586 void (*pm_power_off)(void);
1587
1588 EXPORT_SYMBOL(pm_power_off);
1589@@ -29,16 +29,19 @@ void machine_restart(char *command)
1590 {
1591 if (_machine_restart)
1592 _machine_restart(command);
1593+ BUG();
1594 }
1595
1596 void machine_halt(void)
1597 {
1598 if (_machine_halt)
1599 _machine_halt();
1600+ BUG();
1601 }
1602
1603 void machine_power_off(void)
1604 {
1605 if (pm_power_off)
1606 pm_power_off();
1607+ BUG();
1608 }
1609diff -urNp linux-2.6.32.45/arch/mips/kernel/syscall.c linux-2.6.32.45/arch/mips/kernel/syscall.c
1610--- linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1611+++ linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1612@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1613 do_color_align = 0;
1614 if (filp || (flags & MAP_SHARED))
1615 do_color_align = 1;
1616+
1617+#ifdef CONFIG_PAX_RANDMMAP
1618+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1619+#endif
1620+
1621 if (addr) {
1622 if (do_color_align)
1623 addr = COLOUR_ALIGN(addr, pgoff);
1624 else
1625 addr = PAGE_ALIGN(addr);
1626 vmm = find_vma(current->mm, addr);
1627- if (task_size - len >= addr &&
1628- (!vmm || addr + len <= vmm->vm_start))
1629+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1630 return addr;
1631 }
1632- addr = TASK_UNMAPPED_BASE;
1633+ addr = current->mm->mmap_base;
1634 if (do_color_align)
1635 addr = COLOUR_ALIGN(addr, pgoff);
1636 else
1637@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1638 /* At this point: (!vmm || addr < vmm->vm_end). */
1639 if (task_size - len < addr)
1640 return -ENOMEM;
1641- if (!vmm || addr + len <= vmm->vm_start)
1642+ if (check_heap_stack_gap(vmm, addr, len))
1643 return addr;
1644 addr = vmm->vm_end;
1645 if (do_color_align)
1646diff -urNp linux-2.6.32.45/arch/mips/Makefile linux-2.6.32.45/arch/mips/Makefile
1647--- linux-2.6.32.45/arch/mips/Makefile 2011-03-27 14:31:47.000000000 -0400
1648+++ linux-2.6.32.45/arch/mips/Makefile 2011-08-21 19:26:52.000000000 -0400
1649@@ -51,6 +51,8 @@ endif
1650 cflags-y := -ffunction-sections
1651 cflags-y += $(call cc-option, -mno-check-zero-division)
1652
1653+cflags-y += -Wno-sign-compare -Wno-extra
1654+
1655 ifdef CONFIG_32BIT
1656 ld-emul = $(32bit-emul)
1657 vmlinux-32 = vmlinux
1658diff -urNp linux-2.6.32.45/arch/mips/mm/fault.c linux-2.6.32.45/arch/mips/mm/fault.c
1659--- linux-2.6.32.45/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1660+++ linux-2.6.32.45/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1661@@ -26,6 +26,23 @@
1662 #include <asm/ptrace.h>
1663 #include <asm/highmem.h> /* For VMALLOC_END */
1664
1665+#ifdef CONFIG_PAX_PAGEEXEC
1666+void pax_report_insns(void *pc, void *sp)
1667+{
1668+ unsigned long i;
1669+
1670+ printk(KERN_ERR "PAX: bytes at PC: ");
1671+ for (i = 0; i < 5; i++) {
1672+ unsigned int c;
1673+ if (get_user(c, (unsigned int *)pc+i))
1674+ printk(KERN_CONT "???????? ");
1675+ else
1676+ printk(KERN_CONT "%08x ", c);
1677+ }
1678+ printk("\n");
1679+}
1680+#endif
1681+
1682 /*
1683 * This routine handles page faults. It determines the address,
1684 * and the problem, and then passes it off to one of the appropriate
1685diff -urNp linux-2.6.32.45/arch/parisc/include/asm/elf.h linux-2.6.32.45/arch/parisc/include/asm/elf.h
1686--- linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1687+++ linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1688@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1689
1690 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1691
1692+#ifdef CONFIG_PAX_ASLR
1693+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1694+
1695+#define PAX_DELTA_MMAP_LEN 16
1696+#define PAX_DELTA_STACK_LEN 16
1697+#endif
1698+
1699 /* This yields a mask that user programs can use to figure out what
1700 instruction set this CPU supports. This could be done in user space,
1701 but it's not easy, and we've already done it here. */
1702diff -urNp linux-2.6.32.45/arch/parisc/include/asm/pgtable.h linux-2.6.32.45/arch/parisc/include/asm/pgtable.h
1703--- linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1704+++ linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1705@@ -207,6 +207,17 @@
1706 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1707 #define PAGE_COPY PAGE_EXECREAD
1708 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1709+
1710+#ifdef CONFIG_PAX_PAGEEXEC
1711+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1712+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1713+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1714+#else
1715+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1716+# define PAGE_COPY_NOEXEC PAGE_COPY
1717+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1718+#endif
1719+
1720 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1721 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1722 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1723diff -urNp linux-2.6.32.45/arch/parisc/kernel/module.c linux-2.6.32.45/arch/parisc/kernel/module.c
1724--- linux-2.6.32.45/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1725+++ linux-2.6.32.45/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1726@@ -95,16 +95,38 @@
1727
1728 /* three functions to determine where in the module core
1729 * or init pieces the location is */
1730+static inline int in_init_rx(struct module *me, void *loc)
1731+{
1732+ return (loc >= me->module_init_rx &&
1733+ loc < (me->module_init_rx + me->init_size_rx));
1734+}
1735+
1736+static inline int in_init_rw(struct module *me, void *loc)
1737+{
1738+ return (loc >= me->module_init_rw &&
1739+ loc < (me->module_init_rw + me->init_size_rw));
1740+}
1741+
1742 static inline int in_init(struct module *me, void *loc)
1743 {
1744- return (loc >= me->module_init &&
1745- loc <= (me->module_init + me->init_size));
1746+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1747+}
1748+
1749+static inline int in_core_rx(struct module *me, void *loc)
1750+{
1751+ return (loc >= me->module_core_rx &&
1752+ loc < (me->module_core_rx + me->core_size_rx));
1753+}
1754+
1755+static inline int in_core_rw(struct module *me, void *loc)
1756+{
1757+ return (loc >= me->module_core_rw &&
1758+ loc < (me->module_core_rw + me->core_size_rw));
1759 }
1760
1761 static inline int in_core(struct module *me, void *loc)
1762 {
1763- return (loc >= me->module_core &&
1764- loc <= (me->module_core + me->core_size));
1765+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1766 }
1767
1768 static inline int in_local(struct module *me, void *loc)
1769@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1770 }
1771
1772 /* align things a bit */
1773- me->core_size = ALIGN(me->core_size, 16);
1774- me->arch.got_offset = me->core_size;
1775- me->core_size += gots * sizeof(struct got_entry);
1776-
1777- me->core_size = ALIGN(me->core_size, 16);
1778- me->arch.fdesc_offset = me->core_size;
1779- me->core_size += fdescs * sizeof(Elf_Fdesc);
1780+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1781+ me->arch.got_offset = me->core_size_rw;
1782+ me->core_size_rw += gots * sizeof(struct got_entry);
1783+
1784+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1785+ me->arch.fdesc_offset = me->core_size_rw;
1786+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1787
1788 me->arch.got_max = gots;
1789 me->arch.fdesc_max = fdescs;
1790@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1791
1792 BUG_ON(value == 0);
1793
1794- got = me->module_core + me->arch.got_offset;
1795+ got = me->module_core_rw + me->arch.got_offset;
1796 for (i = 0; got[i].addr; i++)
1797 if (got[i].addr == value)
1798 goto out;
1799@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1800 #ifdef CONFIG_64BIT
1801 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1802 {
1803- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1804+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1805
1806 if (!value) {
1807 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1808@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1809
1810 /* Create new one */
1811 fdesc->addr = value;
1812- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1813+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1814 return (Elf_Addr)fdesc;
1815 }
1816 #endif /* CONFIG_64BIT */
1817@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1818
1819 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1820 end = table + sechdrs[me->arch.unwind_section].sh_size;
1821- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1822+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1823
1824 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1825 me->arch.unwind_section, table, end, gp);
1826diff -urNp linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c
1827--- linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1828+++ linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1829@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1830 /* At this point: (!vma || addr < vma->vm_end). */
1831 if (TASK_SIZE - len < addr)
1832 return -ENOMEM;
1833- if (!vma || addr + len <= vma->vm_start)
1834+ if (check_heap_stack_gap(vma, addr, len))
1835 return addr;
1836 addr = vma->vm_end;
1837 }
1838@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1839 /* At this point: (!vma || addr < vma->vm_end). */
1840 if (TASK_SIZE - len < addr)
1841 return -ENOMEM;
1842- if (!vma || addr + len <= vma->vm_start)
1843+ if (check_heap_stack_gap(vma, addr, len))
1844 return addr;
1845 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1846 if (addr < vma->vm_end) /* handle wraparound */
1847@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1848 if (flags & MAP_FIXED)
1849 return addr;
1850 if (!addr)
1851- addr = TASK_UNMAPPED_BASE;
1852+ addr = current->mm->mmap_base;
1853
1854 if (filp) {
1855 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1856diff -urNp linux-2.6.32.45/arch/parisc/kernel/traps.c linux-2.6.32.45/arch/parisc/kernel/traps.c
1857--- linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1858+++ linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1859@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1860
1861 down_read(&current->mm->mmap_sem);
1862 vma = find_vma(current->mm,regs->iaoq[0]);
1863- if (vma && (regs->iaoq[0] >= vma->vm_start)
1864- && (vma->vm_flags & VM_EXEC)) {
1865-
1866+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1867 fault_address = regs->iaoq[0];
1868 fault_space = regs->iasq[0];
1869
1870diff -urNp linux-2.6.32.45/arch/parisc/mm/fault.c linux-2.6.32.45/arch/parisc/mm/fault.c
1871--- linux-2.6.32.45/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1872+++ linux-2.6.32.45/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1873@@ -15,6 +15,7 @@
1874 #include <linux/sched.h>
1875 #include <linux/interrupt.h>
1876 #include <linux/module.h>
1877+#include <linux/unistd.h>
1878
1879 #include <asm/uaccess.h>
1880 #include <asm/traps.h>
1881@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1882 static unsigned long
1883 parisc_acctyp(unsigned long code, unsigned int inst)
1884 {
1885- if (code == 6 || code == 16)
1886+ if (code == 6 || code == 7 || code == 16)
1887 return VM_EXEC;
1888
1889 switch (inst & 0xf0000000) {
1890@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1891 }
1892 #endif
1893
1894+#ifdef CONFIG_PAX_PAGEEXEC
1895+/*
1896+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1897+ *
1898+ * returns 1 when task should be killed
1899+ * 2 when rt_sigreturn trampoline was detected
1900+ * 3 when unpatched PLT trampoline was detected
1901+ */
1902+static int pax_handle_fetch_fault(struct pt_regs *regs)
1903+{
1904+
1905+#ifdef CONFIG_PAX_EMUPLT
1906+ int err;
1907+
1908+ do { /* PaX: unpatched PLT emulation */
1909+ unsigned int bl, depwi;
1910+
1911+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1912+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1913+
1914+ if (err)
1915+ break;
1916+
1917+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1918+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1919+
1920+ err = get_user(ldw, (unsigned int *)addr);
1921+ err |= get_user(bv, (unsigned int *)(addr+4));
1922+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1923+
1924+ if (err)
1925+ break;
1926+
1927+ if (ldw == 0x0E801096U &&
1928+ bv == 0xEAC0C000U &&
1929+ ldw2 == 0x0E881095U)
1930+ {
1931+ unsigned int resolver, map;
1932+
1933+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1934+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1935+ if (err)
1936+ break;
1937+
1938+ regs->gr[20] = instruction_pointer(regs)+8;
1939+ regs->gr[21] = map;
1940+ regs->gr[22] = resolver;
1941+ regs->iaoq[0] = resolver | 3UL;
1942+ regs->iaoq[1] = regs->iaoq[0] + 4;
1943+ return 3;
1944+ }
1945+ }
1946+ } while (0);
1947+#endif
1948+
1949+#ifdef CONFIG_PAX_EMUTRAMP
1950+
1951+#ifndef CONFIG_PAX_EMUSIGRT
1952+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1953+ return 1;
1954+#endif
1955+
1956+ do { /* PaX: rt_sigreturn emulation */
1957+ unsigned int ldi1, ldi2, bel, nop;
1958+
1959+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1960+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1961+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1962+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1963+
1964+ if (err)
1965+ break;
1966+
1967+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1968+ ldi2 == 0x3414015AU &&
1969+ bel == 0xE4008200U &&
1970+ nop == 0x08000240U)
1971+ {
1972+ regs->gr[25] = (ldi1 & 2) >> 1;
1973+ regs->gr[20] = __NR_rt_sigreturn;
1974+ regs->gr[31] = regs->iaoq[1] + 16;
1975+ regs->sr[0] = regs->iasq[1];
1976+ regs->iaoq[0] = 0x100UL;
1977+ regs->iaoq[1] = regs->iaoq[0] + 4;
1978+ regs->iasq[0] = regs->sr[2];
1979+ regs->iasq[1] = regs->sr[2];
1980+ return 2;
1981+ }
1982+ } while (0);
1983+#endif
1984+
1985+ return 1;
1986+}
1987+
1988+void pax_report_insns(void *pc, void *sp)
1989+{
1990+ unsigned long i;
1991+
1992+ printk(KERN_ERR "PAX: bytes at PC: ");
1993+ for (i = 0; i < 5; i++) {
1994+ unsigned int c;
1995+ if (get_user(c, (unsigned int *)pc+i))
1996+ printk(KERN_CONT "???????? ");
1997+ else
1998+ printk(KERN_CONT "%08x ", c);
1999+ }
2000+ printk("\n");
2001+}
2002+#endif
2003+
2004 int fixup_exception(struct pt_regs *regs)
2005 {
2006 const struct exception_table_entry *fix;
2007@@ -192,8 +303,33 @@ good_area:
2008
2009 acc_type = parisc_acctyp(code,regs->iir);
2010
2011- if ((vma->vm_flags & acc_type) != acc_type)
2012+ if ((vma->vm_flags & acc_type) != acc_type) {
2013+
2014+#ifdef CONFIG_PAX_PAGEEXEC
2015+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2016+ (address & ~3UL) == instruction_pointer(regs))
2017+ {
2018+ up_read(&mm->mmap_sem);
2019+ switch (pax_handle_fetch_fault(regs)) {
2020+
2021+#ifdef CONFIG_PAX_EMUPLT
2022+ case 3:
2023+ return;
2024+#endif
2025+
2026+#ifdef CONFIG_PAX_EMUTRAMP
2027+ case 2:
2028+ return;
2029+#endif
2030+
2031+ }
2032+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2033+ do_group_exit(SIGKILL);
2034+ }
2035+#endif
2036+
2037 goto bad_area;
2038+ }
2039
2040 /*
2041 * If for any reason at all we couldn't handle the fault, make
2042diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/device.h linux-2.6.32.45/arch/powerpc/include/asm/device.h
2043--- linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
2044+++ linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
2045@@ -14,7 +14,7 @@ struct dev_archdata {
2046 struct device_node *of_node;
2047
2048 /* DMA operations on that device */
2049- struct dma_map_ops *dma_ops;
2050+ const struct dma_map_ops *dma_ops;
2051
2052 /*
2053 * When an iommu is in use, dma_data is used as a ptr to the base of the
2054diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h
2055--- linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
2056+++ linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
2057@@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
2058 #ifdef CONFIG_PPC64
2059 extern struct dma_map_ops dma_iommu_ops;
2060 #endif
2061-extern struct dma_map_ops dma_direct_ops;
2062+extern const struct dma_map_ops dma_direct_ops;
2063
2064-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2065+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2066 {
2067 /* We don't handle the NULL dev case for ISA for now. We could
2068 * do it via an out of line call but it is not needed for now. The
2069@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2070 return dev->archdata.dma_ops;
2071 }
2072
2073-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2074+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2075 {
2076 dev->archdata.dma_ops = ops;
2077 }
2078@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2079
2080 static inline int dma_supported(struct device *dev, u64 mask)
2081 {
2082- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2083+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2084
2085 if (unlikely(dma_ops == NULL))
2086 return 0;
2087@@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2088
2089 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2090 {
2091- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2092+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2093
2094 if (unlikely(dma_ops == NULL))
2095 return -EIO;
2096@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2097 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2098 dma_addr_t *dma_handle, gfp_t flag)
2099 {
2100- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2101+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2102 void *cpu_addr;
2103
2104 BUG_ON(!dma_ops);
2105@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2106 static inline void dma_free_coherent(struct device *dev, size_t size,
2107 void *cpu_addr, dma_addr_t dma_handle)
2108 {
2109- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2110+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2111
2112 BUG_ON(!dma_ops);
2113
2114@@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2115
2116 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2117 {
2118- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2119+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2120
2121 if (dma_ops->mapping_error)
2122 return dma_ops->mapping_error(dev, dma_addr);
2123diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/elf.h linux-2.6.32.45/arch/powerpc/include/asm/elf.h
2124--- linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2125+++ linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2126@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2127 the loader. We need to make sure that it is out of the way of the program
2128 that it will "exec", and that there is sufficient room for the brk. */
2129
2130-extern unsigned long randomize_et_dyn(unsigned long base);
2131-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2132+#define ELF_ET_DYN_BASE (0x20000000)
2133+
2134+#ifdef CONFIG_PAX_ASLR
2135+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2136+
2137+#ifdef __powerpc64__
2138+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2139+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2140+#else
2141+#define PAX_DELTA_MMAP_LEN 15
2142+#define PAX_DELTA_STACK_LEN 15
2143+#endif
2144+#endif
2145
2146 /*
2147 * Our registers are always unsigned longs, whether we're a 32 bit
2148@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2149 (0x7ff >> (PAGE_SHIFT - 12)) : \
2150 (0x3ffff >> (PAGE_SHIFT - 12)))
2151
2152-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2153-#define arch_randomize_brk arch_randomize_brk
2154-
2155 #endif /* __KERNEL__ */
2156
2157 /*
2158diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/iommu.h linux-2.6.32.45/arch/powerpc/include/asm/iommu.h
2159--- linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2160+++ linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2161@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2162 extern void iommu_init_early_dart(void);
2163 extern void iommu_init_early_pasemi(void);
2164
2165+/* dma-iommu.c */
2166+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2167+
2168 #ifdef CONFIG_PCI
2169 extern void pci_iommu_init(void);
2170 extern void pci_direct_iommu_init(void);
2171diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h
2172--- linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2173+++ linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2174@@ -26,6 +26,7 @@ enum km_type {
2175 KM_SOFTIRQ1,
2176 KM_PPC_SYNC_PAGE,
2177 KM_PPC_SYNC_ICACHE,
2178+ KM_CLEARPAGE,
2179 KM_TYPE_NR
2180 };
2181
2182diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page_64.h linux-2.6.32.45/arch/powerpc/include/asm/page_64.h
2183--- linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2184+++ linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2185@@ -180,15 +180,18 @@ do { \
2186 * stack by default, so in the absense of a PT_GNU_STACK program header
2187 * we turn execute permission off.
2188 */
2189-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2190- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2191+#define VM_STACK_DEFAULT_FLAGS32 \
2192+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2193+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2194
2195 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2196 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2197
2198+#ifndef CONFIG_PAX_PAGEEXEC
2199 #define VM_STACK_DEFAULT_FLAGS \
2200 (test_thread_flag(TIF_32BIT) ? \
2201 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2202+#endif
2203
2204 #include <asm-generic/getorder.h>
2205
2206diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page.h linux-2.6.32.45/arch/powerpc/include/asm/page.h
2207--- linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2208+++ linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-08-21 16:07:39.000000000 -0400
2209@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2210 * and needs to be executable. This means the whole heap ends
2211 * up being executable.
2212 */
2213-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2214- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2215+#define VM_DATA_DEFAULT_FLAGS32 \
2216+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2217+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2218
2219 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2220 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2221@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2222 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2223 #endif
2224
2225+#define ktla_ktva(addr) (addr)
2226+#define ktva_ktla(addr) (addr)
2227+
2228 #ifndef __ASSEMBLY__
2229
2230 #undef STRICT_MM_TYPECHECKS
2231diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pci.h linux-2.6.32.45/arch/powerpc/include/asm/pci.h
2232--- linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2233+++ linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2234@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2235 }
2236
2237 #ifdef CONFIG_PCI
2238-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2239-extern struct dma_map_ops *get_pci_dma_ops(void);
2240+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2241+extern const struct dma_map_ops *get_pci_dma_ops(void);
2242 #else /* CONFIG_PCI */
2243 #define set_pci_dma_ops(d)
2244 #define get_pci_dma_ops() NULL
2245diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h
2246--- linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2247+++ linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2248@@ -2,6 +2,7 @@
2249 #define _ASM_POWERPC_PGTABLE_H
2250 #ifdef __KERNEL__
2251
2252+#include <linux/const.h>
2253 #ifndef __ASSEMBLY__
2254 #include <asm/processor.h> /* For TASK_SIZE */
2255 #include <asm/mmu.h>
2256diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h
2257--- linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2258+++ linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2259@@ -21,6 +21,7 @@
2260 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2261 #define _PAGE_USER 0x004 /* usermode access allowed */
2262 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2263+#define _PAGE_EXEC _PAGE_GUARDED
2264 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2265 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2266 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2267diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h
2268--- linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
2269+++ linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h 2011-08-21 15:53:58.000000000 -0400
2270@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct p
2271 } while(0)
2272
2273 struct task_struct;
2274-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2275+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2276 extern int ptrace_put_reg(struct task_struct *task, int regno,
2277 unsigned long data);
2278
2279diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/reg.h linux-2.6.32.45/arch/powerpc/include/asm/reg.h
2280--- linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2281+++ linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2282@@ -191,6 +191,7 @@
2283 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2284 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2285 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2286+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2287 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2288 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2289 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2290diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h
2291--- linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2292+++ linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2293@@ -13,7 +13,7 @@
2294
2295 #include <linux/swiotlb.h>
2296
2297-extern struct dma_map_ops swiotlb_dma_ops;
2298+extern const struct dma_map_ops swiotlb_dma_ops;
2299
2300 static inline void dma_mark_clean(void *addr, size_t size) {}
2301
2302diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/system.h linux-2.6.32.45/arch/powerpc/include/asm/system.h
2303--- linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2304+++ linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2305@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2306 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2307 #endif
2308
2309-extern unsigned long arch_align_stack(unsigned long sp);
2310+#define arch_align_stack(x) ((x) & ~0xfUL)
2311
2312 /* Used in very early kernel initialization. */
2313 extern unsigned long reloc_offset(void);
2314diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h
2315--- linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2316+++ linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2317@@ -13,6 +13,8 @@
2318 #define VERIFY_READ 0
2319 #define VERIFY_WRITE 1
2320
2321+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2322+
2323 /*
2324 * The fs value determines whether argument validity checking should be
2325 * performed or not. If get_fs() == USER_DS, checking is performed, with
2326@@ -327,52 +329,6 @@ do { \
2327 extern unsigned long __copy_tofrom_user(void __user *to,
2328 const void __user *from, unsigned long size);
2329
2330-#ifndef __powerpc64__
2331-
2332-static inline unsigned long copy_from_user(void *to,
2333- const void __user *from, unsigned long n)
2334-{
2335- unsigned long over;
2336-
2337- if (access_ok(VERIFY_READ, from, n))
2338- return __copy_tofrom_user((__force void __user *)to, from, n);
2339- if ((unsigned long)from < TASK_SIZE) {
2340- over = (unsigned long)from + n - TASK_SIZE;
2341- return __copy_tofrom_user((__force void __user *)to, from,
2342- n - over) + over;
2343- }
2344- return n;
2345-}
2346-
2347-static inline unsigned long copy_to_user(void __user *to,
2348- const void *from, unsigned long n)
2349-{
2350- unsigned long over;
2351-
2352- if (access_ok(VERIFY_WRITE, to, n))
2353- return __copy_tofrom_user(to, (__force void __user *)from, n);
2354- if ((unsigned long)to < TASK_SIZE) {
2355- over = (unsigned long)to + n - TASK_SIZE;
2356- return __copy_tofrom_user(to, (__force void __user *)from,
2357- n - over) + over;
2358- }
2359- return n;
2360-}
2361-
2362-#else /* __powerpc64__ */
2363-
2364-#define __copy_in_user(to, from, size) \
2365- __copy_tofrom_user((to), (from), (size))
2366-
2367-extern unsigned long copy_from_user(void *to, const void __user *from,
2368- unsigned long n);
2369-extern unsigned long copy_to_user(void __user *to, const void *from,
2370- unsigned long n);
2371-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2372- unsigned long n);
2373-
2374-#endif /* __powerpc64__ */
2375-
2376 static inline unsigned long __copy_from_user_inatomic(void *to,
2377 const void __user *from, unsigned long n)
2378 {
2379@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2380 if (ret == 0)
2381 return 0;
2382 }
2383+
2384+ if (!__builtin_constant_p(n))
2385+ check_object_size(to, n, false);
2386+
2387 return __copy_tofrom_user((__force void __user *)to, from, n);
2388 }
2389
2390@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2391 if (ret == 0)
2392 return 0;
2393 }
2394+
2395+ if (!__builtin_constant_p(n))
2396+ check_object_size(from, n, true);
2397+
2398 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2399 }
2400
2401@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2402 return __copy_to_user_inatomic(to, from, size);
2403 }
2404
2405+#ifndef __powerpc64__
2406+
2407+static inline unsigned long __must_check copy_from_user(void *to,
2408+ const void __user *from, unsigned long n)
2409+{
2410+ unsigned long over;
2411+
2412+ if ((long)n < 0)
2413+ return n;
2414+
2415+ if (access_ok(VERIFY_READ, from, n)) {
2416+ if (!__builtin_constant_p(n))
2417+ check_object_size(to, n, false);
2418+ return __copy_tofrom_user((__force void __user *)to, from, n);
2419+ }
2420+ if ((unsigned long)from < TASK_SIZE) {
2421+ over = (unsigned long)from + n - TASK_SIZE;
2422+ if (!__builtin_constant_p(n - over))
2423+ check_object_size(to, n - over, false);
2424+ return __copy_tofrom_user((__force void __user *)to, from,
2425+ n - over) + over;
2426+ }
2427+ return n;
2428+}
2429+
2430+static inline unsigned long __must_check copy_to_user(void __user *to,
2431+ const void *from, unsigned long n)
2432+{
2433+ unsigned long over;
2434+
2435+ if ((long)n < 0)
2436+ return n;
2437+
2438+ if (access_ok(VERIFY_WRITE, to, n)) {
2439+ if (!__builtin_constant_p(n))
2440+ check_object_size(from, n, true);
2441+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2442+ }
2443+ if ((unsigned long)to < TASK_SIZE) {
2444+ over = (unsigned long)to + n - TASK_SIZE;
2445+ if (!__builtin_constant_p(n))
2446+ check_object_size(from, n - over, true);
2447+ return __copy_tofrom_user(to, (__force void __user *)from,
2448+ n - over) + over;
2449+ }
2450+ return n;
2451+}
2452+
2453+#else /* __powerpc64__ */
2454+
2455+#define __copy_in_user(to, from, size) \
2456+ __copy_tofrom_user((to), (from), (size))
2457+
2458+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2459+{
2460+ if ((long)n < 0 || n > INT_MAX)
2461+ return n;
2462+
2463+ if (!__builtin_constant_p(n))
2464+ check_object_size(to, n, false);
2465+
2466+ if (likely(access_ok(VERIFY_READ, from, n)))
2467+ n = __copy_from_user(to, from, n);
2468+ else
2469+ memset(to, 0, n);
2470+ return n;
2471+}
2472+
2473+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2474+{
2475+ if ((long)n < 0 || n > INT_MAX)
2476+ return n;
2477+
2478+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2479+ if (!__builtin_constant_p(n))
2480+ check_object_size(from, n, true);
2481+ n = __copy_to_user(to, from, n);
2482+ }
2483+ return n;
2484+}
2485+
2486+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2487+ unsigned long n);
2488+
2489+#endif /* __powerpc64__ */
2490+
2491 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2492
2493 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2494diff -urNp linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c
2495--- linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2496+++ linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2497@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2498 &cache_assoc_attr,
2499 };
2500
2501-static struct sysfs_ops cache_index_ops = {
2502+static const struct sysfs_ops cache_index_ops = {
2503 .show = cache_index_show,
2504 };
2505
2506diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma.c linux-2.6.32.45/arch/powerpc/kernel/dma.c
2507--- linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2508+++ linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2509@@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2510 }
2511 #endif
2512
2513-struct dma_map_ops dma_direct_ops = {
2514+const struct dma_map_ops dma_direct_ops = {
2515 .alloc_coherent = dma_direct_alloc_coherent,
2516 .free_coherent = dma_direct_free_coherent,
2517 .map_sg = dma_direct_map_sg,
2518diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c
2519--- linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2520+++ linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2521@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2522 }
2523
2524 /* We support DMA to/from any memory page via the iommu */
2525-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2526+int dma_iommu_dma_supported(struct device *dev, u64 mask)
2527 {
2528 struct iommu_table *tbl = get_iommu_table_base(dev);
2529
2530diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c
2531--- linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2532+++ linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2533@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2534 * map_page, and unmap_page on highmem, use normal dma_ops
2535 * for everything else.
2536 */
2537-struct dma_map_ops swiotlb_dma_ops = {
2538+const struct dma_map_ops swiotlb_dma_ops = {
2539 .alloc_coherent = dma_direct_alloc_coherent,
2540 .free_coherent = dma_direct_free_coherent,
2541 .map_sg = swiotlb_map_sg_attrs,
2542diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S
2543--- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2544+++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2545@@ -455,6 +455,7 @@ storage_fault_common:
2546 std r14,_DAR(r1)
2547 std r15,_DSISR(r1)
2548 addi r3,r1,STACK_FRAME_OVERHEAD
2549+ bl .save_nvgprs
2550 mr r4,r14
2551 mr r5,r15
2552 ld r14,PACA_EXGEN+EX_R14(r13)
2553@@ -464,8 +465,7 @@ storage_fault_common:
2554 cmpdi r3,0
2555 bne- 1f
2556 b .ret_from_except_lite
2557-1: bl .save_nvgprs
2558- mr r5,r3
2559+1: mr r5,r3
2560 addi r3,r1,STACK_FRAME_OVERHEAD
2561 ld r4,_DAR(r1)
2562 bl .bad_page_fault
2563diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S
2564--- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2565+++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2566@@ -818,10 +818,10 @@ handle_page_fault:
2567 11: ld r4,_DAR(r1)
2568 ld r5,_DSISR(r1)
2569 addi r3,r1,STACK_FRAME_OVERHEAD
2570+ bl .save_nvgprs
2571 bl .do_page_fault
2572 cmpdi r3,0
2573 beq+ 13f
2574- bl .save_nvgprs
2575 mr r5,r3
2576 addi r3,r1,STACK_FRAME_OVERHEAD
2577 lwz r4,_DAR(r1)
2578diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c
2579--- linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2580+++ linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2581@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2582 return 1;
2583 }
2584
2585-static struct dma_map_ops ibmebus_dma_ops = {
2586+static const struct dma_map_ops ibmebus_dma_ops = {
2587 .alloc_coherent = ibmebus_alloc_coherent,
2588 .free_coherent = ibmebus_free_coherent,
2589 .map_sg = ibmebus_map_sg,
2590diff -urNp linux-2.6.32.45/arch/powerpc/kernel/kgdb.c linux-2.6.32.45/arch/powerpc/kernel/kgdb.c
2591--- linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2592+++ linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2593@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2594 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2595 return 0;
2596
2597- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2598+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2599 regs->nip += 4;
2600
2601 return 1;
2602@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2603 /*
2604 * Global data
2605 */
2606-struct kgdb_arch arch_kgdb_ops = {
2607+const struct kgdb_arch arch_kgdb_ops = {
2608 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2609 };
2610
2611diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module_32.c linux-2.6.32.45/arch/powerpc/kernel/module_32.c
2612--- linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2613+++ linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2614@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2615 me->arch.core_plt_section = i;
2616 }
2617 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2618- printk("Module doesn't contain .plt or .init.plt sections.\n");
2619+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2620 return -ENOEXEC;
2621 }
2622
2623@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2624
2625 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2626 /* Init, or core PLT? */
2627- if (location >= mod->module_core
2628- && location < mod->module_core + mod->core_size)
2629+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2630+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2631 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2632- else
2633+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2634+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2635 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2636+ else {
2637+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2638+ return ~0UL;
2639+ }
2640
2641 /* Find this entry, or if that fails, the next avail. entry */
2642 while (entry->jump[0]) {
2643diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module.c linux-2.6.32.45/arch/powerpc/kernel/module.c
2644--- linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2645+++ linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2646@@ -31,11 +31,24 @@
2647
2648 LIST_HEAD(module_bug_list);
2649
2650+#ifdef CONFIG_PAX_KERNEXEC
2651 void *module_alloc(unsigned long size)
2652 {
2653 if (size == 0)
2654 return NULL;
2655
2656+ return vmalloc(size);
2657+}
2658+
2659+void *module_alloc_exec(unsigned long size)
2660+#else
2661+void *module_alloc(unsigned long size)
2662+#endif
2663+
2664+{
2665+ if (size == 0)
2666+ return NULL;
2667+
2668 return vmalloc_exec(size);
2669 }
2670
2671@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2672 vfree(module_region);
2673 }
2674
2675+#ifdef CONFIG_PAX_KERNEXEC
2676+void module_free_exec(struct module *mod, void *module_region)
2677+{
2678+ module_free(mod, module_region);
2679+}
2680+#endif
2681+
2682 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2683 const Elf_Shdr *sechdrs,
2684 const char *name)
2685diff -urNp linux-2.6.32.45/arch/powerpc/kernel/pci-common.c linux-2.6.32.45/arch/powerpc/kernel/pci-common.c
2686--- linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2687+++ linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2688@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2689 unsigned int ppc_pci_flags = 0;
2690
2691
2692-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2693+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2694
2695-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2696+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2697 {
2698 pci_dma_ops = dma_ops;
2699 }
2700
2701-struct dma_map_ops *get_pci_dma_ops(void)
2702+const struct dma_map_ops *get_pci_dma_ops(void)
2703 {
2704 return pci_dma_ops;
2705 }
2706diff -urNp linux-2.6.32.45/arch/powerpc/kernel/process.c linux-2.6.32.45/arch/powerpc/kernel/process.c
2707--- linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2708+++ linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2709@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2710 * Lookup NIP late so we have the best change of getting the
2711 * above info out without failing
2712 */
2713- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2714- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2715+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2716+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2717 #endif
2718 show_stack(current, (unsigned long *) regs->gpr[1]);
2719 if (!user_mode(regs))
2720@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2721 newsp = stack[0];
2722 ip = stack[STACK_FRAME_LR_SAVE];
2723 if (!firstframe || ip != lr) {
2724- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2725+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2726 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2727 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2728- printk(" (%pS)",
2729+ printk(" (%pA)",
2730 (void *)current->ret_stack[curr_frame].ret);
2731 curr_frame--;
2732 }
2733@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2734 struct pt_regs *regs = (struct pt_regs *)
2735 (sp + STACK_FRAME_OVERHEAD);
2736 lr = regs->link;
2737- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2738+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2739 regs->trap, (void *)regs->nip, (void *)lr);
2740 firstframe = 1;
2741 }
2742@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2743 }
2744
2745 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2746-
2747-unsigned long arch_align_stack(unsigned long sp)
2748-{
2749- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2750- sp -= get_random_int() & ~PAGE_MASK;
2751- return sp & ~0xf;
2752-}
2753-
2754-static inline unsigned long brk_rnd(void)
2755-{
2756- unsigned long rnd = 0;
2757-
2758- /* 8MB for 32bit, 1GB for 64bit */
2759- if (is_32bit_task())
2760- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2761- else
2762- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2763-
2764- return rnd << PAGE_SHIFT;
2765-}
2766-
2767-unsigned long arch_randomize_brk(struct mm_struct *mm)
2768-{
2769- unsigned long base = mm->brk;
2770- unsigned long ret;
2771-
2772-#ifdef CONFIG_PPC_STD_MMU_64
2773- /*
2774- * If we are using 1TB segments and we are allowed to randomise
2775- * the heap, we can put it above 1TB so it is backed by a 1TB
2776- * segment. Otherwise the heap will be in the bottom 1TB
2777- * which always uses 256MB segments and this may result in a
2778- * performance penalty.
2779- */
2780- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2781- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2782-#endif
2783-
2784- ret = PAGE_ALIGN(base + brk_rnd());
2785-
2786- if (ret < mm->brk)
2787- return mm->brk;
2788-
2789- return ret;
2790-}
2791-
2792-unsigned long randomize_et_dyn(unsigned long base)
2793-{
2794- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2795-
2796- if (ret < base)
2797- return base;
2798-
2799- return ret;
2800-}
2801diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ptrace.c linux-2.6.32.45/arch/powerpc/kernel/ptrace.c
2802--- linux-2.6.32.45/arch/powerpc/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
2803+++ linux-2.6.32.45/arch/powerpc/kernel/ptrace.c 2011-08-21 15:53:39.000000000 -0400
2804@@ -86,7 +86,7 @@ static int set_user_trap(struct task_str
2805 /*
2806 * Get contents of register REGNO in task TASK.
2807 */
2808-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
2809+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
2810 {
2811 if (task->thread.regs == NULL)
2812 return -EIO;
2813@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *chi
2814
2815 CHECK_FULL_REGS(child->thread.regs);
2816 if (index < PT_FPR0) {
2817- tmp = ptrace_get_reg(child, (int) index);
2818+ tmp = ptrace_get_reg(child, index);
2819 } else {
2820 flush_fp_to_thread(child);
2821 tmp = ((unsigned long *)child->thread.fpr)
2822diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_32.c linux-2.6.32.45/arch/powerpc/kernel/signal_32.c
2823--- linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2824+++ linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2825@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2826 /* Save user registers on the stack */
2827 frame = &rt_sf->uc.uc_mcontext;
2828 addr = frame;
2829- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2830+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2831 if (save_user_regs(regs, frame, 0, 1))
2832 goto badframe;
2833 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2834diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_64.c linux-2.6.32.45/arch/powerpc/kernel/signal_64.c
2835--- linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2836+++ linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2837@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2838 current->thread.fpscr.val = 0;
2839
2840 /* Set up to return from userspace. */
2841- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2842+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2843 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2844 } else {
2845 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2846diff -urNp linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c
2847--- linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2848+++ linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2849@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2850 if (oldlenp) {
2851 if (!error) {
2852 if (get_user(oldlen, oldlenp) ||
2853- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2854+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2855+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2856 error = -EFAULT;
2857 }
2858- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2859 }
2860 return error;
2861 }
2862diff -urNp linux-2.6.32.45/arch/powerpc/kernel/traps.c linux-2.6.32.45/arch/powerpc/kernel/traps.c
2863--- linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2864+++ linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2865@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2866 static inline void pmac_backlight_unblank(void) { }
2867 #endif
2868
2869+extern void gr_handle_kernel_exploit(void);
2870+
2871 int die(const char *str, struct pt_regs *regs, long err)
2872 {
2873 static struct {
2874@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2875 if (panic_on_oops)
2876 panic("Fatal exception");
2877
2878+ gr_handle_kernel_exploit();
2879+
2880 oops_exit();
2881 do_exit(err);
2882
2883diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vdso.c linux-2.6.32.45/arch/powerpc/kernel/vdso.c
2884--- linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2885+++ linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2886@@ -36,6 +36,7 @@
2887 #include <asm/firmware.h>
2888 #include <asm/vdso.h>
2889 #include <asm/vdso_datapage.h>
2890+#include <asm/mman.h>
2891
2892 #include "setup.h"
2893
2894@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2895 vdso_base = VDSO32_MBASE;
2896 #endif
2897
2898- current->mm->context.vdso_base = 0;
2899+ current->mm->context.vdso_base = ~0UL;
2900
2901 /* vDSO has a problem and was disabled, just don't "enable" it for the
2902 * process
2903@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2904 vdso_base = get_unmapped_area(NULL, vdso_base,
2905 (vdso_pages << PAGE_SHIFT) +
2906 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2907- 0, 0);
2908+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2909 if (IS_ERR_VALUE(vdso_base)) {
2910 rc = vdso_base;
2911 goto fail_mmapsem;
2912diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vio.c linux-2.6.32.45/arch/powerpc/kernel/vio.c
2913--- linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2914+++ linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2915@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2916 vio_cmo_dealloc(viodev, alloc_size);
2917 }
2918
2919-struct dma_map_ops vio_dma_mapping_ops = {
2920+static const struct dma_map_ops vio_dma_mapping_ops = {
2921 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2922 .free_coherent = vio_dma_iommu_free_coherent,
2923 .map_sg = vio_dma_iommu_map_sg,
2924 .unmap_sg = vio_dma_iommu_unmap_sg,
2925+ .dma_supported = dma_iommu_dma_supported,
2926 .map_page = vio_dma_iommu_map_page,
2927 .unmap_page = vio_dma_iommu_unmap_page,
2928
2929@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2930
2931 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2932 {
2933- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2934 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2935 }
2936
2937diff -urNp linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c
2938--- linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2939+++ linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2940@@ -9,22 +9,6 @@
2941 #include <linux/module.h>
2942 #include <asm/uaccess.h>
2943
2944-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2945-{
2946- if (likely(access_ok(VERIFY_READ, from, n)))
2947- n = __copy_from_user(to, from, n);
2948- else
2949- memset(to, 0, n);
2950- return n;
2951-}
2952-
2953-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2954-{
2955- if (likely(access_ok(VERIFY_WRITE, to, n)))
2956- n = __copy_to_user(to, from, n);
2957- return n;
2958-}
2959-
2960 unsigned long copy_in_user(void __user *to, const void __user *from,
2961 unsigned long n)
2962 {
2963@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2964 return n;
2965 }
2966
2967-EXPORT_SYMBOL(copy_from_user);
2968-EXPORT_SYMBOL(copy_to_user);
2969 EXPORT_SYMBOL(copy_in_user);
2970
2971diff -urNp linux-2.6.32.45/arch/powerpc/Makefile linux-2.6.32.45/arch/powerpc/Makefile
2972--- linux-2.6.32.45/arch/powerpc/Makefile 2011-03-27 14:31:47.000000000 -0400
2973+++ linux-2.6.32.45/arch/powerpc/Makefile 2011-08-21 19:27:08.000000000 -0400
2974@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2975 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2976 CPP = $(CC) -E $(KBUILD_CFLAGS)
2977
2978+cflags-y += -Wno-sign-compare -Wno-extra
2979+
2980 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2981
2982 ifeq ($(CONFIG_PPC64),y)
2983diff -urNp linux-2.6.32.45/arch/powerpc/mm/fault.c linux-2.6.32.45/arch/powerpc/mm/fault.c
2984--- linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2985+++ linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2986@@ -30,6 +30,10 @@
2987 #include <linux/kprobes.h>
2988 #include <linux/kdebug.h>
2989 #include <linux/perf_event.h>
2990+#include <linux/slab.h>
2991+#include <linux/pagemap.h>
2992+#include <linux/compiler.h>
2993+#include <linux/unistd.h>
2994
2995 #include <asm/firmware.h>
2996 #include <asm/page.h>
2997@@ -40,6 +44,7 @@
2998 #include <asm/uaccess.h>
2999 #include <asm/tlbflush.h>
3000 #include <asm/siginfo.h>
3001+#include <asm/ptrace.h>
3002
3003
3004 #ifdef CONFIG_KPROBES
3005@@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
3006 }
3007 #endif
3008
3009+#ifdef CONFIG_PAX_PAGEEXEC
3010+/*
3011+ * PaX: decide what to do with offenders (regs->nip = fault address)
3012+ *
3013+ * returns 1 when task should be killed
3014+ */
3015+static int pax_handle_fetch_fault(struct pt_regs *regs)
3016+{
3017+ return 1;
3018+}
3019+
3020+void pax_report_insns(void *pc, void *sp)
3021+{
3022+ unsigned long i;
3023+
3024+ printk(KERN_ERR "PAX: bytes at PC: ");
3025+ for (i = 0; i < 5; i++) {
3026+ unsigned int c;
3027+ if (get_user(c, (unsigned int __user *)pc+i))
3028+ printk(KERN_CONT "???????? ");
3029+ else
3030+ printk(KERN_CONT "%08x ", c);
3031+ }
3032+ printk("\n");
3033+}
3034+#endif
3035+
3036 /*
3037 * Check whether the instruction at regs->nip is a store using
3038 * an update addressing form which will update r1.
3039@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
3040 * indicate errors in DSISR but can validly be set in SRR1.
3041 */
3042 if (trap == 0x400)
3043- error_code &= 0x48200000;
3044+ error_code &= 0x58200000;
3045 else
3046 is_write = error_code & DSISR_ISSTORE;
3047 #else
3048@@ -250,7 +282,7 @@ good_area:
3049 * "undefined". Of those that can be set, this is the only
3050 * one which seems bad.
3051 */
3052- if (error_code & 0x10000000)
3053+ if (error_code & DSISR_GUARDED)
3054 /* Guarded storage error. */
3055 goto bad_area;
3056 #endif /* CONFIG_8xx */
3057@@ -265,7 +297,7 @@ good_area:
3058 * processors use the same I/D cache coherency mechanism
3059 * as embedded.
3060 */
3061- if (error_code & DSISR_PROTFAULT)
3062+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3063 goto bad_area;
3064 #endif /* CONFIG_PPC_STD_MMU */
3065
3066@@ -335,6 +367,23 @@ bad_area:
3067 bad_area_nosemaphore:
3068 /* User mode accesses cause a SIGSEGV */
3069 if (user_mode(regs)) {
3070+
3071+#ifdef CONFIG_PAX_PAGEEXEC
3072+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3073+#ifdef CONFIG_PPC_STD_MMU
3074+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3075+#else
3076+ if (is_exec && regs->nip == address) {
3077+#endif
3078+ switch (pax_handle_fetch_fault(regs)) {
3079+ }
3080+
3081+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3082+ do_group_exit(SIGKILL);
3083+ }
3084+ }
3085+#endif
3086+
3087 _exception(SIGSEGV, regs, code, address);
3088 return 0;
3089 }
3090diff -urNp linux-2.6.32.45/arch/powerpc/mm/mem.c linux-2.6.32.45/arch/powerpc/mm/mem.c
3091--- linux-2.6.32.45/arch/powerpc/mm/mem.c 2011-03-27 14:31:47.000000000 -0400
3092+++ linux-2.6.32.45/arch/powerpc/mm/mem.c 2011-08-21 15:50:39.000000000 -0400
3093@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(voi
3094 {
3095 unsigned long lmb_next_region_start_pfn,
3096 lmb_region_max_pfn;
3097- int i;
3098+ unsigned int i;
3099
3100 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3101 lmb_region_max_pfn =
3102diff -urNp linux-2.6.32.45/arch/powerpc/mm/mmap_64.c linux-2.6.32.45/arch/powerpc/mm/mmap_64.c
3103--- linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
3104+++ linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
3105@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
3106 */
3107 if (mmap_is_legacy()) {
3108 mm->mmap_base = TASK_UNMAPPED_BASE;
3109+
3110+#ifdef CONFIG_PAX_RANDMMAP
3111+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3112+ mm->mmap_base += mm->delta_mmap;
3113+#endif
3114+
3115 mm->get_unmapped_area = arch_get_unmapped_area;
3116 mm->unmap_area = arch_unmap_area;
3117 } else {
3118 mm->mmap_base = mmap_base();
3119+
3120+#ifdef CONFIG_PAX_RANDMMAP
3121+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3122+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3123+#endif
3124+
3125 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3126 mm->unmap_area = arch_unmap_area_topdown;
3127 }
3128diff -urNp linux-2.6.32.45/arch/powerpc/mm/slice.c linux-2.6.32.45/arch/powerpc/mm/slice.c
3129--- linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3130+++ linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3131@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3132 if ((mm->task_size - len) < addr)
3133 return 0;
3134 vma = find_vma(mm, addr);
3135- return (!vma || (addr + len) <= vma->vm_start);
3136+ return check_heap_stack_gap(vma, addr, len);
3137 }
3138
3139 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3140@@ -256,7 +256,7 @@ full_search:
3141 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3142 continue;
3143 }
3144- if (!vma || addr + len <= vma->vm_start) {
3145+ if (check_heap_stack_gap(vma, addr, len)) {
3146 /*
3147 * Remember the place where we stopped the search:
3148 */
3149@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3150 }
3151 }
3152
3153- addr = mm->mmap_base;
3154- while (addr > len) {
3155+ if (mm->mmap_base < len)
3156+ addr = -ENOMEM;
3157+ else
3158+ addr = mm->mmap_base - len;
3159+
3160+ while (!IS_ERR_VALUE(addr)) {
3161 /* Go down by chunk size */
3162- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3163+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3164
3165 /* Check for hit with different page size */
3166 mask = slice_range_to_mask(addr, len);
3167@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3168 * return with success:
3169 */
3170 vma = find_vma(mm, addr);
3171- if (!vma || (addr + len) <= vma->vm_start) {
3172+ if (check_heap_stack_gap(vma, addr, len)) {
3173 /* remember the address as a hint for next time */
3174 if (use_cache)
3175 mm->free_area_cache = addr;
3176@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3177 mm->cached_hole_size = vma->vm_start - addr;
3178
3179 /* try just below the current vma->vm_start */
3180- addr = vma->vm_start;
3181+ addr = skip_heap_stack_gap(vma, len);
3182 }
3183
3184 /*
3185@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3186 if (fixed && addr > (mm->task_size - len))
3187 return -EINVAL;
3188
3189+#ifdef CONFIG_PAX_RANDMMAP
3190+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3191+ addr = 0;
3192+#endif
3193+
3194 /* If hint, make sure it matches our alignment restrictions */
3195 if (!fixed && addr) {
3196 addr = _ALIGN_UP(addr, 1ul << pshift);
3197diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c
3198--- linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3199+++ linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3200@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3201 lite5200_pm_target_state = PM_SUSPEND_ON;
3202 }
3203
3204-static struct platform_suspend_ops lite5200_pm_ops = {
3205+static const struct platform_suspend_ops lite5200_pm_ops = {
3206 .valid = lite5200_pm_valid,
3207 .begin = lite5200_pm_begin,
3208 .prepare = lite5200_pm_prepare,
3209diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3210--- linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3211+++ linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3212@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3213 iounmap(mbar);
3214 }
3215
3216-static struct platform_suspend_ops mpc52xx_pm_ops = {
3217+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3218 .valid = mpc52xx_pm_valid,
3219 .prepare = mpc52xx_pm_prepare,
3220 .enter = mpc52xx_pm_enter,
3221diff -urNp linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c
3222--- linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3223+++ linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3224@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3225 return ret;
3226 }
3227
3228-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3229+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3230 .valid = mpc83xx_suspend_valid,
3231 .begin = mpc83xx_suspend_begin,
3232 .enter = mpc83xx_suspend_enter,
3233diff -urNp linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c
3234--- linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3235+++ linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3236@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3237
3238 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3239
3240-struct dma_map_ops dma_iommu_fixed_ops = {
3241+const struct dma_map_ops dma_iommu_fixed_ops = {
3242 .alloc_coherent = dma_fixed_alloc_coherent,
3243 .free_coherent = dma_fixed_free_coherent,
3244 .map_sg = dma_fixed_map_sg,
3245diff -urNp linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c
3246--- linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3247+++ linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3248@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3249 return mask >= DMA_BIT_MASK(32);
3250 }
3251
3252-static struct dma_map_ops ps3_sb_dma_ops = {
3253+static const struct dma_map_ops ps3_sb_dma_ops = {
3254 .alloc_coherent = ps3_alloc_coherent,
3255 .free_coherent = ps3_free_coherent,
3256 .map_sg = ps3_sb_map_sg,
3257@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3258 .unmap_page = ps3_unmap_page,
3259 };
3260
3261-static struct dma_map_ops ps3_ioc0_dma_ops = {
3262+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3263 .alloc_coherent = ps3_alloc_coherent,
3264 .free_coherent = ps3_free_coherent,
3265 .map_sg = ps3_ioc0_map_sg,
3266diff -urNp linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig
3267--- linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3268+++ linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3269@@ -2,6 +2,8 @@ config PPC_PSERIES
3270 depends on PPC64 && PPC_BOOK3S
3271 bool "IBM pSeries & new (POWER5-based) iSeries"
3272 select MPIC
3273+ select PCI_MSI
3274+ select XICS
3275 select PPC_I8259
3276 select PPC_RTAS
3277 select RTAS_ERROR_LOGGING
3278diff -urNp linux-2.6.32.45/arch/s390/include/asm/elf.h linux-2.6.32.45/arch/s390/include/asm/elf.h
3279--- linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3280+++ linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3281@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3282 that it will "exec", and that there is sufficient room for the brk. */
3283 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3284
3285+#ifdef CONFIG_PAX_ASLR
3286+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3287+
3288+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3289+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3290+#endif
3291+
3292 /* This yields a mask that user programs can use to figure out what
3293 instruction set this CPU supports. */
3294
3295diff -urNp linux-2.6.32.45/arch/s390/include/asm/setup.h linux-2.6.32.45/arch/s390/include/asm/setup.h
3296--- linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3297+++ linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3298@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3299 void detect_memory_layout(struct mem_chunk chunk[]);
3300
3301 #ifdef CONFIG_S390_SWITCH_AMODE
3302-extern unsigned int switch_amode;
3303+#define switch_amode (1)
3304 #else
3305 #define switch_amode (0)
3306 #endif
3307
3308 #ifdef CONFIG_S390_EXEC_PROTECT
3309-extern unsigned int s390_noexec;
3310+#define s390_noexec (1)
3311 #else
3312 #define s390_noexec (0)
3313 #endif
3314diff -urNp linux-2.6.32.45/arch/s390/include/asm/uaccess.h linux-2.6.32.45/arch/s390/include/asm/uaccess.h
3315--- linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3316+++ linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3317@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3318 copy_to_user(void __user *to, const void *from, unsigned long n)
3319 {
3320 might_fault();
3321+
3322+ if ((long)n < 0)
3323+ return n;
3324+
3325 if (access_ok(VERIFY_WRITE, to, n))
3326 n = __copy_to_user(to, from, n);
3327 return n;
3328@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3329 static inline unsigned long __must_check
3330 __copy_from_user(void *to, const void __user *from, unsigned long n)
3331 {
3332+ if ((long)n < 0)
3333+ return n;
3334+
3335 if (__builtin_constant_p(n) && (n <= 256))
3336 return uaccess.copy_from_user_small(n, from, to);
3337 else
3338@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3339 copy_from_user(void *to, const void __user *from, unsigned long n)
3340 {
3341 might_fault();
3342+
3343+ if ((long)n < 0)
3344+ return n;
3345+
3346 if (access_ok(VERIFY_READ, from, n))
3347 n = __copy_from_user(to, from, n);
3348 else
3349diff -urNp linux-2.6.32.45/arch/s390/Kconfig linux-2.6.32.45/arch/s390/Kconfig
3350--- linux-2.6.32.45/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3351+++ linux-2.6.32.45/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3352@@ -194,28 +194,26 @@ config AUDIT_ARCH
3353
3354 config S390_SWITCH_AMODE
3355 bool "Switch kernel/user addressing modes"
3356+ default y
3357 help
3358 This option allows to switch the addressing modes of kernel and user
3359- space. The kernel parameter switch_amode=on will enable this feature,
3360- default is disabled. Enabling this (via kernel parameter) on machines
3361- earlier than IBM System z9-109 EC/BC will reduce system performance.
3362+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3363+ will reduce system performance.
3364
3365 Note that this option will also be selected by selecting the execute
3366- protection option below. Enabling the execute protection via the
3367- noexec kernel parameter will also switch the addressing modes,
3368- independent of the switch_amode kernel parameter.
3369+ protection option below. Enabling the execute protection will also
3370+ switch the addressing modes, independent of this option.
3371
3372
3373 config S390_EXEC_PROTECT
3374 bool "Data execute protection"
3375+ default y
3376 select S390_SWITCH_AMODE
3377 help
3378 This option allows to enable a buffer overflow protection for user
3379 space programs and it also selects the addressing mode option above.
3380- The kernel parameter noexec=on will enable this feature and also
3381- switch the addressing modes, default is disabled. Enabling this (via
3382- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3383- will reduce system performance.
3384+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3385+ reduce system performance.
3386
3387 comment "Code generation options"
3388
3389diff -urNp linux-2.6.32.45/arch/s390/kernel/module.c linux-2.6.32.45/arch/s390/kernel/module.c
3390--- linux-2.6.32.45/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3391+++ linux-2.6.32.45/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3392@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3393
3394 /* Increase core size by size of got & plt and set start
3395 offsets for got and plt. */
3396- me->core_size = ALIGN(me->core_size, 4);
3397- me->arch.got_offset = me->core_size;
3398- me->core_size += me->arch.got_size;
3399- me->arch.plt_offset = me->core_size;
3400- me->core_size += me->arch.plt_size;
3401+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3402+ me->arch.got_offset = me->core_size_rw;
3403+ me->core_size_rw += me->arch.got_size;
3404+ me->arch.plt_offset = me->core_size_rx;
3405+ me->core_size_rx += me->arch.plt_size;
3406 return 0;
3407 }
3408
3409@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3410 if (info->got_initialized == 0) {
3411 Elf_Addr *gotent;
3412
3413- gotent = me->module_core + me->arch.got_offset +
3414+ gotent = me->module_core_rw + me->arch.got_offset +
3415 info->got_offset;
3416 *gotent = val;
3417 info->got_initialized = 1;
3418@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3419 else if (r_type == R_390_GOTENT ||
3420 r_type == R_390_GOTPLTENT)
3421 *(unsigned int *) loc =
3422- (val + (Elf_Addr) me->module_core - loc) >> 1;
3423+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3424 else if (r_type == R_390_GOT64 ||
3425 r_type == R_390_GOTPLT64)
3426 *(unsigned long *) loc = val;
3427@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3428 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3429 if (info->plt_initialized == 0) {
3430 unsigned int *ip;
3431- ip = me->module_core + me->arch.plt_offset +
3432+ ip = me->module_core_rx + me->arch.plt_offset +
3433 info->plt_offset;
3434 #ifndef CONFIG_64BIT
3435 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3436@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3437 val - loc + 0xffffUL < 0x1ffffeUL) ||
3438 (r_type == R_390_PLT32DBL &&
3439 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3440- val = (Elf_Addr) me->module_core +
3441+ val = (Elf_Addr) me->module_core_rx +
3442 me->arch.plt_offset +
3443 info->plt_offset;
3444 val += rela->r_addend - loc;
3445@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3446 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3447 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3448 val = val + rela->r_addend -
3449- ((Elf_Addr) me->module_core + me->arch.got_offset);
3450+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3451 if (r_type == R_390_GOTOFF16)
3452 *(unsigned short *) loc = val;
3453 else if (r_type == R_390_GOTOFF32)
3454@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3455 break;
3456 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3457 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3458- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3459+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3460 rela->r_addend - loc;
3461 if (r_type == R_390_GOTPC)
3462 *(unsigned int *) loc = val;
3463diff -urNp linux-2.6.32.45/arch/s390/kernel/setup.c linux-2.6.32.45/arch/s390/kernel/setup.c
3464--- linux-2.6.32.45/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3465+++ linux-2.6.32.45/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3466@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3467 early_param("mem", early_parse_mem);
3468
3469 #ifdef CONFIG_S390_SWITCH_AMODE
3470-unsigned int switch_amode = 0;
3471-EXPORT_SYMBOL_GPL(switch_amode);
3472-
3473 static int set_amode_and_uaccess(unsigned long user_amode,
3474 unsigned long user32_amode)
3475 {
3476@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3477 return 0;
3478 }
3479 }
3480-
3481-/*
3482- * Switch kernel/user addressing modes?
3483- */
3484-static int __init early_parse_switch_amode(char *p)
3485-{
3486- switch_amode = 1;
3487- return 0;
3488-}
3489-early_param("switch_amode", early_parse_switch_amode);
3490-
3491 #else /* CONFIG_S390_SWITCH_AMODE */
3492 static inline int set_amode_and_uaccess(unsigned long user_amode,
3493 unsigned long user32_amode)
3494@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3495 }
3496 #endif /* CONFIG_S390_SWITCH_AMODE */
3497
3498-#ifdef CONFIG_S390_EXEC_PROTECT
3499-unsigned int s390_noexec = 0;
3500-EXPORT_SYMBOL_GPL(s390_noexec);
3501-
3502-/*
3503- * Enable execute protection?
3504- */
3505-static int __init early_parse_noexec(char *p)
3506-{
3507- if (!strncmp(p, "off", 3))
3508- return 0;
3509- switch_amode = 1;
3510- s390_noexec = 1;
3511- return 0;
3512-}
3513-early_param("noexec", early_parse_noexec);
3514-#endif /* CONFIG_S390_EXEC_PROTECT */
3515-
3516 static void setup_addressing_mode(void)
3517 {
3518 if (s390_noexec) {
3519diff -urNp linux-2.6.32.45/arch/s390/mm/mmap.c linux-2.6.32.45/arch/s390/mm/mmap.c
3520--- linux-2.6.32.45/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3521+++ linux-2.6.32.45/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3522@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3523 */
3524 if (mmap_is_legacy()) {
3525 mm->mmap_base = TASK_UNMAPPED_BASE;
3526+
3527+#ifdef CONFIG_PAX_RANDMMAP
3528+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3529+ mm->mmap_base += mm->delta_mmap;
3530+#endif
3531+
3532 mm->get_unmapped_area = arch_get_unmapped_area;
3533 mm->unmap_area = arch_unmap_area;
3534 } else {
3535 mm->mmap_base = mmap_base();
3536+
3537+#ifdef CONFIG_PAX_RANDMMAP
3538+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3539+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3540+#endif
3541+
3542 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3543 mm->unmap_area = arch_unmap_area_topdown;
3544 }
3545@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3546 */
3547 if (mmap_is_legacy()) {
3548 mm->mmap_base = TASK_UNMAPPED_BASE;
3549+
3550+#ifdef CONFIG_PAX_RANDMMAP
3551+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3552+ mm->mmap_base += mm->delta_mmap;
3553+#endif
3554+
3555 mm->get_unmapped_area = s390_get_unmapped_area;
3556 mm->unmap_area = arch_unmap_area;
3557 } else {
3558 mm->mmap_base = mmap_base();
3559+
3560+#ifdef CONFIG_PAX_RANDMMAP
3561+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3562+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3563+#endif
3564+
3565 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3566 mm->unmap_area = arch_unmap_area_topdown;
3567 }
3568diff -urNp linux-2.6.32.45/arch/score/include/asm/system.h linux-2.6.32.45/arch/score/include/asm/system.h
3569--- linux-2.6.32.45/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3570+++ linux-2.6.32.45/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3571@@ -17,7 +17,7 @@ do { \
3572 #define finish_arch_switch(prev) do {} while (0)
3573
3574 typedef void (*vi_handler_t)(void);
3575-extern unsigned long arch_align_stack(unsigned long sp);
3576+#define arch_align_stack(x) (x)
3577
3578 #define mb() barrier()
3579 #define rmb() barrier()
3580diff -urNp linux-2.6.32.45/arch/score/kernel/process.c linux-2.6.32.45/arch/score/kernel/process.c
3581--- linux-2.6.32.45/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3582+++ linux-2.6.32.45/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3583@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3584
3585 return task_pt_regs(task)->cp0_epc;
3586 }
3587-
3588-unsigned long arch_align_stack(unsigned long sp)
3589-{
3590- return sp;
3591-}
3592diff -urNp linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c
3593--- linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3594+++ linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3595@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3596 return 0;
3597 }
3598
3599-static struct platform_suspend_ops hp6x0_pm_ops = {
3600+static const struct platform_suspend_ops hp6x0_pm_ops = {
3601 .enter = hp6x0_pm_enter,
3602 .valid = suspend_valid_only_mem,
3603 };
3604diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c
3605--- linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3606+++ linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3607@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3608 NULL,
3609 };
3610
3611-static struct sysfs_ops sq_sysfs_ops = {
3612+static const struct sysfs_ops sq_sysfs_ops = {
3613 .show = sq_sysfs_show,
3614 .store = sq_sysfs_store,
3615 };
3616diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c
3617--- linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3618+++ linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3619@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3620 return 0;
3621 }
3622
3623-static struct platform_suspend_ops sh_pm_ops = {
3624+static const struct platform_suspend_ops sh_pm_ops = {
3625 .enter = sh_pm_enter,
3626 .valid = suspend_valid_only_mem,
3627 };
3628diff -urNp linux-2.6.32.45/arch/sh/kernel/kgdb.c linux-2.6.32.45/arch/sh/kernel/kgdb.c
3629--- linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3630+++ linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3631@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3632 {
3633 }
3634
3635-struct kgdb_arch arch_kgdb_ops = {
3636+const struct kgdb_arch arch_kgdb_ops = {
3637 /* Breakpoint instruction: trapa #0x3c */
3638 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3639 .gdb_bpt_instr = { 0x3c, 0xc3 },
3640diff -urNp linux-2.6.32.45/arch/sh/mm/mmap.c linux-2.6.32.45/arch/sh/mm/mmap.c
3641--- linux-2.6.32.45/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3642+++ linux-2.6.32.45/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3643@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3644 addr = PAGE_ALIGN(addr);
3645
3646 vma = find_vma(mm, addr);
3647- if (TASK_SIZE - len >= addr &&
3648- (!vma || addr + len <= vma->vm_start))
3649+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3650 return addr;
3651 }
3652
3653@@ -106,7 +105,7 @@ full_search:
3654 }
3655 return -ENOMEM;
3656 }
3657- if (likely(!vma || addr + len <= vma->vm_start)) {
3658+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3659 /*
3660 * Remember the place where we stopped the search:
3661 */
3662@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3663 addr = PAGE_ALIGN(addr);
3664
3665 vma = find_vma(mm, addr);
3666- if (TASK_SIZE - len >= addr &&
3667- (!vma || addr + len <= vma->vm_start))
3668+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3669 return addr;
3670 }
3671
3672@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3673 /* make sure it can fit in the remaining address space */
3674 if (likely(addr > len)) {
3675 vma = find_vma(mm, addr-len);
3676- if (!vma || addr <= vma->vm_start) {
3677+ if (check_heap_stack_gap(vma, addr - len, len)) {
3678 /* remember the address as a hint for next time */
3679 return (mm->free_area_cache = addr-len);
3680 }
3681@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3682 if (unlikely(mm->mmap_base < len))
3683 goto bottomup;
3684
3685- addr = mm->mmap_base-len;
3686- if (do_colour_align)
3687- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3688+ addr = mm->mmap_base - len;
3689
3690 do {
3691+ if (do_colour_align)
3692+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3693 /*
3694 * Lookup failure means no vma is above this address,
3695 * else if new region fits below vma->vm_start,
3696 * return with success:
3697 */
3698 vma = find_vma(mm, addr);
3699- if (likely(!vma || addr+len <= vma->vm_start)) {
3700+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3701 /* remember the address as a hint for next time */
3702 return (mm->free_area_cache = addr);
3703 }
3704@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3705 mm->cached_hole_size = vma->vm_start - addr;
3706
3707 /* try just below the current vma->vm_start */
3708- addr = vma->vm_start-len;
3709- if (do_colour_align)
3710- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3711- } while (likely(len < vma->vm_start));
3712+ addr = skip_heap_stack_gap(vma, len);
3713+ } while (!IS_ERR_VALUE(addr));
3714
3715 bottomup:
3716 /*
3717diff -urNp linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h
3718--- linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3719+++ linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-08-18 23:11:34.000000000 -0400
3720@@ -14,18 +14,40 @@
3721 #define ATOMIC64_INIT(i) { (i) }
3722
3723 #define atomic_read(v) ((v)->counter)
3724+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3725+{
3726+ return v->counter;
3727+}
3728 #define atomic64_read(v) ((v)->counter)
3729+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3730+{
3731+ return v->counter;
3732+}
3733
3734 #define atomic_set(v, i) (((v)->counter) = i)
3735+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3736+{
3737+ v->counter = i;
3738+}
3739 #define atomic64_set(v, i) (((v)->counter) = i)
3740+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3741+{
3742+ v->counter = i;
3743+}
3744
3745 extern void atomic_add(int, atomic_t *);
3746+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3747 extern void atomic64_add(long, atomic64_t *);
3748+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3749 extern void atomic_sub(int, atomic_t *);
3750+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3751 extern void atomic64_sub(long, atomic64_t *);
3752+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3753
3754 extern int atomic_add_ret(int, atomic_t *);
3755+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3756 extern long atomic64_add_ret(long, atomic64_t *);
3757+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3758 extern int atomic_sub_ret(int, atomic_t *);
3759 extern long atomic64_sub_ret(long, atomic64_t *);
3760
3761@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3762 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3763
3764 #define atomic_inc_return(v) atomic_add_ret(1, v)
3765+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3766+{
3767+ return atomic_add_ret_unchecked(1, v);
3768+}
3769 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3770+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3771+{
3772+ return atomic64_add_ret_unchecked(1, v);
3773+}
3774
3775 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3776 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3777
3778 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3779+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3780+{
3781+ return atomic_add_ret_unchecked(i, v);
3782+}
3783 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3784+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3785+{
3786+ return atomic64_add_ret_unchecked(i, v);
3787+}
3788
3789 /*
3790 * atomic_inc_and_test - increment and test
3791@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
3792 * other cases.
3793 */
3794 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3795+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3796+{
3797+ return atomic_inc_return_unchecked(v) == 0;
3798+}
3799 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3800
3801 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3802@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
3803 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3804
3805 #define atomic_inc(v) atomic_add(1, v)
3806+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3807+{
3808+ atomic_add_unchecked(1, v);
3809+}
3810 #define atomic64_inc(v) atomic64_add(1, v)
3811+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3812+{
3813+ atomic64_add_unchecked(1, v);
3814+}
3815
3816 #define atomic_dec(v) atomic_sub(1, v)
3817+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3818+{
3819+ atomic_sub_unchecked(1, v);
3820+}
3821 #define atomic64_dec(v) atomic64_sub(1, v)
3822+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3823+{
3824+ atomic64_sub_unchecked(1, v);
3825+}
3826
3827 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3828 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3829
3830 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3831+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3832+{
3833+ return cmpxchg(&v->counter, old, new);
3834+}
3835 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3836+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3837+{
3838+ return xchg(&v->counter, new);
3839+}
3840
3841 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3842 {
3843- int c, old;
3844+ int c, old, new;
3845 c = atomic_read(v);
3846 for (;;) {
3847- if (unlikely(c == (u)))
3848+ if (unlikely(c == u))
3849 break;
3850- old = atomic_cmpxchg((v), c, c + (a));
3851+
3852+ asm volatile("addcc %2, %0, %0\n"
3853+
3854+#ifdef CONFIG_PAX_REFCOUNT
3855+ "tvs %%icc, 6\n"
3856+#endif
3857+
3858+ : "=r" (new)
3859+ : "0" (c), "ir" (a)
3860+ : "cc");
3861+
3862+ old = atomic_cmpxchg(v, c, new);
3863 if (likely(old == c))
3864 break;
3865 c = old;
3866 }
3867- return c != (u);
3868+ return c != u;
3869 }
3870
3871 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3872@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
3873 #define atomic64_cmpxchg(v, o, n) \
3874 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3875 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3876+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3877+{
3878+ return xchg(&v->counter, new);
3879+}
3880
3881 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3882 {
3883- long c, old;
3884+ long c, old, new;
3885 c = atomic64_read(v);
3886 for (;;) {
3887- if (unlikely(c == (u)))
3888+ if (unlikely(c == u))
3889 break;
3890- old = atomic64_cmpxchg((v), c, c + (a));
3891+
3892+ asm volatile("addcc %2, %0, %0\n"
3893+
3894+#ifdef CONFIG_PAX_REFCOUNT
3895+ "tvs %%xcc, 6\n"
3896+#endif
3897+
3898+ : "=r" (new)
3899+ : "0" (c), "ir" (a)
3900+ : "cc");
3901+
3902+ old = atomic64_cmpxchg(v, c, new);
3903 if (likely(old == c))
3904 break;
3905 c = old;
3906 }
3907- return c != (u);
3908+ return c != u;
3909 }
3910
3911 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3912diff -urNp linux-2.6.32.45/arch/sparc/include/asm/cache.h linux-2.6.32.45/arch/sparc/include/asm/cache.h
3913--- linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3914+++ linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3915@@ -8,7 +8,7 @@
3916 #define _SPARC_CACHE_H
3917
3918 #define L1_CACHE_SHIFT 5
3919-#define L1_CACHE_BYTES 32
3920+#define L1_CACHE_BYTES 32UL
3921 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3922
3923 #ifdef CONFIG_SPARC32
3924diff -urNp linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h
3925--- linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3926+++ linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3927@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3928 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3929 #define dma_is_consistent(d, h) (1)
3930
3931-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3932+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3933 extern struct bus_type pci_bus_type;
3934
3935-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3936+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3937 {
3938 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3939 if (dev->bus == &pci_bus_type)
3940@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3941 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3942 dma_addr_t *dma_handle, gfp_t flag)
3943 {
3944- struct dma_map_ops *ops = get_dma_ops(dev);
3945+ const struct dma_map_ops *ops = get_dma_ops(dev);
3946 void *cpu_addr;
3947
3948 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3949@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3950 static inline void dma_free_coherent(struct device *dev, size_t size,
3951 void *cpu_addr, dma_addr_t dma_handle)
3952 {
3953- struct dma_map_ops *ops = get_dma_ops(dev);
3954+ const struct dma_map_ops *ops = get_dma_ops(dev);
3955
3956 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3957 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3958diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_32.h linux-2.6.32.45/arch/sparc/include/asm/elf_32.h
3959--- linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3960+++ linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3961@@ -116,6 +116,13 @@ typedef struct {
3962
3963 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3964
3965+#ifdef CONFIG_PAX_ASLR
3966+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3967+
3968+#define PAX_DELTA_MMAP_LEN 16
3969+#define PAX_DELTA_STACK_LEN 16
3970+#endif
3971+
3972 /* This yields a mask that user programs can use to figure out what
3973 instruction set this cpu supports. This can NOT be done in userspace
3974 on Sparc. */
3975diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_64.h linux-2.6.32.45/arch/sparc/include/asm/elf_64.h
3976--- linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3977+++ linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3978@@ -163,6 +163,12 @@ typedef struct {
3979 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3980 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3981
3982+#ifdef CONFIG_PAX_ASLR
3983+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3984+
3985+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3986+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3987+#endif
3988
3989 /* This yields a mask that user programs can use to figure out what
3990 instruction set this cpu supports. */
3991diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h
3992--- linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3993+++ linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3994@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3995 BTFIXUPDEF_INT(page_none)
3996 BTFIXUPDEF_INT(page_copy)
3997 BTFIXUPDEF_INT(page_readonly)
3998+
3999+#ifdef CONFIG_PAX_PAGEEXEC
4000+BTFIXUPDEF_INT(page_shared_noexec)
4001+BTFIXUPDEF_INT(page_copy_noexec)
4002+BTFIXUPDEF_INT(page_readonly_noexec)
4003+#endif
4004+
4005 BTFIXUPDEF_INT(page_kernel)
4006
4007 #define PMD_SHIFT SUN4C_PMD_SHIFT
4008@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4009 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4010 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4011
4012+#ifdef CONFIG_PAX_PAGEEXEC
4013+extern pgprot_t PAGE_SHARED_NOEXEC;
4014+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4015+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4016+#else
4017+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4018+# define PAGE_COPY_NOEXEC PAGE_COPY
4019+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4020+#endif
4021+
4022 extern unsigned long page_kernel;
4023
4024 #ifdef MODULE
4025diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h
4026--- linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
4027+++ linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
4028@@ -115,6 +115,13 @@
4029 SRMMU_EXEC | SRMMU_REF)
4030 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4031 SRMMU_EXEC | SRMMU_REF)
4032+
4033+#ifdef CONFIG_PAX_PAGEEXEC
4034+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4035+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4036+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4037+#endif
4038+
4039 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4040 SRMMU_DIRTY | SRMMU_REF)
4041
4042diff -urNp linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h
4043--- linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
4044+++ linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-08-18 23:19:30.000000000 -0400
4045@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
4046
4047 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4048
4049-static void inline arch_read_lock(raw_rwlock_t *lock)
4050+static inline void arch_read_lock(raw_rwlock_t *lock)
4051 {
4052 unsigned long tmp1, tmp2;
4053
4054 __asm__ __volatile__ (
4055 "1: ldsw [%2], %0\n"
4056 " brlz,pn %0, 2f\n"
4057-"4: add %0, 1, %1\n"
4058+"4: addcc %0, 1, %1\n"
4059+
4060+#ifdef CONFIG_PAX_REFCOUNT
4061+" tvs %%icc, 6\n"
4062+#endif
4063+
4064 " cas [%2], %0, %1\n"
4065 " cmp %0, %1\n"
4066 " bne,pn %%icc, 1b\n"
4067@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rw
4068 " .previous"
4069 : "=&r" (tmp1), "=&r" (tmp2)
4070 : "r" (lock)
4071- : "memory");
4072+ : "memory", "cc");
4073 }
4074
4075-static int inline arch_read_trylock(raw_rwlock_t *lock)
4076+static inline int arch_read_trylock(raw_rwlock_t *lock)
4077 {
4078 int tmp1, tmp2;
4079
4080@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
4081 "1: ldsw [%2], %0\n"
4082 " brlz,a,pn %0, 2f\n"
4083 " mov 0, %0\n"
4084-" add %0, 1, %1\n"
4085+" addcc %0, 1, %1\n"
4086+
4087+#ifdef CONFIG_PAX_REFCOUNT
4088+" tvs %%icc, 6\n"
4089+#endif
4090+
4091 " cas [%2], %0, %1\n"
4092 " cmp %0, %1\n"
4093 " bne,pn %%icc, 1b\n"
4094@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
4095 return tmp1;
4096 }
4097
4098-static void inline arch_read_unlock(raw_rwlock_t *lock)
4099+static inline void arch_read_unlock(raw_rwlock_t *lock)
4100 {
4101 unsigned long tmp1, tmp2;
4102
4103 __asm__ __volatile__(
4104 "1: lduw [%2], %0\n"
4105-" sub %0, 1, %1\n"
4106+" subcc %0, 1, %1\n"
4107+
4108+#ifdef CONFIG_PAX_REFCOUNT
4109+" tvs %%icc, 6\n"
4110+#endif
4111+
4112 " cas [%2], %0, %1\n"
4113 " cmp %0, %1\n"
4114 " bne,pn %%xcc, 1b\n"
4115@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4116 : "memory");
4117 }
4118
4119-static void inline arch_write_lock(raw_rwlock_t *lock)
4120+static inline void arch_write_lock(raw_rwlock_t *lock)
4121 {
4122 unsigned long mask, tmp1, tmp2;
4123
4124@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4125 : "memory");
4126 }
4127
4128-static void inline arch_write_unlock(raw_rwlock_t *lock)
4129+static inline void arch_write_unlock(raw_rwlock_t *lock)
4130 {
4131 __asm__ __volatile__(
4132 " stw %%g0, [%0]"
4133@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw
4134 : "memory");
4135 }
4136
4137-static int inline arch_write_trylock(raw_rwlock_t *lock)
4138+static inline int arch_write_trylock(raw_rwlock_t *lock)
4139 {
4140 unsigned long mask, tmp1, tmp2, result;
4141
4142diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h
4143--- linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4144+++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4145@@ -50,6 +50,8 @@ struct thread_info {
4146 unsigned long w_saved;
4147
4148 struct restart_block restart_block;
4149+
4150+ unsigned long lowest_stack;
4151 };
4152
4153 /*
4154diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h
4155--- linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4156+++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4157@@ -68,6 +68,8 @@ struct thread_info {
4158 struct pt_regs *kern_una_regs;
4159 unsigned int kern_una_insn;
4160
4161+ unsigned long lowest_stack;
4162+
4163 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4164 };
4165
4166diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h
4167--- linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4168+++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4169@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4170
4171 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4172 {
4173- if (n && __access_ok((unsigned long) to, n))
4174+ if ((long)n < 0)
4175+ return n;
4176+
4177+ if (n && __access_ok((unsigned long) to, n)) {
4178+ if (!__builtin_constant_p(n))
4179+ check_object_size(from, n, true);
4180 return __copy_user(to, (__force void __user *) from, n);
4181- else
4182+ } else
4183 return n;
4184 }
4185
4186 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4187 {
4188+ if ((long)n < 0)
4189+ return n;
4190+
4191+ if (!__builtin_constant_p(n))
4192+ check_object_size(from, n, true);
4193+
4194 return __copy_user(to, (__force void __user *) from, n);
4195 }
4196
4197 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4198 {
4199- if (n && __access_ok((unsigned long) from, n))
4200+ if ((long)n < 0)
4201+ return n;
4202+
4203+ if (n && __access_ok((unsigned long) from, n)) {
4204+ if (!__builtin_constant_p(n))
4205+ check_object_size(to, n, false);
4206 return __copy_user((__force void __user *) to, from, n);
4207- else
4208+ } else
4209 return n;
4210 }
4211
4212 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4213 {
4214+ if ((long)n < 0)
4215+ return n;
4216+
4217 return __copy_user((__force void __user *) to, from, n);
4218 }
4219
4220diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h
4221--- linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4222+++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4223@@ -9,6 +9,7 @@
4224 #include <linux/compiler.h>
4225 #include <linux/string.h>
4226 #include <linux/thread_info.h>
4227+#include <linux/kernel.h>
4228 #include <asm/asi.h>
4229 #include <asm/system.h>
4230 #include <asm/spitfire.h>
4231@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4232 static inline unsigned long __must_check
4233 copy_from_user(void *to, const void __user *from, unsigned long size)
4234 {
4235- unsigned long ret = ___copy_from_user(to, from, size);
4236+ unsigned long ret;
4237
4238+ if ((long)size < 0 || size > INT_MAX)
4239+ return size;
4240+
4241+ if (!__builtin_constant_p(size))
4242+ check_object_size(to, size, false);
4243+
4244+ ret = ___copy_from_user(to, from, size);
4245 if (unlikely(ret))
4246 ret = copy_from_user_fixup(to, from, size);
4247 return ret;
4248@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4249 static inline unsigned long __must_check
4250 copy_to_user(void __user *to, const void *from, unsigned long size)
4251 {
4252- unsigned long ret = ___copy_to_user(to, from, size);
4253+ unsigned long ret;
4254+
4255+ if ((long)size < 0 || size > INT_MAX)
4256+ return size;
4257+
4258+ if (!__builtin_constant_p(size))
4259+ check_object_size(from, size, true);
4260
4261+ ret = ___copy_to_user(to, from, size);
4262 if (unlikely(ret))
4263 ret = copy_to_user_fixup(to, from, size);
4264 return ret;
4265diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess.h linux-2.6.32.45/arch/sparc/include/asm/uaccess.h
4266--- linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4267+++ linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4268@@ -1,5 +1,13 @@
4269 #ifndef ___ASM_SPARC_UACCESS_H
4270 #define ___ASM_SPARC_UACCESS_H
4271+
4272+#ifdef __KERNEL__
4273+#ifndef __ASSEMBLY__
4274+#include <linux/types.h>
4275+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4276+#endif
4277+#endif
4278+
4279 #if defined(__sparc__) && defined(__arch64__)
4280 #include <asm/uaccess_64.h>
4281 #else
4282diff -urNp linux-2.6.32.45/arch/sparc/kernel/iommu.c linux-2.6.32.45/arch/sparc/kernel/iommu.c
4283--- linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4284+++ linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4285@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4286 spin_unlock_irqrestore(&iommu->lock, flags);
4287 }
4288
4289-static struct dma_map_ops sun4u_dma_ops = {
4290+static const struct dma_map_ops sun4u_dma_ops = {
4291 .alloc_coherent = dma_4u_alloc_coherent,
4292 .free_coherent = dma_4u_free_coherent,
4293 .map_page = dma_4u_map_page,
4294@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4295 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4296 };
4297
4298-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4299+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4300 EXPORT_SYMBOL(dma_ops);
4301
4302 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4303diff -urNp linux-2.6.32.45/arch/sparc/kernel/ioport.c linux-2.6.32.45/arch/sparc/kernel/ioport.c
4304--- linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4305+++ linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4306@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4307 BUG();
4308 }
4309
4310-struct dma_map_ops sbus_dma_ops = {
4311+const struct dma_map_ops sbus_dma_ops = {
4312 .alloc_coherent = sbus_alloc_coherent,
4313 .free_coherent = sbus_free_coherent,
4314 .map_page = sbus_map_page,
4315@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4316 .sync_sg_for_device = sbus_sync_sg_for_device,
4317 };
4318
4319-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4320+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4321 EXPORT_SYMBOL(dma_ops);
4322
4323 static int __init sparc_register_ioport(void)
4324@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4325 }
4326 }
4327
4328-struct dma_map_ops pci32_dma_ops = {
4329+const struct dma_map_ops pci32_dma_ops = {
4330 .alloc_coherent = pci32_alloc_coherent,
4331 .free_coherent = pci32_free_coherent,
4332 .map_page = pci32_map_page,
4333diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c
4334--- linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4335+++ linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4336@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4337 {
4338 }
4339
4340-struct kgdb_arch arch_kgdb_ops = {
4341+const struct kgdb_arch arch_kgdb_ops = {
4342 /* Breakpoint instruction: ta 0x7d */
4343 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4344 };
4345diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c
4346--- linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4347+++ linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4348@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4349 {
4350 }
4351
4352-struct kgdb_arch arch_kgdb_ops = {
4353+const struct kgdb_arch arch_kgdb_ops = {
4354 /* Breakpoint instruction: ta 0x72 */
4355 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4356 };
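
The dma_map_ops and arch_kgdb_ops constifications above follow a recurring pattern in this patch: once an ops table is declared const, the compiler can place it in read-only memory (.rodata), so its function pointers cannot be retargeted by a stray or malicious write at runtime. A tiny standalone illustration of what the qualifier buys (names invented):

#include <stdio.h>

struct ops {
        void (*handler)(void);
};

static void real_handler(void)
{
        puts("handler called");
}

/* const => the pointer table lands in read-only memory, and reassigning
 * .handler is rejected at compile time instead of being a runtime hazard. */
static const struct ops my_ops = {
        .handler = real_handler,
};

int main(void)
{
        my_ops.handler();
        /* my_ops.handler = NULL;   <-- error: assignment of member in read-only object */
        return 0;
}
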
4357diff -urNp linux-2.6.32.45/arch/sparc/kernel/Makefile linux-2.6.32.45/arch/sparc/kernel/Makefile
4358--- linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4359+++ linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4360@@ -3,7 +3,7 @@
4361 #
4362
4363 asflags-y := -ansi
4364-ccflags-y := -Werror
4365+#ccflags-y := -Werror
4366
4367 extra-y := head_$(BITS).o
4368 extra-y += init_task.o
4369diff -urNp linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c
4370--- linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4371+++ linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4372@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4373 spin_unlock_irqrestore(&iommu->lock, flags);
4374 }
4375
4376-static struct dma_map_ops sun4v_dma_ops = {
4377+static const struct dma_map_ops sun4v_dma_ops = {
4378 .alloc_coherent = dma_4v_alloc_coherent,
4379 .free_coherent = dma_4v_free_coherent,
4380 .map_page = dma_4v_map_page,
4381diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_32.c linux-2.6.32.45/arch/sparc/kernel/process_32.c
4382--- linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4383+++ linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4384@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4385 rw->ins[4], rw->ins[5],
4386 rw->ins[6],
4387 rw->ins[7]);
4388- printk("%pS\n", (void *) rw->ins[7]);
4389+ printk("%pA\n", (void *) rw->ins[7]);
4390 rw = (struct reg_window32 *) rw->ins[6];
4391 }
4392 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4393@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4394
4395 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4396 r->psr, r->pc, r->npc, r->y, print_tainted());
4397- printk("PC: <%pS>\n", (void *) r->pc);
4398+ printk("PC: <%pA>\n", (void *) r->pc);
4399 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4400 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4401 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4402 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4403 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4404 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4405- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4406+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4407
4408 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4409 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4410@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4411 rw = (struct reg_window32 *) fp;
4412 pc = rw->ins[7];
4413 printk("[%08lx : ", pc);
4414- printk("%pS ] ", (void *) pc);
4415+ printk("%pA ] ", (void *) pc);
4416 fp = rw->ins[6];
4417 } while (++count < 16);
4418 printk("\n");
4419diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_64.c linux-2.6.32.45/arch/sparc/kernel/process_64.c
4420--- linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4421+++ linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4422@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4423 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4424 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4425 if (regs->tstate & TSTATE_PRIV)
4426- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4427+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4428 }
4429
4430 void show_regs(struct pt_regs *regs)
4431 {
4432 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4433 regs->tpc, regs->tnpc, regs->y, print_tainted());
4434- printk("TPC: <%pS>\n", (void *) regs->tpc);
4435+ printk("TPC: <%pA>\n", (void *) regs->tpc);
4436 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4437 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4438 regs->u_regs[3]);
4439@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4440 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4441 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4442 regs->u_regs[15]);
4443- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4444+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4445 show_regwindow(regs);
4446 }
4447
4448@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4449 ((tp && tp->task) ? tp->task->pid : -1));
4450
4451 if (gp->tstate & TSTATE_PRIV) {
4452- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4453+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4454 (void *) gp->tpc,
4455 (void *) gp->o7,
4456 (void *) gp->i7,
4457diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c
4458--- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4459+++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4460@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4461 if (ARCH_SUN4C && len > 0x20000000)
4462 return -ENOMEM;
4463 if (!addr)
4464- addr = TASK_UNMAPPED_BASE;
4465+ addr = current->mm->mmap_base;
4466
4467 if (flags & MAP_SHARED)
4468 addr = COLOUR_ALIGN(addr);
4469@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4470 }
4471 if (TASK_SIZE - PAGE_SIZE - len < addr)
4472 return -ENOMEM;
4473- if (!vmm || addr + len <= vmm->vm_start)
4474+ if (check_heap_stack_gap(vmm, addr, len))
4475 return addr;
4476 addr = vmm->vm_end;
4477 if (flags & MAP_SHARED)
4478diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c
4479--- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4480+++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4481@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4482 /* We do not accept a shared mapping if it would violate
4483 * cache aliasing constraints.
4484 */
4485- if ((flags & MAP_SHARED) &&
4486+ if ((filp || (flags & MAP_SHARED)) &&
4487 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4488 return -EINVAL;
4489 return addr;
4490@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4491 if (filp || (flags & MAP_SHARED))
4492 do_color_align = 1;
4493
4494+#ifdef CONFIG_PAX_RANDMMAP
4495+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4496+#endif
4497+
4498 if (addr) {
4499 if (do_color_align)
4500 addr = COLOUR_ALIGN(addr, pgoff);
4501@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4502 addr = PAGE_ALIGN(addr);
4503
4504 vma = find_vma(mm, addr);
4505- if (task_size - len >= addr &&
4506- (!vma || addr + len <= vma->vm_start))
4507+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4508 return addr;
4509 }
4510
4511 if (len > mm->cached_hole_size) {
4512- start_addr = addr = mm->free_area_cache;
4513+ start_addr = addr = mm->free_area_cache;
4514 } else {
4515- start_addr = addr = TASK_UNMAPPED_BASE;
4516+ start_addr = addr = mm->mmap_base;
4517 mm->cached_hole_size = 0;
4518 }
4519
4520@@ -175,14 +178,14 @@ full_search:
4521 vma = find_vma(mm, VA_EXCLUDE_END);
4522 }
4523 if (unlikely(task_size < addr)) {
4524- if (start_addr != TASK_UNMAPPED_BASE) {
4525- start_addr = addr = TASK_UNMAPPED_BASE;
4526+ if (start_addr != mm->mmap_base) {
4527+ start_addr = addr = mm->mmap_base;
4528 mm->cached_hole_size = 0;
4529 goto full_search;
4530 }
4531 return -ENOMEM;
4532 }
4533- if (likely(!vma || addr + len <= vma->vm_start)) {
4534+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4535 /*
4536 * Remember the place where we stopped the search:
4537 */
4538@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4539 /* We do not accept a shared mapping if it would violate
4540 * cache aliasing constraints.
4541 */
4542- if ((flags & MAP_SHARED) &&
4543+ if ((filp || (flags & MAP_SHARED)) &&
4544 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4545 return -EINVAL;
4546 return addr;
4547@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4548 addr = PAGE_ALIGN(addr);
4549
4550 vma = find_vma(mm, addr);
4551- if (task_size - len >= addr &&
4552- (!vma || addr + len <= vma->vm_start))
4553+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4554 return addr;
4555 }
4556
4557@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4558 /* make sure it can fit in the remaining address space */
4559 if (likely(addr > len)) {
4560 vma = find_vma(mm, addr-len);
4561- if (!vma || addr <= vma->vm_start) {
4562+ if (check_heap_stack_gap(vma, addr - len, len)) {
4563 /* remember the address as a hint for next time */
4564 return (mm->free_area_cache = addr-len);
4565 }
4566@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4567 if (unlikely(mm->mmap_base < len))
4568 goto bottomup;
4569
4570- addr = mm->mmap_base-len;
4571- if (do_color_align)
4572- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4573+ addr = mm->mmap_base - len;
4574
4575 do {
4576+ if (do_color_align)
4577+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4578 /*
4579 * Lookup failure means no vma is above this address,
4580 * else if new region fits below vma->vm_start,
4581 * return with success:
4582 */
4583 vma = find_vma(mm, addr);
4584- if (likely(!vma || addr+len <= vma->vm_start)) {
4585+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4586 /* remember the address as a hint for next time */
4587 return (mm->free_area_cache = addr);
4588 }
4589@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4590 mm->cached_hole_size = vma->vm_start - addr;
4591
4592 /* try just below the current vma->vm_start */
4593- addr = vma->vm_start-len;
4594- if (do_color_align)
4595- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4596- } while (likely(len < vma->vm_start));
4597+ addr = skip_heap_stack_gap(vma, len);
4598+ } while (!IS_ERR_VALUE(addr));
4599
4600 bottomup:
4601 /*
4602@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4603 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4604 sysctl_legacy_va_layout) {
4605 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4606+
4607+#ifdef CONFIG_PAX_RANDMMAP
4608+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4609+ mm->mmap_base += mm->delta_mmap;
4610+#endif
4611+
4612 mm->get_unmapped_area = arch_get_unmapped_area;
4613 mm->unmap_area = arch_unmap_area;
4614 } else {
4615@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4616 gap = (task_size / 6 * 5);
4617
4618 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4619+
4620+#ifdef CONFIG_PAX_RANDMMAP
4621+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4622+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4623+#endif
4624+
4625 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4626 mm->unmap_area = arch_unmap_area_topdown;
4627 }
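
The sys_sparc_64.c hunks above make arch_get_unmapped_area() and its topdown variant start from mm->mmap_base (so the PAX_RANDMMAP deltas applied in arch_pick_mmap_layout() actually shift the search) and replace the bare "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), stepping downward via skip_heap_stack_gap(). Neither helper is defined in this part of the patch; the sketch below shows their rough shape under the assumption that they keep a configurable hole below stack-like (VM_GROWSDOWN) mappings, with stand-in types and an invented gap size.

#include <errno.h>
#include <stdbool.h>

/* Stand-ins for kernel types and tunables; values are invented. */
struct vm_area_struct { unsigned long vm_start; unsigned long vm_flags; };
#define VM_GROWSDOWN    0x0100UL
static unsigned long heap_stack_gap = 64UL * 1024;

/* True when [addr, addr + len) fits below vma, keeping an extra gap when the
 * mapping above it grows downward (i.e. is a stack). */
static bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                 unsigned long addr, unsigned long len)
{
        unsigned long gap = 0;

        if (!vma)
                return true;
        if (vma->vm_flags & VM_GROWSDOWN)
                gap = heap_stack_gap;
        return addr + len + gap <= vma->vm_start;
}

/* Next candidate address below vma for a topdown search, or an error value
 * (caught by IS_ERR_VALUE() in the loop above) when the search must stop. */
static unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma,
                                         unsigned long len)
{
        unsigned long gap = (vma->vm_flags & VM_GROWSDOWN) ? heap_stack_gap : 0;

        if (vma->vm_start < len + gap)
                return -ENOMEM;
        return vma->vm_start - len - gap;
}
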
4628diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_32.c linux-2.6.32.45/arch/sparc/kernel/traps_32.c
4629--- linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4630+++ linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4631@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4632 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4633 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4634
4635+extern void gr_handle_kernel_exploit(void);
4636+
4637 void die_if_kernel(char *str, struct pt_regs *regs)
4638 {
4639 static int die_counter;
4640@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4641 count++ < 30 &&
4642 (((unsigned long) rw) >= PAGE_OFFSET) &&
4643 !(((unsigned long) rw) & 0x7)) {
4644- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4645+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4646 (void *) rw->ins[7]);
4647 rw = (struct reg_window32 *)rw->ins[6];
4648 }
4649 }
4650 printk("Instruction DUMP:");
4651 instruction_dump ((unsigned long *) regs->pc);
4652- if(regs->psr & PSR_PS)
4653+ if(regs->psr & PSR_PS) {
4654+ gr_handle_kernel_exploit();
4655 do_exit(SIGKILL);
4656+ }
4657 do_exit(SIGSEGV);
4658 }
4659
4660diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_64.c linux-2.6.32.45/arch/sparc/kernel/traps_64.c
4661--- linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4662+++ linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4663@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4664 i + 1,
4665 p->trapstack[i].tstate, p->trapstack[i].tpc,
4666 p->trapstack[i].tnpc, p->trapstack[i].tt);
4667- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4668+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4669 }
4670 }
4671
4672@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4673
4674 lvl -= 0x100;
4675 if (regs->tstate & TSTATE_PRIV) {
4676+
4677+#ifdef CONFIG_PAX_REFCOUNT
4678+ if (lvl == 6)
4679+ pax_report_refcount_overflow(regs);
4680+#endif
4681+
4682 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4683 die_if_kernel(buffer, regs);
4684 }
4685@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4686 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4687 {
4688 char buffer[32];
4689-
4690+
4691 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4692 0, lvl, SIGTRAP) == NOTIFY_STOP)
4693 return;
4694
4695+#ifdef CONFIG_PAX_REFCOUNT
4696+ if (lvl == 6)
4697+ pax_report_refcount_overflow(regs);
4698+#endif
4699+
4700 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4701
4702 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4703@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4704 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4705 printk("%s" "ERROR(%d): ",
4706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4707- printk("TPC<%pS>\n", (void *) regs->tpc);
4708+ printk("TPC<%pA>\n", (void *) regs->tpc);
4709 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4710 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4711 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4712@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4713 smp_processor_id(),
4714 (type & 0x1) ? 'I' : 'D',
4715 regs->tpc);
4716- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4717+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4718 panic("Irrecoverable Cheetah+ parity error.");
4719 }
4720
4721@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4722 smp_processor_id(),
4723 (type & 0x1) ? 'I' : 'D',
4724 regs->tpc);
4725- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4726+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4727 }
4728
4729 struct sun4v_error_entry {
4730@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4731
4732 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4733 regs->tpc, tl);
4734- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4735+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4736 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4737- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4738+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4739 (void *) regs->u_regs[UREG_I7]);
4740 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4741 "pte[%lx] error[%lx]\n",
4742@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4743
4744 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4745 regs->tpc, tl);
4746- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4747+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4748 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4749- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4750+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4751 (void *) regs->u_regs[UREG_I7]);
4752 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4753 "pte[%lx] error[%lx]\n",
4754@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4755 fp = (unsigned long)sf->fp + STACK_BIAS;
4756 }
4757
4758- printk(" [%016lx] %pS\n", pc, (void *) pc);
4759+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4760 } while (++count < 16);
4761 }
4762
4763@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4764 return (struct reg_window *) (fp + STACK_BIAS);
4765 }
4766
4767+extern void gr_handle_kernel_exploit(void);
4768+
4769 void die_if_kernel(char *str, struct pt_regs *regs)
4770 {
4771 static int die_counter;
4772@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4773 while (rw &&
4774 count++ < 30&&
4775 is_kernel_stack(current, rw)) {
4776- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4777+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4778 (void *) rw->ins[7]);
4779
4780 rw = kernel_stack_up(rw);
4781@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4782 }
4783 user_instruction_dump ((unsigned int __user *) regs->tpc);
4784 }
4785- if (regs->tstate & TSTATE_PRIV)
4786+ if (regs->tstate & TSTATE_PRIV) {
4787+ gr_handle_kernel_exploit();
4788 do_exit(SIGKILL);
4789+ }
4790+
4791 do_exit(SIGSEGV);
4792 }
4793 EXPORT_SYMBOL(die_if_kernel);
4794diff -urNp linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S
4795--- linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4796+++ linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4797@@ -127,7 +127,7 @@ do_int_load:
4798 wr %o5, 0x0, %asi
4799 retl
4800 mov 0, %o0
4801- .size __do_int_load, .-__do_int_load
4802+ .size do_int_load, .-do_int_load
4803
4804 .section __ex_table,"a"
4805 .word 4b, __retl_efault
4806diff -urNp linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c
4807--- linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4808+++ linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4809@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4810 if (count < 5) {
4811 last_time = jiffies;
4812 count++;
4813- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4814+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4815 regs->tpc, (void *) regs->tpc);
4816 }
4817 }
4818diff -urNp linux-2.6.32.45/arch/sparc/lib/atomic_64.S linux-2.6.32.45/arch/sparc/lib/atomic_64.S
4819--- linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4820+++ linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4821@@ -18,7 +18,12 @@
4822 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4823 BACKOFF_SETUP(%o2)
4824 1: lduw [%o1], %g1
4825- add %g1, %o0, %g7
4826+ addcc %g1, %o0, %g7
4827+
4828+#ifdef CONFIG_PAX_REFCOUNT
4829+ tvs %icc, 6
4830+#endif
4831+
4832 cas [%o1], %g1, %g7
4833 cmp %g1, %g7
4834 bne,pn %icc, 2f
4835@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4836 2: BACKOFF_SPIN(%o2, %o3, 1b)
4837 .size atomic_add, .-atomic_add
4838
4839+ .globl atomic_add_unchecked
4840+ .type atomic_add_unchecked,#function
4841+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4842+ BACKOFF_SETUP(%o2)
4843+1: lduw [%o1], %g1
4844+ add %g1, %o0, %g7
4845+ cas [%o1], %g1, %g7
4846+ cmp %g1, %g7
4847+ bne,pn %icc, 2f
4848+ nop
4849+ retl
4850+ nop
4851+2: BACKOFF_SPIN(%o2, %o3, 1b)
4852+ .size atomic_add_unchecked, .-atomic_add_unchecked
4853+
4854 .globl atomic_sub
4855 .type atomic_sub,#function
4856 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4857 BACKOFF_SETUP(%o2)
4858 1: lduw [%o1], %g1
4859- sub %g1, %o0, %g7
4860+ subcc %g1, %o0, %g7
4861+
4862+#ifdef CONFIG_PAX_REFCOUNT
4863+ tvs %icc, 6
4864+#endif
4865+
4866 cas [%o1], %g1, %g7
4867 cmp %g1, %g7
4868 bne,pn %icc, 2f
4869@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4870 2: BACKOFF_SPIN(%o2, %o3, 1b)
4871 .size atomic_sub, .-atomic_sub
4872
4873+ .globl atomic_sub_unchecked
4874+ .type atomic_sub_unchecked,#function
4875+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4876+ BACKOFF_SETUP(%o2)
4877+1: lduw [%o1], %g1
4878+ sub %g1, %o0, %g7
4879+ cas [%o1], %g1, %g7
4880+ cmp %g1, %g7
4881+ bne,pn %icc, 2f
4882+ nop
4883+ retl
4884+ nop
4885+2: BACKOFF_SPIN(%o2, %o3, 1b)
4886+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4887+
4888 .globl atomic_add_ret
4889 .type atomic_add_ret,#function
4890 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4891 BACKOFF_SETUP(%o2)
4892 1: lduw [%o1], %g1
4893- add %g1, %o0, %g7
4894+ addcc %g1, %o0, %g7
4895+
4896+#ifdef CONFIG_PAX_REFCOUNT
4897+ tvs %icc, 6
4898+#endif
4899+
4900 cas [%o1], %g1, %g7
4901 cmp %g1, %g7
4902 bne,pn %icc, 2f
4903@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4904 2: BACKOFF_SPIN(%o2, %o3, 1b)
4905 .size atomic_add_ret, .-atomic_add_ret
4906
4907+ .globl atomic_add_ret_unchecked
4908+ .type atomic_add_ret_unchecked,#function
4909+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4910+ BACKOFF_SETUP(%o2)
4911+1: lduw [%o1], %g1
4912+ addcc %g1, %o0, %g7
4913+ cas [%o1], %g1, %g7
4914+ cmp %g1, %g7
4915+ bne,pn %icc, 2f
4916+ add %g7, %o0, %g7
4917+ sra %g7, 0, %o0
4918+ retl
4919+ nop
4920+2: BACKOFF_SPIN(%o2, %o3, 1b)
4921+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4922+
4923 .globl atomic_sub_ret
4924 .type atomic_sub_ret,#function
4925 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4926 BACKOFF_SETUP(%o2)
4927 1: lduw [%o1], %g1
4928- sub %g1, %o0, %g7
4929+ subcc %g1, %o0, %g7
4930+
4931+#ifdef CONFIG_PAX_REFCOUNT
4932+ tvs %icc, 6
4933+#endif
4934+
4935 cas [%o1], %g1, %g7
4936 cmp %g1, %g7
4937 bne,pn %icc, 2f
4938@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4939 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4940 BACKOFF_SETUP(%o2)
4941 1: ldx [%o1], %g1
4942- add %g1, %o0, %g7
4943+ addcc %g1, %o0, %g7
4944+
4945+#ifdef CONFIG_PAX_REFCOUNT
4946+ tvs %xcc, 6
4947+#endif
4948+
4949 casx [%o1], %g1, %g7
4950 cmp %g1, %g7
4951 bne,pn %xcc, 2f
4952@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4953 2: BACKOFF_SPIN(%o2, %o3, 1b)
4954 .size atomic64_add, .-atomic64_add
4955
4956+ .globl atomic64_add_unchecked
4957+ .type atomic64_add_unchecked,#function
4958+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4959+ BACKOFF_SETUP(%o2)
4960+1: ldx [%o1], %g1
4961+ addcc %g1, %o0, %g7
4962+ casx [%o1], %g1, %g7
4963+ cmp %g1, %g7
4964+ bne,pn %xcc, 2f
4965+ nop
4966+ retl
4967+ nop
4968+2: BACKOFF_SPIN(%o2, %o3, 1b)
4969+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4970+
4971 .globl atomic64_sub
4972 .type atomic64_sub,#function
4973 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4974 BACKOFF_SETUP(%o2)
4975 1: ldx [%o1], %g1
4976- sub %g1, %o0, %g7
4977+ subcc %g1, %o0, %g7
4978+
4979+#ifdef CONFIG_PAX_REFCOUNT
4980+ tvs %xcc, 6
4981+#endif
4982+
4983 casx [%o1], %g1, %g7
4984 cmp %g1, %g7
4985 bne,pn %xcc, 2f
4986@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4987 2: BACKOFF_SPIN(%o2, %o3, 1b)
4988 .size atomic64_sub, .-atomic64_sub
4989
4990+ .globl atomic64_sub_unchecked
4991+ .type atomic64_sub_unchecked,#function
4992+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4993+ BACKOFF_SETUP(%o2)
4994+1: ldx [%o1], %g1
4995+ subcc %g1, %o0, %g7
4996+ casx [%o1], %g1, %g7
4997+ cmp %g1, %g7
4998+ bne,pn %xcc, 2f
4999+ nop
5000+ retl
5001+ nop
5002+2: BACKOFF_SPIN(%o2, %o3, 1b)
5003+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5004+
5005 .globl atomic64_add_ret
5006 .type atomic64_add_ret,#function
5007 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5008 BACKOFF_SETUP(%o2)
5009 1: ldx [%o1], %g1
5010- add %g1, %o0, %g7
5011+ addcc %g1, %o0, %g7
5012+
5013+#ifdef CONFIG_PAX_REFCOUNT
5014+ tvs %xcc, 6
5015+#endif
5016+
5017 casx [%o1], %g1, %g7
5018 cmp %g1, %g7
5019 bne,pn %xcc, 2f
5020@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
5021 2: BACKOFF_SPIN(%o2, %o3, 1b)
5022 .size atomic64_add_ret, .-atomic64_add_ret
5023
5024+ .globl atomic64_add_ret_unchecked
5025+ .type atomic64_add_ret_unchecked,#function
5026+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5027+ BACKOFF_SETUP(%o2)
5028+1: ldx [%o1], %g1
5029+ addcc %g1, %o0, %g7
5030+ casx [%o1], %g1, %g7
5031+ cmp %g1, %g7
5032+ bne,pn %xcc, 2f
5033+ add %g7, %o0, %g7
5034+ mov %g7, %o0
5035+ retl
5036+ nop
5037+2: BACKOFF_SPIN(%o2, %o3, 1b)
5038+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5039+
5040 .globl atomic64_sub_ret
5041 .type atomic64_sub_ret,#function
5042 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5043 BACKOFF_SETUP(%o2)
5044 1: ldx [%o1], %g1
5045- sub %g1, %o0, %g7
5046+ subcc %g1, %o0, %g7
5047+
5048+#ifdef CONFIG_PAX_REFCOUNT
5049+ tvs %xcc, 6
5050+#endif
5051+
5052 casx [%o1], %g1, %g7
5053 cmp %g1, %g7
5054 bne,pn %xcc, 2f
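
The atomic_64.S hunks above implement PAX_REFCOUNT for sparc64: add/sub become addcc/subcc so the condition codes are set, and "tvs %icc, 6" / "tvs %xcc, 6" takes software trap 6 on signed overflow, which the traps_64.c hunks earlier route to pax_report_refcount_overflow(); counters that are allowed to wrap use the new *_unchecked entry points instead. A portable userspace analogue of "trap instead of silently wrapping" (the idea, not the patch's mechanism):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Analogue of addcc + tvs: detect signed overflow on the add and bail out
 * loudly instead of letting a reference count wrap around. */
static int refcount_add_checked(int count, int inc)
{
        int result;

        if (__builtin_add_overflow(count, inc, &result)) {
                fprintf(stderr, "refcount overflow detected\n");
                abort();        /* kernel: trap 6 -> pax_report_refcount_overflow() */
        }
        return result;
}

int main(void)
{
        printf("%d\n", refcount_add_checked(41, 1));    /* fine: 42 */
        refcount_add_checked(INT_MAX, 1);               /* aborts */
        return 0;
}

The rwsem_64.S hunks below apply the same addcc/subcc + tvs instrumentation to the rw-semaphore counters.
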
5055diff -urNp linux-2.6.32.45/arch/sparc/lib/ksyms.c linux-2.6.32.45/arch/sparc/lib/ksyms.c
5056--- linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
5057+++ linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-08-19 23:05:14.000000000 -0400
5058@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5059
5060 /* Atomic counter implementation. */
5061 EXPORT_SYMBOL(atomic_add);
5062+EXPORT_SYMBOL(atomic_add_unchecked);
5063 EXPORT_SYMBOL(atomic_add_ret);
5064+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5065 EXPORT_SYMBOL(atomic_sub);
5066+EXPORT_SYMBOL(atomic_sub_unchecked);
5067 EXPORT_SYMBOL(atomic_sub_ret);
5068 EXPORT_SYMBOL(atomic64_add);
5069+EXPORT_SYMBOL(atomic64_add_unchecked);
5070 EXPORT_SYMBOL(atomic64_add_ret);
5071+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5072 EXPORT_SYMBOL(atomic64_sub);
5073+EXPORT_SYMBOL(atomic64_sub_unchecked);
5074 EXPORT_SYMBOL(atomic64_sub_ret);
5075
5076 /* Atomic bit operations. */
5077diff -urNp linux-2.6.32.45/arch/sparc/lib/Makefile linux-2.6.32.45/arch/sparc/lib/Makefile
5078--- linux-2.6.32.45/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
5079+++ linux-2.6.32.45/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
5080@@ -2,7 +2,7 @@
5081 #
5082
5083 asflags-y := -ansi -DST_DIV0=0x02
5084-ccflags-y := -Werror
5085+#ccflags-y := -Werror
5086
5087 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5088 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5089diff -urNp linux-2.6.32.45/arch/sparc/lib/rwsem_64.S linux-2.6.32.45/arch/sparc/lib/rwsem_64.S
5090--- linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
5091+++ linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
5092@@ -11,7 +11,12 @@
5093 .globl __down_read
5094 __down_read:
5095 1: lduw [%o0], %g1
5096- add %g1, 1, %g7
5097+ addcc %g1, 1, %g7
5098+
5099+#ifdef CONFIG_PAX_REFCOUNT
5100+ tvs %icc, 6
5101+#endif
5102+
5103 cas [%o0], %g1, %g7
5104 cmp %g1, %g7
5105 bne,pn %icc, 1b
5106@@ -33,7 +38,12 @@ __down_read:
5107 .globl __down_read_trylock
5108 __down_read_trylock:
5109 1: lduw [%o0], %g1
5110- add %g1, 1, %g7
5111+ addcc %g1, 1, %g7
5112+
5113+#ifdef CONFIG_PAX_REFCOUNT
5114+ tvs %icc, 6
5115+#endif
5116+
5117 cmp %g7, 0
5118 bl,pn %icc, 2f
5119 mov 0, %o1
5120@@ -51,7 +61,12 @@ __down_write:
5121 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5122 1:
5123 lduw [%o0], %g3
5124- add %g3, %g1, %g7
5125+ addcc %g3, %g1, %g7
5126+
5127+#ifdef CONFIG_PAX_REFCOUNT
5128+ tvs %icc, 6
5129+#endif
5130+
5131 cas [%o0], %g3, %g7
5132 cmp %g3, %g7
5133 bne,pn %icc, 1b
5134@@ -77,7 +92,12 @@ __down_write_trylock:
5135 cmp %g3, 0
5136 bne,pn %icc, 2f
5137 mov 0, %o1
5138- add %g3, %g1, %g7
5139+ addcc %g3, %g1, %g7
5140+
5141+#ifdef CONFIG_PAX_REFCOUNT
5142+ tvs %icc, 6
5143+#endif
5144+
5145 cas [%o0], %g3, %g7
5146 cmp %g3, %g7
5147 bne,pn %icc, 1b
5148@@ -90,7 +110,12 @@ __down_write_trylock:
5149 __up_read:
5150 1:
5151 lduw [%o0], %g1
5152- sub %g1, 1, %g7
5153+ subcc %g1, 1, %g7
5154+
5155+#ifdef CONFIG_PAX_REFCOUNT
5156+ tvs %icc, 6
5157+#endif
5158+
5159 cas [%o0], %g1, %g7
5160 cmp %g1, %g7
5161 bne,pn %icc, 1b
5162@@ -118,7 +143,12 @@ __up_write:
5163 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5164 1:
5165 lduw [%o0], %g3
5166- sub %g3, %g1, %g7
5167+ subcc %g3, %g1, %g7
5168+
5169+#ifdef CONFIG_PAX_REFCOUNT
5170+ tvs %icc, 6
5171+#endif
5172+
5173 cas [%o0], %g3, %g7
5174 cmp %g3, %g7
5175 bne,pn %icc, 1b
5176@@ -143,7 +173,12 @@ __downgrade_write:
5177 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5178 1:
5179 lduw [%o0], %g3
5180- sub %g3, %g1, %g7
5181+ subcc %g3, %g1, %g7
5182+
5183+#ifdef CONFIG_PAX_REFCOUNT
5184+ tvs %icc, 6
5185+#endif
5186+
5187 cas [%o0], %g3, %g7
5188 cmp %g3, %g7
5189 bne,pn %icc, 1b
5190diff -urNp linux-2.6.32.45/arch/sparc/Makefile linux-2.6.32.45/arch/sparc/Makefile
5191--- linux-2.6.32.45/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5192+++ linux-2.6.32.45/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5193@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5194 # Export what is needed by arch/sparc/boot/Makefile
5195 export VMLINUX_INIT VMLINUX_MAIN
5196 VMLINUX_INIT := $(head-y) $(init-y)
5197-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5198+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5199 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5200 VMLINUX_MAIN += $(drivers-y) $(net-y)
5201
5202diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_32.c linux-2.6.32.45/arch/sparc/mm/fault_32.c
5203--- linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5204+++ linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5205@@ -21,6 +21,9 @@
5206 #include <linux/interrupt.h>
5207 #include <linux/module.h>
5208 #include <linux/kdebug.h>
5209+#include <linux/slab.h>
5210+#include <linux/pagemap.h>
5211+#include <linux/compiler.h>
5212
5213 #include <asm/system.h>
5214 #include <asm/page.h>
5215@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5216 return safe_compute_effective_address(regs, insn);
5217 }
5218
5219+#ifdef CONFIG_PAX_PAGEEXEC
5220+#ifdef CONFIG_PAX_DLRESOLVE
5221+static void pax_emuplt_close(struct vm_area_struct *vma)
5222+{
5223+ vma->vm_mm->call_dl_resolve = 0UL;
5224+}
5225+
5226+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5227+{
5228+ unsigned int *kaddr;
5229+
5230+ vmf->page = alloc_page(GFP_HIGHUSER);
5231+ if (!vmf->page)
5232+ return VM_FAULT_OOM;
5233+
5234+ kaddr = kmap(vmf->page);
5235+ memset(kaddr, 0, PAGE_SIZE);
5236+ kaddr[0] = 0x9DE3BFA8U; /* save */
5237+ flush_dcache_page(vmf->page);
5238+ kunmap(vmf->page);
5239+ return VM_FAULT_MAJOR;
5240+}
5241+
5242+static const struct vm_operations_struct pax_vm_ops = {
5243+ .close = pax_emuplt_close,
5244+ .fault = pax_emuplt_fault
5245+};
5246+
5247+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5248+{
5249+ int ret;
5250+
5251+ vma->vm_mm = current->mm;
5252+ vma->vm_start = addr;
5253+ vma->vm_end = addr + PAGE_SIZE;
5254+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5255+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5256+ vma->vm_ops = &pax_vm_ops;
5257+
5258+ ret = insert_vm_struct(current->mm, vma);
5259+ if (ret)
5260+ return ret;
5261+
5262+ ++current->mm->total_vm;
5263+ return 0;
5264+}
5265+#endif
5266+
5267+/*
5268+ * PaX: decide what to do with offenders (regs->pc = fault address)
5269+ *
5270+ * returns 1 when task should be killed
5271+ * 2 when patched PLT trampoline was detected
5272+ * 3 when unpatched PLT trampoline was detected
5273+ */
5274+static int pax_handle_fetch_fault(struct pt_regs *regs)
5275+{
5276+
5277+#ifdef CONFIG_PAX_EMUPLT
5278+ int err;
5279+
5280+ do { /* PaX: patched PLT emulation #1 */
5281+ unsigned int sethi1, sethi2, jmpl;
5282+
5283+ err = get_user(sethi1, (unsigned int *)regs->pc);
5284+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5285+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5286+
5287+ if (err)
5288+ break;
5289+
5290+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5291+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5292+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5293+ {
5294+ unsigned int addr;
5295+
5296+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5297+ addr = regs->u_regs[UREG_G1];
5298+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5299+ regs->pc = addr;
5300+ regs->npc = addr+4;
5301+ return 2;
5302+ }
5303+ } while (0);
5304+
5305+ { /* PaX: patched PLT emulation #2 */
5306+ unsigned int ba;
5307+
5308+ err = get_user(ba, (unsigned int *)regs->pc);
5309+
5310+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5311+ unsigned int addr;
5312+
5313+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5314+ regs->pc = addr;
5315+ regs->npc = addr+4;
5316+ return 2;
5317+ }
5318+ }
5319+
5320+ do { /* PaX: patched PLT emulation #3 */
5321+ unsigned int sethi, jmpl, nop;
5322+
5323+ err = get_user(sethi, (unsigned int *)regs->pc);
5324+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5325+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5326+
5327+ if (err)
5328+ break;
5329+
5330+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5331+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5332+ nop == 0x01000000U)
5333+ {
5334+ unsigned int addr;
5335+
5336+ addr = (sethi & 0x003FFFFFU) << 10;
5337+ regs->u_regs[UREG_G1] = addr;
5338+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5339+ regs->pc = addr;
5340+ regs->npc = addr+4;
5341+ return 2;
5342+ }
5343+ } while (0);
5344+
5345+ do { /* PaX: unpatched PLT emulation step 1 */
5346+ unsigned int sethi, ba, nop;
5347+
5348+ err = get_user(sethi, (unsigned int *)regs->pc);
5349+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5350+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5351+
5352+ if (err)
5353+ break;
5354+
5355+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5356+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5357+ nop == 0x01000000U)
5358+ {
5359+ unsigned int addr, save, call;
5360+
5361+ if ((ba & 0xFFC00000U) == 0x30800000U)
5362+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5363+ else
5364+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5365+
5366+ err = get_user(save, (unsigned int *)addr);
5367+ err |= get_user(call, (unsigned int *)(addr+4));
5368+ err |= get_user(nop, (unsigned int *)(addr+8));
5369+ if (err)
5370+ break;
5371+
5372+#ifdef CONFIG_PAX_DLRESOLVE
5373+ if (save == 0x9DE3BFA8U &&
5374+ (call & 0xC0000000U) == 0x40000000U &&
5375+ nop == 0x01000000U)
5376+ {
5377+ struct vm_area_struct *vma;
5378+ unsigned long call_dl_resolve;
5379+
5380+ down_read(&current->mm->mmap_sem);
5381+ call_dl_resolve = current->mm->call_dl_resolve;
5382+ up_read(&current->mm->mmap_sem);
5383+ if (likely(call_dl_resolve))
5384+ goto emulate;
5385+
5386+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5387+
5388+ down_write(&current->mm->mmap_sem);
5389+ if (current->mm->call_dl_resolve) {
5390+ call_dl_resolve = current->mm->call_dl_resolve;
5391+ up_write(&current->mm->mmap_sem);
5392+ if (vma)
5393+ kmem_cache_free(vm_area_cachep, vma);
5394+ goto emulate;
5395+ }
5396+
5397+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5398+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5399+ up_write(&current->mm->mmap_sem);
5400+ if (vma)
5401+ kmem_cache_free(vm_area_cachep, vma);
5402+ return 1;
5403+ }
5404+
5405+ if (pax_insert_vma(vma, call_dl_resolve)) {
5406+ up_write(&current->mm->mmap_sem);
5407+ kmem_cache_free(vm_area_cachep, vma);
5408+ return 1;
5409+ }
5410+
5411+ current->mm->call_dl_resolve = call_dl_resolve;
5412+ up_write(&current->mm->mmap_sem);
5413+
5414+emulate:
5415+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5416+ regs->pc = call_dl_resolve;
5417+ regs->npc = addr+4;
5418+ return 3;
5419+ }
5420+#endif
5421+
5422+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5423+ if ((save & 0xFFC00000U) == 0x05000000U &&
5424+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5425+ nop == 0x01000000U)
5426+ {
5427+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5428+ regs->u_regs[UREG_G2] = addr + 4;
5429+ addr = (save & 0x003FFFFFU) << 10;
5430+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5431+ regs->pc = addr;
5432+ regs->npc = addr+4;
5433+ return 3;
5434+ }
5435+ }
5436+ } while (0);
5437+
5438+ do { /* PaX: unpatched PLT emulation step 2 */
5439+ unsigned int save, call, nop;
5440+
5441+ err = get_user(save, (unsigned int *)(regs->pc-4));
5442+ err |= get_user(call, (unsigned int *)regs->pc);
5443+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
5444+ if (err)
5445+ break;
5446+
5447+ if (save == 0x9DE3BFA8U &&
5448+ (call & 0xC0000000U) == 0x40000000U &&
5449+ nop == 0x01000000U)
5450+ {
5451+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5452+
5453+ regs->u_regs[UREG_RETPC] = regs->pc;
5454+ regs->pc = dl_resolve;
5455+ regs->npc = dl_resolve+4;
5456+ return 3;
5457+ }
5458+ } while (0);
5459+#endif
5460+
5461+ return 1;
5462+}
5463+
5464+void pax_report_insns(void *pc, void *sp)
5465+{
5466+ unsigned long i;
5467+
5468+ printk(KERN_ERR "PAX: bytes at PC: ");
5469+ for (i = 0; i < 8; i++) {
5470+ unsigned int c;
5471+ if (get_user(c, (unsigned int *)pc+i))
5472+ printk(KERN_CONT "???????? ");
5473+ else
5474+ printk(KERN_CONT "%08x ", c);
5475+ }
5476+ printk("\n");
5477+}
5478+#endif
5479+
5480 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5481 unsigned long address)
5482 {
5483@@ -231,6 +495,24 @@ good_area:
5484 if(!(vma->vm_flags & VM_WRITE))
5485 goto bad_area;
5486 } else {
5487+
5488+#ifdef CONFIG_PAX_PAGEEXEC
5489+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5490+ up_read(&mm->mmap_sem);
5491+ switch (pax_handle_fetch_fault(regs)) {
5492+
5493+#ifdef CONFIG_PAX_EMUPLT
5494+ case 2:
5495+ case 3:
5496+ return;
5497+#endif
5498+
5499+ }
5500+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5501+ do_group_exit(SIGKILL);
5502+ }
5503+#endif
5504+
5505 /* Allow reads even for write-only mappings */
5506 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5507 goto bad_area;
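
Under PAX_PAGEEXEC the fault_32.c hunk above kills instruction fetches from non-executable pages, but only after pax_handle_fetch_fault() has had a chance to recognise the few PLT trampoline sequences the dynamic linker legitimately runs from such pages and to emulate them. The recognition is pattern matching on the instruction words plus reconstruction of the jump target; a standalone decode of the sethi/jmpl case, reusing the exact masks and sign-extension arithmetic from the hunk (the sample opcodes are invented), looks like this:

#include <stdio.h>

int main(void)
{
        unsigned int sethi = 0x03000040U;       /* sethi %hi(0x10000), %g1  (example) */
        unsigned int jmpl  = 0x81C06008U;       /* jmpl  %g1 + 8, %g0       (example) */

        if ((sethi & 0xFFC00000U) == 0x03000000U &&
            (jmpl  & 0xFFFFE000U) == 0x81C06000U) {
                unsigned int addr = (sethi & 0x003FFFFFU) << 10;        /* %hi(): imm22 << 10 */

                /* sign-extend the 13-bit jmpl immediate, exactly as the hunk does */
                addr += ((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
                printf("emulated jump target: %#010x\n", addr);         /* 0x00010008 */
        }
        return 0;
}
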
5508diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_64.c linux-2.6.32.45/arch/sparc/mm/fault_64.c
5509--- linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5510+++ linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5511@@ -20,6 +20,9 @@
5512 #include <linux/kprobes.h>
5513 #include <linux/kdebug.h>
5514 #include <linux/percpu.h>
5515+#include <linux/slab.h>
5516+#include <linux/pagemap.h>
5517+#include <linux/compiler.h>
5518
5519 #include <asm/page.h>
5520 #include <asm/pgtable.h>
5521@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5522 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5523 regs->tpc);
5524 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5525- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5526+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5527 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5528 dump_stack();
5529 unhandled_fault(regs->tpc, current, regs);
5530@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5531 show_regs(regs);
5532 }
5533
5534+#ifdef CONFIG_PAX_PAGEEXEC
5535+#ifdef CONFIG_PAX_DLRESOLVE
5536+static void pax_emuplt_close(struct vm_area_struct *vma)
5537+{
5538+ vma->vm_mm->call_dl_resolve = 0UL;
5539+}
5540+
5541+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5542+{
5543+ unsigned int *kaddr;
5544+
5545+ vmf->page = alloc_page(GFP_HIGHUSER);
5546+ if (!vmf->page)
5547+ return VM_FAULT_OOM;
5548+
5549+ kaddr = kmap(vmf->page);
5550+ memset(kaddr, 0, PAGE_SIZE);
5551+ kaddr[0] = 0x9DE3BFA8U; /* save */
5552+ flush_dcache_page(vmf->page);
5553+ kunmap(vmf->page);
5554+ return VM_FAULT_MAJOR;
5555+}
5556+
5557+static const struct vm_operations_struct pax_vm_ops = {
5558+ .close = pax_emuplt_close,
5559+ .fault = pax_emuplt_fault
5560+};
5561+
5562+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5563+{
5564+ int ret;
5565+
5566+ vma->vm_mm = current->mm;
5567+ vma->vm_start = addr;
5568+ vma->vm_end = addr + PAGE_SIZE;
5569+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5570+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5571+ vma->vm_ops = &pax_vm_ops;
5572+
5573+ ret = insert_vm_struct(current->mm, vma);
5574+ if (ret)
5575+ return ret;
5576+
5577+ ++current->mm->total_vm;
5578+ return 0;
5579+}
5580+#endif
5581+
5582+/*
5583+ * PaX: decide what to do with offenders (regs->tpc = fault address)
5584+ *
5585+ * returns 1 when task should be killed
5586+ * 2 when patched PLT trampoline was detected
5587+ * 3 when unpatched PLT trampoline was detected
5588+ */
5589+static int pax_handle_fetch_fault(struct pt_regs *regs)
5590+{
5591+
5592+#ifdef CONFIG_PAX_EMUPLT
5593+ int err;
5594+
5595+ do { /* PaX: patched PLT emulation #1 */
5596+ unsigned int sethi1, sethi2, jmpl;
5597+
5598+ err = get_user(sethi1, (unsigned int *)regs->tpc);
5599+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5600+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5601+
5602+ if (err)
5603+ break;
5604+
5605+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5606+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5607+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5608+ {
5609+ unsigned long addr;
5610+
5611+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5612+ addr = regs->u_regs[UREG_G1];
5613+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5614+
5615+ if (test_thread_flag(TIF_32BIT))
5616+ addr &= 0xFFFFFFFFUL;
5617+
5618+ regs->tpc = addr;
5619+ regs->tnpc = addr+4;
5620+ return 2;
5621+ }
5622+ } while (0);
5623+
5624+ { /* PaX: patched PLT emulation #2 */
5625+ unsigned int ba;
5626+
5627+ err = get_user(ba, (unsigned int *)regs->tpc);
5628+
5629+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5630+ unsigned long addr;
5631+
5632+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5633+
5634+ if (test_thread_flag(TIF_32BIT))
5635+ addr &= 0xFFFFFFFFUL;
5636+
5637+ regs->tpc = addr;
5638+ regs->tnpc = addr+4;
5639+ return 2;
5640+ }
5641+ }
5642+
5643+ do { /* PaX: patched PLT emulation #3 */
5644+ unsigned int sethi, jmpl, nop;
5645+
5646+ err = get_user(sethi, (unsigned int *)regs->tpc);
5647+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5648+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5649+
5650+ if (err)
5651+ break;
5652+
5653+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5654+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5655+ nop == 0x01000000U)
5656+ {
5657+ unsigned long addr;
5658+
5659+ addr = (sethi & 0x003FFFFFU) << 10;
5660+ regs->u_regs[UREG_G1] = addr;
5661+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5662+
5663+ if (test_thread_flag(TIF_32BIT))
5664+ addr &= 0xFFFFFFFFUL;
5665+
5666+ regs->tpc = addr;
5667+ regs->tnpc = addr+4;
5668+ return 2;
5669+ }
5670+ } while (0);
5671+
5672+ do { /* PaX: patched PLT emulation #4 */
5673+ unsigned int sethi, mov1, call, mov2;
5674+
5675+ err = get_user(sethi, (unsigned int *)regs->tpc);
5676+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5677+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5678+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5679+
5680+ if (err)
5681+ break;
5682+
5683+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5684+ mov1 == 0x8210000FU &&
5685+ (call & 0xC0000000U) == 0x40000000U &&
5686+ mov2 == 0x9E100001U)
5687+ {
5688+ unsigned long addr;
5689+
5690+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5691+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5692+
5693+ if (test_thread_flag(TIF_32BIT))
5694+ addr &= 0xFFFFFFFFUL;
5695+
5696+ regs->tpc = addr;
5697+ regs->tnpc = addr+4;
5698+ return 2;
5699+ }
5700+ } while (0);
5701+
5702+ do { /* PaX: patched PLT emulation #5 */
5703+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5704+
5705+ err = get_user(sethi, (unsigned int *)regs->tpc);
5706+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5707+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5708+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5709+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5710+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5711+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5712+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5713+
5714+ if (err)
5715+ break;
5716+
5717+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5718+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5719+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5720+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5721+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5722+ sllx == 0x83287020U &&
5723+ jmpl == 0x81C04005U &&
5724+ nop == 0x01000000U)
5725+ {
5726+ unsigned long addr;
5727+
5728+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5729+ regs->u_regs[UREG_G1] <<= 32;
5730+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5731+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5732+ regs->tpc = addr;
5733+ regs->tnpc = addr+4;
5734+ return 2;
5735+ }
5736+ } while (0);
5737+
5738+ do { /* PaX: patched PLT emulation #6 */
5739+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5740+
5741+ err = get_user(sethi, (unsigned int *)regs->tpc);
5742+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5743+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5744+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5745+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5746+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5747+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5748+
5749+ if (err)
5750+ break;
5751+
5752+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5753+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5754+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5755+ sllx == 0x83287020U &&
5756+ (or & 0xFFFFE000U) == 0x8A116000U &&
5757+ jmpl == 0x81C04005U &&
5758+ nop == 0x01000000U)
5759+ {
5760+ unsigned long addr;
5761+
5762+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5763+ regs->u_regs[UREG_G1] <<= 32;
5764+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5765+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5766+ regs->tpc = addr;
5767+ regs->tnpc = addr+4;
5768+ return 2;
5769+ }
5770+ } while (0);
5771+
5772+ do { /* PaX: unpatched PLT emulation step 1 */
5773+ unsigned int sethi, ba, nop;
5774+
5775+ err = get_user(sethi, (unsigned int *)regs->tpc);
5776+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5777+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5778+
5779+ if (err)
5780+ break;
5781+
5782+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5783+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5784+ nop == 0x01000000U)
5785+ {
5786+ unsigned long addr;
5787+ unsigned int save, call;
5788+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5789+
5790+ if ((ba & 0xFFC00000U) == 0x30800000U)
5791+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5792+ else
5793+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5794+
5795+ if (test_thread_flag(TIF_32BIT))
5796+ addr &= 0xFFFFFFFFUL;
5797+
5798+ err = get_user(save, (unsigned int *)addr);
5799+ err |= get_user(call, (unsigned int *)(addr+4));
5800+ err |= get_user(nop, (unsigned int *)(addr+8));
5801+ if (err)
5802+ break;
5803+
5804+#ifdef CONFIG_PAX_DLRESOLVE
5805+ if (save == 0x9DE3BFA8U &&
5806+ (call & 0xC0000000U) == 0x40000000U &&
5807+ nop == 0x01000000U)
5808+ {
5809+ struct vm_area_struct *vma;
5810+ unsigned long call_dl_resolve;
5811+
5812+ down_read(&current->mm->mmap_sem);
5813+ call_dl_resolve = current->mm->call_dl_resolve;
5814+ up_read(&current->mm->mmap_sem);
5815+ if (likely(call_dl_resolve))
5816+ goto emulate;
5817+
5818+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5819+
5820+ down_write(&current->mm->mmap_sem);
5821+ if (current->mm->call_dl_resolve) {
5822+ call_dl_resolve = current->mm->call_dl_resolve;
5823+ up_write(&current->mm->mmap_sem);
5824+ if (vma)
5825+ kmem_cache_free(vm_area_cachep, vma);
5826+ goto emulate;
5827+ }
5828+
5829+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5830+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5831+ up_write(&current->mm->mmap_sem);
5832+ if (vma)
5833+ kmem_cache_free(vm_area_cachep, vma);
5834+ return 1;
5835+ }
5836+
5837+ if (pax_insert_vma(vma, call_dl_resolve)) {
5838+ up_write(&current->mm->mmap_sem);
5839+ kmem_cache_free(vm_area_cachep, vma);
5840+ return 1;
5841+ }
5842+
5843+ current->mm->call_dl_resolve = call_dl_resolve;
5844+ up_write(&current->mm->mmap_sem);
5845+
5846+emulate:
5847+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5848+ regs->tpc = call_dl_resolve;
5849+ regs->tnpc = addr+4;
5850+ return 3;
5851+ }
5852+#endif
5853+
5854+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5855+ if ((save & 0xFFC00000U) == 0x05000000U &&
5856+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5857+ nop == 0x01000000U)
5858+ {
5859+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5860+ regs->u_regs[UREG_G2] = addr + 4;
5861+ addr = (save & 0x003FFFFFU) << 10;
5862+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5863+
5864+ if (test_thread_flag(TIF_32BIT))
5865+ addr &= 0xFFFFFFFFUL;
5866+
5867+ regs->tpc = addr;
5868+ regs->tnpc = addr+4;
5869+ return 3;
5870+ }
5871+
5872+ /* PaX: 64-bit PLT stub */
5873+ err = get_user(sethi1, (unsigned int *)addr);
5874+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5875+ err |= get_user(or1, (unsigned int *)(addr+8));
5876+ err |= get_user(or2, (unsigned int *)(addr+12));
5877+ err |= get_user(sllx, (unsigned int *)(addr+16));
5878+ err |= get_user(add, (unsigned int *)(addr+20));
5879+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5880+ err |= get_user(nop, (unsigned int *)(addr+28));
5881+ if (err)
5882+ break;
5883+
5884+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5885+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5886+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5887+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5888+ sllx == 0x89293020U &&
5889+ add == 0x8A010005U &&
5890+ jmpl == 0x89C14000U &&
5891+ nop == 0x01000000U)
5892+ {
5893+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5894+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5895+ regs->u_regs[UREG_G4] <<= 32;
5896+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5897+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5898+ regs->u_regs[UREG_G4] = addr + 24;
5899+ addr = regs->u_regs[UREG_G5];
5900+ regs->tpc = addr;
5901+ regs->tnpc = addr+4;
5902+ return 3;
5903+ }
5904+ }
5905+ } while (0);
5906+
5907+#ifdef CONFIG_PAX_DLRESOLVE
5908+ do { /* PaX: unpatched PLT emulation step 2 */
5909+ unsigned int save, call, nop;
5910+
5911+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5912+ err |= get_user(call, (unsigned int *)regs->tpc);
5913+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5914+ if (err)
5915+ break;
5916+
5917+ if (save == 0x9DE3BFA8U &&
5918+ (call & 0xC0000000U) == 0x40000000U &&
5919+ nop == 0x01000000U)
5920+ {
5921+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5922+
5923+ if (test_thread_flag(TIF_32BIT))
5924+ dl_resolve &= 0xFFFFFFFFUL;
5925+
5926+ regs->u_regs[UREG_RETPC] = regs->tpc;
5927+ regs->tpc = dl_resolve;
5928+ regs->tnpc = dl_resolve+4;
5929+ return 3;
5930+ }
5931+ } while (0);
5932+#endif
5933+
5934+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5935+ unsigned int sethi, ba, nop;
5936+
5937+ err = get_user(sethi, (unsigned int *)regs->tpc);
5938+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5939+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5940+
5941+ if (err)
5942+ break;
5943+
5944+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5945+ (ba & 0xFFF00000U) == 0x30600000U &&
5946+ nop == 0x01000000U)
5947+ {
5948+ unsigned long addr;
5949+
5950+ addr = (sethi & 0x003FFFFFU) << 10;
5951+ regs->u_regs[UREG_G1] = addr;
5952+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5953+
5954+ if (test_thread_flag(TIF_32BIT))
5955+ addr &= 0xFFFFFFFFUL;
5956+
5957+ regs->tpc = addr;
5958+ regs->tnpc = addr+4;
5959+ return 2;
5960+ }
5961+ } while (0);
5962+
5963+#endif
5964+
5965+ return 1;
5966+}
5967+
5968+void pax_report_insns(void *pc, void *sp)
5969+{
5970+ unsigned long i;
5971+
5972+ printk(KERN_ERR "PAX: bytes at PC: ");
5973+ for (i = 0; i < 8; i++) {
5974+ unsigned int c;
5975+ if (get_user(c, (unsigned int *)pc+i))
5976+ printk(KERN_CONT "???????? ");
5977+ else
5978+ printk(KERN_CONT "%08x ", c);
5979+ }
5980+ printk("\n");
5981+}
5982+#endif
5983+
5984 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5985 {
5986 struct mm_struct *mm = current->mm;
5987@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5988 if (!vma)
5989 goto bad_area;
5990
5991+#ifdef CONFIG_PAX_PAGEEXEC
5992+ /* PaX: detect ITLB misses on non-exec pages */
5993+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5994+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5995+ {
5996+ if (address != regs->tpc)
5997+ goto good_area;
5998+
5999+ up_read(&mm->mmap_sem);
6000+ switch (pax_handle_fetch_fault(regs)) {
6001+
6002+#ifdef CONFIG_PAX_EMUPLT
6003+ case 2:
6004+ case 3:
6005+ return;
6006+#endif
6007+
6008+ }
6009+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6010+ do_group_exit(SIGKILL);
6011+ }
6012+#endif
6013+
6014 /* Pure DTLB misses do not tell us whether the fault causing
6015 * load/store/atomic was a write or not, it only says that there
6016 * was no match. So in such a case we (carefully) read the
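
fault_64.c carries the same PLT-emulation logic with two 64-bit twists visible above: immediates are sign-extended with the 0xFFFFFFFFFFFFE000UL-style masks, and for compat tasks the computed target is truncated with "addr &= 0xFFFFFFFFUL" when TIF_32BIT is set. The 64-bit counterpart of the previous decode (invented opcodes again; assumes a 64-bit long, as on sparc64):

#include <stdio.h>

int main(void)
{
        unsigned int sethi = 0x03000040U;       /* sethi %hi(0x10000), %g1  (example) */
        unsigned int jmpl  = 0x81C07FF8U;       /* jmpl  %g1 - 8, %g0       (example) */
        int compat_task = 1;                    /* stands in for test_thread_flag(TIF_32BIT) */
        unsigned long addr;

        addr = (sethi & 0x003FFFFFU) << 10;
        /* 64-bit sign extension of the negative simm13 (-8 here) */
        addr += ((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL;

        if (compat_task)
                addr &= 0xFFFFFFFFUL;           /* compat tasks stay below 4 GB */

        printf("emulated jump target: %#lx\n", addr);   /* 0xfff8 */
        return 0;
}
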
6017diff -urNp linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c
6018--- linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
6019+++ linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
6020@@ -69,7 +69,7 @@ full_search:
6021 }
6022 return -ENOMEM;
6023 }
6024- if (likely(!vma || addr + len <= vma->vm_start)) {
6025+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6026 /*
6027 * Remember the place where we stopped the search:
6028 */
6029@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
6030 /* make sure it can fit in the remaining address space */
6031 if (likely(addr > len)) {
6032 vma = find_vma(mm, addr-len);
6033- if (!vma || addr <= vma->vm_start) {
6034+ if (check_heap_stack_gap(vma, addr - len, len)) {
6035 /* remember the address as a hint for next time */
6036 return (mm->free_area_cache = addr-len);
6037 }
6038@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
6039 if (unlikely(mm->mmap_base < len))
6040 goto bottomup;
6041
6042- addr = (mm->mmap_base-len) & HPAGE_MASK;
6043+ addr = mm->mmap_base - len;
6044
6045 do {
6046+ addr &= HPAGE_MASK;
6047 /*
6048 * Lookup failure means no vma is above this address,
6049 * else if new region fits below vma->vm_start,
6050 * return with success:
6051 */
6052 vma = find_vma(mm, addr);
6053- if (likely(!vma || addr+len <= vma->vm_start)) {
6054+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6055 /* remember the address as a hint for next time */
6056 return (mm->free_area_cache = addr);
6057 }
6058@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
6059 mm->cached_hole_size = vma->vm_start - addr;
6060
6061 /* try just below the current vma->vm_start */
6062- addr = (vma->vm_start-len) & HPAGE_MASK;
6063- } while (likely(len < vma->vm_start));
6064+ addr = skip_heap_stack_gap(vma, len);
6065+ } while (!IS_ERR_VALUE(addr));
6066
6067 bottomup:
6068 /*
6069@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
6070 if (addr) {
6071 addr = ALIGN(addr, HPAGE_SIZE);
6072 vma = find_vma(mm, addr);
6073- if (task_size - len >= addr &&
6074- (!vma || addr + len <= vma->vm_start))
6075+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6076 return addr;
6077 }
6078 if (mm->get_unmapped_area == arch_get_unmapped_area)
6079diff -urNp linux-2.6.32.45/arch/sparc/mm/init_32.c linux-2.6.32.45/arch/sparc/mm/init_32.c
6080--- linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
6081+++ linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
6082@@ -317,6 +317,9 @@ extern void device_scan(void);
6083 pgprot_t PAGE_SHARED __read_mostly;
6084 EXPORT_SYMBOL(PAGE_SHARED);
6085
6086+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6087+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6088+
6089 void __init paging_init(void)
6090 {
6091 switch(sparc_cpu_model) {
6092@@ -345,17 +348,17 @@ void __init paging_init(void)
6093
6094 /* Initialize the protection map with non-constant, MMU dependent values. */
6095 protection_map[0] = PAGE_NONE;
6096- protection_map[1] = PAGE_READONLY;
6097- protection_map[2] = PAGE_COPY;
6098- protection_map[3] = PAGE_COPY;
6099+ protection_map[1] = PAGE_READONLY_NOEXEC;
6100+ protection_map[2] = PAGE_COPY_NOEXEC;
6101+ protection_map[3] = PAGE_COPY_NOEXEC;
6102 protection_map[4] = PAGE_READONLY;
6103 protection_map[5] = PAGE_READONLY;
6104 protection_map[6] = PAGE_COPY;
6105 protection_map[7] = PAGE_COPY;
6106 protection_map[8] = PAGE_NONE;
6107- protection_map[9] = PAGE_READONLY;
6108- protection_map[10] = PAGE_SHARED;
6109- protection_map[11] = PAGE_SHARED;
6110+ protection_map[9] = PAGE_READONLY_NOEXEC;
6111+ protection_map[10] = PAGE_SHARED_NOEXEC;
6112+ protection_map[11] = PAGE_SHARED_NOEXEC;
6113 protection_map[12] = PAGE_READONLY;
6114 protection_map[13] = PAGE_READONLY;
6115 protection_map[14] = PAGE_SHARED;
6116diff -urNp linux-2.6.32.45/arch/sparc/mm/Makefile linux-2.6.32.45/arch/sparc/mm/Makefile
6117--- linux-2.6.32.45/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
6118+++ linux-2.6.32.45/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
6119@@ -2,7 +2,7 @@
6120 #
6121
6122 asflags-y := -ansi
6123-ccflags-y := -Werror
6124+#ccflags-y := -Werror
6125
6126 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6127 obj-y += fault_$(BITS).o
6128diff -urNp linux-2.6.32.45/arch/sparc/mm/srmmu.c linux-2.6.32.45/arch/sparc/mm/srmmu.c
6129--- linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
6130+++ linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
6131@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6132 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6133 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6134 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6135+
6136+#ifdef CONFIG_PAX_PAGEEXEC
6137+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6138+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6139+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6140+#endif
6141+
6142 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6143 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6144
6145diff -urNp linux-2.6.32.45/arch/um/include/asm/kmap_types.h linux-2.6.32.45/arch/um/include/asm/kmap_types.h
6146--- linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6147+++ linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6148@@ -23,6 +23,7 @@ enum km_type {
6149 KM_IRQ1,
6150 KM_SOFTIRQ0,
6151 KM_SOFTIRQ1,
6152+ KM_CLEARPAGE,
6153 KM_TYPE_NR
6154 };
6155
6156diff -urNp linux-2.6.32.45/arch/um/include/asm/page.h linux-2.6.32.45/arch/um/include/asm/page.h
6157--- linux-2.6.32.45/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6158+++ linux-2.6.32.45/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6159@@ -14,6 +14,9 @@
6160 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6161 #define PAGE_MASK (~(PAGE_SIZE-1))
6162
6163+#define ktla_ktva(addr) (addr)
6164+#define ktva_ktla(addr) (addr)
6165+
6166 #ifndef __ASSEMBLY__
6167
6168 struct page;
6169diff -urNp linux-2.6.32.45/arch/um/kernel/process.c linux-2.6.32.45/arch/um/kernel/process.c
6170--- linux-2.6.32.45/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6171+++ linux-2.6.32.45/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6172@@ -393,22 +393,6 @@ int singlestepping(void * t)
6173 return 2;
6174 }
6175
6176-/*
6177- * Only x86 and x86_64 have an arch_align_stack().
6178- * All other arches have "#define arch_align_stack(x) (x)"
6179- * in their asm/system.h
6180- * As this is included in UML from asm-um/system-generic.h,
6181- * we can use it to behave as the subarch does.
6182- */
6183-#ifndef arch_align_stack
6184-unsigned long arch_align_stack(unsigned long sp)
6185-{
6186- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6187- sp -= get_random_int() % 8192;
6188- return sp & ~0xf;
6189-}
6190-#endif
6191-
6192 unsigned long get_wchan(struct task_struct *p)
6193 {
6194 unsigned long stack_page, sp, ip;
6195diff -urNp linux-2.6.32.45/arch/um/sys-i386/syscalls.c linux-2.6.32.45/arch/um/sys-i386/syscalls.c
6196--- linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6197+++ linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6198@@ -11,6 +11,21 @@
6199 #include "asm/uaccess.h"
6200 #include "asm/unistd.h"
6201
6202+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6203+{
6204+ unsigned long pax_task_size = TASK_SIZE;
6205+
6206+#ifdef CONFIG_PAX_SEGMEXEC
6207+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6208+ pax_task_size = SEGMEXEC_TASK_SIZE;
6209+#endif
6210+
6211+ if (len > pax_task_size || addr > pax_task_size - len)
6212+ return -EINVAL;
6213+
6214+ return 0;
6215+}
6216+
6217 /*
6218 * Perform the select(nd, in, out, ex, tv) and mmap() system
6219 * calls. Linux/i386 didn't use to be able to handle more than
6220diff -urNp linux-2.6.32.45/arch/x86/boot/bitops.h linux-2.6.32.45/arch/x86/boot/bitops.h
6221--- linux-2.6.32.45/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6222+++ linux-2.6.32.45/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6223@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6224 u8 v;
6225 const u32 *p = (const u32 *)addr;
6226
6227- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6228+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6229 return v;
6230 }
6231
6232@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6233
6234 static inline void set_bit(int nr, void *addr)
6235 {
6236- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6237+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6238 }
6239
6240 #endif /* BOOT_BITOPS_H */
6241diff -urNp linux-2.6.32.45/arch/x86/boot/boot.h linux-2.6.32.45/arch/x86/boot/boot.h
6242--- linux-2.6.32.45/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6243+++ linux-2.6.32.45/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6244@@ -82,7 +82,7 @@ static inline void io_delay(void)
6245 static inline u16 ds(void)
6246 {
6247 u16 seg;
6248- asm("movw %%ds,%0" : "=rm" (seg));
6249+ asm volatile("movw %%ds,%0" : "=rm" (seg));
6250 return seg;
6251 }
6252
6253@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6254 static inline int memcmp(const void *s1, const void *s2, size_t len)
6255 {
6256 u8 diff;
6257- asm("repe; cmpsb; setnz %0"
6258+ asm volatile("repe; cmpsb; setnz %0"
6259 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6260 return diff;
6261 }
6262diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_32.S linux-2.6.32.45/arch/x86/boot/compressed/head_32.S
6263--- linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6264+++ linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6265@@ -76,7 +76,7 @@ ENTRY(startup_32)
6266 notl %eax
6267 andl %eax, %ebx
6268 #else
6269- movl $LOAD_PHYSICAL_ADDR, %ebx
6270+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6271 #endif
6272
6273 /* Target address to relocate to for decompression */
6274@@ -149,7 +149,7 @@ relocated:
6275 * and where it was actually loaded.
6276 */
6277 movl %ebp, %ebx
6278- subl $LOAD_PHYSICAL_ADDR, %ebx
6279+ subl $____LOAD_PHYSICAL_ADDR, %ebx
6280 jz 2f /* Nothing to be done if loaded at compiled addr. */
6281 /*
6282 * Process relocations.
6283@@ -157,8 +157,7 @@ relocated:
6284
6285 1: subl $4, %edi
6286 movl (%edi), %ecx
6287- testl %ecx, %ecx
6288- jz 2f
6289+ jecxz 2f
6290 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6291 jmp 1b
6292 2:
6293diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_64.S linux-2.6.32.45/arch/x86/boot/compressed/head_64.S
6294--- linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6295+++ linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6296@@ -91,7 +91,7 @@ ENTRY(startup_32)
6297 notl %eax
6298 andl %eax, %ebx
6299 #else
6300- movl $LOAD_PHYSICAL_ADDR, %ebx
6301+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6302 #endif
6303
6304 /* Target address to relocate to for decompression */
6305@@ -183,7 +183,7 @@ no_longmode:
6306 hlt
6307 jmp 1b
6308
6309-#include "../../kernel/verify_cpu_64.S"
6310+#include "../../kernel/verify_cpu.S"
6311
6312 /*
6313 * Be careful here startup_64 needs to be at a predictable
6314@@ -234,7 +234,7 @@ ENTRY(startup_64)
6315 notq %rax
6316 andq %rax, %rbp
6317 #else
6318- movq $LOAD_PHYSICAL_ADDR, %rbp
6319+ movq $____LOAD_PHYSICAL_ADDR, %rbp
6320 #endif
6321
6322 /* Target address to relocate to for decompression */
6323diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/Makefile linux-2.6.32.45/arch/x86/boot/compressed/Makefile
6324--- linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-03-27 14:31:47.000000000 -0400
6325+++ linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-08-07 14:38:34.000000000 -0400
6326@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
6327 KBUILD_CFLAGS += $(cflags-y)
6328 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6329 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6330+ifdef CONSTIFY_PLUGIN
6331+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6332+endif
6333
6334 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6335 GCOV_PROFILE := n
6336diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/misc.c linux-2.6.32.45/arch/x86/boot/compressed/misc.c
6337--- linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6338+++ linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6339@@ -288,7 +288,7 @@ static void parse_elf(void *output)
6340 case PT_LOAD:
6341 #ifdef CONFIG_RELOCATABLE
6342 dest = output;
6343- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6344+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6345 #else
6346 dest = (void *)(phdr->p_paddr);
6347 #endif
6348@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6349 error("Destination address too large");
6350 #endif
6351 #ifndef CONFIG_RELOCATABLE
6352- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6353+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6354 error("Wrong destination address");
6355 #endif
6356
6357diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c
6358--- linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6359+++ linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6360@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6361
6362 offs = (olen > ilen) ? olen - ilen : 0;
6363 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6364- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6365+ offs += 64*1024; /* Add 64K bytes slack */
6366 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6367
6368 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6369diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/relocs.c linux-2.6.32.45/arch/x86/boot/compressed/relocs.c
6370--- linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6371+++ linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6372@@ -10,8 +10,11 @@
6373 #define USE_BSD
6374 #include <endian.h>
6375
6376+#include "../../../../include/linux/autoconf.h"
6377+
6378 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6379 static Elf32_Ehdr ehdr;
6380+static Elf32_Phdr *phdr;
6381 static unsigned long reloc_count, reloc_idx;
6382 static unsigned long *relocs;
6383
6384@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6385
6386 static int is_safe_abs_reloc(const char* sym_name)
6387 {
6388- int i;
6389+ unsigned int i;
6390
6391 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6392 if (!strcmp(sym_name, safe_abs_relocs[i]))
6393@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6394 }
6395 }
6396
6397+static void read_phdrs(FILE *fp)
6398+{
6399+ unsigned int i;
6400+
6401+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6402+ if (!phdr) {
6403+ die("Unable to allocate %d program headers\n",
6404+ ehdr.e_phnum);
6405+ }
6406+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6407+ die("Seek to %d failed: %s\n",
6408+ ehdr.e_phoff, strerror(errno));
6409+ }
6410+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6411+ die("Cannot read ELF program headers: %s\n",
6412+ strerror(errno));
6413+ }
6414+ for(i = 0; i < ehdr.e_phnum; i++) {
6415+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6416+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6417+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6418+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6419+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6420+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6421+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6422+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6423+ }
6424+
6425+}
6426+
6427 static void read_shdrs(FILE *fp)
6428 {
6429- int i;
6430+ unsigned int i;
6431 Elf32_Shdr shdr;
6432
6433 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6434@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6435
6436 static void read_strtabs(FILE *fp)
6437 {
6438- int i;
6439+ unsigned int i;
6440 for (i = 0; i < ehdr.e_shnum; i++) {
6441 struct section *sec = &secs[i];
6442 if (sec->shdr.sh_type != SHT_STRTAB) {
6443@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6444
6445 static void read_symtabs(FILE *fp)
6446 {
6447- int i,j;
6448+ unsigned int i,j;
6449 for (i = 0; i < ehdr.e_shnum; i++) {
6450 struct section *sec = &secs[i];
6451 if (sec->shdr.sh_type != SHT_SYMTAB) {
6452@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6453
6454 static void read_relocs(FILE *fp)
6455 {
6456- int i,j;
6457+ unsigned int i,j;
6458+ uint32_t base;
6459+
6460 for (i = 0; i < ehdr.e_shnum; i++) {
6461 struct section *sec = &secs[i];
6462 if (sec->shdr.sh_type != SHT_REL) {
6463@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6464 die("Cannot read symbol table: %s\n",
6465 strerror(errno));
6466 }
6467+ base = 0;
6468+ for (j = 0; j < ehdr.e_phnum; j++) {
6469+ if (phdr[j].p_type != PT_LOAD )
6470+ continue;
6471+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6472+ continue;
6473+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6474+ break;
6475+ }
6476 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6477 Elf32_Rel *rel = &sec->reltab[j];
6478- rel->r_offset = elf32_to_cpu(rel->r_offset);
6479+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6480 rel->r_info = elf32_to_cpu(rel->r_info);
6481 }
6482 }
6483@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6484
6485 static void print_absolute_symbols(void)
6486 {
6487- int i;
6488+ unsigned int i;
6489 printf("Absolute symbols\n");
6490 printf(" Num: Value Size Type Bind Visibility Name\n");
6491 for (i = 0; i < ehdr.e_shnum; i++) {
6492 struct section *sec = &secs[i];
6493 char *sym_strtab;
6494 Elf32_Sym *sh_symtab;
6495- int j;
6496+ unsigned int j;
6497
6498 if (sec->shdr.sh_type != SHT_SYMTAB) {
6499 continue;
6500@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6501
6502 static void print_absolute_relocs(void)
6503 {
6504- int i, printed = 0;
6505+ unsigned int i, printed = 0;
6506
6507 for (i = 0; i < ehdr.e_shnum; i++) {
6508 struct section *sec = &secs[i];
6509 struct section *sec_applies, *sec_symtab;
6510 char *sym_strtab;
6511 Elf32_Sym *sh_symtab;
6512- int j;
6513+ unsigned int j;
6514 if (sec->shdr.sh_type != SHT_REL) {
6515 continue;
6516 }
6517@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6518
6519 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6520 {
6521- int i;
6522+ unsigned int i;
6523 /* Walk through the relocations */
6524 for (i = 0; i < ehdr.e_shnum; i++) {
6525 char *sym_strtab;
6526 Elf32_Sym *sh_symtab;
6527 struct section *sec_applies, *sec_symtab;
6528- int j;
6529+ unsigned int j;
6530 struct section *sec = &secs[i];
6531
6532 if (sec->shdr.sh_type != SHT_REL) {
6533@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6534 if (sym->st_shndx == SHN_ABS) {
6535 continue;
6536 }
6537+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6538+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6539+ continue;
6540+
6541+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6542+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6543+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6544+ continue;
6545+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6546+ continue;
6547+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6548+ continue;
6549+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6550+ continue;
6551+#endif
6552 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6553 /*
6554 * NONE can be ignored and and PC relative
6555@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6556
6557 static void emit_relocs(int as_text)
6558 {
6559- int i;
6560+ unsigned int i;
6561 /* Count how many relocations I have and allocate space for them. */
6562 reloc_count = 0;
6563 walk_relocs(count_reloc);
6564@@ -634,6 +693,7 @@ int main(int argc, char **argv)
6565 fname, strerror(errno));
6566 }
6567 read_ehdr(fp);
6568+ read_phdrs(fp);
6569 read_shdrs(fp);
6570 read_strtabs(fp);
6571 read_symtabs(fp);
6572diff -urNp linux-2.6.32.45/arch/x86/boot/cpucheck.c linux-2.6.32.45/arch/x86/boot/cpucheck.c
6573--- linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6574+++ linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6575@@ -74,7 +74,7 @@ static int has_fpu(void)
6576 u16 fcw = -1, fsw = -1;
6577 u32 cr0;
6578
6579- asm("movl %%cr0,%0" : "=r" (cr0));
6580+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6581 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6582 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6583 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6584@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6585 {
6586 u32 f0, f1;
6587
6588- asm("pushfl ; "
6589+ asm volatile("pushfl ; "
6590 "pushfl ; "
6591 "popl %0 ; "
6592 "movl %0,%1 ; "
6593@@ -115,7 +115,7 @@ static void get_flags(void)
6594 set_bit(X86_FEATURE_FPU, cpu.flags);
6595
6596 if (has_eflag(X86_EFLAGS_ID)) {
6597- asm("cpuid"
6598+ asm volatile("cpuid"
6599 : "=a" (max_intel_level),
6600 "=b" (cpu_vendor[0]),
6601 "=d" (cpu_vendor[1]),
6602@@ -124,7 +124,7 @@ static void get_flags(void)
6603
6604 if (max_intel_level >= 0x00000001 &&
6605 max_intel_level <= 0x0000ffff) {
6606- asm("cpuid"
6607+ asm volatile("cpuid"
6608 : "=a" (tfms),
6609 "=c" (cpu.flags[4]),
6610 "=d" (cpu.flags[0])
6611@@ -136,7 +136,7 @@ static void get_flags(void)
6612 cpu.model += ((tfms >> 16) & 0xf) << 4;
6613 }
6614
6615- asm("cpuid"
6616+ asm volatile("cpuid"
6617 : "=a" (max_amd_level)
6618 : "a" (0x80000000)
6619 : "ebx", "ecx", "edx");
6620@@ -144,7 +144,7 @@ static void get_flags(void)
6621 if (max_amd_level >= 0x80000001 &&
6622 max_amd_level <= 0x8000ffff) {
6623 u32 eax = 0x80000001;
6624- asm("cpuid"
6625+ asm volatile("cpuid"
6626 : "+a" (eax),
6627 "=c" (cpu.flags[6]),
6628 "=d" (cpu.flags[1])
6629@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6630 u32 ecx = MSR_K7_HWCR;
6631 u32 eax, edx;
6632
6633- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6634+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6635 eax &= ~(1 << 15);
6636- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6637+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6638
6639 get_flags(); /* Make sure it really did something */
6640 err = check_flags();
6641@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6642 u32 ecx = MSR_VIA_FCR;
6643 u32 eax, edx;
6644
6645- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6646+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6647 eax |= (1<<1)|(1<<7);
6648- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6649+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6650
6651 set_bit(X86_FEATURE_CX8, cpu.flags);
6652 err = check_flags();
6653@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6654 u32 eax, edx;
6655 u32 level = 1;
6656
6657- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6658- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6659- asm("cpuid"
6660+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6661+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6662+ asm volatile("cpuid"
6663 : "+a" (level), "=d" (cpu.flags[0])
6664 : : "ecx", "ebx");
6665- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6666+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6667
6668 err = check_flags();
6669 }
6670diff -urNp linux-2.6.32.45/arch/x86/boot/header.S linux-2.6.32.45/arch/x86/boot/header.S
6671--- linux-2.6.32.45/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6672+++ linux-2.6.32.45/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6673@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6674 # single linked list of
6675 # struct setup_data
6676
6677-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6678+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6679
6680 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6681 #define VO_INIT_SIZE (VO__end - VO__text)
6682diff -urNp linux-2.6.32.45/arch/x86/boot/Makefile linux-2.6.32.45/arch/x86/boot/Makefile
6683--- linux-2.6.32.45/arch/x86/boot/Makefile 2011-03-27 14:31:47.000000000 -0400
6684+++ linux-2.6.32.45/arch/x86/boot/Makefile 2011-08-07 14:38:13.000000000 -0400
6685@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
6686 $(call cc-option, -fno-stack-protector) \
6687 $(call cc-option, -mpreferred-stack-boundary=2)
6688 KBUILD_CFLAGS += $(call cc-option, -m32)
6689+ifdef CONSTIFY_PLUGIN
6690+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6691+endif
6692 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6693 GCOV_PROFILE := n
6694
6695diff -urNp linux-2.6.32.45/arch/x86/boot/memory.c linux-2.6.32.45/arch/x86/boot/memory.c
6696--- linux-2.6.32.45/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6697+++ linux-2.6.32.45/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6698@@ -19,7 +19,7 @@
6699
6700 static int detect_memory_e820(void)
6701 {
6702- int count = 0;
6703+ unsigned int count = 0;
6704 struct biosregs ireg, oreg;
6705 struct e820entry *desc = boot_params.e820_map;
6706 static struct e820entry buf; /* static so it is zeroed */
6707diff -urNp linux-2.6.32.45/arch/x86/boot/video.c linux-2.6.32.45/arch/x86/boot/video.c
6708--- linux-2.6.32.45/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6709+++ linux-2.6.32.45/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6710@@ -90,7 +90,7 @@ static void store_mode_params(void)
6711 static unsigned int get_entry(void)
6712 {
6713 char entry_buf[4];
6714- int i, len = 0;
6715+ unsigned int i, len = 0;
6716 int key;
6717 unsigned int v;
6718
6719diff -urNp linux-2.6.32.45/arch/x86/boot/video-vesa.c linux-2.6.32.45/arch/x86/boot/video-vesa.c
6720--- linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6721+++ linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6722@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6723
6724 boot_params.screen_info.vesapm_seg = oreg.es;
6725 boot_params.screen_info.vesapm_off = oreg.di;
6726+ boot_params.screen_info.vesapm_size = oreg.cx;
6727 }
6728
6729 /*
6730diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_aout.c linux-2.6.32.45/arch/x86/ia32/ia32_aout.c
6731--- linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6732+++ linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6733@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6734 unsigned long dump_start, dump_size;
6735 struct user32 dump;
6736
6737+ memset(&dump, 0, sizeof(dump));
6738+
6739 fs = get_fs();
6740 set_fs(KERNEL_DS);
6741 has_dumped = 1;
6742@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6743 dump_size = dump.u_ssize << PAGE_SHIFT;
6744 DUMP_WRITE(dump_start, dump_size);
6745 }
6746- /*
6747- * Finally dump the task struct. Not be used by gdb, but
6748- * could be useful
6749- */
6750- set_fs(KERNEL_DS);
6751- DUMP_WRITE(current, sizeof(*current));
6752 end_coredump:
6753 set_fs(fs);
6754 return has_dumped;
6755diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32entry.S linux-2.6.32.45/arch/x86/ia32/ia32entry.S
6756--- linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6757+++ linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6758@@ -13,6 +13,7 @@
6759 #include <asm/thread_info.h>
6760 #include <asm/segment.h>
6761 #include <asm/irqflags.h>
6762+#include <asm/pgtable.h>
6763 #include <linux/linkage.h>
6764
6765 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6766@@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6767 ENDPROC(native_irq_enable_sysexit)
6768 #endif
6769
6770+ .macro pax_enter_kernel_user
6771+#ifdef CONFIG_PAX_MEMORY_UDEREF
6772+ call pax_enter_kernel_user
6773+#endif
6774+ .endm
6775+
6776+ .macro pax_exit_kernel_user
6777+#ifdef CONFIG_PAX_MEMORY_UDEREF
6778+ call pax_exit_kernel_user
6779+#endif
6780+#ifdef CONFIG_PAX_RANDKSTACK
6781+ pushq %rax
6782+ call pax_randomize_kstack
6783+ popq %rax
6784+#endif
6785+ pax_erase_kstack
6786+ .endm
6787+
6788+.macro pax_erase_kstack
6789+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6790+ call pax_erase_kstack
6791+#endif
6792+.endm
6793+
6794 /*
6795 * 32bit SYSENTER instruction entry.
6796 *
6797@@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6798 CFI_REGISTER rsp,rbp
6799 SWAPGS_UNSAFE_STACK
6800 movq PER_CPU_VAR(kernel_stack), %rsp
6801- addq $(KERNEL_STACK_OFFSET),%rsp
6802+ pax_enter_kernel_user
6803 /*
6804 * No need to follow this irqs on/off section: the syscall
6805 * disabled irqs, here we enable it straight after entry:
6806@@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6807 pushfq
6808 CFI_ADJUST_CFA_OFFSET 8
6809 /*CFI_REL_OFFSET rflags,0*/
6810- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6811+ GET_THREAD_INFO(%r10)
6812+ movl TI_sysenter_return(%r10), %r10d
6813 CFI_REGISTER rip,r10
6814 pushq $__USER32_CS
6815 CFI_ADJUST_CFA_OFFSET 8
6816@@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6817 SAVE_ARGS 0,0,1
6818 /* no need to do an access_ok check here because rbp has been
6819 32bit zero extended */
6820+
6821+#ifdef CONFIG_PAX_MEMORY_UDEREF
6822+ mov $PAX_USER_SHADOW_BASE,%r10
6823+ add %r10,%rbp
6824+#endif
6825+
6826 1: movl (%rbp),%ebp
6827 .section __ex_table,"a"
6828 .quad 1b,ia32_badarg
6829@@ -172,6 +204,7 @@ sysenter_dispatch:
6830 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6831 jnz sysexit_audit
6832 sysexit_from_sys_call:
6833+ pax_exit_kernel_user
6834 andl $~TS_COMPAT,TI_status(%r10)
6835 /* clear IF, that popfq doesn't enable interrupts early */
6836 andl $~0x200,EFLAGS-R11(%rsp)
6837@@ -200,6 +233,9 @@ sysexit_from_sys_call:
6838 movl %eax,%esi /* 2nd arg: syscall number */
6839 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6840 call audit_syscall_entry
6841+
6842+ pax_erase_kstack
6843+
6844 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6845 cmpq $(IA32_NR_syscalls-1),%rax
6846 ja ia32_badsys
6847@@ -252,6 +288,9 @@ sysenter_tracesys:
6848 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6849 movq %rsp,%rdi /* &pt_regs -> arg1 */
6850 call syscall_trace_enter
6851+
6852+ pax_erase_kstack
6853+
6854 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6855 RESTORE_REST
6856 cmpq $(IA32_NR_syscalls-1),%rax
6857@@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6858 ENTRY(ia32_cstar_target)
6859 CFI_STARTPROC32 simple
6860 CFI_SIGNAL_FRAME
6861- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6862+ CFI_DEF_CFA rsp,0
6863 CFI_REGISTER rip,rcx
6864 /*CFI_REGISTER rflags,r11*/
6865 SWAPGS_UNSAFE_STACK
6866 movl %esp,%r8d
6867 CFI_REGISTER rsp,r8
6868 movq PER_CPU_VAR(kernel_stack),%rsp
6869+
6870+#ifdef CONFIG_PAX_MEMORY_UDEREF
6871+ pax_enter_kernel_user
6872+#endif
6873+
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879- SAVE_ARGS 8,1,1
6880+ SAVE_ARGS 8*6,1,1
6881 movl %eax,%eax /* zero extension */
6882 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6883 movq %rcx,RIP-ARGOFFSET(%rsp)
6884@@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6885 /* no need to do an access_ok check here because r8 has been
6886 32bit zero extended */
6887 /* hardware stack frame is complete now */
6888+
6889+#ifdef CONFIG_PAX_MEMORY_UDEREF
6890+ mov $PAX_USER_SHADOW_BASE,%r10
6891+ add %r10,%r8
6892+#endif
6893+
6894 1: movl (%r8),%r9d
6895 .section __ex_table,"a"
6896 .quad 1b,ia32_badarg
6897@@ -333,6 +383,7 @@ cstar_dispatch:
6898 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6899 jnz sysretl_audit
6900 sysretl_from_sys_call:
6901+ pax_exit_kernel_user
6902 andl $~TS_COMPAT,TI_status(%r10)
6903 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6904 movl RIP-ARGOFFSET(%rsp),%ecx
6905@@ -370,6 +421,9 @@ cstar_tracesys:
6906 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6907 movq %rsp,%rdi /* &pt_regs -> arg1 */
6908 call syscall_trace_enter
6909+
6910+ pax_erase_kstack
6911+
6912 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6913 RESTORE_REST
6914 xchgl %ebp,%r9d
6915@@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6916 CFI_REL_OFFSET rip,RIP-RIP
6917 PARAVIRT_ADJUST_EXCEPTION_FRAME
6918 SWAPGS
6919+ pax_enter_kernel_user
6920 /*
6921 * No need to follow this irqs on/off section: the syscall
6922 * disabled irqs and here we enable it straight after entry:
6923@@ -448,6 +503,9 @@ ia32_tracesys:
6924 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6925 movq %rsp,%rdi /* &pt_regs -> arg1 */
6926 call syscall_trace_enter
6927+
6928+ pax_erase_kstack
6929+
6930 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6931 RESTORE_REST
6932 cmpq $(IA32_NR_syscalls-1),%rax
6933diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_signal.c linux-2.6.32.45/arch/x86/ia32/ia32_signal.c
6934--- linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6935+++ linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6936@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6937 sp -= frame_size;
6938 /* Align the stack pointer according to the i386 ABI,
6939 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6940- sp = ((sp + 4) & -16ul) - 4;
6941+ sp = ((sp - 12) & -16ul) - 4;
6942 return (void __user *) sp;
6943 }
6944
6945@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6946 * These are actually not used anymore, but left because some
6947 * gdb versions depend on them as a marker.
6948 */
6949- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6950+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6951 } put_user_catch(err);
6952
6953 if (err)
6954@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6955 0xb8,
6956 __NR_ia32_rt_sigreturn,
6957 0x80cd,
6958- 0,
6959+ 0
6960 };
6961
6962 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6963@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6964
6965 if (ka->sa.sa_flags & SA_RESTORER)
6966 restorer = ka->sa.sa_restorer;
6967+ else if (current->mm->context.vdso)
6968+ /* Return stub is in 32bit vsyscall page */
6969+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6970 else
6971- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6972- rt_sigreturn);
6973+ restorer = &frame->retcode;
6974 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6975
6976 /*
6977 * Not actually used anymore, but left because some gdb
6978 * versions need it.
6979 */
6980- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6981+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6982 } put_user_catch(err);
6983
6984 if (err)
6985diff -urNp linux-2.6.32.45/arch/x86/include/asm/alternative.h linux-2.6.32.45/arch/x86/include/asm/alternative.h
6986--- linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6987+++ linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6988@@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6989 " .byte 662b-661b\n" /* sourcelen */ \
6990 " .byte 664f-663f\n" /* replacementlen */ \
6991 ".previous\n" \
6992- ".section .altinstr_replacement, \"ax\"\n" \
6993+ ".section .altinstr_replacement, \"a\"\n" \
6994 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6995 ".previous"
6996
6997diff -urNp linux-2.6.32.45/arch/x86/include/asm/apic.h linux-2.6.32.45/arch/x86/include/asm/apic.h
6998--- linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-03-27 14:31:47.000000000 -0400
6999+++ linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-08-17 20:01:15.000000000 -0400
7000@@ -46,7 +46,7 @@ static inline void generic_apic_probe(vo
7001
7002 #ifdef CONFIG_X86_LOCAL_APIC
7003
7004-extern unsigned int apic_verbosity;
7005+extern int apic_verbosity;
7006 extern int local_apic_timer_c2_ok;
7007
7008 extern int disable_apic;
7009diff -urNp linux-2.6.32.45/arch/x86/include/asm/apm.h linux-2.6.32.45/arch/x86/include/asm/apm.h
7010--- linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
7011+++ linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
7012@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
7013 __asm__ __volatile__(APM_DO_ZERO_SEGS
7014 "pushl %%edi\n\t"
7015 "pushl %%ebp\n\t"
7016- "lcall *%%cs:apm_bios_entry\n\t"
7017+ "lcall *%%ss:apm_bios_entry\n\t"
7018 "setc %%al\n\t"
7019 "popl %%ebp\n\t"
7020 "popl %%edi\n\t"
7021@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
7022 __asm__ __volatile__(APM_DO_ZERO_SEGS
7023 "pushl %%edi\n\t"
7024 "pushl %%ebp\n\t"
7025- "lcall *%%cs:apm_bios_entry\n\t"
7026+ "lcall *%%ss:apm_bios_entry\n\t"
7027 "setc %%bl\n\t"
7028 "popl %%ebp\n\t"
7029 "popl %%edi\n\t"
7030diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_32.h linux-2.6.32.45/arch/x86/include/asm/atomic_32.h
7031--- linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
7032+++ linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
7033@@ -25,6 +25,17 @@ static inline int atomic_read(const atom
7034 }
7035
7036 /**
7037+ * atomic_read_unchecked - read atomic variable
7038+ * @v: pointer of type atomic_unchecked_t
7039+ *
7040+ * Atomically reads the value of @v.
7041+ */
7042+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7043+{
7044+ return v->counter;
7045+}
7046+
7047+/**
7048 * atomic_set - set atomic variable
7049 * @v: pointer of type atomic_t
7050 * @i: required value
7051@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
7052 }
7053
7054 /**
7055+ * atomic_set_unchecked - set atomic variable
7056+ * @v: pointer of type atomic_unchecked_t
7057+ * @i: required value
7058+ *
7059+ * Atomically sets the value of @v to @i.
7060+ */
7061+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7062+{
7063+ v->counter = i;
7064+}
7065+
7066+/**
7067 * atomic_add - add integer to atomic variable
7068 * @i: integer value to add
7069 * @v: pointer of type atomic_t
7070@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
7071 */
7072 static inline void atomic_add(int i, atomic_t *v)
7073 {
7074- asm volatile(LOCK_PREFIX "addl %1,%0"
7075+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7076+
7077+#ifdef CONFIG_PAX_REFCOUNT
7078+ "jno 0f\n"
7079+ LOCK_PREFIX "subl %1,%0\n"
7080+ "int $4\n0:\n"
7081+ _ASM_EXTABLE(0b, 0b)
7082+#endif
7083+
7084+ : "+m" (v->counter)
7085+ : "ir" (i));
7086+}
7087+
7088+/**
7089+ * atomic_add_unchecked - add integer to atomic variable
7090+ * @i: integer value to add
7091+ * @v: pointer of type atomic_unchecked_t
7092+ *
7093+ * Atomically adds @i to @v.
7094+ */
7095+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7096+{
7097+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7098 : "+m" (v->counter)
7099 : "ir" (i));
7100 }
7101@@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
7102 */
7103 static inline void atomic_sub(int i, atomic_t *v)
7104 {
7105- asm volatile(LOCK_PREFIX "subl %1,%0"
7106+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7107+
7108+#ifdef CONFIG_PAX_REFCOUNT
7109+ "jno 0f\n"
7110+ LOCK_PREFIX "addl %1,%0\n"
7111+ "int $4\n0:\n"
7112+ _ASM_EXTABLE(0b, 0b)
7113+#endif
7114+
7115+ : "+m" (v->counter)
7116+ : "ir" (i));
7117+}
7118+
7119+/**
7120+ * atomic_sub_unchecked - subtract integer from atomic variable
7121+ * @i: integer value to subtract
7122+ * @v: pointer of type atomic_unchecked_t
7123+ *
7124+ * Atomically subtracts @i from @v.
7125+ */
7126+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7127+{
7128+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7129 : "+m" (v->counter)
7130 : "ir" (i));
7131 }
7132@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
7133 {
7134 unsigned char c;
7135
7136- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7137+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7138+
7139+#ifdef CONFIG_PAX_REFCOUNT
7140+ "jno 0f\n"
7141+ LOCK_PREFIX "addl %2,%0\n"
7142+ "int $4\n0:\n"
7143+ _ASM_EXTABLE(0b, 0b)
7144+#endif
7145+
7146+ "sete %1\n"
7147 : "+m" (v->counter), "=qm" (c)
7148 : "ir" (i) : "memory");
7149 return c;
7150@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
7151 */
7152 static inline void atomic_inc(atomic_t *v)
7153 {
7154- asm volatile(LOCK_PREFIX "incl %0"
7155+ asm volatile(LOCK_PREFIX "incl %0\n"
7156+
7157+#ifdef CONFIG_PAX_REFCOUNT
7158+ "jno 0f\n"
7159+ LOCK_PREFIX "decl %0\n"
7160+ "int $4\n0:\n"
7161+ _ASM_EXTABLE(0b, 0b)
7162+#endif
7163+
7164+ : "+m" (v->counter));
7165+}
7166+
7167+/**
7168+ * atomic_inc_unchecked - increment atomic variable
7169+ * @v: pointer of type atomic_unchecked_t
7170+ *
7171+ * Atomically increments @v by 1.
7172+ */
7173+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7174+{
7175+ asm volatile(LOCK_PREFIX "incl %0\n"
7176 : "+m" (v->counter));
7177 }
7178
7179@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7180 */
7181 static inline void atomic_dec(atomic_t *v)
7182 {
7183- asm volatile(LOCK_PREFIX "decl %0"
7184+ asm volatile(LOCK_PREFIX "decl %0\n"
7185+
7186+#ifdef CONFIG_PAX_REFCOUNT
7187+ "jno 0f\n"
7188+ LOCK_PREFIX "incl %0\n"
7189+ "int $4\n0:\n"
7190+ _ASM_EXTABLE(0b, 0b)
7191+#endif
7192+
7193+ : "+m" (v->counter));
7194+}
7195+
7196+/**
7197+ * atomic_dec_unchecked - decrement atomic variable
7198+ * @v: pointer of type atomic_unchecked_t
7199+ *
7200+ * Atomically decrements @v by 1.
7201+ */
7202+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7203+{
7204+ asm volatile(LOCK_PREFIX "decl %0\n"
7205 : "+m" (v->counter));
7206 }
7207
7208@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7209 {
7210 unsigned char c;
7211
7212- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7213+ asm volatile(LOCK_PREFIX "decl %0\n"
7214+
7215+#ifdef CONFIG_PAX_REFCOUNT
7216+ "jno 0f\n"
7217+ LOCK_PREFIX "incl %0\n"
7218+ "int $4\n0:\n"
7219+ _ASM_EXTABLE(0b, 0b)
7220+#endif
7221+
7222+ "sete %1\n"
7223 : "+m" (v->counter), "=qm" (c)
7224 : : "memory");
7225 return c != 0;
7226@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7227 {
7228 unsigned char c;
7229
7230- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7231+ asm volatile(LOCK_PREFIX "incl %0\n"
7232+
7233+#ifdef CONFIG_PAX_REFCOUNT
7234+ "jno 0f\n"
7235+ LOCK_PREFIX "decl %0\n"
7236+ "into\n0:\n"
7237+ _ASM_EXTABLE(0b, 0b)
7238+#endif
7239+
7240+ "sete %1\n"
7241+ : "+m" (v->counter), "=qm" (c)
7242+ : : "memory");
7243+ return c != 0;
7244+}
7245+
7246+/**
7247+ * atomic_inc_and_test_unchecked - increment and test
7248+ * @v: pointer of type atomic_unchecked_t
7249+ *
7250+ * Atomically increments @v by 1
7251+ * and returns true if the result is zero, or false for all
7252+ * other cases.
7253+ */
7254+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7255+{
7256+ unsigned char c;
7257+
7258+ asm volatile(LOCK_PREFIX "incl %0\n"
7259+ "sete %1\n"
7260 : "+m" (v->counter), "=qm" (c)
7261 : : "memory");
7262 return c != 0;
7263@@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7264 {
7265 unsigned char c;
7266
7267- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7268+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7269+
7270+#ifdef CONFIG_PAX_REFCOUNT
7271+ "jno 0f\n"
7272+ LOCK_PREFIX "subl %2,%0\n"
7273+ "int $4\n0:\n"
7274+ _ASM_EXTABLE(0b, 0b)
7275+#endif
7276+
7277+ "sets %1\n"
7278 : "+m" (v->counter), "=qm" (c)
7279 : "ir" (i) : "memory");
7280 return c;
7281@@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7282 #endif
7283 /* Modern 486+ processor */
7284 __i = i;
7285+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7286+
7287+#ifdef CONFIG_PAX_REFCOUNT
7288+ "jno 0f\n"
7289+ "movl %0, %1\n"
7290+ "int $4\n0:\n"
7291+ _ASM_EXTABLE(0b, 0b)
7292+#endif
7293+
7294+ : "+r" (i), "+m" (v->counter)
7295+ : : "memory");
7296+ return i + __i;
7297+
7298+#ifdef CONFIG_M386
7299+no_xadd: /* Legacy 386 processor */
7300+ local_irq_save(flags);
7301+ __i = atomic_read(v);
7302+ atomic_set(v, i + __i);
7303+ local_irq_restore(flags);
7304+ return i + __i;
7305+#endif
7306+}
7307+
7308+/**
7309+ * atomic_add_return_unchecked - add integer and return
7310+ * @v: pointer of type atomic_unchecked_t
7311+ * @i: integer value to add
7312+ *
7313+ * Atomically adds @i to @v and returns @i + @v
7314+ */
7315+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7316+{
7317+ int __i;
7318+#ifdef CONFIG_M386
7319+ unsigned long flags;
7320+ if (unlikely(boot_cpu_data.x86 <= 3))
7321+ goto no_xadd;
7322+#endif
7323+ /* Modern 486+ processor */
7324+ __i = i;
7325 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7326 : "+r" (i), "+m" (v->counter)
7327 : : "memory");
7328@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7329 return cmpxchg(&v->counter, old, new);
7330 }
7331
7332+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7333+{
7334+ return cmpxchg(&v->counter, old, new);
7335+}
7336+
7337 static inline int atomic_xchg(atomic_t *v, int new)
7338 {
7339 return xchg(&v->counter, new);
7340 }
7341
7342+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7343+{
7344+ return xchg(&v->counter, new);
7345+}
7346+
7347 /**
7348 * atomic_add_unless - add unless the number is already a given value
7349 * @v: pointer of type atomic_t
7350@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7351 */
7352 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7353 {
7354- int c, old;
7355+ int c, old, new;
7356 c = atomic_read(v);
7357 for (;;) {
7358- if (unlikely(c == (u)))
7359+ if (unlikely(c == u))
7360 break;
7361- old = atomic_cmpxchg((v), c, c + (a));
7362+
7363+ asm volatile("addl %2,%0\n"
7364+
7365+#ifdef CONFIG_PAX_REFCOUNT
7366+ "jno 0f\n"
7367+ "subl %2,%0\n"
7368+ "int $4\n0:\n"
7369+ _ASM_EXTABLE(0b, 0b)
7370+#endif
7371+
7372+ : "=r" (new)
7373+ : "0" (c), "ir" (a));
7374+
7375+ old = atomic_cmpxchg(v, c, new);
7376 if (likely(old == c))
7377 break;
7378 c = old;
7379 }
7380- return c != (u);
7381+ return c != u;
7382 }
7383
7384 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7385
7386 #define atomic_inc_return(v) (atomic_add_return(1, v))
7387+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7388+{
7389+ return atomic_add_return_unchecked(1, v);
7390+}
7391 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7392
7393 /* These are x86-specific, used by some header files */
7394@@ -266,9 +495,18 @@ typedef struct {
7395 u64 __aligned(8) counter;
7396 } atomic64_t;
7397
7398+#ifdef CONFIG_PAX_REFCOUNT
7399+typedef struct {
7400+ u64 __aligned(8) counter;
7401+} atomic64_unchecked_t;
7402+#else
7403+typedef atomic64_t atomic64_unchecked_t;
7404+#endif
7405+
7406 #define ATOMIC64_INIT(val) { (val) }
7407
7408 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7409+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7410
7411 /**
7412 * atomic64_xchg - xchg atomic64 variable
7413@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7414 * the old value.
7415 */
7416 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7417+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7418
7419 /**
7420 * atomic64_set - set atomic64 variable
7421@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7422 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7423
7424 /**
7425+ * atomic64_unchecked_set - set atomic64 variable
7426+ * @ptr: pointer to type atomic64_unchecked_t
7427+ * @new_val: value to assign
7428+ *
7429+ * Atomically sets the value of @ptr to @new_val.
7430+ */
7431+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7432+
7433+/**
7434 * atomic64_read - read atomic64 variable
7435 * @ptr: pointer to type atomic64_t
7436 *
7437@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7438 return res;
7439 }
7440
7441-extern u64 atomic64_read(atomic64_t *ptr);
7442+/**
7443+ * atomic64_read_unchecked - read atomic64 variable
7444+ * @ptr: pointer to type atomic64_unchecked_t
7445+ *
7446+ * Atomically reads the value of @ptr and returns it.
7447+ */
7448+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7449+{
7450+ u64 res;
7451+
7452+ /*
7453+ * Note, we inline this atomic64_unchecked_t primitive because
7454+ * it only clobbers EAX/EDX and leaves the others
7455+ * untouched. We also (somewhat subtly) rely on the
7456+ * fact that cmpxchg8b returns the current 64-bit value
7457+ * of the memory location we are touching:
7458+ */
7459+ asm volatile(
7460+ "mov %%ebx, %%eax\n\t"
7461+ "mov %%ecx, %%edx\n\t"
7462+ LOCK_PREFIX "cmpxchg8b %1\n"
7463+ : "=&A" (res)
7464+ : "m" (*ptr)
7465+ );
7466+
7467+ return res;
7468+}
7469
7470 /**
7471 * atomic64_add_return - add and return
7472@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7473 * Other variants with different arithmetic operators:
7474 */
7475 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7476+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7477 extern u64 atomic64_inc_return(atomic64_t *ptr);
7478+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7479 extern u64 atomic64_dec_return(atomic64_t *ptr);
7480+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7481
7482 /**
7483 * atomic64_add - add integer to atomic64 variable
7484@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7485 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7486
7487 /**
7488+ * atomic64_add_unchecked - add integer to atomic64 variable
7489+ * @delta: integer value to add
7490+ * @ptr: pointer to type atomic64_unchecked_t
7491+ *
7492+ * Atomically adds @delta to @ptr.
7493+ */
7494+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7495+
7496+/**
7497 * atomic64_sub - subtract the atomic64 variable
7498 * @delta: integer value to subtract
7499 * @ptr: pointer to type atomic64_t
7500@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7501 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7502
7503 /**
7504+ * atomic64_sub_unchecked - subtract the atomic64 variable
7505+ * @delta: integer value to subtract
7506+ * @ptr: pointer to type atomic64_unchecked_t
7507+ *
7508+ * Atomically subtracts @delta from @ptr.
7509+ */
7510+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7511+
7512+/**
7513 * atomic64_sub_and_test - subtract value from variable and test result
7514 * @delta: integer value to subtract
7515 * @ptr: pointer to type atomic64_t
7516@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7517 extern void atomic64_inc(atomic64_t *ptr);
7518
7519 /**
7520+ * atomic64_inc_unchecked - increment atomic64 variable
7521+ * @ptr: pointer to type atomic64_unchecked_t
7522+ *
7523+ * Atomically increments @ptr by 1.
7524+ */
7525+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7526+
7527+/**
7528 * atomic64_dec - decrement atomic64 variable
7529 * @ptr: pointer to type atomic64_t
7530 *
7531@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7532 extern void atomic64_dec(atomic64_t *ptr);
7533
7534 /**
7535+ * atomic64_dec_unchecked - decrement atomic64 variable
7536+ * @ptr: pointer to type atomic64_unchecked_t
7537+ *
7538+ * Atomically decrements @ptr by 1.
7539+ */
7540+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7541+
7542+/**
7543 * atomic64_dec_and_test - decrement and test
7544 * @ptr: pointer to type atomic64_t
7545 *
7546diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_64.h linux-2.6.32.45/arch/x86/include/asm/atomic_64.h
7547--- linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7548+++ linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7549@@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7550 }
7551
7552 /**
7553+ * atomic_read_unchecked - read atomic variable
7554+ * @v: pointer of type atomic_unchecked_t
7555+ *
7556+ * Atomically reads the value of @v.
7557+ */
7558+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7559+{
7560+ return v->counter;
7561+}
7562+
7563+/**
7564 * atomic_set - set atomic variable
7565 * @v: pointer of type atomic_t
7566 * @i: required value
7567@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7568 }
7569
7570 /**
7571+ * atomic_set_unchecked - set atomic variable
7572+ * @v: pointer of type atomic_unchecked_t
7573+ * @i: required value
7574+ *
7575+ * Atomically sets the value of @v to @i.
7576+ */
7577+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7578+{
7579+ v->counter = i;
7580+}
7581+
7582+/**
7583 * atomic_add - add integer to atomic variable
7584 * @i: integer value to add
7585 * @v: pointer of type atomic_t
7586@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7587 */
7588 static inline void atomic_add(int i, atomic_t *v)
7589 {
7590- asm volatile(LOCK_PREFIX "addl %1,%0"
7591+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7592+
7593+#ifdef CONFIG_PAX_REFCOUNT
7594+ "jno 0f\n"
7595+ LOCK_PREFIX "subl %1,%0\n"
7596+ "int $4\n0:\n"
7597+ _ASM_EXTABLE(0b, 0b)
7598+#endif
7599+
7600+ : "=m" (v->counter)
7601+ : "ir" (i), "m" (v->counter));
7602+}
7603+
7604+/**
7605+ * atomic_add_unchecked - add integer to atomic variable
7606+ * @i: integer value to add
7607+ * @v: pointer of type atomic_unchecked_t
7608+ *
7609+ * Atomically adds @i to @v.
7610+ */
7611+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7612+{
7613+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7614 : "=m" (v->counter)
7615 : "ir" (i), "m" (v->counter));
7616 }
7617@@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7618 */
7619 static inline void atomic_sub(int i, atomic_t *v)
7620 {
7621- asm volatile(LOCK_PREFIX "subl %1,%0"
7622+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7623+
7624+#ifdef CONFIG_PAX_REFCOUNT
7625+ "jno 0f\n"
7626+ LOCK_PREFIX "addl %1,%0\n"
7627+ "int $4\n0:\n"
7628+ _ASM_EXTABLE(0b, 0b)
7629+#endif
7630+
7631+ : "=m" (v->counter)
7632+ : "ir" (i), "m" (v->counter));
7633+}
7634+
7635+/**
7636+ * atomic_sub_unchecked - subtract the atomic variable
7637+ * @i: integer value to subtract
7638+ * @v: pointer of type atomic_unchecked_t
7639+ *
7640+ * Atomically subtracts @i from @v.
7641+ */
7642+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7643+{
7644+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7645 : "=m" (v->counter)
7646 : "ir" (i), "m" (v->counter));
7647 }
7648@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7649 {
7650 unsigned char c;
7651
7652- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7653+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7654+
7655+#ifdef CONFIG_PAX_REFCOUNT
7656+ "jno 0f\n"
7657+ LOCK_PREFIX "addl %2,%0\n"
7658+ "int $4\n0:\n"
7659+ _ASM_EXTABLE(0b, 0b)
7660+#endif
7661+
7662+ "sete %1\n"
7663 : "=m" (v->counter), "=qm" (c)
7664 : "ir" (i), "m" (v->counter) : "memory");
7665 return c;
7666@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7667 */
7668 static inline void atomic_inc(atomic_t *v)
7669 {
7670- asm volatile(LOCK_PREFIX "incl %0"
7671+ asm volatile(LOCK_PREFIX "incl %0\n"
7672+
7673+#ifdef CONFIG_PAX_REFCOUNT
7674+ "jno 0f\n"
7675+ LOCK_PREFIX "decl %0\n"
7676+ "int $4\n0:\n"
7677+ _ASM_EXTABLE(0b, 0b)
7678+#endif
7679+
7680+ : "=m" (v->counter)
7681+ : "m" (v->counter));
7682+}
7683+
7684+/**
7685+ * atomic_inc_unchecked - increment atomic variable
7686+ * @v: pointer of type atomic_unchecked_t
7687+ *
7688+ * Atomically increments @v by 1.
7689+ */
7690+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7691+{
7692+ asm volatile(LOCK_PREFIX "incl %0\n"
7693 : "=m" (v->counter)
7694 : "m" (v->counter));
7695 }
7696@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7697 */
7698 static inline void atomic_dec(atomic_t *v)
7699 {
7700- asm volatile(LOCK_PREFIX "decl %0"
7701+ asm volatile(LOCK_PREFIX "decl %0\n"
7702+
7703+#ifdef CONFIG_PAX_REFCOUNT
7704+ "jno 0f\n"
7705+ LOCK_PREFIX "incl %0\n"
7706+ "int $4\n0:\n"
7707+ _ASM_EXTABLE(0b, 0b)
7708+#endif
7709+
7710+ : "=m" (v->counter)
7711+ : "m" (v->counter));
7712+}
7713+
7714+/**
7715+ * atomic_dec_unchecked - decrement atomic variable
7716+ * @v: pointer of type atomic_unchecked_t
7717+ *
7718+ * Atomically decrements @v by 1.
7719+ */
7720+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7721+{
7722+ asm volatile(LOCK_PREFIX "decl %0\n"
7723 : "=m" (v->counter)
7724 : "m" (v->counter));
7725 }
7726@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7727 {
7728 unsigned char c;
7729
7730- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7731+ asm volatile(LOCK_PREFIX "decl %0\n"
7732+
7733+#ifdef CONFIG_PAX_REFCOUNT
7734+ "jno 0f\n"
7735+ LOCK_PREFIX "incl %0\n"
7736+ "int $4\n0:\n"
7737+ _ASM_EXTABLE(0b, 0b)
7738+#endif
7739+
7740+ "sete %1\n"
7741 : "=m" (v->counter), "=qm" (c)
7742 : "m" (v->counter) : "memory");
7743 return c != 0;
7744@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7745 {
7746 unsigned char c;
7747
7748- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7749+ asm volatile(LOCK_PREFIX "incl %0\n"
7750+
7751+#ifdef CONFIG_PAX_REFCOUNT
7752+ "jno 0f\n"
7753+ LOCK_PREFIX "decl %0\n"
7754+ "int $4\n0:\n"
7755+ _ASM_EXTABLE(0b, 0b)
7756+#endif
7757+
7758+ "sete %1\n"
7759+ : "=m" (v->counter), "=qm" (c)
7760+ : "m" (v->counter) : "memory");
7761+ return c != 0;
7762+}
7763+
7764+/**
7765+ * atomic_inc_and_test_unchecked - increment and test
7766+ * @v: pointer of type atomic_unchecked_t
7767+ *
7768+ * Atomically increments @v by 1
7769+ * and returns true if the result is zero, or false for all
7770+ * other cases.
7771+ */
7772+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7773+{
7774+ unsigned char c;
7775+
7776+ asm volatile(LOCK_PREFIX "incl %0\n"
7777+ "sete %1\n"
7778 : "=m" (v->counter), "=qm" (c)
7779 : "m" (v->counter) : "memory");
7780 return c != 0;
7781@@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7782 {
7783 unsigned char c;
7784
7785- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7786+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7787+
7788+#ifdef CONFIG_PAX_REFCOUNT
7789+ "jno 0f\n"
7790+ LOCK_PREFIX "subl %2,%0\n"
7791+ "int $4\n0:\n"
7792+ _ASM_EXTABLE(0b, 0b)
7793+#endif
7794+
7795+ "sets %1\n"
7796 : "=m" (v->counter), "=qm" (c)
7797 : "ir" (i), "m" (v->counter) : "memory");
7798 return c;
7799@@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7800 static inline int atomic_add_return(int i, atomic_t *v)
7801 {
7802 int __i = i;
7803- asm volatile(LOCK_PREFIX "xaddl %0, %1"
7804+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7805+
7806+#ifdef CONFIG_PAX_REFCOUNT
7807+ "jno 0f\n"
7808+ "movl %0, %1\n"
7809+ "int $4\n0:\n"
7810+ _ASM_EXTABLE(0b, 0b)
7811+#endif
7812+
7813+ : "+r" (i), "+m" (v->counter)
7814+ : : "memory");
7815+ return i + __i;
7816+}
7817+
7818+/**
7819+ * atomic_add_return_unchecked - add and return
7820+ * @i: integer value to add
7821+ * @v: pointer of type atomic_unchecked_t
7822+ *
7823+ * Atomically adds @i to @v and returns @i + @v
7824+ */
7825+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7826+{
7827+ int __i = i;
7828+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7829 : "+r" (i), "+m" (v->counter)
7830 : : "memory");
7831 return i + __i;
7832@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7833 }
7834
7835 #define atomic_inc_return(v) (atomic_add_return(1, v))
7836+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7837+{
7838+ return atomic_add_return_unchecked(1, v);
7839+}
7840 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7841
7842 /* The 64-bit atomic type */
7843@@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7844 }
7845
7846 /**
7847+ * atomic64_read_unchecked - read atomic64 variable
7848+ * @v: pointer of type atomic64_unchecked_t
7849+ *
7850+ * Atomically reads the value of @v.
7851+ * Doesn't imply a read memory barrier.
7852+ */
7853+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7854+{
7855+ return v->counter;
7856+}
7857+
7858+/**
7859 * atomic64_set - set atomic64 variable
7860 * @v: pointer to type atomic64_t
7861 * @i: required value
7862@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7863 }
7864
7865 /**
7866+ * atomic64_set_unchecked - set atomic64 variable
7867+ * @v: pointer to type atomic64_unchecked_t
7868+ * @i: required value
7869+ *
7870+ * Atomically sets the value of @v to @i.
7871+ */
7872+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7873+{
7874+ v->counter = i;
7875+}
7876+
7877+/**
7878 * atomic64_add - add integer to atomic64 variable
7879 * @i: integer value to add
7880 * @v: pointer to type atomic64_t
7881@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7882 */
7883 static inline void atomic64_add(long i, atomic64_t *v)
7884 {
7885+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7886+
7887+#ifdef CONFIG_PAX_REFCOUNT
7888+ "jno 0f\n"
7889+ LOCK_PREFIX "subq %1,%0\n"
7890+ "int $4\n0:\n"
7891+ _ASM_EXTABLE(0b, 0b)
7892+#endif
7893+
7894+ : "=m" (v->counter)
7895+ : "er" (i), "m" (v->counter));
7896+}
7897+
7898+/**
7899+ * atomic64_add_unchecked - add integer to atomic64 variable
7900+ * @i: integer value to add
7901+ * @v: pointer to type atomic64_unchecked_t
7902+ *
7903+ * Atomically adds @i to @v.
7904+ */
7905+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7906+{
7907 asm volatile(LOCK_PREFIX "addq %1,%0"
7908 : "=m" (v->counter)
7909 : "er" (i), "m" (v->counter));
7910@@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7911 */
7912 static inline void atomic64_sub(long i, atomic64_t *v)
7913 {
7914- asm volatile(LOCK_PREFIX "subq %1,%0"
7915+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7916+
7917+#ifdef CONFIG_PAX_REFCOUNT
7918+ "jno 0f\n"
7919+ LOCK_PREFIX "addq %1,%0\n"
7920+ "int $4\n0:\n"
7921+ _ASM_EXTABLE(0b, 0b)
7922+#endif
7923+
7924 : "=m" (v->counter)
7925 : "er" (i), "m" (v->counter));
7926 }
7927@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7928 {
7929 unsigned char c;
7930
7931- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7932+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7933+
7934+#ifdef CONFIG_PAX_REFCOUNT
7935+ "jno 0f\n"
7936+ LOCK_PREFIX "addq %2,%0\n"
7937+ "int $4\n0:\n"
7938+ _ASM_EXTABLE(0b, 0b)
7939+#endif
7940+
7941+ "sete %1\n"
7942 : "=m" (v->counter), "=qm" (c)
7943 : "er" (i), "m" (v->counter) : "memory");
7944 return c;
7945@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7946 */
7947 static inline void atomic64_inc(atomic64_t *v)
7948 {
7949+ asm volatile(LOCK_PREFIX "incq %0\n"
7950+
7951+#ifdef CONFIG_PAX_REFCOUNT
7952+ "jno 0f\n"
7953+ LOCK_PREFIX "decq %0\n"
7954+ "int $4\n0:\n"
7955+ _ASM_EXTABLE(0b, 0b)
7956+#endif
7957+
7958+ : "=m" (v->counter)
7959+ : "m" (v->counter));
7960+}
7961+
7962+/**
7963+ * atomic64_inc_unchecked - increment atomic64 variable
7964+ * @v: pointer to type atomic64_unchecked_t
7965+ *
7966+ * Atomically increments @v by 1.
7967+ */
7968+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7969+{
7970 asm volatile(LOCK_PREFIX "incq %0"
7971 : "=m" (v->counter)
7972 : "m" (v->counter));
7973@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7974 */
7975 static inline void atomic64_dec(atomic64_t *v)
7976 {
7977- asm volatile(LOCK_PREFIX "decq %0"
7978+ asm volatile(LOCK_PREFIX "decq %0\n"
7979+
7980+#ifdef CONFIG_PAX_REFCOUNT
7981+ "jno 0f\n"
7982+ LOCK_PREFIX "incq %0\n"
7983+ "int $4\n0:\n"
7984+ _ASM_EXTABLE(0b, 0b)
7985+#endif
7986+
7987+ : "=m" (v->counter)
7988+ : "m" (v->counter));
7989+}
7990+
7991+/**
7992+ * atomic64_dec_unchecked - decrement atomic64 variable
7993+ * @v: pointer to type atomic64_unchecked_t
7994+ *
7995+ * Atomically decrements @v by 1.
7996+ */
7997+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7998+{
7999+ asm volatile(LOCK_PREFIX "decq %0\n"
8000 : "=m" (v->counter)
8001 : "m" (v->counter));
8002 }
8003@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
8004 {
8005 unsigned char c;
8006
8007- asm volatile(LOCK_PREFIX "decq %0; sete %1"
8008+ asm volatile(LOCK_PREFIX "decq %0\n"
8009+
8010+#ifdef CONFIG_PAX_REFCOUNT
8011+ "jno 0f\n"
8012+ LOCK_PREFIX "incq %0\n"
8013+ "int $4\n0:\n"
8014+ _ASM_EXTABLE(0b, 0b)
8015+#endif
8016+
8017+ "sete %1\n"
8018 : "=m" (v->counter), "=qm" (c)
8019 : "m" (v->counter) : "memory");
8020 return c != 0;
8021@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
8022 {
8023 unsigned char c;
8024
8025- asm volatile(LOCK_PREFIX "incq %0; sete %1"
8026+ asm volatile(LOCK_PREFIX "incq %0\n"
8027+
8028+#ifdef CONFIG_PAX_REFCOUNT
8029+ "jno 0f\n"
8030+ LOCK_PREFIX "decq %0\n"
8031+ "int $4\n0:\n"
8032+ _ASM_EXTABLE(0b, 0b)
8033+#endif
8034+
8035+ "sete %1\n"
8036 : "=m" (v->counter), "=qm" (c)
8037 : "m" (v->counter) : "memory");
8038 return c != 0;
8039@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
8040 {
8041 unsigned char c;
8042
8043- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
8044+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
8045+
8046+#ifdef CONFIG_PAX_REFCOUNT
8047+ "jno 0f\n"
8048+ LOCK_PREFIX "subq %2,%0\n"
8049+ "int $4\n0:\n"
8050+ _ASM_EXTABLE(0b, 0b)
8051+#endif
8052+
8053+ "sets %1\n"
8054 : "=m" (v->counter), "=qm" (c)
8055 : "er" (i), "m" (v->counter) : "memory");
8056 return c;
8057@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
8058 static inline long atomic64_add_return(long i, atomic64_t *v)
8059 {
8060 long __i = i;
8061- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
8062+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
8063+
8064+#ifdef CONFIG_PAX_REFCOUNT
8065+ "jno 0f\n"
8066+ "movq %0, %1\n"
8067+ "int $4\n0:\n"
8068+ _ASM_EXTABLE(0b, 0b)
8069+#endif
8070+
8071+ : "+r" (i), "+m" (v->counter)
8072+ : : "memory");
8073+ return i + __i;
8074+}
8075+
8076+/**
8077+ * atomic64_add_return_unchecked - add and return
8078+ * @i: integer value to add
8079+ * @v: pointer to type atomic64_unchecked_t
8080+ *
8081+ * Atomically adds @i to @v and returns @i + @v
8082+ */
8083+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8084+{
8085+ long __i = i;
8086+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
8087 : "+r" (i), "+m" (v->counter)
8088 : : "memory");
8089 return i + __i;
8090@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
8091 }
8092
8093 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
8094+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8095+{
8096+ return atomic64_add_return_unchecked(1, v);
8097+}
8098 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
8099
8100 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8101@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
8102 return cmpxchg(&v->counter, old, new);
8103 }
8104
8105+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8106+{
8107+ return cmpxchg(&v->counter, old, new);
8108+}
8109+
8110 static inline long atomic64_xchg(atomic64_t *v, long new)
8111 {
8112 return xchg(&v->counter, new);
8113 }
8114
8115+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8116+{
8117+ return xchg(&v->counter, new);
8118+}
8119+
8120 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
8121 {
8122 return cmpxchg(&v->counter, old, new);
8123 }
8124
8125+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8126+{
8127+ return cmpxchg(&v->counter, old, new);
8128+}
8129+
8130 static inline long atomic_xchg(atomic_t *v, int new)
8131 {
8132 return xchg(&v->counter, new);
8133 }
8134
8135+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8136+{
8137+ return xchg(&v->counter, new);
8138+}
8139+
8140 /**
8141 * atomic_add_unless - add unless the number is a given value
8142 * @v: pointer of type atomic_t
8143@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
8144 */
8145 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8146 {
8147- int c, old;
8148+ int c, old, new;
8149 c = atomic_read(v);
8150 for (;;) {
8151- if (unlikely(c == (u)))
8152+ if (unlikely(c == u))
8153 break;
8154- old = atomic_cmpxchg((v), c, c + (a));
8155+
8156+ asm volatile("addl %2,%0\n"
8157+
8158+#ifdef CONFIG_PAX_REFCOUNT
8159+ "jno 0f\n"
8160+ "subl %2,%0\n"
8161+ "int $4\n0:\n"
8162+ _ASM_EXTABLE(0b, 0b)
8163+#endif
8164+
8165+ : "=r" (new)
8166+ : "0" (c), "ir" (a));
8167+
8168+ old = atomic_cmpxchg(v, c, new);
8169 if (likely(old == c))
8170 break;
8171 c = old;
8172 }
8173- return c != (u);
8174+ return c != u;
8175 }
8176
8177 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8178@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8179 */
8180 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8181 {
8182- long c, old;
8183+ long c, old, new;
8184 c = atomic64_read(v);
8185 for (;;) {
8186- if (unlikely(c == (u)))
8187+ if (unlikely(c == u))
8188 break;
8189- old = atomic64_cmpxchg((v), c, c + (a));
8190+
8191+ asm volatile("addq %2,%0\n"
8192+
8193+#ifdef CONFIG_PAX_REFCOUNT
8194+ "jno 0f\n"
8195+ "subq %2,%0\n"
8196+ "int $4\n0:\n"
8197+ _ASM_EXTABLE(0b, 0b)
8198+#endif
8199+
8200+ : "=r" (new)
8201+ : "0" (c), "er" (a));
8202+
8203+ old = atomic64_cmpxchg(v, c, new);
8204 if (likely(old == c))
8205 break;
8206 c = old;
8207 }
8208- return c != (u);
8209+ return c != u;
8210 }
8211
8212 /**
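
Every CONFIG_PAX_REFCOUNT hunk above follows the same shape: do the locked arithmetic, branch on the overflow flag, and on overflow undo the operation and execute "int $4" so the _ASM_EXTABLE(0b, 0b) entry resumes execution right after the trap; the *_unchecked variants keep the plain wrap-around sequence for counters that may legitimately overflow. Below is a minimal userspace sketch of that pattern, not part of the patch: it spells LOCK_PREFIX as a literal "lock", drops the kernel-only exception-table fixup, and never actually overflows.

#include <stdio.h>

typedef struct { volatile int counter; } demo_atomic_t;

static inline void demo_atomic_add_checked(int i, demo_atomic_t *v)
{
        asm volatile("lock addl %1,%0\n"   /* locked add, may set OF */
                     "jno 0f\n"            /* no signed overflow: done */
                     "lock subl %1,%0\n"   /* overflow: undo the add ... */
                     "int $4\n"            /* ... and raise the overflow trap */
                     "0:\n"                /* in-kernel, _ASM_EXTABLE(0b, 0b) resumes here */
                     : "+m" (v->counter)
                     : "ir" (i));
}

int main(void)
{
        demo_atomic_t a = { .counter = 40 };

        demo_atomic_add_checked(2, &a);         /* no overflow, trap path not taken */
        printf("counter = %d\n", a.counter);    /* prints 42 */
        return 0;
}
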
8213diff -urNp linux-2.6.32.45/arch/x86/include/asm/bitops.h linux-2.6.32.45/arch/x86/include/asm/bitops.h
8214--- linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8215+++ linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8216@@ -38,7 +38,7 @@
8217 * a mask operation on a byte.
8218 */
8219 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8220-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8221+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8222 #define CONST_MASK(nr) (1 << ((nr) & 7))
8223
8224 /**
8225diff -urNp linux-2.6.32.45/arch/x86/include/asm/boot.h linux-2.6.32.45/arch/x86/include/asm/boot.h
8226--- linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8227+++ linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8228@@ -11,10 +11,15 @@
8229 #include <asm/pgtable_types.h>
8230
8231 /* Physical address where kernel should be loaded. */
8232-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8233+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8234 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8235 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8236
8237+#ifndef __ASSEMBLY__
8238+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8239+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8240+#endif
8241+
8242 /* Minimum kernel alignment, as a power of two */
8243 #ifdef CONFIG_X86_64
8244 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
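
In C code, LOAD_PHYSICAL_ADDR above now resolves to the address of the __LOAD_PHYSICAL_ADDR symbol rather than a raw constant; the constant itself survives as ____LOAD_PHYSICAL_ADDR, which is just the usual power-of-two round-up. A sketch of that round-up with made-up CONFIG_ values (the real ones come from Kconfig):

#include <stdio.h>

#define DEMO_PHYSICAL_START 0x1234000UL
#define DEMO_PHYSICAL_ALIGN 0x1000000UL   /* 16 MiB, must be a power of two */

#define DEMO_LOAD_PHYSICAL_ADDR \
        ((DEMO_PHYSICAL_START + (DEMO_PHYSICAL_ALIGN - 1)) & ~(DEMO_PHYSICAL_ALIGN - 1))

int main(void)
{
        /* 0x1234000 rounded up to the next 16 MiB boundary is 0x2000000 */
        printf("0x%lx\n", DEMO_LOAD_PHYSICAL_ADDR);
        return 0;
}
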
8245diff -urNp linux-2.6.32.45/arch/x86/include/asm/cacheflush.h linux-2.6.32.45/arch/x86/include/asm/cacheflush.h
8246--- linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8247+++ linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8248@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8249 static inline unsigned long get_page_memtype(struct page *pg)
8250 {
8251 if (!PageUncached(pg) && !PageWC(pg))
8252- return -1;
8253+ return ~0UL;
8254 else if (!PageUncached(pg) && PageWC(pg))
8255 return _PAGE_CACHE_WC;
8256 else if (PageUncached(pg) && !PageWC(pg))
8257@@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8258 SetPageWC(pg);
8259 break;
8260 default:
8261- case -1:
8262+ case ~0UL:
8263 ClearPageUncached(pg);
8264 ClearPageWC(pg);
8265 break;
8266diff -urNp linux-2.6.32.45/arch/x86/include/asm/cache.h linux-2.6.32.45/arch/x86/include/asm/cache.h
8267--- linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8268+++ linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8269@@ -5,9 +5,10 @@
8270
8271 /* L1 cache line size */
8272 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8273-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8274+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8275
8276 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8277+#define __read_only __attribute__((__section__(".data.read_only")))
8278
8279 #ifdef CONFIG_X86_VSMP
8280 /* vSMP Internode cacheline shift */
8281diff -urNp linux-2.6.32.45/arch/x86/include/asm/checksum_32.h linux-2.6.32.45/arch/x86/include/asm/checksum_32.h
8282--- linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8283+++ linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8284@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8285 int len, __wsum sum,
8286 int *src_err_ptr, int *dst_err_ptr);
8287
8288+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8289+ int len, __wsum sum,
8290+ int *src_err_ptr, int *dst_err_ptr);
8291+
8292+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8293+ int len, __wsum sum,
8294+ int *src_err_ptr, int *dst_err_ptr);
8295+
8296 /*
8297 * Note: when you get a NULL pointer exception here this means someone
8298 * passed in an incorrect kernel address to one of these functions.
8299@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8300 int *err_ptr)
8301 {
8302 might_sleep();
8303- return csum_partial_copy_generic((__force void *)src, dst,
8304+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8305 len, sum, err_ptr, NULL);
8306 }
8307
8308@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8309 {
8310 might_sleep();
8311 if (access_ok(VERIFY_WRITE, dst, len))
8312- return csum_partial_copy_generic(src, (__force void *)dst,
8313+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8314 len, sum, NULL, err_ptr);
8315
8316 if (len)
8317diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc_defs.h linux-2.6.32.45/arch/x86/include/asm/desc_defs.h
8318--- linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8319+++ linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8320@@ -31,6 +31,12 @@ struct desc_struct {
8321 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8322 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8323 };
8324+ struct {
8325+ u16 offset_low;
8326+ u16 seg;
8327+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8328+ unsigned offset_high: 16;
8329+ } gate;
8330 };
8331 } __attribute__((packed));
8332
8333diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc.h linux-2.6.32.45/arch/x86/include/asm/desc.h
8334--- linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8335+++ linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8336@@ -4,6 +4,7 @@
8337 #include <asm/desc_defs.h>
8338 #include <asm/ldt.h>
8339 #include <asm/mmu.h>
8340+#include <asm/pgtable.h>
8341 #include <linux/smp.h>
8342
8343 static inline void fill_ldt(struct desc_struct *desc,
8344@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8345 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8346 desc->type = (info->read_exec_only ^ 1) << 1;
8347 desc->type |= info->contents << 2;
8348+ desc->type |= info->seg_not_present ^ 1;
8349 desc->s = 1;
8350 desc->dpl = 0x3;
8351 desc->p = info->seg_not_present ^ 1;
8352@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8353 }
8354
8355 extern struct desc_ptr idt_descr;
8356-extern gate_desc idt_table[];
8357-
8358-struct gdt_page {
8359- struct desc_struct gdt[GDT_ENTRIES];
8360-} __attribute__((aligned(PAGE_SIZE)));
8361-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8362+extern gate_desc idt_table[256];
8363
8364+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8365 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8366 {
8367- return per_cpu(gdt_page, cpu).gdt;
8368+ return cpu_gdt_table[cpu];
8369 }
8370
8371 #ifdef CONFIG_X86_64
8372@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8373 unsigned long base, unsigned dpl, unsigned flags,
8374 unsigned short seg)
8375 {
8376- gate->a = (seg << 16) | (base & 0xffff);
8377- gate->b = (base & 0xffff0000) |
8378- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8379+ gate->gate.offset_low = base;
8380+ gate->gate.seg = seg;
8381+ gate->gate.reserved = 0;
8382+ gate->gate.type = type;
8383+ gate->gate.s = 0;
8384+ gate->gate.dpl = dpl;
8385+ gate->gate.p = 1;
8386+ gate->gate.offset_high = base >> 16;
8387 }
8388
8389 #endif
8390@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8391 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8392 const gate_desc *gate)
8393 {
8394+ pax_open_kernel();
8395 memcpy(&idt[entry], gate, sizeof(*gate));
8396+ pax_close_kernel();
8397 }
8398
8399 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8400 const void *desc)
8401 {
8402+ pax_open_kernel();
8403 memcpy(&ldt[entry], desc, 8);
8404+ pax_close_kernel();
8405 }
8406
8407 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8408@@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8409 size = sizeof(struct desc_struct);
8410 break;
8411 }
8412+
8413+ pax_open_kernel();
8414 memcpy(&gdt[entry], desc, size);
8415+ pax_close_kernel();
8416 }
8417
8418 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8419@@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8420
8421 static inline void native_load_tr_desc(void)
8422 {
8423+ pax_open_kernel();
8424 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8425+ pax_close_kernel();
8426 }
8427
8428 static inline void native_load_gdt(const struct desc_ptr *dtr)
8429@@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8430 unsigned int i;
8431 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8432
8433+ pax_open_kernel();
8434 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8435 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8436+ pax_close_kernel();
8437 }
8438
8439 #define _LDT_empty(info) \
8440@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8441 desc->limit = (limit >> 16) & 0xf;
8442 }
8443
8444-static inline void _set_gate(int gate, unsigned type, void *addr,
8445+static inline void _set_gate(int gate, unsigned type, const void *addr,
8446 unsigned dpl, unsigned ist, unsigned seg)
8447 {
8448 gate_desc s;
8449@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8450 * Pentium F0 0F bugfix can have resulted in the mapped
8451 * IDT being write-protected.
8452 */
8453-static inline void set_intr_gate(unsigned int n, void *addr)
8454+static inline void set_intr_gate(unsigned int n, const void *addr)
8455 {
8456 BUG_ON((unsigned)n > 0xFF);
8457 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8458@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8459 /*
8460 * This routine sets up an interrupt gate at directory privilege level 3.
8461 */
8462-static inline void set_system_intr_gate(unsigned int n, void *addr)
8463+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8464 {
8465 BUG_ON((unsigned)n > 0xFF);
8466 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8467 }
8468
8469-static inline void set_system_trap_gate(unsigned int n, void *addr)
8470+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8471 {
8472 BUG_ON((unsigned)n > 0xFF);
8473 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8474 }
8475
8476-static inline void set_trap_gate(unsigned int n, void *addr)
8477+static inline void set_trap_gate(unsigned int n, const void *addr)
8478 {
8479 BUG_ON((unsigned)n > 0xFF);
8480 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8481@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8482 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8483 {
8484 BUG_ON((unsigned)n > 0xFF);
8485- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8486+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8487 }
8488
8489-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8490+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8491 {
8492 BUG_ON((unsigned)n > 0xFF);
8493 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8494 }
8495
8496-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8497+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8498 {
8499 BUG_ON((unsigned)n > 0xFF);
8500 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8501 }
8502
8503+#ifdef CONFIG_X86_32
8504+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8505+{
8506+ struct desc_struct d;
8507+
8508+ if (likely(limit))
8509+ limit = (limit - 1UL) >> PAGE_SHIFT;
8510+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8511+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8512+}
8513+#endif
8514+
8515 #endif /* _ASM_X86_DESC_H */
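
The pack_gate() rewrite above depends on the "gate" bitfield view added to desc_defs.h laying out exactly the same bytes as the old a/b word arithmetic it replaces. A standalone check of that equivalence, assuming x86 endianness and GCC bitfield layout (the context of this patch); the struct and values below are local to the demo, not kernel definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_gate {                      /* mirrors the "gate" view in desc_defs.h */
        uint16_t offset_low;
        uint16_t seg;
        unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
        unsigned offset_high : 16;
} __attribute__((packed));

struct demo_gate_words {                /* the old two-word encoding */
        uint32_t a, b;
};

int main(void)
{
        unsigned long base = 0xc0101234UL;        /* example handler address */
        unsigned type = 0xe, dpl = 0, seg = 0x60; /* interrupt gate, ring 0, example selector */

        struct demo_gate_words w = {              /* old pack_gate() arithmetic */
                .a = (seg << 16) | (base & 0xffff),
                .b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8),
        };
        struct demo_gate g = {                    /* new field-wise pack_gate() */
                .offset_low = base, .seg = seg, .reserved = 0,
                .type = type, .s = 0, .dpl = dpl, .p = 1,
                .offset_high = base >> 16,
        };

        printf("same bytes: %s\n", memcmp(&w, &g, 8) == 0 ? "yes" : "no");
        return 0;
}
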
8516diff -urNp linux-2.6.32.45/arch/x86/include/asm/device.h linux-2.6.32.45/arch/x86/include/asm/device.h
8517--- linux-2.6.32.45/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8518+++ linux-2.6.32.45/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8519@@ -6,7 +6,7 @@ struct dev_archdata {
8520 void *acpi_handle;
8521 #endif
8522 #ifdef CONFIG_X86_64
8523-struct dma_map_ops *dma_ops;
8524+ const struct dma_map_ops *dma_ops;
8525 #endif
8526 #ifdef CONFIG_DMAR
8527 void *iommu; /* hook for IOMMU specific extension */
8528diff -urNp linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h
8529--- linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8530+++ linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8531@@ -25,9 +25,9 @@ extern int iommu_merge;
8532 extern struct device x86_dma_fallback_dev;
8533 extern int panic_on_overflow;
8534
8535-extern struct dma_map_ops *dma_ops;
8536+extern const struct dma_map_ops *dma_ops;
8537
8538-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8539+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8540 {
8541 #ifdef CONFIG_X86_32
8542 return dma_ops;
8543@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8544 /* Make sure we keep the same behaviour */
8545 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8546 {
8547- struct dma_map_ops *ops = get_dma_ops(dev);
8548+ const struct dma_map_ops *ops = get_dma_ops(dev);
8549 if (ops->mapping_error)
8550 return ops->mapping_error(dev, dma_addr);
8551
8552@@ -122,7 +122,7 @@ static inline void *
8553 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8554 gfp_t gfp)
8555 {
8556- struct dma_map_ops *ops = get_dma_ops(dev);
8557+ const struct dma_map_ops *ops = get_dma_ops(dev);
8558 void *memory;
8559
8560 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8561@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8562 static inline void dma_free_coherent(struct device *dev, size_t size,
8563 void *vaddr, dma_addr_t bus)
8564 {
8565- struct dma_map_ops *ops = get_dma_ops(dev);
8566+ const struct dma_map_ops *ops = get_dma_ops(dev);
8567
8568 WARN_ON(irqs_disabled()); /* for portability */
8569
8570diff -urNp linux-2.6.32.45/arch/x86/include/asm/e820.h linux-2.6.32.45/arch/x86/include/asm/e820.h
8571--- linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8572+++ linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8573@@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8574 #define ISA_END_ADDRESS 0x100000
8575 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8576
8577-#define BIOS_BEGIN 0x000a0000
8578+#define BIOS_BEGIN 0x000c0000
8579 #define BIOS_END 0x00100000
8580
8581 #ifdef __KERNEL__
8582diff -urNp linux-2.6.32.45/arch/x86/include/asm/elf.h linux-2.6.32.45/arch/x86/include/asm/elf.h
8583--- linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8584+++ linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-08-23 20:24:19.000000000 -0400
8585@@ -257,7 +257,25 @@ extern int force_personality32;
8586 the loader. We need to make sure that it is out of the way of the program
8587 that it will "exec", and that there is sufficient room for the brk. */
8588
8589+#ifdef CONFIG_PAX_SEGMEXEC
8590+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8591+#else
8592 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8593+#endif
8594+
8595+#ifdef CONFIG_PAX_ASLR
8596+#ifdef CONFIG_X86_32
8597+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8598+
8599+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8600+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8601+#else
8602+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8603+
8604+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8605+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8606+#endif
8607+#endif
8608
8609 /* This yields a mask that user programs can use to figure out what
8610 instruction set this CPU supports. This could be done in user space,
8611@@ -310,9 +328,7 @@ do { \
8612
8613 #define ARCH_DLINFO \
8614 do { \
8615- if (vdso_enabled) \
8616- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8617- (unsigned long)current->mm->context.vdso); \
8618+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8619 } while (0)
8620
8621 #define AT_SYSINFO 32
8622@@ -323,7 +339,7 @@ do { \
8623
8624 #endif /* !CONFIG_X86_32 */
8625
8626-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8627+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8628
8629 #define VDSO_ENTRY \
8630 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8631@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(s
8632 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8633 #define compat_arch_setup_additional_pages syscall32_setup_pages
8634
8635-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8636-#define arch_randomize_brk arch_randomize_brk
8637-
8638 #endif /* _ASM_X86_ELF_H */
8639diff -urNp linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h
8640--- linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8641+++ linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8642@@ -15,6 +15,6 @@ enum reboot_type {
8643
8644 extern enum reboot_type reboot_type;
8645
8646-extern void machine_emergency_restart(void);
8647+extern void machine_emergency_restart(void) __noreturn;
8648
8649 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8650diff -urNp linux-2.6.32.45/arch/x86/include/asm/futex.h linux-2.6.32.45/arch/x86/include/asm/futex.h
8651--- linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8652+++ linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8653@@ -12,16 +12,18 @@
8654 #include <asm/system.h>
8655
8656 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8657+ typecheck(u32 *, uaddr); \
8658 asm volatile("1:\t" insn "\n" \
8659 "2:\t.section .fixup,\"ax\"\n" \
8660 "3:\tmov\t%3, %1\n" \
8661 "\tjmp\t2b\n" \
8662 "\t.previous\n" \
8663 _ASM_EXTABLE(1b, 3b) \
8664- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8665+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8666 : "i" (-EFAULT), "0" (oparg), "1" (0))
8667
8668 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8669+ typecheck(u32 *, uaddr); \
8670 asm volatile("1:\tmovl %2, %0\n" \
8671 "\tmovl\t%0, %3\n" \
8672 "\t" insn "\n" \
8673@@ -34,10 +36,10 @@
8674 _ASM_EXTABLE(1b, 4b) \
8675 _ASM_EXTABLE(2b, 4b) \
8676 : "=&a" (oldval), "=&r" (ret), \
8677- "+m" (*uaddr), "=&r" (tem) \
8678+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8679 : "r" (oparg), "i" (-EFAULT), "1" (0))
8680
8681-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8682+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8683 {
8684 int op = (encoded_op >> 28) & 7;
8685 int cmp = (encoded_op >> 24) & 15;
8686@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8687
8688 switch (op) {
8689 case FUTEX_OP_SET:
8690- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8691+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8692 break;
8693 case FUTEX_OP_ADD:
8694- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8695+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8696 uaddr, oparg);
8697 break;
8698 case FUTEX_OP_OR:
8699@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8700 return ret;
8701 }
8702
8703-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8704+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8705 int newval)
8706 {
8707
8708@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8709 return -ENOSYS;
8710 #endif
8711
8712- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8713+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8714 return -EFAULT;
8715
8716- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8717+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8718 "2:\t.section .fixup, \"ax\"\n"
8719 "3:\tmov %2, %0\n"
8720 "\tjmp 2b\n"
8721 "\t.previous\n"
8722 _ASM_EXTABLE(1b, 3b)
8723- : "=a" (oldval), "+m" (*uaddr)
8724+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8725 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8726 : "memory"
8727 );
8728diff -urNp linux-2.6.32.45/arch/x86/include/asm/hw_irq.h linux-2.6.32.45/arch/x86/include/asm/hw_irq.h
8729--- linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8730+++ linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8731@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8732 extern void enable_IO_APIC(void);
8733
8734 /* Statistics */
8735-extern atomic_t irq_err_count;
8736-extern atomic_t irq_mis_count;
8737+extern atomic_unchecked_t irq_err_count;
8738+extern atomic_unchecked_t irq_mis_count;
8739
8740 /* EISA */
8741 extern void eisa_set_level_irq(unsigned int irq);
8742diff -urNp linux-2.6.32.45/arch/x86/include/asm/i387.h linux-2.6.32.45/arch/x86/include/asm/i387.h
8743--- linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8744+++ linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8745@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8746 {
8747 int err;
8748
8749+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8750+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8751+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8752+#endif
8753+
8754 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8755 "2:\n"
8756 ".section .fixup,\"ax\"\n"
8757@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8758 {
8759 int err;
8760
8761+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8762+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8763+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8764+#endif
8765+
8766 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8767 "2:\n"
8768 ".section .fixup,\"ax\"\n"
8769@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8770 }
8771
8772 /* We need a safe address that is cheap to find and that is already
8773- in L1 during context switch. The best choices are unfortunately
8774- different for UP and SMP */
8775-#ifdef CONFIG_SMP
8776-#define safe_address (__per_cpu_offset[0])
8777-#else
8778-#define safe_address (kstat_cpu(0).cpustat.user)
8779-#endif
8780+ in L1 during context switch. */
8781+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8782
8783 /*
8784 * These must be called with preempt disabled
8785@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8786 struct thread_info *me = current_thread_info();
8787 preempt_disable();
8788 if (me->status & TS_USEDFPU)
8789- __save_init_fpu(me->task);
8790+ __save_init_fpu(current);
8791 else
8792 clts();
8793 }
8794diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_32.h linux-2.6.32.45/arch/x86/include/asm/io_32.h
8795--- linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8796+++ linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8797@@ -3,6 +3,7 @@
8798
8799 #include <linux/string.h>
8800 #include <linux/compiler.h>
8801+#include <asm/processor.h>
8802
8803 /*
8804 * This file contains the definitions for the x86 IO instructions
8805@@ -42,6 +43,17 @@
8806
8807 #ifdef __KERNEL__
8808
8809+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8810+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8811+{
8812+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8813+}
8814+
8815+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8816+{
8817+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8818+}
8819+
8820 #include <asm-generic/iomap.h>
8821
8822 #include <linux/vmalloc.h>
8823diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_64.h linux-2.6.32.45/arch/x86/include/asm/io_64.h
8824--- linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8825+++ linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8826@@ -140,6 +140,17 @@ __OUTS(l)
8827
8828 #include <linux/vmalloc.h>
8829
8830+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8831+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8832+{
8833+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8834+}
8835+
8836+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8837+{
8838+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8839+}
8840+
8841 #include <asm-generic/iomap.h>
8842
8843 void __memcpy_fromio(void *, unsigned long, unsigned);
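
The valid_phys_addr_range()/valid_mmap_phys_addr_range() helpers added to both io_32.h and io_64.h above reduce to one comparison: round the end of the request up to a page and check the resulting frame number against 2^(x86_phys_bits - PAGE_SHIFT). A standalone sketch of that arithmetic with an illustrative 36-bit physical address width, not read from a real CPU:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

/* same expression as the patched helpers, with the CPU's phys_bits passed in */
static int demo_valid_phys_addr_range(unsigned long long addr, size_t count,
                                      unsigned phys_bits)
{
        return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
               (1ULL << (phys_bits - PAGE_SHIFT)) ? 1 : 0;
}

int main(void)
{
        unsigned phys_bits = 36;                        /* illustrative CPU width */
        unsigned long long limit = 1ULL << phys_bits;   /* first address past the end */

        /* prints 1 then 0: the range ending a page below the limit passes,
         * the one whose rounded-up end reaches the limit is rejected */
        printf("%d\n", demo_valid_phys_addr_range(limit - 2 * PAGE_SIZE, PAGE_SIZE, phys_bits));
        printf("%d\n", demo_valid_phys_addr_range(limit - PAGE_SIZE, PAGE_SIZE, phys_bits));
        return 0;
}
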
8844diff -urNp linux-2.6.32.45/arch/x86/include/asm/iommu.h linux-2.6.32.45/arch/x86/include/asm/iommu.h
8845--- linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8846+++ linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8847@@ -3,7 +3,7 @@
8848
8849 extern void pci_iommu_shutdown(void);
8850 extern void no_iommu_init(void);
8851-extern struct dma_map_ops nommu_dma_ops;
8852+extern const struct dma_map_ops nommu_dma_ops;
8853 extern int force_iommu, no_iommu;
8854 extern int iommu_detected;
8855 extern int iommu_pass_through;
8856diff -urNp linux-2.6.32.45/arch/x86/include/asm/irqflags.h linux-2.6.32.45/arch/x86/include/asm/irqflags.h
8857--- linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8858+++ linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8859@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8860 sti; \
8861 sysexit
8862
8863+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8864+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8865+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8866+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8867+
8868 #else
8869 #define INTERRUPT_RETURN iret
8870 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8871diff -urNp linux-2.6.32.45/arch/x86/include/asm/kprobes.h linux-2.6.32.45/arch/x86/include/asm/kprobes.h
8872--- linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8873+++ linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8874@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8875 #define BREAKPOINT_INSTRUCTION 0xcc
8876 #define RELATIVEJUMP_INSTRUCTION 0xe9
8877 #define MAX_INSN_SIZE 16
8878-#define MAX_STACK_SIZE 64
8879-#define MIN_STACK_SIZE(ADDR) \
8880- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8881- THREAD_SIZE - (unsigned long)(ADDR))) \
8882- ? (MAX_STACK_SIZE) \
8883- : (((unsigned long)current_thread_info()) + \
8884- THREAD_SIZE - (unsigned long)(ADDR)))
8885+#define MAX_STACK_SIZE 64UL
8886+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8887
8888 #define flush_insn_slot(p) do { } while (0)
8889
8890diff -urNp linux-2.6.32.45/arch/x86/include/asm/kvm_host.h linux-2.6.32.45/arch/x86/include/asm/kvm_host.h
8891--- linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8892+++ linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8893@@ -536,7 +536,7 @@ struct kvm_x86_ops {
8894 const struct trace_print_flags *exit_reasons_str;
8895 };
8896
8897-extern struct kvm_x86_ops *kvm_x86_ops;
8898+extern const struct kvm_x86_ops *kvm_x86_ops;
8899
8900 int kvm_mmu_module_init(void);
8901 void kvm_mmu_module_exit(void);
8902diff -urNp linux-2.6.32.45/arch/x86/include/asm/local.h linux-2.6.32.45/arch/x86/include/asm/local.h
8903--- linux-2.6.32.45/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8904+++ linux-2.6.32.45/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8905@@ -18,26 +18,58 @@ typedef struct {
8906
8907 static inline void local_inc(local_t *l)
8908 {
8909- asm volatile(_ASM_INC "%0"
8910+ asm volatile(_ASM_INC "%0\n"
8911+
8912+#ifdef CONFIG_PAX_REFCOUNT
8913+ "jno 0f\n"
8914+ _ASM_DEC "%0\n"
8915+ "int $4\n0:\n"
8916+ _ASM_EXTABLE(0b, 0b)
8917+#endif
8918+
8919 : "+m" (l->a.counter));
8920 }
8921
8922 static inline void local_dec(local_t *l)
8923 {
8924- asm volatile(_ASM_DEC "%0"
8925+ asm volatile(_ASM_DEC "%0\n"
8926+
8927+#ifdef CONFIG_PAX_REFCOUNT
8928+ "jno 0f\n"
8929+ _ASM_INC "%0\n"
8930+ "int $4\n0:\n"
8931+ _ASM_EXTABLE(0b, 0b)
8932+#endif
8933+
8934 : "+m" (l->a.counter));
8935 }
8936
8937 static inline void local_add(long i, local_t *l)
8938 {
8939- asm volatile(_ASM_ADD "%1,%0"
8940+ asm volatile(_ASM_ADD "%1,%0\n"
8941+
8942+#ifdef CONFIG_PAX_REFCOUNT
8943+ "jno 0f\n"
8944+ _ASM_SUB "%1,%0\n"
8945+ "int $4\n0:\n"
8946+ _ASM_EXTABLE(0b, 0b)
8947+#endif
8948+
8949 : "+m" (l->a.counter)
8950 : "ir" (i));
8951 }
8952
8953 static inline void local_sub(long i, local_t *l)
8954 {
8955- asm volatile(_ASM_SUB "%1,%0"
8956+ asm volatile(_ASM_SUB "%1,%0\n"
8957+
8958+#ifdef CONFIG_PAX_REFCOUNT
8959+ "jno 0f\n"
8960+ _ASM_ADD "%1,%0\n"
8961+ "int $4\n0:\n"
8962+ _ASM_EXTABLE(0b, 0b)
8963+#endif
8964+
8965 : "+m" (l->a.counter)
8966 : "ir" (i));
8967 }
8968@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8969 {
8970 unsigned char c;
8971
8972- asm volatile(_ASM_SUB "%2,%0; sete %1"
8973+ asm volatile(_ASM_SUB "%2,%0\n"
8974+
8975+#ifdef CONFIG_PAX_REFCOUNT
8976+ "jno 0f\n"
8977+ _ASM_ADD "%2,%0\n"
8978+ "int $4\n0:\n"
8979+ _ASM_EXTABLE(0b, 0b)
8980+#endif
8981+
8982+ "sete %1\n"
8983 : "+m" (l->a.counter), "=qm" (c)
8984 : "ir" (i) : "memory");
8985 return c;
8986@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8987 {
8988 unsigned char c;
8989
8990- asm volatile(_ASM_DEC "%0; sete %1"
8991+ asm volatile(_ASM_DEC "%0\n"
8992+
8993+#ifdef CONFIG_PAX_REFCOUNT
8994+ "jno 0f\n"
8995+ _ASM_INC "%0\n"
8996+ "int $4\n0:\n"
8997+ _ASM_EXTABLE(0b, 0b)
8998+#endif
8999+
9000+ "sete %1\n"
9001 : "+m" (l->a.counter), "=qm" (c)
9002 : : "memory");
9003 return c != 0;
9004@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
9005 {
9006 unsigned char c;
9007
9008- asm volatile(_ASM_INC "%0; sete %1"
9009+ asm volatile(_ASM_INC "%0\n"
9010+
9011+#ifdef CONFIG_PAX_REFCOUNT
9012+ "jno 0f\n"
9013+ _ASM_DEC "%0\n"
9014+ "int $4\n0:\n"
9015+ _ASM_EXTABLE(0b, 0b)
9016+#endif
9017+
9018+ "sete %1\n"
9019 : "+m" (l->a.counter), "=qm" (c)
9020 : : "memory");
9021 return c != 0;
9022@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
9023 {
9024 unsigned char c;
9025
9026- asm volatile(_ASM_ADD "%2,%0; sets %1"
9027+ asm volatile(_ASM_ADD "%2,%0\n"
9028+
9029+#ifdef CONFIG_PAX_REFCOUNT
9030+ "jno 0f\n"
9031+ _ASM_SUB "%2,%0\n"
9032+ "int $4\n0:\n"
9033+ _ASM_EXTABLE(0b, 0b)
9034+#endif
9035+
9036+ "sets %1\n"
9037 : "+m" (l->a.counter), "=qm" (c)
9038 : "ir" (i) : "memory");
9039 return c;
9040@@ -133,7 +201,15 @@ static inline long local_add_return(long
9041 #endif
9042 /* Modern 486+ processor */
9043 __i = i;
9044- asm volatile(_ASM_XADD "%0, %1;"
9045+ asm volatile(_ASM_XADD "%0, %1\n"
9046+
9047+#ifdef CONFIG_PAX_REFCOUNT
9048+ "jno 0f\n"
9049+ _ASM_MOV "%0,%1\n"
9050+ "int $4\n0:\n"
9051+ _ASM_EXTABLE(0b, 0b)
9052+#endif
9053+
9054 : "+r" (i), "+m" (l->a.counter)
9055 : : "memory");
9056 return i + __i;
9057diff -urNp linux-2.6.32.45/arch/x86/include/asm/microcode.h linux-2.6.32.45/arch/x86/include/asm/microcode.h
9058--- linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
9059+++ linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
9060@@ -12,13 +12,13 @@ struct device;
9061 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
9062
9063 struct microcode_ops {
9064- enum ucode_state (*request_microcode_user) (int cpu,
9065+ enum ucode_state (* const request_microcode_user) (int cpu,
9066 const void __user *buf, size_t size);
9067
9068- enum ucode_state (*request_microcode_fw) (int cpu,
9069+ enum ucode_state (* const request_microcode_fw) (int cpu,
9070 struct device *device);
9071
9072- void (*microcode_fini_cpu) (int cpu);
9073+ void (* const microcode_fini_cpu) (int cpu);
9074
9075 /*
9076 * The generic 'microcode_core' part guarantees that
9077@@ -38,18 +38,18 @@ struct ucode_cpu_info {
9078 extern struct ucode_cpu_info ucode_cpu_info[];
9079
9080 #ifdef CONFIG_MICROCODE_INTEL
9081-extern struct microcode_ops * __init init_intel_microcode(void);
9082+extern const struct microcode_ops * __init init_intel_microcode(void);
9083 #else
9084-static inline struct microcode_ops * __init init_intel_microcode(void)
9085+static inline const struct microcode_ops * __init init_intel_microcode(void)
9086 {
9087 return NULL;
9088 }
9089 #endif /* CONFIG_MICROCODE_INTEL */
9090
9091 #ifdef CONFIG_MICROCODE_AMD
9092-extern struct microcode_ops * __init init_amd_microcode(void);
9093+extern const struct microcode_ops * __init init_amd_microcode(void);
9094 #else
9095-static inline struct microcode_ops * __init init_amd_microcode(void)
9096+static inline const struct microcode_ops * __init init_amd_microcode(void)
9097 {
9098 return NULL;
9099 }
9100diff -urNp linux-2.6.32.45/arch/x86/include/asm/mman.h linux-2.6.32.45/arch/x86/include/asm/mman.h
9101--- linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
9102+++ linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
9103@@ -5,4 +5,14 @@
9104
9105 #include <asm-generic/mman.h>
9106
9107+#ifdef __KERNEL__
9108+#ifndef __ASSEMBLY__
9109+#ifdef CONFIG_X86_32
9110+#define arch_mmap_check i386_mmap_check
9111+int i386_mmap_check(unsigned long addr, unsigned long len,
9112+ unsigned long flags);
9113+#endif
9114+#endif
9115+#endif
9116+
9117 #endif /* _ASM_X86_MMAN_H */
9118diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu_context.h linux-2.6.32.45/arch/x86/include/asm/mmu_context.h
9119--- linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
9120+++ linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-08-23 20:24:19.000000000 -0400
9121@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
9122
9123 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9124 {
9125+
9126+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9127+ unsigned int i;
9128+ pgd_t *pgd;
9129+
9130+ pax_open_kernel();
9131+ pgd = get_cpu_pgd(smp_processor_id());
9132+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9133+ set_pgd_batched(pgd+i, native_make_pgd(0));
9134+ pax_close_kernel();
9135+#endif
9136+
9137 #ifdef CONFIG_SMP
9138 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9139 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9140@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
9141 struct task_struct *tsk)
9142 {
9143 unsigned cpu = smp_processor_id();
9144+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
9145+ int tlbstate = TLBSTATE_OK;
9146+#endif
9147
9148 if (likely(prev != next)) {
9149 #ifdef CONFIG_SMP
9150+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9151+ tlbstate = percpu_read(cpu_tlbstate.state);
9152+#endif
9153 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9154 percpu_write(cpu_tlbstate.active_mm, next);
9155 #endif
9156 cpumask_set_cpu(cpu, mm_cpumask(next));
9157
9158 /* Re-load page tables */
9159+#ifdef CONFIG_PAX_PER_CPU_PGD
9160+ pax_open_kernel();
9161+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9162+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9163+ pax_close_kernel();
9164+ load_cr3(get_cpu_pgd(cpu));
9165+#else
9166 load_cr3(next->pgd);
9167+#endif
9168
9169 /* stop flush ipis for the previous mm */
9170 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9171@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
9172 */
9173 if (unlikely(prev->context.ldt != next->context.ldt))
9174 load_LDT_nolock(&next->context);
9175- }
9176+
9177+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9178+ if (!nx_enabled) {
9179+ smp_mb__before_clear_bit();
9180+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9181+ smp_mb__after_clear_bit();
9182+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9183+ }
9184+#endif
9185+
9186+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9187+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9188+ prev->context.user_cs_limit != next->context.user_cs_limit))
9189+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9190 #ifdef CONFIG_SMP
9191+ else if (unlikely(tlbstate != TLBSTATE_OK))
9192+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9193+#endif
9194+#endif
9195+
9196+ }
9197 else {
9198+
9199+#ifdef CONFIG_PAX_PER_CPU_PGD
9200+ pax_open_kernel();
9201+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9202+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9203+ pax_close_kernel();
9204+ load_cr3(get_cpu_pgd(cpu));
9205+#endif
9206+
9207+#ifdef CONFIG_SMP
9208 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9209 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9210
9211@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
9212 * tlb flush IPI delivery. We must reload CR3
9213 * to make sure to use no freed page tables.
9214 */
9215+
9216+#ifndef CONFIG_PAX_PER_CPU_PGD
9217 load_cr3(next->pgd);
9218+#endif
9219+
9220 load_LDT_nolock(&next->context);
9221+
9222+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9223+ if (!nx_enabled)
9224+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9225+#endif
9226+
9227+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9228+#ifdef CONFIG_PAX_PAGEEXEC
9229+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9230+#endif
9231+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9232+#endif
9233+
9234 }
9235- }
9236 #endif
9237+ }
9238 }
9239
9240 #define activate_mm(prev, next) \
9241diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu.h linux-2.6.32.45/arch/x86/include/asm/mmu.h
9242--- linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9243+++ linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9244@@ -9,10 +9,23 @@
9245 * we put the segment information here.
9246 */
9247 typedef struct {
9248- void *ldt;
9249+ struct desc_struct *ldt;
9250 int size;
9251 struct mutex lock;
9252- void *vdso;
9253+ unsigned long vdso;
9254+
9255+#ifdef CONFIG_X86_32
9256+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9257+ unsigned long user_cs_base;
9258+ unsigned long user_cs_limit;
9259+
9260+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9261+ cpumask_t cpu_user_cs_mask;
9262+#endif
9263+
9264+#endif
9265+#endif
9266+
9267 } mm_context_t;
9268
9269 #ifdef CONFIG_SMP
9270diff -urNp linux-2.6.32.45/arch/x86/include/asm/module.h linux-2.6.32.45/arch/x86/include/asm/module.h
9271--- linux-2.6.32.45/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9272+++ linux-2.6.32.45/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9273@@ -5,6 +5,7 @@
9274
9275 #ifdef CONFIG_X86_64
9276 /* X86_64 does not define MODULE_PROC_FAMILY */
9277+#define MODULE_PROC_FAMILY ""
9278 #elif defined CONFIG_M386
9279 #define MODULE_PROC_FAMILY "386 "
9280 #elif defined CONFIG_M486
9281@@ -59,13 +60,36 @@
9282 #error unknown processor family
9283 #endif
9284
9285-#ifdef CONFIG_X86_32
9286-# ifdef CONFIG_4KSTACKS
9287-# define MODULE_STACKSIZE "4KSTACKS "
9288-# else
9289-# define MODULE_STACKSIZE ""
9290-# endif
9291-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9292+#ifdef CONFIG_PAX_MEMORY_UDEREF
9293+#define MODULE_PAX_UDEREF "UDEREF "
9294+#else
9295+#define MODULE_PAX_UDEREF ""
9296+#endif
9297+
9298+#ifdef CONFIG_PAX_KERNEXEC
9299+#define MODULE_PAX_KERNEXEC "KERNEXEC "
9300+#else
9301+#define MODULE_PAX_KERNEXEC ""
9302+#endif
9303+
9304+#ifdef CONFIG_PAX_REFCOUNT
9305+#define MODULE_PAX_REFCOUNT "REFCOUNT "
9306+#else
9307+#define MODULE_PAX_REFCOUNT ""
9308 #endif
9309
9310+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9311+#define MODULE_STACKSIZE "4KSTACKS "
9312+#else
9313+#define MODULE_STACKSIZE ""
9314+#endif
9315+
9316+#ifdef CONFIG_GRKERNSEC
9317+#define MODULE_GRSEC "GRSECURITY "
9318+#else
9319+#define MODULE_GRSEC ""
9320+#endif
9321+
9322+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9323+
9324 #endif /* _ASM_X86_MODULE_H */
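
MODULE_ARCH_VERMAGIC above is assembled purely from adjacent string literals, so a module built without the matching PAX/grsecurity options carries a different vermagic string and is refused at load time. A sketch of the concatenation for one made-up configuration (x86_64, no 4KSTACKS, GRKERNSEC plus all three PAX features enabled); the values are chosen for the demo, not taken from a real build:

#include <stdio.h>

#define MODULE_PROC_FAMILY  ""              /* x86_64 per the hunk above */
#define MODULE_STACKSIZE    ""
#define MODULE_GRSEC        "GRSECURITY "
#define MODULE_PAX_KERNEXEC "KERNEXEC "
#define MODULE_PAX_UDEREF   "UDEREF "
#define MODULE_PAX_REFCOUNT "REFCOUNT "

#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC \
        MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
        /* adjacent literals merge into "GRSECURITY KERNEXEC UDEREF REFCOUNT " */
        puts(MODULE_ARCH_VERMAGIC);
        return 0;
}
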
9325diff -urNp linux-2.6.32.45/arch/x86/include/asm/page_64_types.h linux-2.6.32.45/arch/x86/include/asm/page_64_types.h
9326--- linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9327+++ linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9328@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9329
9330 /* duplicated to the one in bootmem.h */
9331 extern unsigned long max_pfn;
9332-extern unsigned long phys_base;
9333+extern const unsigned long phys_base;
9334
9335 extern unsigned long __phys_addr(unsigned long);
9336 #define __phys_reloc_hide(x) (x)
9337diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt.h linux-2.6.32.45/arch/x86/include/asm/paravirt.h
9338--- linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9339+++ linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-08-23 21:36:48.000000000 -0400
9340@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp,
9341 val);
9342 }
9343
9344+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9345+{
9346+ pgdval_t val = native_pgd_val(pgd);
9347+
9348+ if (sizeof(pgdval_t) > sizeof(long))
9349+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9350+ val, (u64)val >> 32);
9351+ else
9352+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9353+ val);
9354+}
9355+
9356 static inline void pgd_clear(pgd_t *pgdp)
9357 {
9358 set_pgd(pgdp, __pgd(0));
9359@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned
9360 pv_mmu_ops.set_fixmap(idx, phys, flags);
9361 }
9362
9363+#ifdef CONFIG_PAX_KERNEXEC
9364+static inline unsigned long pax_open_kernel(void)
9365+{
9366+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9367+}
9368+
9369+static inline unsigned long pax_close_kernel(void)
9370+{
9371+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9372+}
9373+#else
9374+static inline unsigned long pax_open_kernel(void) { return 0; }
9375+static inline unsigned long pax_close_kernel(void) { return 0; }
9376+#endif
9377+
9378 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9379
9380 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9381@@ -945,7 +972,7 @@ extern void default_banner(void);
9382
9383 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9384 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9385-#define PARA_INDIRECT(addr) *%cs:addr
9386+#define PARA_INDIRECT(addr) *%ss:addr
9387 #endif
9388
9389 #define INTERRUPT_RETURN \
9390@@ -1022,6 +1049,21 @@ extern void default_banner(void);
9391 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9392 CLBR_NONE, \
9393 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9394+
9395+#define GET_CR0_INTO_RDI \
9396+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9397+ mov %rax,%rdi
9398+
9399+#define SET_RDI_INTO_CR0 \
9400+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9401+
9402+#define GET_CR3_INTO_RDI \
9403+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9404+ mov %rax,%rdi
9405+
9406+#define SET_RDI_INTO_CR3 \
9407+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9408+
9409 #endif /* CONFIG_X86_32 */
9410
9411 #endif /* __ASSEMBLY__ */
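This hunk routes two new operations through pv_mmu_ops: set_pgd_batched() for batched PGD updates and, under CONFIG_PAX_KERNEXEC, the pax_open_kernel()/pax_close_kernel() pair. A trimmed, illustrative sketch of how a native backend could populate the added hooks with the native_* helpers introduced later in this patch; the actual registration lives in paravirt.c, which this hunk does not show, and a 4-level page-table configuration is assumed:

/* Illustrative wiring only: the field names match the struct additions in
 * paravirt_types.h below; the initializer is heavily trimmed. */
static struct pv_mmu_ops demo_mmu_ops = {
	.set_pgd	  = native_set_pgd,
	.set_pgd_batched  = native_set_pgd_batched,
#ifdef CONFIG_PAX_KERNEXEC
	.pax_open_kernel  = native_pax_open_kernel,
	.pax_close_kernel = native_pax_close_kernel,
#endif
};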
9412diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h
9413--- linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9414+++ linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-08-23 20:24:19.000000000 -0400
9415@@ -78,19 +78,19 @@ struct pv_init_ops {
9416 */
9417 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9418 unsigned long addr, unsigned len);
9419-};
9420+} __no_const;
9421
9422
9423 struct pv_lazy_ops {
9424 /* Set deferred update mode, used for batching operations. */
9425 void (*enter)(void);
9426 void (*leave)(void);
9427-};
9428+} __no_const;
9429
9430 struct pv_time_ops {
9431 unsigned long long (*sched_clock)(void);
9432 unsigned long (*get_tsc_khz)(void);
9433-};
9434+} __no_const;
9435
9436 struct pv_cpu_ops {
9437 /* hooks for various privileged instructions */
9438@@ -186,7 +186,7 @@ struct pv_cpu_ops {
9439
9440 void (*start_context_switch)(struct task_struct *prev);
9441 void (*end_context_switch)(struct task_struct *next);
9442-};
9443+} __no_const;
9444
9445 struct pv_irq_ops {
9446 /*
9447@@ -217,7 +217,7 @@ struct pv_apic_ops {
9448 unsigned long start_eip,
9449 unsigned long start_esp);
9450 #endif
9451-};
9452+} __no_const;
9453
9454 struct pv_mmu_ops {
9455 unsigned long (*read_cr2)(void);
9456@@ -301,6 +301,7 @@ struct pv_mmu_ops {
9457 struct paravirt_callee_save make_pud;
9458
9459 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9460+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9461 #endif /* PAGETABLE_LEVELS == 4 */
9462 #endif /* PAGETABLE_LEVELS >= 3 */
9463
9464@@ -316,6 +317,12 @@ struct pv_mmu_ops {
9465 an mfn. We can tell which is which from the index. */
9466 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9467 phys_addr_t phys, pgprot_t flags);
9468+
9469+#ifdef CONFIG_PAX_KERNEXEC
9470+ unsigned long (*pax_open_kernel)(void);
9471+ unsigned long (*pax_close_kernel)(void);
9472+#endif
9473+
9474 };
9475
9476 struct raw_spinlock;
9477@@ -326,7 +333,7 @@ struct pv_lock_ops {
9478 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
9479 int (*spin_trylock)(struct raw_spinlock *lock);
9480 void (*spin_unlock)(struct raw_spinlock *lock);
9481-};
9482+} __no_const;
9483
9484 /* This contains all the paravirt structures: we get a convenient
9485 * number for each function using the offset which we use to indicate
9486diff -urNp linux-2.6.32.45/arch/x86/include/asm/pci_x86.h linux-2.6.32.45/arch/x86/include/asm/pci_x86.h
9487--- linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9488+++ linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9489@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9490 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9491
9492 struct pci_raw_ops {
9493- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9494+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9495 int reg, int len, u32 *val);
9496- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9497+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9498 int reg, int len, u32 val);
9499 };
9500
9501-extern struct pci_raw_ops *raw_pci_ops;
9502-extern struct pci_raw_ops *raw_pci_ext_ops;
9503+extern const struct pci_raw_ops *raw_pci_ops;
9504+extern const struct pci_raw_ops *raw_pci_ext_ops;
9505
9506-extern struct pci_raw_ops pci_direct_conf1;
9507+extern const struct pci_raw_ops pci_direct_conf1;
9508 extern bool port_cf9_safe;
9509
9510 /* arch_initcall level */
9511diff -urNp linux-2.6.32.45/arch/x86/include/asm/percpu.h linux-2.6.32.45/arch/x86/include/asm/percpu.h
9512--- linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-03-27 14:31:47.000000000 -0400
9513+++ linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-08-17 19:33:59.000000000 -0400
9514@@ -78,6 +78,7 @@ do { \
9515 if (0) { \
9516 T__ tmp__; \
9517 tmp__ = (val); \
9518+ (void)tmp__; \
9519 } \
9520 switch (sizeof(var)) { \
9521 case 1: \
9522diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgalloc.h linux-2.6.32.45/arch/x86/include/asm/pgalloc.h
9523--- linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9524+++ linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9525@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9526 pmd_t *pmd, pte_t *pte)
9527 {
9528 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9529+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9530+}
9531+
9532+static inline void pmd_populate_user(struct mm_struct *mm,
9533+ pmd_t *pmd, pte_t *pte)
9534+{
9535+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9536 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9537 }
9538
9539diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h
9540--- linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9541+++ linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9542@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9543
9544 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9545 {
9546+ pax_open_kernel();
9547 *pmdp = pmd;
9548+ pax_close_kernel();
9549 }
9550
9551 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9552diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h
9553--- linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9554+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9555@@ -26,9 +26,6 @@
9556 struct mm_struct;
9557 struct vm_area_struct;
9558
9559-extern pgd_t swapper_pg_dir[1024];
9560-extern pgd_t trampoline_pg_dir[1024];
9561-
9562 static inline void pgtable_cache_init(void) { }
9563 static inline void check_pgt_cache(void) { }
9564 void paging_init(void);
9565@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9566 # include <asm/pgtable-2level.h>
9567 #endif
9568
9569+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9570+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9571+#ifdef CONFIG_X86_PAE
9572+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9573+#endif
9574+
9575 #if defined(CONFIG_HIGHPTE)
9576 #define __KM_PTE \
9577 (in_nmi() ? KM_NMI_PTE : \
9578@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9579 /* Clear a kernel PTE and flush it from the TLB */
9580 #define kpte_clear_flush(ptep, vaddr) \
9581 do { \
9582+ pax_open_kernel(); \
9583 pte_clear(&init_mm, (vaddr), (ptep)); \
9584+ pax_close_kernel(); \
9585 __flush_tlb_one((vaddr)); \
9586 } while (0)
9587
9588@@ -85,6 +90,9 @@ do { \
9589
9590 #endif /* !__ASSEMBLY__ */
9591
9592+#define HAVE_ARCH_UNMAPPED_AREA
9593+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9594+
9595 /*
9596 * kern_addr_valid() is (1) for FLATMEM and (0) for
9597 * SPARSEMEM and DISCONTIGMEM
9598diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h
9599--- linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9600+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9601@@ -8,7 +8,7 @@
9602 */
9603 #ifdef CONFIG_X86_PAE
9604 # include <asm/pgtable-3level_types.h>
9605-# define PMD_SIZE (1UL << PMD_SHIFT)
9606+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9607 # define PMD_MASK (~(PMD_SIZE - 1))
9608 #else
9609 # include <asm/pgtable-2level_types.h>
9610@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9611 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9612 #endif
9613
9614+#ifdef CONFIG_PAX_KERNEXEC
9615+#ifndef __ASSEMBLY__
9616+extern unsigned char MODULES_EXEC_VADDR[];
9617+extern unsigned char MODULES_EXEC_END[];
9618+#endif
9619+#include <asm/boot.h>
9620+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9621+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9622+#else
9623+#define ktla_ktva(addr) (addr)
9624+#define ktva_ktla(addr) (addr)
9625+#endif
9626+
9627 #define MODULES_VADDR VMALLOC_START
9628 #define MODULES_END VMALLOC_END
9629 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
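With KERNEXEC enabled on 32-bit, kernel text addresses have two views, and ktla_ktva()/ktva_ktla() convert between them by adding or subtracting LOAD_PHYSICAL_ADDR + PAGE_OFFSET; without KERNEXEC (and on x86_64, see the pgtable_64_types.h hunk further down) both macros collapse to the identity. A standalone sketch of the arithmetic, using assumed values for the two build-time constants:

/* Round-trip check for the ktla_ktva()/ktva_ktla() translation.
 * LOAD_PHYSICAL_ADDR and PAGE_OFFSET are hypothetical stand-ins for the
 * values a real 32-bit build would provide. */
#include <assert.h>
#include <stdio.h>

#define LOAD_PHYSICAL_ADDR 0x1000000UL	/* assumed 16 MiB load address */
#define PAGE_OFFSET        0xc0000000UL	/* assumed 3G/1G split */

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
	unsigned long text = 0x00100000UL;	/* arbitrary example address */
	unsigned long alias = ktla_ktva(text);

	assert(ktva_ktla(alias) == text);	/* the macros are exact inverses */
	printf("%#lx <-> %#lx\n", text, alias);
	return 0;
}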
9630diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h
9631--- linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9632+++ linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9633@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9634
9635 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9636 {
9637+ pax_open_kernel();
9638 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9639+ pax_close_kernel();
9640 }
9641
9642 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9643 {
9644+ pax_open_kernel();
9645 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9646+ pax_close_kernel();
9647 }
9648
9649 /*
9650diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h
9651--- linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9652+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-08-23 20:24:19.000000000 -0400
9653@@ -16,10 +16,13 @@
9654
9655 extern pud_t level3_kernel_pgt[512];
9656 extern pud_t level3_ident_pgt[512];
9657+extern pud_t level3_vmalloc_pgt[512];
9658+extern pud_t level3_vmemmap_pgt[512];
9659+extern pud_t level2_vmemmap_pgt[512];
9660 extern pmd_t level2_kernel_pgt[512];
9661 extern pmd_t level2_fixmap_pgt[512];
9662-extern pmd_t level2_ident_pgt[512];
9663-extern pgd_t init_level4_pgt[];
9664+extern pmd_t level2_ident_pgt[512*2];
9665+extern pgd_t init_level4_pgt[512];
9666
9667 #define swapper_pg_dir init_level4_pgt
9668
9669@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9670
9671 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9672 {
9673+ pax_open_kernel();
9674 *pmdp = pmd;
9675+ pax_close_kernel();
9676 }
9677
9678 static inline void native_pmd_clear(pmd_t *pmd)
9679@@ -94,6 +99,13 @@ static inline void native_pud_clear(pud_
9680
9681 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9682 {
9683+ pax_open_kernel();
9684+ *pgdp = pgd;
9685+ pax_close_kernel();
9686+}
9687+
9688+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9689+{
9690 *pgdp = pgd;
9691 }
9692
9693diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h
9694--- linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9695+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9696@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9697 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9698 #define MODULES_END _AC(0xffffffffff000000, UL)
9699 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9700+#define MODULES_EXEC_VADDR MODULES_VADDR
9701+#define MODULES_EXEC_END MODULES_END
9702+
9703+#define ktla_ktva(addr) (addr)
9704+#define ktva_ktla(addr) (addr)
9705
9706 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9707diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable.h linux-2.6.32.45/arch/x86/include/asm/pgtable.h
9708--- linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9709+++ linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-08-23 20:24:19.000000000 -0400
9710@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
9711
9712 #ifndef __PAGETABLE_PUD_FOLDED
9713 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9714+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9715 #define pgd_clear(pgd) native_pgd_clear(pgd)
9716 #endif
9717
9718@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
9719
9720 #define arch_end_context_switch(prev) do {} while(0)
9721
9722+#define pax_open_kernel() native_pax_open_kernel()
9723+#define pax_close_kernel() native_pax_close_kernel()
9724 #endif /* CONFIG_PARAVIRT */
9725
9726+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9727+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9728+
9729+#ifdef CONFIG_PAX_KERNEXEC
9730+static inline unsigned long native_pax_open_kernel(void)
9731+{
9732+ unsigned long cr0;
9733+
9734+ preempt_disable();
9735+ barrier();
9736+ cr0 = read_cr0() ^ X86_CR0_WP;
9737+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9738+ write_cr0(cr0);
9739+ return cr0 ^ X86_CR0_WP;
9740+}
9741+
9742+static inline unsigned long native_pax_close_kernel(void)
9743+{
9744+ unsigned long cr0;
9745+
9746+ cr0 = read_cr0() ^ X86_CR0_WP;
9747+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9748+ write_cr0(cr0);
9749+ barrier();
9750+ preempt_enable_no_resched();
9751+ return cr0 ^ X86_CR0_WP;
9752+}
9753+#else
9754+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9755+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9756+#endif
9757+
9758 /*
9759 * The following only work if pte_present() is true.
9760 * Undefined behaviour if not..
9761 */
9762+static inline int pte_user(pte_t pte)
9763+{
9764+ return pte_val(pte) & _PAGE_USER;
9765+}
9766+
9767 static inline int pte_dirty(pte_t pte)
9768 {
9769 return pte_flags(pte) & _PAGE_DIRTY;
9770@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t
9771 return pte_clear_flags(pte, _PAGE_RW);
9772 }
9773
9774+static inline pte_t pte_mkread(pte_t pte)
9775+{
9776+ return __pte(pte_val(pte) | _PAGE_USER);
9777+}
9778+
9779 static inline pte_t pte_mkexec(pte_t pte)
9780 {
9781- return pte_clear_flags(pte, _PAGE_NX);
9782+#ifdef CONFIG_X86_PAE
9783+ if (__supported_pte_mask & _PAGE_NX)
9784+ return pte_clear_flags(pte, _PAGE_NX);
9785+ else
9786+#endif
9787+ return pte_set_flags(pte, _PAGE_USER);
9788+}
9789+
9790+static inline pte_t pte_exprotect(pte_t pte)
9791+{
9792+#ifdef CONFIG_X86_PAE
9793+ if (__supported_pte_mask & _PAGE_NX)
9794+ return pte_set_flags(pte, _PAGE_NX);
9795+ else
9796+#endif
9797+ return pte_clear_flags(pte, _PAGE_USER);
9798 }
9799
9800 static inline pte_t pte_mkdirty(pte_t pte)
9801@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long
9802 #endif
9803
9804 #ifndef __ASSEMBLY__
9805+
9806+#ifdef CONFIG_PAX_PER_CPU_PGD
9807+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9808+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9809+{
9810+ return cpu_pgd[cpu];
9811+}
9812+#endif
9813+
9814 #include <linux/mm_types.h>
9815
9816 static inline int pte_none(pte_t pte)
9817@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *p
9818
9819 static inline int pgd_bad(pgd_t pgd)
9820 {
9821- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9822+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9823 }
9824
9825 static inline int pgd_none(pgd_t pgd)
9826@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
9827 * pgd_offset() returns a (pgd_t *)
9828 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9829 */
9830-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9831+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9832+
9833+#ifdef CONFIG_PAX_PER_CPU_PGD
9834+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9835+#endif
9836+
9837 /*
9838 * a shortcut which implies the use of the kernel's pgd, instead
9839 * of a process's
9840@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
9841 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9842 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9843
9844+#ifdef CONFIG_X86_32
9845+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9846+#else
9847+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9848+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9849+
9850+#ifdef CONFIG_PAX_MEMORY_UDEREF
9851+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9852+#else
9853+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9854+#endif
9855+
9856+#endif
9857+
9858 #ifndef __ASSEMBLY__
9859
9860 extern int direct_gbpages;
9861@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(st
9862 * dst and src can be on the same page, but the range must not overlap,
9863 * and must not cross a page boundary.
9864 */
9865-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9866+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9867 {
9868- memcpy(dst, src, count * sizeof(pgd_t));
9869+ pax_open_kernel();
9870+ while (count--)
9871+ *dst++ = *src++;
9872+ pax_close_kernel();
9873 }
9874
9875+#ifdef CONFIG_PAX_PER_CPU_PGD
9876+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9877+#endif
9878+
9879+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9880+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9881+#else
9882+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9883+#endif
9884
9885 #include <asm-generic/pgtable.h>
9886 #endif /* __ASSEMBLY__ */
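native_pax_open_kernel() disables preemption and clears CR0.WP so the kernel can write through mappings it otherwise treats as read-only; native_pax_close_kernel() restores the bit and re-enables preemption, and the BUG_ON checks catch unbalanced nesting. Elsewhere in this patch the pair simply brackets a single protected write (see native_set_pmd() in the pgtable_64.h hunk above); a kernel-context sketch of that calling pattern, with a hypothetical helper name:

/* Sketch of the pax_open_kernel()/pax_close_kernel() bracket around a
 * write to KERNEXEC-protected data.  update_protected_word() is a
 * hypothetical example, not part of this patch. */
static void update_protected_word(unsigned long *slot, unsigned long val)
{
	pax_open_kernel();	/* preempt_disable() + clear CR0.WP */
	*slot = val;		/* the one store that needs WP cleared */
	pax_close_kernel();	/* restore CR0.WP + preempt_enable_no_resched() */
}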
9887diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h
9888--- linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9889+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9890@@ -16,12 +16,11 @@
9891 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9892 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9893 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9894-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9895+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9896 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9897 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9898 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9899-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9900-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9901+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9902 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9903
9904 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9905@@ -39,7 +38,6 @@
9906 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9907 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9908 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9909-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9910 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9911 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9912 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9913@@ -55,8 +53,10 @@
9914
9915 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9916 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9917-#else
9918+#elif defined(CONFIG_KMEMCHECK)
9919 #define _PAGE_NX (_AT(pteval_t, 0))
9920+#else
9921+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9922 #endif
9923
9924 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9925@@ -93,6 +93,9 @@
9926 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9927 _PAGE_ACCESSED)
9928
9929+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9930+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9931+
9932 #define __PAGE_KERNEL_EXEC \
9933 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9934 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9935@@ -103,8 +106,8 @@
9936 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9937 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9938 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9939-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9940-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9941+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9942+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9943 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9944 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9945 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9946@@ -163,8 +166,8 @@
9947 * bits are combined, this will allow user to access the high address mapped
9948 * VDSO in the presence of CONFIG_COMPAT_VDSO
9949 */
9950-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9951-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9952+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9953+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9954 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9955 #endif
9956
9957@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9958 {
9959 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9960 }
9961+#endif
9962
9963+#if PAGETABLE_LEVELS == 3
9964+#include <asm-generic/pgtable-nopud.h>
9965+#endif
9966+
9967+#if PAGETABLE_LEVELS == 2
9968+#include <asm-generic/pgtable-nopmd.h>
9969+#endif
9970+
9971+#ifndef __ASSEMBLY__
9972 #if PAGETABLE_LEVELS > 3
9973 typedef struct { pudval_t pud; } pud_t;
9974
9975@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9976 return pud.pud;
9977 }
9978 #else
9979-#include <asm-generic/pgtable-nopud.h>
9980-
9981 static inline pudval_t native_pud_val(pud_t pud)
9982 {
9983 return native_pgd_val(pud.pgd);
9984@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9985 return pmd.pmd;
9986 }
9987 #else
9988-#include <asm-generic/pgtable-nopmd.h>
9989-
9990 static inline pmdval_t native_pmd_val(pmd_t pmd)
9991 {
9992 return native_pgd_val(pmd.pud.pgd);
9993@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9994
9995 extern pteval_t __supported_pte_mask;
9996 extern void set_nx(void);
9997+
9998+#ifdef CONFIG_X86_32
9999+#ifdef CONFIG_X86_PAE
10000 extern int nx_enabled;
10001+#else
10002+#define nx_enabled (0)
10003+#endif
10004+#else
10005+#define nx_enabled (1)
10006+#endif
10007
10008 #define pgprot_writecombine pgprot_writecombine
10009 extern pgprot_t pgprot_writecombine(pgprot_t prot);
10010diff -urNp linux-2.6.32.45/arch/x86/include/asm/processor.h linux-2.6.32.45/arch/x86/include/asm/processor.h
10011--- linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
10012+++ linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
10013@@ -272,7 +272,7 @@ struct tss_struct {
10014
10015 } ____cacheline_aligned;
10016
10017-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
10018+extern struct tss_struct init_tss[NR_CPUS];
10019
10020 /*
10021 * Save the original ist values for checking stack pointers during debugging
10022@@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
10023 */
10024 #define TASK_SIZE PAGE_OFFSET
10025 #define TASK_SIZE_MAX TASK_SIZE
10026+
10027+#ifdef CONFIG_PAX_SEGMEXEC
10028+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
10029+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
10030+#else
10031 #define STACK_TOP TASK_SIZE
10032-#define STACK_TOP_MAX STACK_TOP
10033+#endif
10034+
10035+#define STACK_TOP_MAX TASK_SIZE
10036
10037 #define INIT_THREAD { \
10038- .sp0 = sizeof(init_stack) + (long)&init_stack, \
10039+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10040 .vm86_info = NULL, \
10041 .sysenter_cs = __KERNEL_CS, \
10042 .io_bitmap_ptr = NULL, \
10043@@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
10044 */
10045 #define INIT_TSS { \
10046 .x86_tss = { \
10047- .sp0 = sizeof(init_stack) + (long)&init_stack, \
10048+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10049 .ss0 = __KERNEL_DS, \
10050 .ss1 = __KERNEL_CS, \
10051 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
10052@@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
10053 extern unsigned long thread_saved_pc(struct task_struct *tsk);
10054
10055 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
10056-#define KSTK_TOP(info) \
10057-({ \
10058- unsigned long *__ptr = (unsigned long *)(info); \
10059- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
10060-})
10061+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
10062
10063 /*
10064 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
10065@@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
10066 #define task_pt_regs(task) \
10067 ({ \
10068 struct pt_regs *__regs__; \
10069- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
10070+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
10071 __regs__ - 1; \
10072 })
10073
10074@@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
10075 /*
10076 * User space process size. 47bits minus one guard page.
10077 */
10078-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
10079+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
10080
10081 /* This decides where the kernel will search for a free chunk of vm
10082 * space during mmap's.
10083 */
10084 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
10085- 0xc0000000 : 0xFFFFe000)
10086+ 0xc0000000 : 0xFFFFf000)
10087
10088 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
10089 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
10090@@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
10091 #define STACK_TOP_MAX TASK_SIZE_MAX
10092
10093 #define INIT_THREAD { \
10094- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10095+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10096 }
10097
10098 #define INIT_TSS { \
10099- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10100+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10101 }
10102
10103 /*
10104@@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
10105 */
10106 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
10107
10108+#ifdef CONFIG_PAX_SEGMEXEC
10109+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
10110+#endif
10111+
10112 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
10113
10114 /* Get/set a process' ability to use the timestamp counter instruction */
10115diff -urNp linux-2.6.32.45/arch/x86/include/asm/ptrace.h linux-2.6.32.45/arch/x86/include/asm/ptrace.h
10116--- linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
10117+++ linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
10118@@ -151,28 +151,29 @@ static inline unsigned long regs_return_
10119 }
10120
10121 /*
10122- * user_mode_vm(regs) determines whether a register set came from user mode.
10123+ * user_mode(regs) determines whether a register set came from user mode.
10124 * This is true if V8086 mode was enabled OR if the register set was from
10125 * protected mode with RPL-3 CS value. This tricky test checks that with
10126 * one comparison. Many places in the kernel can bypass this full check
10127- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
10128+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
10129+ * be used.
10130 */
10131-static inline int user_mode(struct pt_regs *regs)
10132+static inline int user_mode_novm(struct pt_regs *regs)
10133 {
10134 #ifdef CONFIG_X86_32
10135 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
10136 #else
10137- return !!(regs->cs & 3);
10138+ return !!(regs->cs & SEGMENT_RPL_MASK);
10139 #endif
10140 }
10141
10142-static inline int user_mode_vm(struct pt_regs *regs)
10143+static inline int user_mode(struct pt_regs *regs)
10144 {
10145 #ifdef CONFIG_X86_32
10146 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
10147 USER_RPL;
10148 #else
10149- return user_mode(regs);
10150+ return user_mode_novm(regs);
10151 #endif
10152 }
10153
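The rename makes user_mode() the full test (RPL 3 or V8086 mode) and user_mode_novm() the cheaper RPL-only check for callers that have already excluded V8086. A standalone sketch of the combined 32-bit test, with the constants written out (they mirror the usual x86 definitions):

/* OR-ing the CS RPL bits with the EFLAGS VM bit lets a single comparison
 * cover both "RPL == 3" and "V8086 mode". */
#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL
#define USER_RPL         0x3UL
#define X86_VM_MASK      0x00020000UL	/* EFLAGS.VM */

static int came_from_user(unsigned long cs, unsigned long flags)
{
	return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
	printf("kernel, no VM86: %d\n", came_from_user(0x10, 0));	/* 0 */
	printf("user ring 3:     %d\n", came_from_user(0x73, 0));	/* 1 */
	printf("vm86 task:       %d\n", came_from_user(0x0, 0x20000));	/* 1 */
	return 0;
}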
10154diff -urNp linux-2.6.32.45/arch/x86/include/asm/reboot.h linux-2.6.32.45/arch/x86/include/asm/reboot.h
10155--- linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
10156+++ linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-08-05 20:33:55.000000000 -0400
10157@@ -6,19 +6,19 @@
10158 struct pt_regs;
10159
10160 struct machine_ops {
10161- void (*restart)(char *cmd);
10162- void (*halt)(void);
10163- void (*power_off)(void);
10164+ void (* __noreturn restart)(char *cmd);
10165+ void (* __noreturn halt)(void);
10166+ void (* __noreturn power_off)(void);
10167 void (*shutdown)(void);
10168 void (*crash_shutdown)(struct pt_regs *);
10169- void (*emergency_restart)(void);
10170-};
10171+ void (* __noreturn emergency_restart)(void);
10172+} __no_const;
10173
10174 extern struct machine_ops machine_ops;
10175
10176 void native_machine_crash_shutdown(struct pt_regs *regs);
10177 void native_machine_shutdown(void);
10178-void machine_real_restart(const unsigned char *code, int length);
10179+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
10180
10181 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
10182 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
10183diff -urNp linux-2.6.32.45/arch/x86/include/asm/rwsem.h linux-2.6.32.45/arch/x86/include/asm/rwsem.h
10184--- linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
10185+++ linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
10186@@ -118,6 +118,14 @@ static inline void __down_read(struct rw
10187 {
10188 asm volatile("# beginning down_read\n\t"
10189 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10190+
10191+#ifdef CONFIG_PAX_REFCOUNT
10192+ "jno 0f\n"
10193+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
10194+ "int $4\n0:\n"
10195+ _ASM_EXTABLE(0b, 0b)
10196+#endif
10197+
10198 /* adds 0x00000001, returns the old value */
10199 " jns 1f\n"
10200 " call call_rwsem_down_read_failed\n"
10201@@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
10202 "1:\n\t"
10203 " mov %1,%2\n\t"
10204 " add %3,%2\n\t"
10205+
10206+#ifdef CONFIG_PAX_REFCOUNT
10207+ "jno 0f\n"
10208+ "sub %3,%2\n"
10209+ "int $4\n0:\n"
10210+ _ASM_EXTABLE(0b, 0b)
10211+#endif
10212+
10213 " jle 2f\n\t"
10214 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10215 " jnz 1b\n\t"
10216@@ -160,6 +176,14 @@ static inline void __down_write_nested(s
10217 tmp = RWSEM_ACTIVE_WRITE_BIAS;
10218 asm volatile("# beginning down_write\n\t"
10219 LOCK_PREFIX " xadd %1,(%2)\n\t"
10220+
10221+#ifdef CONFIG_PAX_REFCOUNT
10222+ "jno 0f\n"
10223+ "mov %1,(%2)\n"
10224+ "int $4\n0:\n"
10225+ _ASM_EXTABLE(0b, 0b)
10226+#endif
10227+
10228 /* subtract 0x0000ffff, returns the old value */
10229 " test %1,%1\n\t"
10230 /* was the count 0 before? */
10231@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10232 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10233 asm volatile("# beginning __up_read\n\t"
10234 LOCK_PREFIX " xadd %1,(%2)\n\t"
10235+
10236+#ifdef CONFIG_PAX_REFCOUNT
10237+ "jno 0f\n"
10238+ "mov %1,(%2)\n"
10239+ "int $4\n0:\n"
10240+ _ASM_EXTABLE(0b, 0b)
10241+#endif
10242+
10243 /* subtracts 1, returns the old value */
10244 " jns 1f\n\t"
10245 " call call_rwsem_wake\n"
10246@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10247 rwsem_count_t tmp;
10248 asm volatile("# beginning __up_write\n\t"
10249 LOCK_PREFIX " xadd %1,(%2)\n\t"
10250+
10251+#ifdef CONFIG_PAX_REFCOUNT
10252+ "jno 0f\n"
10253+ "mov %1,(%2)\n"
10254+ "int $4\n0:\n"
10255+ _ASM_EXTABLE(0b, 0b)
10256+#endif
10257+
10258 /* tries to transition
10259 0xffff0001 -> 0x00000000 */
10260 " jz 1f\n"
10261@@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10262 {
10263 asm volatile("# beginning __downgrade_write\n\t"
10264 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10265+
10266+#ifdef CONFIG_PAX_REFCOUNT
10267+ "jno 0f\n"
10268+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10269+ "int $4\n0:\n"
10270+ _ASM_EXTABLE(0b, 0b)
10271+#endif
10272+
10273 /*
10274 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10275 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10276@@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10277 static inline void rwsem_atomic_add(rwsem_count_t delta,
10278 struct rw_semaphore *sem)
10279 {
10280- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10281+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10282+
10283+#ifdef CONFIG_PAX_REFCOUNT
10284+ "jno 0f\n"
10285+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10286+ "int $4\n0:\n"
10287+ _ASM_EXTABLE(0b, 0b)
10288+#endif
10289+
10290 : "+m" (sem->count)
10291 : "er" (delta));
10292 }
10293@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10294 {
10295 rwsem_count_t tmp = delta;
10296
10297- asm volatile(LOCK_PREFIX "xadd %0,%1"
10298+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10299+
10300+#ifdef CONFIG_PAX_REFCOUNT
10301+ "jno 0f\n"
10302+ "mov %0,%1\n"
10303+ "int $4\n0:\n"
10304+ _ASM_EXTABLE(0b, 0b)
10305+#endif
10306+
10307 : "+r" (tmp), "+m" (sem->count)
10308 : : "memory");
10309
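Every CONFIG_PAX_REFCOUNT block in this file follows one shape: perform the locked arithmetic, jno past the recovery code when the signed result did not overflow, otherwise undo the operation and execute int $4 so the #OF handler (reached through the _ASM_EXTABLE entry) can report the overflow. A sketch of the same idiom on a plain counter, assuming the usual x86 kernel headers for LOCK_PREFIX, _ASM_EXTABLE and atomic_t; the name atomic_inc_checked is only illustrative:

/* The jno / undo / int $4 / _ASM_EXTABLE idiom applied to an increment;
 * the rwsem and rwlock hunks wrap their xadd/add/sub the same way. */
static inline void atomic_inc_checked(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0\n"

#ifdef CONFIG_PAX_REFCOUNT
		     "jno 0f\n"			/* no signed overflow: skip */
		     LOCK_PREFIX "decl %0\n"	/* undo the increment */
		     "int $4\n0:\n"		/* raise #OF for the handler */
		     _ASM_EXTABLE(0b, 0b)
#endif

		     : "+m" (v->counter));
}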
10310diff -urNp linux-2.6.32.45/arch/x86/include/asm/segment.h linux-2.6.32.45/arch/x86/include/asm/segment.h
10311--- linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10312+++ linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10313@@ -62,8 +62,8 @@
10314 * 26 - ESPFIX small SS
10315 * 27 - per-cpu [ offset to per-cpu data area ]
10316 * 28 - stack_canary-20 [ for stack protector ]
10317- * 29 - unused
10318- * 30 - unused
10319+ * 29 - PCI BIOS CS
10320+ * 30 - PCI BIOS DS
10321 * 31 - TSS for double fault handler
10322 */
10323 #define GDT_ENTRY_TLS_MIN 6
10324@@ -77,6 +77,8 @@
10325
10326 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10327
10328+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10329+
10330 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10331
10332 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10333@@ -88,7 +90,7 @@
10334 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10335 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10336
10337-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10338+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10339 #ifdef CONFIG_SMP
10340 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10341 #else
10342@@ -102,6 +104,12 @@
10343 #define __KERNEL_STACK_CANARY 0
10344 #endif
10345
10346+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10347+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10348+
10349+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10350+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10351+
10352 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10353
10354 /*
10355@@ -139,7 +147,7 @@
10356 */
10357
10358 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10359-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10360+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10361
10362
10363 #else
10364@@ -163,6 +171,8 @@
10365 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10366 #define __USER32_DS __USER_DS
10367
10368+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10369+
10370 #define GDT_ENTRY_TSS 8 /* needs two entries */
10371 #define GDT_ENTRY_LDT 10 /* needs two entries */
10372 #define GDT_ENTRY_TLS_MIN 12
10373@@ -183,6 +193,7 @@
10374 #endif
10375
10376 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10377+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10378 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10379 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10380 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10381diff -urNp linux-2.6.32.45/arch/x86/include/asm/smp.h linux-2.6.32.45/arch/x86/include/asm/smp.h
10382--- linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10383+++ linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-08-05 20:33:55.000000000 -0400
10384@@ -24,7 +24,7 @@ extern unsigned int num_processors;
10385 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10386 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10387 DECLARE_PER_CPU(u16, cpu_llc_id);
10388-DECLARE_PER_CPU(int, cpu_number);
10389+DECLARE_PER_CPU(unsigned int, cpu_number);
10390
10391 static inline struct cpumask *cpu_sibling_mask(int cpu)
10392 {
10393@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10394 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10395
10396 /* Static state in head.S used to set up a CPU */
10397-extern struct {
10398- void *sp;
10399- unsigned short ss;
10400-} stack_start;
10401+extern unsigned long stack_start; /* Initial stack pointer address */
10402
10403 struct smp_ops {
10404 void (*smp_prepare_boot_cpu)(void);
10405@@ -60,7 +57,7 @@ struct smp_ops {
10406
10407 void (*send_call_func_ipi)(const struct cpumask *mask);
10408 void (*send_call_func_single_ipi)(int cpu);
10409-};
10410+} __no_const;
10411
10412 /* Globals due to paravirt */
10413 extern void set_cpu_sibling_map(int cpu);
10414@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10415 extern int safe_smp_processor_id(void);
10416
10417 #elif defined(CONFIG_X86_64_SMP)
10418-#define raw_smp_processor_id() (percpu_read(cpu_number))
10419-
10420-#define stack_smp_processor_id() \
10421-({ \
10422- struct thread_info *ti; \
10423- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10424- ti->cpu; \
10425-})
10426+#define raw_smp_processor_id() (percpu_read(cpu_number))
10427+#define stack_smp_processor_id() raw_smp_processor_id()
10428 #define safe_smp_processor_id() smp_processor_id()
10429
10430 #endif
10431diff -urNp linux-2.6.32.45/arch/x86/include/asm/spinlock.h linux-2.6.32.45/arch/x86/include/asm/spinlock.h
10432--- linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10433+++ linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10434@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10435 static inline void __raw_read_lock(raw_rwlock_t *rw)
10436 {
10437 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10438+
10439+#ifdef CONFIG_PAX_REFCOUNT
10440+ "jno 0f\n"
10441+ LOCK_PREFIX " addl $1,(%0)\n"
10442+ "int $4\n0:\n"
10443+ _ASM_EXTABLE(0b, 0b)
10444+#endif
10445+
10446 "jns 1f\n"
10447 "call __read_lock_failed\n\t"
10448 "1:\n"
10449@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10450 static inline void __raw_write_lock(raw_rwlock_t *rw)
10451 {
10452 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10453+
10454+#ifdef CONFIG_PAX_REFCOUNT
10455+ "jno 0f\n"
10456+ LOCK_PREFIX " addl %1,(%0)\n"
10457+ "int $4\n0:\n"
10458+ _ASM_EXTABLE(0b, 0b)
10459+#endif
10460+
10461 "jz 1f\n"
10462 "call __write_lock_failed\n\t"
10463 "1:\n"
10464@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10465
10466 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10467 {
10468- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10469+ asm volatile(LOCK_PREFIX "incl %0\n"
10470+
10471+#ifdef CONFIG_PAX_REFCOUNT
10472+ "jno 0f\n"
10473+ LOCK_PREFIX "decl %0\n"
10474+ "int $4\n0:\n"
10475+ _ASM_EXTABLE(0b, 0b)
10476+#endif
10477+
10478+ :"+m" (rw->lock) : : "memory");
10479 }
10480
10481 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10482 {
10483- asm volatile(LOCK_PREFIX "addl %1, %0"
10484+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
10485+
10486+#ifdef CONFIG_PAX_REFCOUNT
10487+ "jno 0f\n"
10488+ LOCK_PREFIX "subl %1, %0\n"
10489+ "int $4\n0:\n"
10490+ _ASM_EXTABLE(0b, 0b)
10491+#endif
10492+
10493 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10494 }
10495
10496diff -urNp linux-2.6.32.45/arch/x86/include/asm/stackprotector.h linux-2.6.32.45/arch/x86/include/asm/stackprotector.h
10497--- linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10498+++ linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10499@@ -48,7 +48,7 @@
10500 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10501 */
10502 #define GDT_STACK_CANARY_INIT \
10503- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10504+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10505
10506 /*
10507 * Initialize the stackprotector canary value.
10508@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10509
10510 static inline void load_stack_canary_segment(void)
10511 {
10512-#ifdef CONFIG_X86_32
10513+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10514 asm volatile ("mov %0, %%gs" : : "r" (0));
10515 #endif
10516 }
10517diff -urNp linux-2.6.32.45/arch/x86/include/asm/system.h linux-2.6.32.45/arch/x86/include/asm/system.h
10518--- linux-2.6.32.45/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10519+++ linux-2.6.32.45/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10520@@ -132,7 +132,7 @@ do { \
10521 "thread_return:\n\t" \
10522 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10523 __switch_canary \
10524- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10525+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10526 "movq %%rax,%%rdi\n\t" \
10527 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10528 "jnz ret_from_fork\n\t" \
10529@@ -143,7 +143,7 @@ do { \
10530 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10531 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10532 [_tif_fork] "i" (_TIF_FORK), \
10533- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10534+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
10535 [current_task] "m" (per_cpu_var(current_task)) \
10536 __switch_canary_iparam \
10537 : "memory", "cc" __EXTRA_CLOBBER)
10538@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10539 {
10540 unsigned long __limit;
10541 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10542- return __limit + 1;
10543+ return __limit;
10544 }
10545
10546 static inline void native_clts(void)
10547@@ -340,12 +340,12 @@ void enable_hlt(void);
10548
10549 void cpu_idle_wait(void);
10550
10551-extern unsigned long arch_align_stack(unsigned long sp);
10552+#define arch_align_stack(x) ((x) & ~0xfUL)
10553 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10554
10555 void default_idle(void);
10556
10557-void stop_this_cpu(void *dummy);
10558+void stop_this_cpu(void *dummy) __noreturn;
10559
10560 /*
10561 * Force strict CPU ordering.
10562diff -urNp linux-2.6.32.45/arch/x86/include/asm/thread_info.h linux-2.6.32.45/arch/x86/include/asm/thread_info.h
10563--- linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10564+++ linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10565@@ -10,6 +10,7 @@
10566 #include <linux/compiler.h>
10567 #include <asm/page.h>
10568 #include <asm/types.h>
10569+#include <asm/percpu.h>
10570
10571 /*
10572 * low level task data that entry.S needs immediate access to
10573@@ -24,7 +25,6 @@ struct exec_domain;
10574 #include <asm/atomic.h>
10575
10576 struct thread_info {
10577- struct task_struct *task; /* main task structure */
10578 struct exec_domain *exec_domain; /* execution domain */
10579 __u32 flags; /* low level flags */
10580 __u32 status; /* thread synchronous flags */
10581@@ -34,18 +34,12 @@ struct thread_info {
10582 mm_segment_t addr_limit;
10583 struct restart_block restart_block;
10584 void __user *sysenter_return;
10585-#ifdef CONFIG_X86_32
10586- unsigned long previous_esp; /* ESP of the previous stack in
10587- case of nested (IRQ) stacks
10588- */
10589- __u8 supervisor_stack[0];
10590-#endif
10591+ unsigned long lowest_stack;
10592 int uaccess_err;
10593 };
10594
10595-#define INIT_THREAD_INFO(tsk) \
10596+#define INIT_THREAD_INFO \
10597 { \
10598- .task = &tsk, \
10599 .exec_domain = &default_exec_domain, \
10600 .flags = 0, \
10601 .cpu = 0, \
10602@@ -56,7 +50,7 @@ struct thread_info {
10603 }, \
10604 }
10605
10606-#define init_thread_info (init_thread_union.thread_info)
10607+#define init_thread_info (init_thread_union.stack)
10608 #define init_stack (init_thread_union.stack)
10609
10610 #else /* !__ASSEMBLY__ */
10611@@ -163,6 +157,23 @@ struct thread_info {
10612 #define alloc_thread_info(tsk) \
10613 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10614
10615+#ifdef __ASSEMBLY__
10616+/* how to get the thread information struct from ASM */
10617+#define GET_THREAD_INFO(reg) \
10618+ mov PER_CPU_VAR(current_tinfo), reg
10619+
10620+/* use this one if reg already contains %esp */
10621+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10622+#else
10623+/* how to get the thread information struct from C */
10624+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10625+
10626+static __always_inline struct thread_info *current_thread_info(void)
10627+{
10628+ return percpu_read_stable(current_tinfo);
10629+}
10630+#endif
10631+
10632 #ifdef CONFIG_X86_32
10633
10634 #define STACK_WARN (THREAD_SIZE/8)
10635@@ -173,35 +184,13 @@ struct thread_info {
10636 */
10637 #ifndef __ASSEMBLY__
10638
10639-
10640 /* how to get the current stack pointer from C */
10641 register unsigned long current_stack_pointer asm("esp") __used;
10642
10643-/* how to get the thread information struct from C */
10644-static inline struct thread_info *current_thread_info(void)
10645-{
10646- return (struct thread_info *)
10647- (current_stack_pointer & ~(THREAD_SIZE - 1));
10648-}
10649-
10650-#else /* !__ASSEMBLY__ */
10651-
10652-/* how to get the thread information struct from ASM */
10653-#define GET_THREAD_INFO(reg) \
10654- movl $-THREAD_SIZE, reg; \
10655- andl %esp, reg
10656-
10657-/* use this one if reg already contains %esp */
10658-#define GET_THREAD_INFO_WITH_ESP(reg) \
10659- andl $-THREAD_SIZE, reg
10660-
10661 #endif
10662
10663 #else /* X86_32 */
10664
10665-#include <asm/percpu.h>
10666-#define KERNEL_STACK_OFFSET (5*8)
10667-
10668 /*
10669 * macros/functions for gaining access to the thread information structure
10670 * preempt_count needs to be 1 initially, until the scheduler is functional.
10671@@ -209,21 +198,8 @@ static inline struct thread_info *curren
10672 #ifndef __ASSEMBLY__
10673 DECLARE_PER_CPU(unsigned long, kernel_stack);
10674
10675-static inline struct thread_info *current_thread_info(void)
10676-{
10677- struct thread_info *ti;
10678- ti = (void *)(percpu_read_stable(kernel_stack) +
10679- KERNEL_STACK_OFFSET - THREAD_SIZE);
10680- return ti;
10681-}
10682-
10683-#else /* !__ASSEMBLY__ */
10684-
10685-/* how to get the thread information struct from ASM */
10686-#define GET_THREAD_INFO(reg) \
10687- movq PER_CPU_VAR(kernel_stack),reg ; \
10688- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10689-
10690+/* how to get the current stack pointer from C */
10691+register unsigned long current_stack_pointer asm("rsp") __used;
10692 #endif
10693
10694 #endif /* !X86_32 */
10695@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10696 extern void free_thread_info(struct thread_info *ti);
10697 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10698 #define arch_task_cache_init arch_task_cache_init
10699+
10700+#define __HAVE_THREAD_FUNCTIONS
10701+#define task_thread_info(task) (&(task)->tinfo)
10702+#define task_stack_page(task) ((task)->stack)
10703+#define setup_thread_stack(p, org) do {} while (0)
10704+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10705+
10706+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10707+extern struct task_struct *alloc_task_struct(void);
10708+extern void free_task_struct(struct task_struct *);
10709+
10710 #endif
10711 #endif /* _ASM_X86_THREAD_INFO_H */
10712diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h
10713--- linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10714+++ linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10715@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10716 static __always_inline unsigned long __must_check
10717 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10718 {
10719+ pax_track_stack();
10720+
10721+ if ((long)n < 0)
10722+ return n;
10723+
10724 if (__builtin_constant_p(n)) {
10725 unsigned long ret;
10726
10727@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10728 return ret;
10729 }
10730 }
10731+ if (!__builtin_constant_p(n))
10732+ check_object_size(from, n, true);
10733 return __copy_to_user_ll(to, from, n);
10734 }
10735
10736@@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10737 __copy_to_user(void __user *to, const void *from, unsigned long n)
10738 {
10739 might_fault();
10740+
10741 return __copy_to_user_inatomic(to, from, n);
10742 }
10743
10744 static __always_inline unsigned long
10745 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10746 {
10747+ if ((long)n < 0)
10748+ return n;
10749+
10750 /* Avoid zeroing the tail if the copy fails..
10751 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10752 * but as the zeroing behaviour is only significant when n is not
10753@@ -138,6 +149,12 @@ static __always_inline unsigned long
10754 __copy_from_user(void *to, const void __user *from, unsigned long n)
10755 {
10756 might_fault();
10757+
10758+ pax_track_stack();
10759+
10760+ if ((long)n < 0)
10761+ return n;
10762+
10763 if (__builtin_constant_p(n)) {
10764 unsigned long ret;
10765
10766@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10767 return ret;
10768 }
10769 }
10770+ if (!__builtin_constant_p(n))
10771+ check_object_size(to, n, false);
10772 return __copy_from_user_ll(to, from, n);
10773 }
10774
10775@@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10776 const void __user *from, unsigned long n)
10777 {
10778 might_fault();
10779+
10780+ if ((long)n < 0)
10781+ return n;
10782+
10783 if (__builtin_constant_p(n)) {
10784 unsigned long ret;
10785
10786@@ -182,14 +205,62 @@ static __always_inline unsigned long
10787 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10788 unsigned long n)
10789 {
10790- return __copy_from_user_ll_nocache_nozero(to, from, n);
10791+ if ((long)n < 0)
10792+ return n;
10793+
10794+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10795+}
10796+
10797+/**
10798+ * copy_to_user: - Copy a block of data into user space.
10799+ * @to: Destination address, in user space.
10800+ * @from: Source address, in kernel space.
10801+ * @n: Number of bytes to copy.
10802+ *
10803+ * Context: User context only. This function may sleep.
10804+ *
10805+ * Copy data from kernel space to user space.
10806+ *
10807+ * Returns number of bytes that could not be copied.
10808+ * On success, this will be zero.
10809+ */
10810+static __always_inline unsigned long __must_check
10811+copy_to_user(void __user *to, const void *from, unsigned long n)
10812+{
10813+ if (access_ok(VERIFY_WRITE, to, n))
10814+ n = __copy_to_user(to, from, n);
10815+ return n;
10816+}
10817+
10818+/**
10819+ * copy_from_user: - Copy a block of data from user space.
10820+ * @to: Destination address, in kernel space.
10821+ * @from: Source address, in user space.
10822+ * @n: Number of bytes to copy.
10823+ *
10824+ * Context: User context only. This function may sleep.
10825+ *
10826+ * Copy data from user space to kernel space.
10827+ *
10828+ * Returns number of bytes that could not be copied.
10829+ * On success, this will be zero.
10830+ *
10831+ * If some data could not be copied, this function will pad the copied
10832+ * data to the requested size using zero bytes.
10833+ */
10834+static __always_inline unsigned long __must_check
10835+copy_from_user(void *to, const void __user *from, unsigned long n)
10836+{
10837+ if (access_ok(VERIFY_READ, from, n))
10838+ n = __copy_from_user(to, from, n);
10839+ else if ((long)n > 0) {
10840+ if (!__builtin_constant_p(n))
10841+ check_object_size(to, n, false);
10842+ memset(to, 0, n);
10843+ }
10844+ return n;
10845 }
10846
10847-unsigned long __must_check copy_to_user(void __user *to,
10848- const void *from, unsigned long n);
10849-unsigned long __must_check copy_from_user(void *to,
10850- const void __user *from,
10851- unsigned long n);
10852 long __must_check strncpy_from_user(char *dst, const char __user *src,
10853 long count);
10854 long __must_check __strncpy_from_user(char *dst,
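The 32-bit copy_to_user()/copy_from_user() are now inlines that reject negative sizes, verify the range with access_ok(), and zero the kernel destination when the user range is inaccessible, while keeping the usual contract of returning the number of bytes that could not be copied. A hypothetical caller, not taken from this patch, showing that contract:

/* Hypothetical ioctl-style handler: a non-zero return from copy_from_user()
 * means the copy was partial and the uncopied tail was zero-filled. */
struct demo_req {
	__u32 flags;
	__u64 addr;
};

static long demo_set_params(void __user *argp)
{
	struct demo_req req;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;		/* some bytes were not copied */

	/* ... validate req.flags and act on req.addr ... */
	return 0;
}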
10855diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h
10856--- linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10857+++ linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10858@@ -9,6 +9,9 @@
10859 #include <linux/prefetch.h>
10860 #include <linux/lockdep.h>
10861 #include <asm/page.h>
10862+#include <asm/pgtable.h>
10863+
10864+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10865
10866 /*
10867 * Copy To/From Userspace
10868@@ -19,113 +22,203 @@ __must_check unsigned long
10869 copy_user_generic(void *to, const void *from, unsigned len);
10870
10871 __must_check unsigned long
10872-copy_to_user(void __user *to, const void *from, unsigned len);
10873-__must_check unsigned long
10874-copy_from_user(void *to, const void __user *from, unsigned len);
10875-__must_check unsigned long
10876 copy_in_user(void __user *to, const void __user *from, unsigned len);
10877
10878 static __always_inline __must_check
10879-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10880+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10881 {
10882- int ret = 0;
10883+ unsigned ret = 0;
10884
10885 might_fault();
10886- if (!__builtin_constant_p(size))
10887- return copy_user_generic(dst, (__force void *)src, size);
10888+
10889+ if ((int)size < 0)
10890+ return size;
10891+
10892+#ifdef CONFIG_PAX_MEMORY_UDEREF
10893+ if (!__access_ok(VERIFY_READ, src, size))
10894+ return size;
10895+#endif
10896+
10897+ if (!__builtin_constant_p(size)) {
10898+ check_object_size(dst, size, false);
10899+
10900+#ifdef CONFIG_PAX_MEMORY_UDEREF
10901+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10902+ src += PAX_USER_SHADOW_BASE;
10903+#endif
10904+
10905+ return copy_user_generic(dst, (__force const void *)src, size);
10906+ }
10907 switch (size) {
10908- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10909+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10910 ret, "b", "b", "=q", 1);
10911 return ret;
10912- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10913+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10914 ret, "w", "w", "=r", 2);
10915 return ret;
10916- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10917+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10918 ret, "l", "k", "=r", 4);
10919 return ret;
10920- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10921+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10922 ret, "q", "", "=r", 8);
10923 return ret;
10924 case 10:
10925- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10926+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10927 ret, "q", "", "=r", 10);
10928 if (unlikely(ret))
10929 return ret;
10930 __get_user_asm(*(u16 *)(8 + (char *)dst),
10931- (u16 __user *)(8 + (char __user *)src),
10932+ (const u16 __user *)(8 + (const char __user *)src),
10933 ret, "w", "w", "=r", 2);
10934 return ret;
10935 case 16:
10936- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10937+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10938 ret, "q", "", "=r", 16);
10939 if (unlikely(ret))
10940 return ret;
10941 __get_user_asm(*(u64 *)(8 + (char *)dst),
10942- (u64 __user *)(8 + (char __user *)src),
10943+ (const u64 __user *)(8 + (const char __user *)src),
10944 ret, "q", "", "=r", 8);
10945 return ret;
10946 default:
10947- return copy_user_generic(dst, (__force void *)src, size);
10948+
10949+#ifdef CONFIG_PAX_MEMORY_UDEREF
10950+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10951+ src += PAX_USER_SHADOW_BASE;
10952+#endif
10953+
10954+ return copy_user_generic(dst, (__force const void *)src, size);
10955 }
10956 }
10957
10958 static __always_inline __must_check
10959-int __copy_to_user(void __user *dst, const void *src, unsigned size)
10960+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10961 {
10962- int ret = 0;
10963+ unsigned ret = 0;
10964
10965 might_fault();
10966- if (!__builtin_constant_p(size))
10967+
10968+ pax_track_stack();
10969+
10970+ if ((int)size < 0)
10971+ return size;
10972+
10973+#ifdef CONFIG_PAX_MEMORY_UDEREF
10974+ if (!__access_ok(VERIFY_WRITE, dst, size))
10975+ return size;
10976+#endif
10977+
10978+ if (!__builtin_constant_p(size)) {
10979+ check_object_size(src, size, true);
10980+
10981+#ifdef CONFIG_PAX_MEMORY_UDEREF
10982+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10983+ dst += PAX_USER_SHADOW_BASE;
10984+#endif
10985+
10986 return copy_user_generic((__force void *)dst, src, size);
10987+ }
10988 switch (size) {
10989- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10990+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10991 ret, "b", "b", "iq", 1);
10992 return ret;
10993- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10994+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10995 ret, "w", "w", "ir", 2);
10996 return ret;
10997- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10998+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10999 ret, "l", "k", "ir", 4);
11000 return ret;
11001- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11002+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11003 ret, "q", "", "er", 8);
11004 return ret;
11005 case 10:
11006- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11007+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11008 ret, "q", "", "er", 10);
11009 if (unlikely(ret))
11010 return ret;
11011 asm("":::"memory");
11012- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11013+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11014 ret, "w", "w", "ir", 2);
11015 return ret;
11016 case 16:
11017- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11018+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11019 ret, "q", "", "er", 16);
11020 if (unlikely(ret))
11021 return ret;
11022 asm("":::"memory");
11023- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11024+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11025 ret, "q", "", "er", 8);
11026 return ret;
11027 default:
11028+
11029+#ifdef CONFIG_PAX_MEMORY_UDEREF
11030+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11031+ dst += PAX_USER_SHADOW_BASE;
11032+#endif
11033+
11034 return copy_user_generic((__force void *)dst, src, size);
11035 }
11036 }
11037
11038 static __always_inline __must_check
11039-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11040+unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
11041+{
11042+ if (access_ok(VERIFY_WRITE, to, len))
11043+ len = __copy_to_user(to, from, len);
11044+ return len;
11045+}
11046+
11047+static __always_inline __must_check
11048+unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
11049+{
11050+ if ((int)len < 0)
11051+ return len;
11052+
11053+ if (access_ok(VERIFY_READ, from, len))
11054+ len = __copy_from_user(to, from, len);
11055+ else if ((int)len > 0) {
11056+ if (!__builtin_constant_p(len))
11057+ check_object_size(to, len, false);
11058+ memset(to, 0, len);
11059+ }
11060+ return len;
11061+}
11062+
11063+static __always_inline __must_check
11064+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11065 {
11066- int ret = 0;
11067+ unsigned ret = 0;
11068
11069 might_fault();
11070- if (!__builtin_constant_p(size))
11071+
11072+ pax_track_stack();
11073+
11074+ if ((int)size < 0)
11075+ return size;
11076+
11077+#ifdef CONFIG_PAX_MEMORY_UDEREF
11078+ if (!__access_ok(VERIFY_READ, src, size))
11079+ return size;
11080+ if (!__access_ok(VERIFY_WRITE, dst, size))
11081+ return size;
11082+#endif
11083+
11084+ if (!__builtin_constant_p(size)) {
11085+
11086+#ifdef CONFIG_PAX_MEMORY_UDEREF
11087+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11088+ src += PAX_USER_SHADOW_BASE;
11089+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11090+ dst += PAX_USER_SHADOW_BASE;
11091+#endif
11092+
11093 return copy_user_generic((__force void *)dst,
11094- (__force void *)src, size);
11095+ (__force const void *)src, size);
11096+ }
11097 switch (size) {
11098 case 1: {
11099 u8 tmp;
11100- __get_user_asm(tmp, (u8 __user *)src,
11101+ __get_user_asm(tmp, (const u8 __user *)src,
11102 ret, "b", "b", "=q", 1);
11103 if (likely(!ret))
11104 __put_user_asm(tmp, (u8 __user *)dst,
11105@@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
11106 }
11107 case 2: {
11108 u16 tmp;
11109- __get_user_asm(tmp, (u16 __user *)src,
11110+ __get_user_asm(tmp, (const u16 __user *)src,
11111 ret, "w", "w", "=r", 2);
11112 if (likely(!ret))
11113 __put_user_asm(tmp, (u16 __user *)dst,
11114@@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
11115
11116 case 4: {
11117 u32 tmp;
11118- __get_user_asm(tmp, (u32 __user *)src,
11119+ __get_user_asm(tmp, (const u32 __user *)src,
11120 ret, "l", "k", "=r", 4);
11121 if (likely(!ret))
11122 __put_user_asm(tmp, (u32 __user *)dst,
11123@@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
11124 }
11125 case 8: {
11126 u64 tmp;
11127- __get_user_asm(tmp, (u64 __user *)src,
11128+ __get_user_asm(tmp, (const u64 __user *)src,
11129 ret, "q", "", "=r", 8);
11130 if (likely(!ret))
11131 __put_user_asm(tmp, (u64 __user *)dst,
11132@@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
11133 return ret;
11134 }
11135 default:
11136+
11137+#ifdef CONFIG_PAX_MEMORY_UDEREF
11138+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11139+ src += PAX_USER_SHADOW_BASE;
11140+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11141+ dst += PAX_USER_SHADOW_BASE;
11142+#endif
11143+
11144 return copy_user_generic((__force void *)dst,
11145- (__force void *)src, size);
11146+ (__force const void *)src, size);
11147 }
11148 }
11149
11150@@ -176,33 +277,75 @@ __must_check long strlen_user(const char
11151 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11152 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11153
11154-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
11155- unsigned size);
11156+static __must_check __always_inline unsigned long
11157+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11158+{
11159+ pax_track_stack();
11160+
11161+ if ((int)size < 0)
11162+ return size;
11163
11164-static __must_check __always_inline int
11165+#ifdef CONFIG_PAX_MEMORY_UDEREF
11166+ if (!__access_ok(VERIFY_READ, src, size))
11167+ return size;
11168+
11169+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11170+ src += PAX_USER_SHADOW_BASE;
11171+#endif
11172+
11173+ return copy_user_generic(dst, (__force const void *)src, size);
11174+}
11175+
11176+static __must_check __always_inline unsigned long
11177 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11178 {
11179+ if ((int)size < 0)
11180+ return size;
11181+
11182+#ifdef CONFIG_PAX_MEMORY_UDEREF
11183+ if (!__access_ok(VERIFY_WRITE, dst, size))
11184+ return size;
11185+
11186+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11187+ dst += PAX_USER_SHADOW_BASE;
11188+#endif
11189+
11190 return copy_user_generic((__force void *)dst, src, size);
11191 }
11192
11193-extern long __copy_user_nocache(void *dst, const void __user *src,
11194+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11195 unsigned size, int zerorest);
11196
11197-static inline int
11198-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11199+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11200 {
11201 might_sleep();
11202+
11203+ if ((int)size < 0)
11204+ return size;
11205+
11206+#ifdef CONFIG_PAX_MEMORY_UDEREF
11207+ if (!__access_ok(VERIFY_READ, src, size))
11208+ return size;
11209+#endif
11210+
11211 return __copy_user_nocache(dst, src, size, 1);
11212 }
11213
11214-static inline int
11215-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11216+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11217 unsigned size)
11218 {
11219+ if ((int)size < 0)
11220+ return size;
11221+
11222+#ifdef CONFIG_PAX_MEMORY_UDEREF
11223+ if (!__access_ok(VERIFY_READ, src, size))
11224+ return size;
11225+#endif
11226+
11227 return __copy_user_nocache(dst, src, size, 0);
11228 }
11229
11230-unsigned long
11231+extern unsigned long
11232 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11233
11234 #endif /* _ASM_X86_UACCESS_64_H */
11235diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess.h linux-2.6.32.45/arch/x86/include/asm/uaccess.h
11236--- linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
11237+++ linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
11238@@ -8,12 +8,15 @@
11239 #include <linux/thread_info.h>
11240 #include <linux/prefetch.h>
11241 #include <linux/string.h>
11242+#include <linux/sched.h>
11243 #include <asm/asm.h>
11244 #include <asm/page.h>
11245
11246 #define VERIFY_READ 0
11247 #define VERIFY_WRITE 1
11248
11249+extern void check_object_size(const void *ptr, unsigned long n, bool to);
11250+
11251 /*
11252 * The fs value determines whether argument validity checking should be
11253 * performed or not. If get_fs() == USER_DS, checking is performed, with
11254@@ -29,7 +32,12 @@
11255
11256 #define get_ds() (KERNEL_DS)
11257 #define get_fs() (current_thread_info()->addr_limit)
11258+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11259+void __set_fs(mm_segment_t x);
11260+void set_fs(mm_segment_t x);
11261+#else
11262 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11263+#endif
11264
11265 #define segment_eq(a, b) ((a).seg == (b).seg)
11266
11267@@ -77,7 +85,33 @@
11268 * checks that the pointer is in the user space range - after calling
11269 * this function, memory access functions may still return -EFAULT.
11270 */
11271-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11272+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11273+#define access_ok(type, addr, size) \
11274+({ \
11275+ long __size = size; \
11276+ unsigned long __addr = (unsigned long)addr; \
11277+ unsigned long __addr_ao = __addr & PAGE_MASK; \
11278+ unsigned long __end_ao = __addr + __size - 1; \
11279+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11280+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11281+ while(__addr_ao <= __end_ao) { \
11282+ char __c_ao; \
11283+ __addr_ao += PAGE_SIZE; \
11284+ if (__size > PAGE_SIZE) \
11285+ cond_resched(); \
11286+ if (__get_user(__c_ao, (char __user *)__addr)) \
11287+ break; \
11288+ if (type != VERIFY_WRITE) { \
11289+ __addr = __addr_ao; \
11290+ continue; \
11291+ } \
11292+ if (__put_user(__c_ao, (char __user *)__addr)) \
11293+ break; \
11294+ __addr = __addr_ao; \
11295+ } \
11296+ } \
11297+ __ret_ao; \
11298+})
11299
11300 /*
11301 * The exception table consists of pairs of addresses: the first is the
11302@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11303 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11304 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11305
11306-
11307+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11308+#define __copyuser_seg "gs;"
11309+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11310+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11311+#else
11312+#define __copyuser_seg
11313+#define __COPYUSER_SET_ES
11314+#define __COPYUSER_RESTORE_ES
11315+#endif
11316
11317 #ifdef CONFIG_X86_32
11318 #define __put_user_asm_u64(x, addr, err, errret) \
11319- asm volatile("1: movl %%eax,0(%2)\n" \
11320- "2: movl %%edx,4(%2)\n" \
11321+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11322+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11323 "3:\n" \
11324 ".section .fixup,\"ax\"\n" \
11325 "4: movl %3,%0\n" \
11326@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11327 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11328
11329 #define __put_user_asm_ex_u64(x, addr) \
11330- asm volatile("1: movl %%eax,0(%1)\n" \
11331- "2: movl %%edx,4(%1)\n" \
11332+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11333+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11334 "3:\n" \
11335 _ASM_EXTABLE(1b, 2b - 1b) \
11336 _ASM_EXTABLE(2b, 3b - 2b) \
11337@@ -374,7 +416,7 @@ do { \
11338 } while (0)
11339
11340 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11341- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11342+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11343 "2:\n" \
11344 ".section .fixup,\"ax\"\n" \
11345 "3: mov %3,%0\n" \
11346@@ -382,7 +424,7 @@ do { \
11347 " jmp 2b\n" \
11348 ".previous\n" \
11349 _ASM_EXTABLE(1b, 3b) \
11350- : "=r" (err), ltype(x) \
11351+ : "=r" (err), ltype (x) \
11352 : "m" (__m(addr)), "i" (errret), "0" (err))
11353
11354 #define __get_user_size_ex(x, ptr, size) \
11355@@ -407,7 +449,7 @@ do { \
11356 } while (0)
11357
11358 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11359- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11360+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11361 "2:\n" \
11362 _ASM_EXTABLE(1b, 2b - 1b) \
11363 : ltype(x) : "m" (__m(addr)))
11364@@ -424,13 +466,24 @@ do { \
11365 int __gu_err; \
11366 unsigned long __gu_val; \
11367 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11368- (x) = (__force __typeof__(*(ptr)))__gu_val; \
11369+ (x) = (__typeof__(*(ptr)))__gu_val; \
11370 __gu_err; \
11371 })
11372
11373 /* FIXME: this hack is definitely wrong -AK */
11374 struct __large_struct { unsigned long buf[100]; };
11375-#define __m(x) (*(struct __large_struct __user *)(x))
11376+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11377+#define ____m(x) \
11378+({ \
11379+ unsigned long ____x = (unsigned long)(x); \
11380+ if (____x < PAX_USER_SHADOW_BASE) \
11381+ ____x += PAX_USER_SHADOW_BASE; \
11382+ (void __user *)____x; \
11383+})
11384+#else
11385+#define ____m(x) (x)
11386+#endif
11387+#define __m(x) (*(struct __large_struct __user *)____m(x))
11388
11389 /*
11390 * Tell gcc we read from memory instead of writing: this is because
11391@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11392 * aliasing issues.
11393 */
11394 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11395- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11396+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11397 "2:\n" \
11398 ".section .fixup,\"ax\"\n" \
11399 "3: mov %3,%0\n" \
11400@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11401 ".previous\n" \
11402 _ASM_EXTABLE(1b, 3b) \
11403 : "=r"(err) \
11404- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11405+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11406
11407 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11408- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11409+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11410 "2:\n" \
11411 _ASM_EXTABLE(1b, 2b - 1b) \
11412 : : ltype(x), "m" (__m(addr)))
11413@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11414 * On error, the variable @x is set to zero.
11415 */
11416
11417+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11418+#define __get_user(x, ptr) get_user((x), (ptr))
11419+#else
11420 #define __get_user(x, ptr) \
11421 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11422+#endif
11423
11424 /**
11425 * __put_user: - Write a simple value into user space, with less checking.
11426@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11427 * Returns zero on success, or -EFAULT on error.
11428 */
11429
11430+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11431+#define __put_user(x, ptr) put_user((x), (ptr))
11432+#else
11433 #define __put_user(x, ptr) \
11434 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11435+#endif
11436
11437 #define __get_user_unaligned __get_user
11438 #define __put_user_unaligned __put_user
11439@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11440 #define get_user_ex(x, ptr) do { \
11441 unsigned long __gue_val; \
11442 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11443- (x) = (__force __typeof__(*(ptr)))__gue_val; \
11444+ (x) = (__typeof__(*(ptr)))__gue_val; \
11445 } while (0)
11446
11447 #ifdef CONFIG_X86_WP_WORKS_OK
11448@@ -567,6 +628,7 @@ extern struct movsl_mask {
11449
11450 #define ARCH_HAS_NOCACHE_UACCESS 1
11451
11452+#define ARCH_HAS_SORT_EXTABLE
11453 #ifdef CONFIG_X86_32
11454 # include "uaccess_32.h"
11455 #else
11456diff -urNp linux-2.6.32.45/arch/x86/include/asm/vgtod.h linux-2.6.32.45/arch/x86/include/asm/vgtod.h
11457--- linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11458+++ linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11459@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11460 int sysctl_enabled;
11461 struct timezone sys_tz;
11462 struct { /* extract of a clocksource struct */
11463+ char name[8];
11464 cycle_t (*vread)(void);
11465 cycle_t cycle_last;
11466 cycle_t mask;
11467diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi.h linux-2.6.32.45/arch/x86/include/asm/vmi.h
11468--- linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11469+++ linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11470@@ -191,6 +191,7 @@ struct vrom_header {
11471 u8 reserved[96]; /* Reserved for headers */
11472 char vmi_init[8]; /* VMI_Init jump point */
11473 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11474+ char rom_data[8048]; /* rest of the option ROM */
11475 } __attribute__((packed));
11476
11477 struct pnp_header {
11478diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi_time.h linux-2.6.32.45/arch/x86/include/asm/vmi_time.h
11479--- linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-03-27 14:31:47.000000000 -0400
11480+++ linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-08-05 20:33:55.000000000 -0400
11481@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
11482 int (*wallclock_updated)(void);
11483 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
11484 void (*cancel_alarm)(u32 flags);
11485-} vmi_timer_ops;
11486+} __no_const vmi_timer_ops;
11487
11488 /* Prototypes */
11489 extern void __init vmi_time_init(void);
11490diff -urNp linux-2.6.32.45/arch/x86/include/asm/vsyscall.h linux-2.6.32.45/arch/x86/include/asm/vsyscall.h
11491--- linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11492+++ linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11493@@ -15,9 +15,10 @@ enum vsyscall_num {
11494
11495 #ifdef __KERNEL__
11496 #include <linux/seqlock.h>
11497+#include <linux/getcpu.h>
11498+#include <linux/time.h>
11499
11500 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11501-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11502
11503 /* Definitions for CONFIG_GENERIC_TIME definitions */
11504 #define __section_vsyscall_gtod_data __attribute__ \
11505@@ -31,7 +32,6 @@ enum vsyscall_num {
11506 #define VGETCPU_LSL 2
11507
11508 extern int __vgetcpu_mode;
11509-extern volatile unsigned long __jiffies;
11510
11511 /* kernel space (writeable) */
11512 extern int vgetcpu_mode;
11513@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11514
11515 extern void map_vsyscall(void);
11516
11517+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11518+extern time_t vtime(time_t *t);
11519+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11520 #endif /* __KERNEL__ */
11521
11522 #endif /* _ASM_X86_VSYSCALL_H */
11523diff -urNp linux-2.6.32.45/arch/x86/include/asm/x86_init.h linux-2.6.32.45/arch/x86/include/asm/x86_init.h
11524--- linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-03-27 14:31:47.000000000 -0400
11525+++ linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-08-05 20:33:55.000000000 -0400
11526@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11527 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11528 void (*find_smp_config)(unsigned int reserve);
11529 void (*get_smp_config)(unsigned int early);
11530-};
11531+} __no_const;
11532
11533 /**
11534 * struct x86_init_resources - platform specific resource related ops
11535@@ -42,7 +42,7 @@ struct x86_init_resources {
11536 void (*probe_roms)(void);
11537 void (*reserve_resources)(void);
11538 char *(*memory_setup)(void);
11539-};
11540+} __no_const;
11541
11542 /**
11543 * struct x86_init_irqs - platform specific interrupt setup
11544@@ -55,7 +55,7 @@ struct x86_init_irqs {
11545 void (*pre_vector_init)(void);
11546 void (*intr_init)(void);
11547 void (*trap_init)(void);
11548-};
11549+} __no_const;
11550
11551 /**
11552 * struct x86_init_oem - oem platform specific customizing functions
11553@@ -65,7 +65,7 @@ struct x86_init_irqs {
11554 struct x86_init_oem {
11555 void (*arch_setup)(void);
11556 void (*banner)(void);
11557-};
11558+} __no_const;
11559
11560 /**
11561 * struct x86_init_paging - platform specific paging functions
11562@@ -75,7 +75,7 @@ struct x86_init_oem {
11563 struct x86_init_paging {
11564 void (*pagetable_setup_start)(pgd_t *base);
11565 void (*pagetable_setup_done)(pgd_t *base);
11566-};
11567+} __no_const;
11568
11569 /**
11570 * struct x86_init_timers - platform specific timer setup
11571@@ -88,7 +88,7 @@ struct x86_init_timers {
11572 void (*setup_percpu_clockev)(void);
11573 void (*tsc_pre_init)(void);
11574 void (*timer_init)(void);
11575-};
11576+} __no_const;
11577
11578 /**
11579 * struct x86_init_ops - functions for platform specific setup
11580@@ -101,7 +101,7 @@ struct x86_init_ops {
11581 struct x86_init_oem oem;
11582 struct x86_init_paging paging;
11583 struct x86_init_timers timers;
11584-};
11585+} __no_const;
11586
11587 /**
11588 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11589@@ -109,7 +109,7 @@ struct x86_init_ops {
11590 */
11591 struct x86_cpuinit_ops {
11592 void (*setup_percpu_clockev)(void);
11593-};
11594+} __no_const;
11595
11596 /**
11597 * struct x86_platform_ops - platform specific runtime functions
11598@@ -121,7 +121,7 @@ struct x86_platform_ops {
11599 unsigned long (*calibrate_tsc)(void);
11600 unsigned long (*get_wallclock)(void);
11601 int (*set_wallclock)(unsigned long nowtime);
11602-};
11603+} __no_const;
11604
11605 extern struct x86_init_ops x86_init;
11606 extern struct x86_cpuinit_ops x86_cpuinit;
11607diff -urNp linux-2.6.32.45/arch/x86/include/asm/xsave.h linux-2.6.32.45/arch/x86/include/asm/xsave.h
11608--- linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11609+++ linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11610@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11611 static inline int xsave_user(struct xsave_struct __user *buf)
11612 {
11613 int err;
11614+
11615+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11616+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11617+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11618+#endif
11619+
11620 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11621 "2:\n"
11622 ".section .fixup,\"ax\"\n"
11623@@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11624 u32 lmask = mask;
11625 u32 hmask = mask >> 32;
11626
11627+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11628+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11629+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11630+#endif
11631+
11632 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11633 "2:\n"
11634 ".section .fixup,\"ax\"\n"
11635diff -urNp linux-2.6.32.45/arch/x86/Kconfig linux-2.6.32.45/arch/x86/Kconfig
11636--- linux-2.6.32.45/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11637+++ linux-2.6.32.45/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11638@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11639
11640 config X86_32_LAZY_GS
11641 def_bool y
11642- depends on X86_32 && !CC_STACKPROTECTOR
11643+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11644
11645 config KTIME_SCALAR
11646 def_bool X86_32
11647@@ -1008,7 +1008,7 @@ choice
11648
11649 config NOHIGHMEM
11650 bool "off"
11651- depends on !X86_NUMAQ
11652+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11653 ---help---
11654 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11655 However, the address space of 32-bit x86 processors is only 4
11656@@ -1045,7 +1045,7 @@ config NOHIGHMEM
11657
11658 config HIGHMEM4G
11659 bool "4GB"
11660- depends on !X86_NUMAQ
11661+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11662 ---help---
11663 Select this if you have a 32-bit processor and between 1 and 4
11664 gigabytes of physical RAM.
11665@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11666 hex
11667 default 0xB0000000 if VMSPLIT_3G_OPT
11668 default 0x80000000 if VMSPLIT_2G
11669- default 0x78000000 if VMSPLIT_2G_OPT
11670+ default 0x70000000 if VMSPLIT_2G_OPT
11671 default 0x40000000 if VMSPLIT_1G
11672 default 0xC0000000
11673 depends on X86_32
11674@@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11675
11676 config EFI
11677 bool "EFI runtime service support"
11678- depends on ACPI
11679+ depends on ACPI && !PAX_KERNEXEC
11680 ---help---
11681 This enables the kernel to use EFI runtime services that are
11682 available (such as the EFI variable services).
11683@@ -1460,6 +1460,7 @@ config SECCOMP
11684
11685 config CC_STACKPROTECTOR
11686 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11687+ depends on X86_64 || !PAX_MEMORY_UDEREF
11688 ---help---
11689 This option turns on the -fstack-protector GCC feature. This
11690 feature puts, at the beginning of functions, a canary value on
11691@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11692 config PHYSICAL_START
11693 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11694 default "0x1000000"
11695+ range 0x400000 0x40000000
11696 ---help---
11697 This gives the physical address where the kernel is loaded.
11698
11699@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11700 hex
11701 prompt "Alignment value to which kernel should be aligned" if X86_32
11702 default "0x1000000"
11703+ range 0x400000 0x1000000 if PAX_KERNEXEC
11704 range 0x2000 0x1000000
11705 ---help---
11706 This value puts the alignment restrictions on physical address
11707@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11708 Say N if you want to disable CPU hotplug.
11709
11710 config COMPAT_VDSO
11711- def_bool y
11712+ def_bool n
11713 prompt "Compat VDSO support"
11714 depends on X86_32 || IA32_EMULATION
11715+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11716 ---help---
11717 Map the 32-bit VDSO to the predictable old-style address too.
11718 ---help---
11719diff -urNp linux-2.6.32.45/arch/x86/Kconfig.cpu linux-2.6.32.45/arch/x86/Kconfig.cpu
11720--- linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11721+++ linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11722@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11723
11724 config X86_F00F_BUG
11725 def_bool y
11726- depends on M586MMX || M586TSC || M586 || M486 || M386
11727+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11728
11729 config X86_WP_WORKS_OK
11730 def_bool y
11731@@ -360,7 +360,7 @@ config X86_POPAD_OK
11732
11733 config X86_ALIGNMENT_16
11734 def_bool y
11735- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11736+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11737
11738 config X86_INTEL_USERCOPY
11739 def_bool y
11740@@ -406,7 +406,7 @@ config X86_CMPXCHG64
11741 # generates cmov.
11742 config X86_CMOV
11743 def_bool y
11744- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11745+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11746
11747 config X86_MINIMUM_CPU_FAMILY
11748 int
11749diff -urNp linux-2.6.32.45/arch/x86/Kconfig.debug linux-2.6.32.45/arch/x86/Kconfig.debug
11750--- linux-2.6.32.45/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11751+++ linux-2.6.32.45/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11752@@ -99,7 +99,7 @@ config X86_PTDUMP
11753 config DEBUG_RODATA
11754 bool "Write protect kernel read-only data structures"
11755 default y
11756- depends on DEBUG_KERNEL
11757+ depends on DEBUG_KERNEL && BROKEN
11758 ---help---
11759 Mark the kernel read-only data as write-protected in the pagetables,
11760 in order to catch accidental (and incorrect) writes to such const
11761diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile
11762--- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-03-27 14:31:47.000000000 -0400
11763+++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-08-07 14:38:58.000000000 -0400
11764@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
11765 $(call cc-option, -fno-stack-protector) \
11766 $(call cc-option, -mpreferred-stack-boundary=2)
11767 KBUILD_CFLAGS += $(call cc-option, -m32)
11768+ifdef CONSTIFY_PLUGIN
11769+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11770+endif
11771 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11772 GCOV_PROFILE := n
11773
11774diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S
11775--- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11776+++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11777@@ -91,6 +91,9 @@ _start:
11778 /* Do any other stuff... */
11779
11780 #ifndef CONFIG_64BIT
11781+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
11782+ call verify_cpu
11783+
11784 /* This could also be done in C code... */
11785 movl pmode_cr3, %eax
11786 movl %eax, %cr3
11787@@ -104,7 +107,7 @@ _start:
11788 movl %eax, %ecx
11789 orl %edx, %ecx
11790 jz 1f
11791- movl $0xc0000080, %ecx
11792+ mov $MSR_EFER, %ecx
11793 wrmsr
11794 1:
11795
11796@@ -114,6 +117,7 @@ _start:
11797 movl pmode_cr0, %eax
11798 movl %eax, %cr0
11799 jmp pmode_return
11800+# include "../../verify_cpu.S"
11801 #else
11802 pushw $0
11803 pushw trampoline_segment
11804diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c
11805--- linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11806+++ linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11807@@ -11,11 +11,12 @@
11808 #include <linux/cpumask.h>
11809 #include <asm/segment.h>
11810 #include <asm/desc.h>
11811+#include <asm/e820.h>
11812
11813 #include "realmode/wakeup.h"
11814 #include "sleep.h"
11815
11816-unsigned long acpi_wakeup_address;
11817+unsigned long acpi_wakeup_address = 0x2000;
11818 unsigned long acpi_realmode_flags;
11819
11820 /* address in low memory of the wakeup routine. */
11821@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11822 #else /* CONFIG_64BIT */
11823 header->trampoline_segment = setup_trampoline() >> 4;
11824 #ifdef CONFIG_SMP
11825- stack_start.sp = temp_stack + sizeof(temp_stack);
11826+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11827+
11828+ pax_open_kernel();
11829 early_gdt_descr.address =
11830 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11831+ pax_close_kernel();
11832+
11833 initial_gs = per_cpu_offset(smp_processor_id());
11834 #endif
11835 initial_code = (unsigned long)wakeup_long64;
11836@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11837 return;
11838 }
11839
11840- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11841-
11842- if (!acpi_realmode) {
11843- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11844- return;
11845- }
11846-
11847- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11848+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11849+	acpi_realmode = (unsigned long)__va(acpi_wakeup_address);
11850 }
11851
11852
11853diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S
11854--- linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11855+++ linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11856@@ -30,13 +30,11 @@ wakeup_pmode_return:
11857 # and restore the stack ... but you need gdt for this to work
11858 movl saved_context_esp, %esp
11859
11860- movl %cs:saved_magic, %eax
11861- cmpl $0x12345678, %eax
11862+ cmpl $0x12345678, saved_magic
11863 jne bogus_magic
11864
11865 # jump to place where we left off
11866- movl saved_eip, %eax
11867- jmp *%eax
11868+ jmp *(saved_eip)
11869
11870 bogus_magic:
11871 jmp bogus_magic
11872diff -urNp linux-2.6.32.45/arch/x86/kernel/alternative.c linux-2.6.32.45/arch/x86/kernel/alternative.c
11873--- linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11874+++ linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11875@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11876
11877 BUG_ON(p->len > MAX_PATCH_LEN);
11878 /* prep the buffer with the original instructions */
11879- memcpy(insnbuf, p->instr, p->len);
11880+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11881 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11882 (unsigned long)p->instr, p->len);
11883
11884@@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11885 if (smp_alt_once)
11886 free_init_pages("SMP alternatives",
11887 (unsigned long)__smp_locks,
11888- (unsigned long)__smp_locks_end);
11889+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11890
11891 restart_nmi();
11892 }
11893@@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11894 * instructions. And on the local CPU you need to be protected again NMI or MCE
11895 * handlers seeing an inconsistent instruction while you patch.
11896 */
11897-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11898+static void *__kprobes text_poke_early(void *addr, const void *opcode,
11899 size_t len)
11900 {
11901 unsigned long flags;
11902 local_irq_save(flags);
11903- memcpy(addr, opcode, len);
11904+
11905+ pax_open_kernel();
11906+ memcpy(ktla_ktva(addr), opcode, len);
11907 sync_core();
11908+ pax_close_kernel();
11909+
11910 local_irq_restore(flags);
11911 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11912 that causes hangs on some VIA CPUs. */
11913@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11914 */
11915 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11916 {
11917- unsigned long flags;
11918- char *vaddr;
11919+ unsigned char *vaddr = ktla_ktva(addr);
11920 struct page *pages[2];
11921- int i;
11922+ size_t i;
11923
11924 if (!core_kernel_text((unsigned long)addr)) {
11925- pages[0] = vmalloc_to_page(addr);
11926- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11927+ pages[0] = vmalloc_to_page(vaddr);
11928+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11929 } else {
11930- pages[0] = virt_to_page(addr);
11931+ pages[0] = virt_to_page(vaddr);
11932 WARN_ON(!PageReserved(pages[0]));
11933- pages[1] = virt_to_page(addr + PAGE_SIZE);
11934+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11935 }
11936 BUG_ON(!pages[0]);
11937- local_irq_save(flags);
11938- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11939- if (pages[1])
11940- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11941- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11942- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11943- clear_fixmap(FIX_TEXT_POKE0);
11944- if (pages[1])
11945- clear_fixmap(FIX_TEXT_POKE1);
11946- local_flush_tlb();
11947- sync_core();
11948- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11949- that causes hangs on some VIA CPUs. */
11950+ text_poke_early(addr, opcode, len);
11951 for (i = 0; i < len; i++)
11952- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11953- local_irq_restore(flags);
11954+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11955 return addr;
11956 }
11957diff -urNp linux-2.6.32.45/arch/x86/kernel/amd_iommu.c linux-2.6.32.45/arch/x86/kernel/amd_iommu.c
11958--- linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11959+++ linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11960@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11961 }
11962 }
11963
11964-static struct dma_map_ops amd_iommu_dma_ops = {
11965+static const struct dma_map_ops amd_iommu_dma_ops = {
11966 .alloc_coherent = alloc_coherent,
11967 .free_coherent = free_coherent,
11968 .map_page = map_page,
11969diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/apic.c linux-2.6.32.45/arch/x86/kernel/apic/apic.c
11970--- linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11971+++ linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-08-17 20:00:16.000000000 -0400
11972@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
11973 /*
11974 * Debug level, exported for io_apic.c
11975 */
11976-unsigned int apic_verbosity;
11977+int apic_verbosity;
11978
11979 int pic_mode;
11980
11981@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11982 apic_write(APIC_ESR, 0);
11983 v1 = apic_read(APIC_ESR);
11984 ack_APIC_irq();
11985- atomic_inc(&irq_err_count);
11986+ atomic_inc_unchecked(&irq_err_count);
11987
11988 /*
11989 * Here is what the APIC error bits mean:
11990@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11991 u16 *bios_cpu_apicid;
11992 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11993
11994+ pax_track_stack();
11995+
11996 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11997 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11998
11999diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c
12000--- linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
12001+++ linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
12002@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
12003 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
12004 GFP_ATOMIC);
12005 if (!ioapic_entries)
12006- return 0;
12007+ return NULL;
12008
12009 for (apic = 0; apic < nr_ioapics; apic++) {
12010 ioapic_entries[apic] =
12011@@ -733,7 +733,7 @@ nomem:
12012 kfree(ioapic_entries[apic]);
12013 kfree(ioapic_entries);
12014
12015- return 0;
12016+ return NULL;
12017 }
12018
12019 /*
12020@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
12021 }
12022 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
12023
12024-void lock_vector_lock(void)
12025+void lock_vector_lock(void) __acquires(vector_lock)
12026 {
12027 /* Used to the online set of cpus does not change
12028 * during assign_irq_vector.
12029@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
12030 spin_lock(&vector_lock);
12031 }
12032
12033-void unlock_vector_lock(void)
12034+void unlock_vector_lock(void) __releases(vector_lock)
12035 {
12036 spin_unlock(&vector_lock);
12037 }
12038@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
12039 ack_APIC_irq();
12040 }
12041
12042-atomic_t irq_mis_count;
12043+atomic_unchecked_t irq_mis_count;
12044
12045 static void ack_apic_level(unsigned int irq)
12046 {
12047@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
12048
12049 /* Tail end of version 0x11 I/O APIC bug workaround */
12050 if (!(v & (1 << (i & 0x1f)))) {
12051- atomic_inc(&irq_mis_count);
12052+ atomic_inc_unchecked(&irq_mis_count);
12053 spin_lock(&ioapic_lock);
12054 __mask_and_edge_IO_APIC_irq(cfg);
12055 __unmask_and_level_IO_APIC_irq(cfg);
12056diff -urNp linux-2.6.32.45/arch/x86/kernel/apm_32.c linux-2.6.32.45/arch/x86/kernel/apm_32.c
12057--- linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
12058+++ linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
12059@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
12060 * This is for buggy BIOS's that refer to (real mode) segment 0x40
12061 * even though they are called in protected mode.
12062 */
12063-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
12064+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
12065 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
12066
12067 static const char driver_version[] = "1.16ac"; /* no spaces */
12068@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
12069 BUG_ON(cpu != 0);
12070 gdt = get_cpu_gdt_table(cpu);
12071 save_desc_40 = gdt[0x40 / 8];
12072+
12073+ pax_open_kernel();
12074 gdt[0x40 / 8] = bad_bios_desc;
12075+ pax_close_kernel();
12076
12077 apm_irq_save(flags);
12078 APM_DO_SAVE_SEGS;
12079@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
12080 &call->esi);
12081 APM_DO_RESTORE_SEGS;
12082 apm_irq_restore(flags);
12083+
12084+ pax_open_kernel();
12085 gdt[0x40 / 8] = save_desc_40;
12086+ pax_close_kernel();
12087+
12088 put_cpu();
12089
12090 return call->eax & 0xff;
12091@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
12092 BUG_ON(cpu != 0);
12093 gdt = get_cpu_gdt_table(cpu);
12094 save_desc_40 = gdt[0x40 / 8];
12095+
12096+ pax_open_kernel();
12097 gdt[0x40 / 8] = bad_bios_desc;
12098+ pax_close_kernel();
12099
12100 apm_irq_save(flags);
12101 APM_DO_SAVE_SEGS;
12102@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
12103 &call->eax);
12104 APM_DO_RESTORE_SEGS;
12105 apm_irq_restore(flags);
12106+
12107+ pax_open_kernel();
12108 gdt[0x40 / 8] = save_desc_40;
12109+ pax_close_kernel();
12110+
12111 put_cpu();
12112 return error;
12113 }
12114@@ -975,7 +989,7 @@ recalc:
12115
12116 static void apm_power_off(void)
12117 {
12118- unsigned char po_bios_call[] = {
12119+ const unsigned char po_bios_call[] = {
12120 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
12121 0x8e, 0xd0, /* movw ax,ss */
12122 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
12123@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
12124 * code to that CPU.
12125 */
12126 gdt = get_cpu_gdt_table(0);
12127+
12128+ pax_open_kernel();
12129 set_desc_base(&gdt[APM_CS >> 3],
12130 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
12131 set_desc_base(&gdt[APM_CS_16 >> 3],
12132 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
12133 set_desc_base(&gdt[APM_DS >> 3],
12134 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
12135+ pax_close_kernel();
12136
12137 proc_create("apm", 0, NULL, &apm_file_ops);
12138
12139diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c
12140--- linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
12141+++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
12142@@ -51,7 +51,6 @@ void foo(void)
12143 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
12144 BLANK();
12145
12146- OFFSET(TI_task, thread_info, task);
12147 OFFSET(TI_exec_domain, thread_info, exec_domain);
12148 OFFSET(TI_flags, thread_info, flags);
12149 OFFSET(TI_status, thread_info, status);
12150@@ -60,6 +59,8 @@ void foo(void)
12151 OFFSET(TI_restart_block, thread_info, restart_block);
12152 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
12153 OFFSET(TI_cpu, thread_info, cpu);
12154+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12155+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12156 BLANK();
12157
12158 OFFSET(GDS_size, desc_ptr, size);
12159@@ -99,6 +100,7 @@ void foo(void)
12160
12161 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12162 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12163+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12164 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
12165 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
12166 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
12167@@ -115,6 +117,11 @@ void foo(void)
12168 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
12169 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12170 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12171+
12172+#ifdef CONFIG_PAX_KERNEXEC
12173+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12174+#endif
12175+
12176 #endif
12177
12178 #ifdef CONFIG_XEN
12179diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c
12180--- linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
12181+++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-08-23 20:24:19.000000000 -0400
12182@@ -44,6 +44,8 @@ int main(void)
12183 ENTRY(addr_limit);
12184 ENTRY(preempt_count);
12185 ENTRY(status);
12186+ ENTRY(lowest_stack);
12187+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12188 #ifdef CONFIG_IA32_EMULATION
12189 ENTRY(sysenter_return);
12190 #endif
12191@@ -63,6 +65,18 @@ int main(void)
12192 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12193 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
12194 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12195+
12196+#ifdef CONFIG_PAX_KERNEXEC
12197+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12198+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12199+#endif
12200+
12201+#ifdef CONFIG_PAX_MEMORY_UDEREF
12202+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12203+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12204+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
12205+#endif
12206+
12207 #endif
12208
12209
12210@@ -115,6 +129,7 @@ int main(void)
12211 ENTRY(cr8);
12212 BLANK();
12213 #undef ENTRY
12214+ DEFINE(TSS_size, sizeof(struct tss_struct));
12215 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
12216 BLANK();
12217 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
12218@@ -130,6 +145,7 @@ int main(void)
12219
12220 BLANK();
12221 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12222+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12223 #ifdef CONFIG_XEN
12224 BLANK();
12225 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12226diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/amd.c
12227--- linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
12228+++ linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
12229@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
12230 unsigned int size)
12231 {
12232 /* AMD errata T13 (order #21922) */
12233- if ((c->x86 == 6)) {
12234+ if (c->x86 == 6) {
12235 /* Duron Rev A0 */
12236 if (c->x86_model == 3 && c->x86_mask == 0)
12237 size = 64;
12238diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/common.c linux-2.6.32.45/arch/x86/kernel/cpu/common.c
12239--- linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
12240+++ linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
12241@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
12242
12243 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12244
12245-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12246-#ifdef CONFIG_X86_64
12247- /*
12248- * We need valid kernel segments for data and code in long mode too
12249- * IRET will check the segment types kkeil 2000/10/28
12250- * Also sysret mandates a special GDT layout
12251- *
12252- * TLS descriptors are currently at a different place compared to i386.
12253- * Hopefully nobody expects them at a fixed place (Wine?)
12254- */
12255- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12256- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12257- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12258- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12259- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12260- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12261-#else
12262- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12263- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12264- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12265- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12266- /*
12267- * Segments used for calling PnP BIOS have byte granularity.
12268- * They code segments and data segments have fixed 64k limits,
12269- * the transfer segment sizes are set at run time.
12270- */
12271- /* 32-bit code */
12272- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12273- /* 16-bit code */
12274- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12275- /* 16-bit data */
12276- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12277- /* 16-bit data */
12278- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12279- /* 16-bit data */
12280- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12281- /*
12282- * The APM segments have byte granularity and their bases
12283- * are set at run time. All have 64k limits.
12284- */
12285- /* 32-bit code */
12286- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12287- /* 16-bit code */
12288- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12289- /* data */
12290- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12291-
12292- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12293- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12294- GDT_STACK_CANARY_INIT
12295-#endif
12296-} };
12297-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12298-
12299 static int __init x86_xsave_setup(char *s)
12300 {
12301 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12302@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
12303 {
12304 struct desc_ptr gdt_descr;
12305
12306- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12307+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12308 gdt_descr.size = GDT_SIZE - 1;
12309 load_gdt(&gdt_descr);
12310 /* Reload the per-cpu base */
12311@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
12312 /* Filter out anything that depends on CPUID levels we don't have */
12313 filter_cpuid_features(c, true);
12314
12315+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
12316+ setup_clear_cpu_cap(X86_FEATURE_SEP);
12317+#endif
12318+
12319 /* If the model name is still unset, do table lookup. */
12320 if (!c->x86_model_id[0]) {
12321 const char *p;
12322@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
12323 }
12324 __setup("clearcpuid=", setup_disablecpuid);
12325
12326+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12327+EXPORT_PER_CPU_SYMBOL(current_tinfo);
12328+
12329 #ifdef CONFIG_X86_64
12330 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12331
12332@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
12333 EXPORT_PER_CPU_SYMBOL(current_task);
12334
12335 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12336- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12337+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12338 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12339
12340 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12341@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
12342 {
12343 memset(regs, 0, sizeof(struct pt_regs));
12344 regs->fs = __KERNEL_PERCPU;
12345- regs->gs = __KERNEL_STACK_CANARY;
12346+ savesegment(gs, regs->gs);
12347
12348 return regs;
12349 }
12350@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
12351 int i;
12352
12353 cpu = stack_smp_processor_id();
12354- t = &per_cpu(init_tss, cpu);
12355+ t = init_tss + cpu;
12356 orig_ist = &per_cpu(orig_ist, cpu);
12357
12358 #ifdef CONFIG_NUMA
12359@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12360 switch_to_new_gdt(cpu);
12361 loadsegment(fs, 0);
12362
12363- load_idt((const struct desc_ptr *)&idt_descr);
12364+ load_idt(&idt_descr);
12365
12366 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12367 syscall_init();
12368@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12369 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12370 barrier();
12371
12372- check_efer();
12373 if (cpu != 0)
12374 enable_x2apic();
12375
12376@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12377 {
12378 int cpu = smp_processor_id();
12379 struct task_struct *curr = current;
12380- struct tss_struct *t = &per_cpu(init_tss, cpu);
12381+ struct tss_struct *t = init_tss + cpu;
12382 struct thread_struct *thread = &curr->thread;
12383
12384 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12385diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel.c linux-2.6.32.45/arch/x86/kernel/cpu/intel.c
12386--- linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12387+++ linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12388@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12389 * Update the IDT descriptor and reload the IDT so that
12390 * it uses the read-only mapped virtual address.
12391 */
12392- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12393+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12394 load_idt(&idt_descr);
12395 }
12396 #endif
12397diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c
12398--- linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12399+++ linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12400@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12401 return ret;
12402 }
12403
12404-static struct sysfs_ops sysfs_ops = {
12405+static const struct sysfs_ops sysfs_ops = {
12406 .show = show,
12407 .store = store,
12408 };
12409diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/Makefile linux-2.6.32.45/arch/x86/kernel/cpu/Makefile
12410--- linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12411+++ linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12412@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12413 CFLAGS_REMOVE_common.o = -pg
12414 endif
12415
12416-# Make sure load_percpu_segment has no stackprotector
12417-nostackp := $(call cc-option, -fno-stack-protector)
12418-CFLAGS_common.o := $(nostackp)
12419-
12420 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12421 obj-y += proc.o capflags.o powerflags.o common.o
12422 obj-y += vmware.o hypervisor.o sched.o
12423diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c
12424--- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12425+++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12426@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12427 return ret;
12428 }
12429
12430-static struct sysfs_ops threshold_ops = {
12431+static const struct sysfs_ops threshold_ops = {
12432 .show = show,
12433 .store = store,
12434 };
12435diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c
12436--- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12437+++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12438@@ -43,6 +43,7 @@
12439 #include <asm/ipi.h>
12440 #include <asm/mce.h>
12441 #include <asm/msr.h>
12442+#include <asm/local.h>
12443
12444 #include "mce-internal.h"
12445
12446@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12447 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12448 m->cs, m->ip);
12449
12450- if (m->cs == __KERNEL_CS)
12451+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12452 print_symbol("{%s}", m->ip);
12453 pr_cont("\n");
12454 }
12455@@ -221,10 +222,10 @@ static void print_mce_tail(void)
12456
12457 #define PANIC_TIMEOUT 5 /* 5 seconds */
12458
12459-static atomic_t mce_paniced;
12460+static atomic_unchecked_t mce_paniced;
12461
12462 static int fake_panic;
12463-static atomic_t mce_fake_paniced;
12464+static atomic_unchecked_t mce_fake_paniced;
12465
12466 /* Panic in progress. Enable interrupts and wait for final IPI */
12467 static void wait_for_panic(void)
12468@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12469 /*
12470 * Make sure only one CPU runs in machine check panic
12471 */
12472- if (atomic_inc_return(&mce_paniced) > 1)
12473+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12474 wait_for_panic();
12475 barrier();
12476
12477@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12478 console_verbose();
12479 } else {
12480 /* Don't log too much for fake panic */
12481- if (atomic_inc_return(&mce_fake_paniced) > 1)
12482+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12483 return;
12484 }
12485 print_mce_head();
12486@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12487 * might have been modified by someone else.
12488 */
12489 rmb();
12490- if (atomic_read(&mce_paniced))
12491+ if (atomic_read_unchecked(&mce_paniced))
12492 wait_for_panic();
12493 if (!monarch_timeout)
12494 goto out;
12495@@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12496 */
12497
12498 static DEFINE_SPINLOCK(mce_state_lock);
12499-static int open_count; /* #times opened */
12500+static local_t open_count; /* #times opened */
12501 static int open_exclu; /* already open exclusive? */
12502
12503 static int mce_open(struct inode *inode, struct file *file)
12504 {
12505 spin_lock(&mce_state_lock);
12506
12507- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12508+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12509 spin_unlock(&mce_state_lock);
12510
12511 return -EBUSY;
12512@@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12513
12514 if (file->f_flags & O_EXCL)
12515 open_exclu = 1;
12516- open_count++;
12517+ local_inc(&open_count);
12518
12519 spin_unlock(&mce_state_lock);
12520
12521@@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12522 {
12523 spin_lock(&mce_state_lock);
12524
12525- open_count--;
12526+ local_dec(&open_count);
12527 open_exclu = 0;
12528
12529 spin_unlock(&mce_state_lock);
12530@@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12531 static void mce_reset(void)
12532 {
12533 cpu_missing = 0;
12534- atomic_set(&mce_fake_paniced, 0);
12535+ atomic_set_unchecked(&mce_fake_paniced, 0);
12536 atomic_set(&mce_executing, 0);
12537 atomic_set(&mce_callin, 0);
12538 atomic_set(&global_nwo, 0);
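
The atomic_t to atomic_unchecked_t switches in this file are the flip side of PaX's reference-counter overflow protection: once plain atomic_t operations trap on overflow, counters that are merely statistics or one-shot flags (mce_paniced, mce_fake_paniced and friends) have to move to an unchecked variant so a legitimate wrap does not raise a false alarm. The kernel does this with arch-level instrumentation; the sketch below only models the distinction in plain C11, and counter_t, checked_inc and unchecked_inc are made-up names:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { atomic_int v; } counter_t;

/* roughly what an overflow-checked atomic increment does: trap instead
 * of silently wrapping into negative values */
static void checked_inc(counter_t *c)
{
        if (atomic_fetch_add(&c->v, 1) == INT_MAX) {
                fprintf(stderr, "counter overflow detected\n");
                abort();
        }
}

/* the "unchecked" flavour: wrap-around is explicitly considered harmless */
static void unchecked_inc(counter_t *c)
{
        atomic_fetch_add(&c->v, 1);
}

int main(void)
{
        counter_t stats = { INT_MAX };  /* a statistics counter about to wrap */
        counter_t ref   = { 0 };        /* a counter that must never wrap */

        unchecked_inc(&stats);          /* wraps; acceptable for statistics */
        checked_inc(&ref);              /* far from INT_MAX, passes the check */
        printf("stats=%d ref=%d\n", atomic_load(&stats.v), atomic_load(&ref.v));
        return 0;
}
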
12539diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c
12540--- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-03-27 14:31:47.000000000 -0400
12541+++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:33:55.000000000 -0400
12542@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *fi
12543 static int inject_init(void)
12544 {
12545 printk(KERN_INFO "Machine check injector initialized\n");
12546- mce_chrdev_ops.write = mce_write;
12547+ pax_open_kernel();
12548+ *(void **)&mce_chrdev_ops.write = mce_write;
12549+ pax_close_kernel();
12550 register_die_notifier(&mce_raise_nb);
12551 return 0;
12552 }
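
The pax_open_kernel()/pax_close_kernel() pair around the mce_chrdev_ops assignment exists because, with the surrounding constification, that ops structure normally sits in read-only memory; the helpers briefly lift kernel write protection (on x86 by toggling CR0.WP), perform the one legitimate write, and lock things down again. A loose userspace analogue using mprotect() on a page holding a single function pointer (all names here are invented for the example):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

typedef void (*write_handler_t)(void);

static void default_write(void) { puts("default write handler"); }
static void inject_write(void)  { puts("injector write handler"); }

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        write_handler_t *slot;

        /* stand-in for a constified ops table: one pointer on its own page */
        slot = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (slot == MAP_FAILED)
                return EXIT_FAILURE;
        *slot = default_write;
        mprotect(slot, pagesz, PROT_READ);   /* now read-only, like .rodata */

        /* pax_open_kernel() analogue: writable just long enough to patch */
        mprotect(slot, pagesz, PROT_READ | PROT_WRITE);
        *slot = inject_write;
        mprotect(slot, pagesz, PROT_READ);   /* pax_close_kernel() analogue */

        (*slot)();                           /* prints "injector write handler" */
        return 0;
}
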
12553diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c
12554--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12555+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12556@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12557 return 0;
12558 }
12559
12560-static struct mtrr_ops amd_mtrr_ops = {
12561+static const struct mtrr_ops amd_mtrr_ops = {
12562 .vendor = X86_VENDOR_AMD,
12563 .set = amd_set_mtrr,
12564 .get = amd_get_mtrr,
12565diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c
12566--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12567+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12568@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12569 return 0;
12570 }
12571
12572-static struct mtrr_ops centaur_mtrr_ops = {
12573+static const struct mtrr_ops centaur_mtrr_ops = {
12574 .vendor = X86_VENDOR_CENTAUR,
12575 .set = centaur_set_mcr,
12576 .get = centaur_get_mcr,
12577diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c
12578--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12579+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12580@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12581 post_set();
12582 }
12583
12584-static struct mtrr_ops cyrix_mtrr_ops = {
12585+static const struct mtrr_ops cyrix_mtrr_ops = {
12586 .vendor = X86_VENDOR_CYRIX,
12587 .set_all = cyrix_set_all,
12588 .set = cyrix_set_arr,
12589diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c
12590--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12591+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12592@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12593 /*
12594 * Generic structure...
12595 */
12596-struct mtrr_ops generic_mtrr_ops = {
12597+const struct mtrr_ops generic_mtrr_ops = {
12598 .use_intel_if = 1,
12599 .set_all = generic_set_all,
12600 .get = generic_get_mtrr,
12601diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c
12602--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12603+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12604@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12605 u64 size_or_mask, size_and_mask;
12606 static bool mtrr_aps_delayed_init;
12607
12608-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12609+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12610
12611-struct mtrr_ops *mtrr_if;
12612+const struct mtrr_ops *mtrr_if;
12613
12614 static void set_mtrr(unsigned int reg, unsigned long base,
12615 unsigned long size, mtrr_type type);
12616
12617-void set_mtrr_ops(struct mtrr_ops *ops)
12618+void set_mtrr_ops(const struct mtrr_ops *ops)
12619 {
12620 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12621 mtrr_ops[ops->vendor] = ops;
12622diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h
12623--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12624+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12625@@ -12,19 +12,19 @@
12626 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12627
12628 struct mtrr_ops {
12629- u32 vendor;
12630- u32 use_intel_if;
12631- void (*set)(unsigned int reg, unsigned long base,
12632+ const u32 vendor;
12633+ const u32 use_intel_if;
12634+ void (* const set)(unsigned int reg, unsigned long base,
12635 unsigned long size, mtrr_type type);
12636- void (*set_all)(void);
12637+ void (* const set_all)(void);
12638
12639- void (*get)(unsigned int reg, unsigned long *base,
12640+ void (* const get)(unsigned int reg, unsigned long *base,
12641 unsigned long *size, mtrr_type *type);
12642- int (*get_free_region)(unsigned long base, unsigned long size,
12643+ int (* const get_free_region)(unsigned long base, unsigned long size,
12644 int replace_reg);
12645- int (*validate_add_page)(unsigned long base, unsigned long size,
12646+ int (* const validate_add_page)(unsigned long base, unsigned long size,
12647 unsigned int type);
12648- int (*have_wrcomb)(void);
12649+ int (* const have_wrcomb)(void);
12650 };
12651
12652 extern int generic_get_free_region(unsigned long base, unsigned long size,
12653@@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12654 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12655 unsigned int type);
12656
12657-extern struct mtrr_ops generic_mtrr_ops;
12658+extern const struct mtrr_ops generic_mtrr_ops;
12659
12660 extern int positive_have_wrcomb(void);
12661
12662@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12663 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12664 void get_mtrr_state(void);
12665
12666-extern void set_mtrr_ops(struct mtrr_ops *ops);
12667+extern void set_mtrr_ops(const struct mtrr_ops *ops);
12668
12669 extern u64 size_or_mask, size_and_mask;
12670-extern struct mtrr_ops *mtrr_if;
12671+extern const struct mtrr_ops *mtrr_if;
12672
12673 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12674 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
12675diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c
12676--- linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12677+++ linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12678@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12679
12680 /* Interface defining a CPU specific perfctr watchdog */
12681 struct wd_ops {
12682- int (*reserve)(void);
12683- void (*unreserve)(void);
12684- int (*setup)(unsigned nmi_hz);
12685- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12686- void (*stop)(void);
12687+ int (* const reserve)(void);
12688+ void (* const unreserve)(void);
12689+ int (* const setup)(unsigned nmi_hz);
12690+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12691+ void (* const stop)(void);
12692 unsigned perfctr;
12693 unsigned evntsel;
12694 u64 checkbit;
12695@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12696 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12697 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12698
12699+/* cannot be const */
12700 static struct wd_ops intel_arch_wd_ops;
12701
12702 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12703@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12704 return 1;
12705 }
12706
12707+/* cannot be const */
12708 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12709 .reserve = single_msr_reserve,
12710 .unreserve = single_msr_unreserve,
12711diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c
12712--- linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12713+++ linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12714@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12715 * count to the generic event atomically:
12716 */
12717 again:
12718- prev_raw_count = atomic64_read(&hwc->prev_count);
12719+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12720 rdmsrl(hwc->event_base + idx, new_raw_count);
12721
12722- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12723+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12724 new_raw_count) != prev_raw_count)
12725 goto again;
12726
12727@@ -741,7 +741,7 @@ again:
12728 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12729 delta >>= shift;
12730
12731- atomic64_add(delta, &event->count);
12732+ atomic64_add_unchecked(delta, &event->count);
12733 atomic64_sub(delta, &hwc->period_left);
12734
12735 return new_raw_count;
12736@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12737 * The hw event starts counting from this event offset,
12738 * mark it to be able to extra future deltas:
12739 */
12740- atomic64_set(&hwc->prev_count, (u64)-left);
12741+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12742
12743 err = checking_wrmsrl(hwc->event_base + idx,
12744 (u64)(-left) & x86_pmu.event_mask);
12745@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12746 break;
12747
12748 callchain_store(entry, frame.return_address);
12749- fp = frame.next_frame;
12750+ fp = (__force const void __user *)frame.next_frame;
12751 }
12752 }
12753
12754diff -urNp linux-2.6.32.45/arch/x86/kernel/crash.c linux-2.6.32.45/arch/x86/kernel/crash.c
12755--- linux-2.6.32.45/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12756+++ linux-2.6.32.45/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12757@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12758 regs = args->regs;
12759
12760 #ifdef CONFIG_X86_32
12761- if (!user_mode_vm(regs)) {
12762+ if (!user_mode(regs)) {
12763 crash_fixup_ss_esp(&fixed_regs, regs);
12764 regs = &fixed_regs;
12765 }
12766diff -urNp linux-2.6.32.45/arch/x86/kernel/doublefault_32.c linux-2.6.32.45/arch/x86/kernel/doublefault_32.c
12767--- linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12768+++ linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12769@@ -11,7 +11,7 @@
12770
12771 #define DOUBLEFAULT_STACKSIZE (1024)
12772 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12773-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12774+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12775
12776 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12777
12778@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12779 unsigned long gdt, tss;
12780
12781 store_gdt(&gdt_desc);
12782- gdt = gdt_desc.address;
12783+ gdt = (unsigned long)gdt_desc.address;
12784
12785 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12786
12787@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12788 /* 0x2 bit is always set */
12789 .flags = X86_EFLAGS_SF | 0x2,
12790 .sp = STACK_START,
12791- .es = __USER_DS,
12792+ .es = __KERNEL_DS,
12793 .cs = __KERNEL_CS,
12794 .ss = __KERNEL_DS,
12795- .ds = __USER_DS,
12796+ .ds = __KERNEL_DS,
12797 .fs = __KERNEL_PERCPU,
12798
12799 .__cr3 = __pa_nodebug(swapper_pg_dir),
12800diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c
12801--- linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12802+++ linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12803@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12804 #endif
12805
12806 for (;;) {
12807- struct thread_info *context;
12808+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12809+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12810
12811- context = (struct thread_info *)
12812- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12813- bp = print_context_stack(context, stack, bp, ops,
12814- data, NULL, &graph);
12815-
12816- stack = (unsigned long *)context->previous_esp;
12817- if (!stack)
12818+ if (stack_start == task_stack_page(task))
12819 break;
12820+ stack = *(unsigned long **)stack_start;
12821 if (ops->stack(data, "IRQ") < 0)
12822 break;
12823 touch_nmi_watchdog();
12824@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12825 * When in-kernel, we also print out the stack and code at the
12826 * time of the fault..
12827 */
12828- if (!user_mode_vm(regs)) {
12829+ if (!user_mode(regs)) {
12830 unsigned int code_prologue = code_bytes * 43 / 64;
12831 unsigned int code_len = code_bytes;
12832 unsigned char c;
12833 u8 *ip;
12834+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12835
12836 printk(KERN_EMERG "Stack:\n");
12837 show_stack_log_lvl(NULL, regs, &regs->sp,
12838@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12839
12840 printk(KERN_EMERG "Code: ");
12841
12842- ip = (u8 *)regs->ip - code_prologue;
12843+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12844 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12845 /* try starting at IP */
12846- ip = (u8 *)regs->ip;
12847+ ip = (u8 *)regs->ip + cs_base;
12848 code_len = code_len - code_prologue + 1;
12849 }
12850 for (i = 0; i < code_len; i++, ip++) {
12851@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12852 printk(" Bad EIP value.");
12853 break;
12854 }
12855- if (ip == (u8 *)regs->ip)
12856+ if (ip == (u8 *)regs->ip + cs_base)
12857 printk("<%02x> ", c);
12858 else
12859 printk("%02x ", c);
12860@@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12861 {
12862 unsigned short ud2;
12863
12864+ ip = ktla_ktva(ip);
12865 if (ip < PAGE_OFFSET)
12866 return 0;
12867 if (probe_kernel_address((unsigned short *)ip, ud2))
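
Both dumpstack hunks drop the walk through thread_info->previous_esp in favour of a computed stack base: because kernel stacks are THREAD_SIZE-aligned, masking any in-stack address with ~(THREAD_SIZE - 1) yields the start of that stack, which is all print_context_stack() needs for its bounds check. The arithmetic in isolation (the THREAD_SIZE value and the sample address are picked only for illustration):

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL   /* must be a power of two for the mask trick */

/* given any address inside a THREAD_SIZE-aligned stack, return its base */
static uintptr_t stack_start(uintptr_t sp)
{
        return sp & ~(THREAD_SIZE - 1);
}

int main(void)
{
        uintptr_t sp = 0xc12345f8UL;   /* some address inside the stack */

        printf("sp=%#lx base=%#lx\n",
               (unsigned long)sp, (unsigned long)stack_start(sp));
        /* prints base=0xc1234000: the low log2(THREAD_SIZE) bits are cleared */
        return 0;
}

The 64-bit hunk that follows uses the same mask for the process stack and passes the explicit exception/IRQ stack bounds for the other cases.
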
12868diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c
12869--- linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12870+++ linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12871@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12872 unsigned long *irq_stack_end =
12873 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12874 unsigned used = 0;
12875- struct thread_info *tinfo;
12876 int graph = 0;
12877+ void *stack_start;
12878
12879 if (!task)
12880 task = current;
12881@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12882 * current stack address. If the stacks consist of nested
12883 * exceptions
12884 */
12885- tinfo = task_thread_info(task);
12886 for (;;) {
12887 char *id;
12888 unsigned long *estack_end;
12889+
12890 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12891 &used, &id);
12892
12893@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12894 if (ops->stack(data, id) < 0)
12895 break;
12896
12897- bp = print_context_stack(tinfo, stack, bp, ops,
12898+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12899 data, estack_end, &graph);
12900 ops->stack(data, "<EOE>");
12901 /*
12902@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12903 if (stack >= irq_stack && stack < irq_stack_end) {
12904 if (ops->stack(data, "IRQ") < 0)
12905 break;
12906- bp = print_context_stack(tinfo, stack, bp,
12907+ bp = print_context_stack(task, irq_stack, stack, bp,
12908 ops, data, irq_stack_end, &graph);
12909 /*
12910 * We link to the next stack (which would be
12911@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12912 /*
12913 * This handles the process stack:
12914 */
12915- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12916+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12917+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12918 put_cpu();
12919 }
12920 EXPORT_SYMBOL(dump_trace);
12921diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.c linux-2.6.32.45/arch/x86/kernel/dumpstack.c
12922--- linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12923+++ linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12924@@ -2,6 +2,9 @@
12925 * Copyright (C) 1991, 1992 Linus Torvalds
12926 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12927 */
12928+#ifdef CONFIG_GRKERNSEC_HIDESYM
12929+#define __INCLUDED_BY_HIDESYM 1
12930+#endif
12931 #include <linux/kallsyms.h>
12932 #include <linux/kprobes.h>
12933 #include <linux/uaccess.h>
12934@@ -28,7 +31,7 @@ static int die_counter;
12935
12936 void printk_address(unsigned long address, int reliable)
12937 {
12938- printk(" [<%p>] %s%pS\n", (void *) address,
12939+ printk(" [<%p>] %s%pA\n", (void *) address,
12940 reliable ? "" : "? ", (void *) address);
12941 }
12942
12943@@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12944 static void
12945 print_ftrace_graph_addr(unsigned long addr, void *data,
12946 const struct stacktrace_ops *ops,
12947- struct thread_info *tinfo, int *graph)
12948+ struct task_struct *task, int *graph)
12949 {
12950- struct task_struct *task = tinfo->task;
12951 unsigned long ret_addr;
12952 int index = task->curr_ret_stack;
12953
12954@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12955 static inline void
12956 print_ftrace_graph_addr(unsigned long addr, void *data,
12957 const struct stacktrace_ops *ops,
12958- struct thread_info *tinfo, int *graph)
12959+ struct task_struct *task, int *graph)
12960 { }
12961 #endif
12962
12963@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12964 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12965 */
12966
12967-static inline int valid_stack_ptr(struct thread_info *tinfo,
12968- void *p, unsigned int size, void *end)
12969+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12970 {
12971- void *t = tinfo;
12972 if (end) {
12973 if (p < end && p >= (end-THREAD_SIZE))
12974 return 1;
12975@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12976 }
12977
12978 unsigned long
12979-print_context_stack(struct thread_info *tinfo,
12980+print_context_stack(struct task_struct *task, void *stack_start,
12981 unsigned long *stack, unsigned long bp,
12982 const struct stacktrace_ops *ops, void *data,
12983 unsigned long *end, int *graph)
12984 {
12985 struct stack_frame *frame = (struct stack_frame *)bp;
12986
12987- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12988+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12989 unsigned long addr;
12990
12991 addr = *stack;
12992@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12993 } else {
12994 ops->address(data, addr, 0);
12995 }
12996- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12997+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12998 }
12999 stack++;
13000 }
13001@@ -180,7 +180,7 @@ void dump_stack(void)
13002 #endif
13003
13004 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
13005- current->pid, current->comm, print_tainted(),
13006+ task_pid_nr(current), current->comm, print_tainted(),
13007 init_utsname()->release,
13008 (int)strcspn(init_utsname()->version, " "),
13009 init_utsname()->version);
13010@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
13011 return flags;
13012 }
13013
13014+extern void gr_handle_kernel_exploit(void);
13015+
13016 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13017 {
13018 if (regs && kexec_should_crash(current))
13019@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
13020 panic("Fatal exception in interrupt");
13021 if (panic_on_oops)
13022 panic("Fatal exception");
13023- do_exit(signr);
13024+
13025+ gr_handle_kernel_exploit();
13026+
13027+ do_group_exit(signr);
13028 }
13029
13030 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13031@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
13032 unsigned long flags = oops_begin();
13033 int sig = SIGSEGV;
13034
13035- if (!user_mode_vm(regs))
13036+ if (!user_mode(regs))
13037 report_bug(regs->ip, regs);
13038
13039 if (__die(str, regs, err))
13040diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.h linux-2.6.32.45/arch/x86/kernel/dumpstack.h
13041--- linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
13042+++ linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
13043@@ -15,7 +15,7 @@
13044 #endif
13045
13046 extern unsigned long
13047-print_context_stack(struct thread_info *tinfo,
13048+print_context_stack(struct task_struct *task, void *stack_start,
13049 unsigned long *stack, unsigned long bp,
13050 const struct stacktrace_ops *ops, void *data,
13051 unsigned long *end, int *graph);
13052diff -urNp linux-2.6.32.45/arch/x86/kernel/e820.c linux-2.6.32.45/arch/x86/kernel/e820.c
13053--- linux-2.6.32.45/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
13054+++ linux-2.6.32.45/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
13055@@ -733,7 +733,7 @@ struct early_res {
13056 };
13057 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
13058 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
13059- {}
13060+ { 0, 0, {0}, 0 }
13061 };
13062
13063 static int __init find_overlapped_early(u64 start, u64 end)
13064diff -urNp linux-2.6.32.45/arch/x86/kernel/early_printk.c linux-2.6.32.45/arch/x86/kernel/early_printk.c
13065--- linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
13066+++ linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
13067@@ -7,6 +7,7 @@
13068 #include <linux/pci_regs.h>
13069 #include <linux/pci_ids.h>
13070 #include <linux/errno.h>
13071+#include <linux/sched.h>
13072 #include <asm/io.h>
13073 #include <asm/processor.h>
13074 #include <asm/fcntl.h>
13075@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
13076 int n;
13077 va_list ap;
13078
13079+ pax_track_stack();
13080+
13081 va_start(ap, fmt);
13082 n = vscnprintf(buf, sizeof(buf), fmt, ap);
13083 early_console->write(early_console, buf, n);
13084diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_32.c linux-2.6.32.45/arch/x86/kernel/efi_32.c
13085--- linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
13086+++ linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
13087@@ -38,70 +38,38 @@
13088 */
13089
13090 static unsigned long efi_rt_eflags;
13091-static pgd_t efi_bak_pg_dir_pointer[2];
13092+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
13093
13094-void efi_call_phys_prelog(void)
13095+void __init efi_call_phys_prelog(void)
13096 {
13097- unsigned long cr4;
13098- unsigned long temp;
13099 struct desc_ptr gdt_descr;
13100
13101 local_irq_save(efi_rt_eflags);
13102
13103- /*
13104- * If I don't have PAE, I should just duplicate two entries in page
13105- * directory. If I have PAE, I just need to duplicate one entry in
13106- * page directory.
13107- */
13108- cr4 = read_cr4_safe();
13109
13110- if (cr4 & X86_CR4_PAE) {
13111- efi_bak_pg_dir_pointer[0].pgd =
13112- swapper_pg_dir[pgd_index(0)].pgd;
13113- swapper_pg_dir[0].pgd =
13114- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13115- } else {
13116- efi_bak_pg_dir_pointer[0].pgd =
13117- swapper_pg_dir[pgd_index(0)].pgd;
13118- efi_bak_pg_dir_pointer[1].pgd =
13119- swapper_pg_dir[pgd_index(0x400000)].pgd;
13120- swapper_pg_dir[pgd_index(0)].pgd =
13121- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13122- temp = PAGE_OFFSET + 0x400000;
13123- swapper_pg_dir[pgd_index(0x400000)].pgd =
13124- swapper_pg_dir[pgd_index(temp)].pgd;
13125- }
13126+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
13127+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13128+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
13129
13130 /*
13131 * After the lock is released, the original page table is restored.
13132 */
13133 __flush_tlb_all();
13134
13135- gdt_descr.address = __pa(get_cpu_gdt_table(0));
13136+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
13137 gdt_descr.size = GDT_SIZE - 1;
13138 load_gdt(&gdt_descr);
13139 }
13140
13141-void efi_call_phys_epilog(void)
13142+void __init efi_call_phys_epilog(void)
13143 {
13144- unsigned long cr4;
13145 struct desc_ptr gdt_descr;
13146
13147- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
13148+ gdt_descr.address = get_cpu_gdt_table(0);
13149 gdt_descr.size = GDT_SIZE - 1;
13150 load_gdt(&gdt_descr);
13151
13152- cr4 = read_cr4_safe();
13153-
13154- if (cr4 & X86_CR4_PAE) {
13155- swapper_pg_dir[pgd_index(0)].pgd =
13156- efi_bak_pg_dir_pointer[0].pgd;
13157- } else {
13158- swapper_pg_dir[pgd_index(0)].pgd =
13159- efi_bak_pg_dir_pointer[0].pgd;
13160- swapper_pg_dir[pgd_index(0x400000)].pgd =
13161- efi_bak_pg_dir_pointer[1].pgd;
13162- }
13163+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
13164
13165 /*
13166 * After the lock is released, the original page table is restored.
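
The rewritten efi_call_phys_prelog()/epilog() replace the PAE/non-PAE special cases with one uniform pattern: back up the low page-directory slots, mirror the kernel mappings into them so the low virtual addresses used during the physical-mode firmware call stay mapped, and copy the backup back afterwards. clone_pgd_range() is essentially a typed memcpy over pgd_t entries; a toy model of the save/mirror/restore sequence (the entry counts assume the usual 3G/1G split and are only illustrative):

#include <stdio.h>
#include <string.h>

typedef struct { unsigned long pgd; } pgd_t;

#define PTRS_PER_PGD        1024
#define KERNEL_PGD_BOUNDARY 768   /* first kernel entry under a 3G/1G split */
#define KERNEL_PGD_PTRS     (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

static pgd_t swapper_pg_dir[PTRS_PER_PGD];
static pgd_t efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];

/* same shape as the kernel helper: copy a run of page-directory entries */
static void clone_pgd_range(pgd_t *dst, const pgd_t *src, unsigned long count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}

int main(void)
{
        /* prelog: save the low slots, then mirror the kernel mappings there */
        clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                        KERNEL_PGD_PTRS < KERNEL_PGD_BOUNDARY ?
                        KERNEL_PGD_PTRS : KERNEL_PGD_BOUNDARY);

        /* ... the EFI runtime service would be called here ... */

        /* epilog: restore the saved entries */
        clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
        puts("low page-directory entries restored");
        return 0;
}
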
13167diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S
13168--- linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
13169+++ linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
13170@@ -6,6 +6,7 @@
13171 */
13172
13173 #include <linux/linkage.h>
13174+#include <linux/init.h>
13175 #include <asm/page_types.h>
13176
13177 /*
13178@@ -20,7 +21,7 @@
13179 * service functions will comply with gcc calling convention, too.
13180 */
13181
13182-.text
13183+__INIT
13184 ENTRY(efi_call_phys)
13185 /*
13186 * 0. The function can only be called in Linux kernel. So CS has been
13187@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
13188 * The mapping of lower virtual memory has been created in prelog and
13189 * epilog.
13190 */
13191- movl $1f, %edx
13192- subl $__PAGE_OFFSET, %edx
13193- jmp *%edx
13194+ jmp 1f-__PAGE_OFFSET
13195 1:
13196
13197 /*
13198@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
13199 * parameter 2, ..., param n. To make things easy, we save the return
13200 * address of efi_call_phys in a global variable.
13201 */
13202- popl %edx
13203- movl %edx, saved_return_addr
13204- /* get the function pointer into ECX*/
13205- popl %ecx
13206- movl %ecx, efi_rt_function_ptr
13207- movl $2f, %edx
13208- subl $__PAGE_OFFSET, %edx
13209- pushl %edx
13210+ popl (saved_return_addr)
13211+ popl (efi_rt_function_ptr)
13212
13213 /*
13214 * 3. Clear PG bit in %CR0.
13215@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
13216 /*
13217 * 5. Call the physical function.
13218 */
13219- jmp *%ecx
13220+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
13221
13222-2:
13223 /*
13224 * 6. After EFI runtime service returns, control will return to
13225 * following instruction. We'd better readjust stack pointer first.
13226@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
13227 movl %cr0, %edx
13228 orl $0x80000000, %edx
13229 movl %edx, %cr0
13230- jmp 1f
13231-1:
13232+
13233 /*
13234 * 8. Now restore the virtual mode from flat mode by
13235 * adding EIP with PAGE_OFFSET.
13236 */
13237- movl $1f, %edx
13238- jmp *%edx
13239+ jmp 1f+__PAGE_OFFSET
13240 1:
13241
13242 /*
13243 * 9. Balance the stack. And because EAX contain the return value,
13244 * we'd better not clobber it.
13245 */
13246- leal efi_rt_function_ptr, %edx
13247- movl (%edx), %ecx
13248- pushl %ecx
13249+ pushl (efi_rt_function_ptr)
13250
13251 /*
13252- * 10. Push the saved return address onto the stack and return.
13253+ * 10. Return to the saved return address.
13254 */
13255- leal saved_return_addr, %edx
13256- movl (%edx), %ecx
13257- pushl %ecx
13258- ret
13259+ jmpl *(saved_return_addr)
13260 ENDPROC(efi_call_phys)
13261 .previous
13262
13263-.data
13264+__INITDATA
13265 saved_return_addr:
13266 .long 0
13267 efi_rt_function_ptr:
13268diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_32.S linux-2.6.32.45/arch/x86/kernel/entry_32.S
13269--- linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
13270+++ linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-08-23 20:24:19.000000000 -0400
13271@@ -185,13 +185,146 @@
13272 /*CFI_REL_OFFSET gs, PT_GS*/
13273 .endm
13274 .macro SET_KERNEL_GS reg
13275+
13276+#ifdef CONFIG_CC_STACKPROTECTOR
13277 movl $(__KERNEL_STACK_CANARY), \reg
13278+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13279+ movl $(__USER_DS), \reg
13280+#else
13281+ xorl \reg, \reg
13282+#endif
13283+
13284 movl \reg, %gs
13285 .endm
13286
13287 #endif /* CONFIG_X86_32_LAZY_GS */
13288
13289-.macro SAVE_ALL
13290+.macro pax_enter_kernel
13291+#ifdef CONFIG_PAX_KERNEXEC
13292+ call pax_enter_kernel
13293+#endif
13294+.endm
13295+
13296+.macro pax_exit_kernel
13297+#ifdef CONFIG_PAX_KERNEXEC
13298+ call pax_exit_kernel
13299+#endif
13300+.endm
13301+
13302+#ifdef CONFIG_PAX_KERNEXEC
13303+ENTRY(pax_enter_kernel)
13304+#ifdef CONFIG_PARAVIRT
13305+ pushl %eax
13306+ pushl %ecx
13307+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13308+ mov %eax, %esi
13309+#else
13310+ mov %cr0, %esi
13311+#endif
13312+ bts $16, %esi
13313+ jnc 1f
13314+ mov %cs, %esi
13315+ cmp $__KERNEL_CS, %esi
13316+ jz 3f
13317+ ljmp $__KERNEL_CS, $3f
13318+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13319+2:
13320+#ifdef CONFIG_PARAVIRT
13321+ mov %esi, %eax
13322+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13323+#else
13324+ mov %esi, %cr0
13325+#endif
13326+3:
13327+#ifdef CONFIG_PARAVIRT
13328+ popl %ecx
13329+ popl %eax
13330+#endif
13331+ ret
13332+ENDPROC(pax_enter_kernel)
13333+
13334+ENTRY(pax_exit_kernel)
13335+#ifdef CONFIG_PARAVIRT
13336+ pushl %eax
13337+ pushl %ecx
13338+#endif
13339+ mov %cs, %esi
13340+ cmp $__KERNEXEC_KERNEL_CS, %esi
13341+ jnz 2f
13342+#ifdef CONFIG_PARAVIRT
13343+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13344+ mov %eax, %esi
13345+#else
13346+ mov %cr0, %esi
13347+#endif
13348+ btr $16, %esi
13349+ ljmp $__KERNEL_CS, $1f
13350+1:
13351+#ifdef CONFIG_PARAVIRT
13352+ mov %esi, %eax
13353+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13354+#else
13355+ mov %esi, %cr0
13356+#endif
13357+2:
13358+#ifdef CONFIG_PARAVIRT
13359+ popl %ecx
13360+ popl %eax
13361+#endif
13362+ ret
13363+ENDPROC(pax_exit_kernel)
13364+#endif
13365+
13366+.macro pax_erase_kstack
13367+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13368+ call pax_erase_kstack
13369+#endif
13370+.endm
13371+
13372+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13373+/*
13374+ * ebp: thread_info
13375+ * ecx, edx: can be clobbered
13376+ */
13377+ENTRY(pax_erase_kstack)
13378+ pushl %edi
13379+ pushl %eax
13380+
13381+ mov TI_lowest_stack(%ebp), %edi
13382+ mov $-0xBEEF, %eax
13383+ std
13384+
13385+1: mov %edi, %ecx
13386+ and $THREAD_SIZE_asm - 1, %ecx
13387+ shr $2, %ecx
13388+ repne scasl
13389+ jecxz 2f
13390+
13391+ cmp $2*16, %ecx
13392+ jc 2f
13393+
13394+ mov $2*16, %ecx
13395+ repe scasl
13396+ jecxz 2f
13397+ jne 1b
13398+
13399+2: cld
13400+ mov %esp, %ecx
13401+ sub %edi, %ecx
13402+ shr $2, %ecx
13403+ rep stosl
13404+
13405+ mov TI_task_thread_sp0(%ebp), %edi
13406+ sub $128, %edi
13407+ mov %edi, TI_lowest_stack(%ebp)
13408+
13409+ popl %eax
13410+ popl %edi
13411+ ret
13412+ENDPROC(pax_erase_kstack)
13413+#endif
13414+
13415+.macro __SAVE_ALL _DS
13416 cld
13417 PUSH_GS
13418 pushl %fs
13419@@ -224,7 +357,7 @@
13420 pushl %ebx
13421 CFI_ADJUST_CFA_OFFSET 4
13422 CFI_REL_OFFSET ebx, 0
13423- movl $(__USER_DS), %edx
13424+ movl $\_DS, %edx
13425 movl %edx, %ds
13426 movl %edx, %es
13427 movl $(__KERNEL_PERCPU), %edx
13428@@ -232,6 +365,15 @@
13429 SET_KERNEL_GS %edx
13430 .endm
13431
13432+.macro SAVE_ALL
13433+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13434+ __SAVE_ALL __KERNEL_DS
13435+ pax_enter_kernel
13436+#else
13437+ __SAVE_ALL __USER_DS
13438+#endif
13439+.endm
13440+
13441 .macro RESTORE_INT_REGS
13442 popl %ebx
13443 CFI_ADJUST_CFA_OFFSET -4
13444@@ -352,7 +494,15 @@ check_userspace:
13445 movb PT_CS(%esp), %al
13446 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13447 cmpl $USER_RPL, %eax
13448+
13449+#ifdef CONFIG_PAX_KERNEXEC
13450+ jae resume_userspace
13451+
13452+ PAX_EXIT_KERNEL
13453+ jmp resume_kernel
13454+#else
13455 jb resume_kernel # not returning to v8086 or userspace
13456+#endif
13457
13458 ENTRY(resume_userspace)
13459 LOCKDEP_SYS_EXIT
13460@@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13461 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13462 # int/exception return?
13463 jne work_pending
13464- jmp restore_all
13465+ jmp restore_all_pax
13466 END(ret_from_exception)
13467
13468 #ifdef CONFIG_PREEMPT
13469@@ -414,25 +564,36 @@ sysenter_past_esp:
13470 /*CFI_REL_OFFSET cs, 0*/
13471 /*
13472 * Push current_thread_info()->sysenter_return to the stack.
13473- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13474- * pushed above; +8 corresponds to copy_thread's esp0 setting.
13475 */
13476- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13477+ pushl $0
13478 CFI_ADJUST_CFA_OFFSET 4
13479 CFI_REL_OFFSET eip, 0
13480
13481 pushl %eax
13482 CFI_ADJUST_CFA_OFFSET 4
13483 SAVE_ALL
13484+ GET_THREAD_INFO(%ebp)
13485+ movl TI_sysenter_return(%ebp),%ebp
13486+ movl %ebp,PT_EIP(%esp)
13487 ENABLE_INTERRUPTS(CLBR_NONE)
13488
13489 /*
13490 * Load the potential sixth argument from user stack.
13491 * Careful about security.
13492 */
13493+ movl PT_OLDESP(%esp),%ebp
13494+
13495+#ifdef CONFIG_PAX_MEMORY_UDEREF
13496+ mov PT_OLDSS(%esp),%ds
13497+1: movl %ds:(%ebp),%ebp
13498+ push %ss
13499+ pop %ds
13500+#else
13501 cmpl $__PAGE_OFFSET-3,%ebp
13502 jae syscall_fault
13503 1: movl (%ebp),%ebp
13504+#endif
13505+
13506 movl %ebp,PT_EBP(%esp)
13507 .section __ex_table,"a"
13508 .align 4
13509@@ -455,12 +616,23 @@ sysenter_do_call:
13510 testl $_TIF_ALLWORK_MASK, %ecx
13511 jne sysexit_audit
13512 sysenter_exit:
13513+
13514+#ifdef CONFIG_PAX_RANDKSTACK
13515+ pushl_cfi %eax
13516+ call pax_randomize_kstack
13517+ popl_cfi %eax
13518+#endif
13519+
13520+ pax_erase_kstack
13521+
13522 /* if something modifies registers it must also disable sysexit */
13523 movl PT_EIP(%esp), %edx
13524 movl PT_OLDESP(%esp), %ecx
13525 xorl %ebp,%ebp
13526 TRACE_IRQS_ON
13527 1: mov PT_FS(%esp), %fs
13528+2: mov PT_DS(%esp), %ds
13529+3: mov PT_ES(%esp), %es
13530 PTGS_TO_GS
13531 ENABLE_INTERRUPTS_SYSEXIT
13532
13533@@ -477,6 +649,9 @@ sysenter_audit:
13534 movl %eax,%edx /* 2nd arg: syscall number */
13535 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13536 call audit_syscall_entry
13537+
13538+ pax_erase_kstack
13539+
13540 pushl %ebx
13541 CFI_ADJUST_CFA_OFFSET 4
13542 movl PT_EAX(%esp),%eax /* reload syscall number */
13543@@ -504,11 +679,17 @@ sysexit_audit:
13544
13545 CFI_ENDPROC
13546 .pushsection .fixup,"ax"
13547-2: movl $0,PT_FS(%esp)
13548+4: movl $0,PT_FS(%esp)
13549+ jmp 1b
13550+5: movl $0,PT_DS(%esp)
13551+ jmp 1b
13552+6: movl $0,PT_ES(%esp)
13553 jmp 1b
13554 .section __ex_table,"a"
13555 .align 4
13556- .long 1b,2b
13557+ .long 1b,4b
13558+ .long 2b,5b
13559+ .long 3b,6b
13560 .popsection
13561 PTGS_TO_GS_EX
13562 ENDPROC(ia32_sysenter_target)
13563@@ -538,6 +719,14 @@ syscall_exit:
13564 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13565 jne syscall_exit_work
13566
13567+restore_all_pax:
13568+
13569+#ifdef CONFIG_PAX_RANDKSTACK
13570+ call pax_randomize_kstack
13571+#endif
13572+
13573+ pax_erase_kstack
13574+
13575 restore_all:
13576 TRACE_IRQS_IRET
13577 restore_all_notrace:
13578@@ -602,10 +791,29 @@ ldt_ss:
13579 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13580 mov %dx, %ax /* eax: new kernel esp */
13581 sub %eax, %edx /* offset (low word is 0) */
13582- PER_CPU(gdt_page, %ebx)
13583+#ifdef CONFIG_SMP
13584+ movl PER_CPU_VAR(cpu_number), %ebx
13585+ shll $PAGE_SHIFT_asm, %ebx
13586+ addl $cpu_gdt_table, %ebx
13587+#else
13588+ movl $cpu_gdt_table, %ebx
13589+#endif
13590 shr $16, %edx
13591+
13592+#ifdef CONFIG_PAX_KERNEXEC
13593+ mov %cr0, %esi
13594+ btr $16, %esi
13595+ mov %esi, %cr0
13596+#endif
13597+
13598 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13599 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13600+
13601+#ifdef CONFIG_PAX_KERNEXEC
13602+ bts $16, %esi
13603+ mov %esi, %cr0
13604+#endif
13605+
13606 pushl $__ESPFIX_SS
13607 CFI_ADJUST_CFA_OFFSET 4
13608 push %eax /* new kernel esp */
13609@@ -636,31 +844,25 @@ work_resched:
13610 movl TI_flags(%ebp), %ecx
13611 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13612 # than syscall tracing?
13613- jz restore_all
13614+ jz restore_all_pax
13615 testb $_TIF_NEED_RESCHED, %cl
13616 jnz work_resched
13617
13618 work_notifysig: # deal with pending signals and
13619 # notify-resume requests
13620+ movl %esp, %eax
13621 #ifdef CONFIG_VM86
13622 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13623- movl %esp, %eax
13624- jne work_notifysig_v86 # returning to kernel-space or
13625+ jz 1f # returning to kernel-space or
13626 # vm86-space
13627- xorl %edx, %edx
13628- call do_notify_resume
13629- jmp resume_userspace_sig
13630
13631- ALIGN
13632-work_notifysig_v86:
13633 pushl %ecx # save ti_flags for do_notify_resume
13634 CFI_ADJUST_CFA_OFFSET 4
13635 call save_v86_state # %eax contains pt_regs pointer
13636 popl %ecx
13637 CFI_ADJUST_CFA_OFFSET -4
13638 movl %eax, %esp
13639-#else
13640- movl %esp, %eax
13641+1:
13642 #endif
13643 xorl %edx, %edx
13644 call do_notify_resume
13645@@ -673,6 +875,9 @@ syscall_trace_entry:
13646 movl $-ENOSYS,PT_EAX(%esp)
13647 movl %esp, %eax
13648 call syscall_trace_enter
13649+
13650+ pax_erase_kstack
13651+
13652 /* What it returned is what we'll actually use. */
13653 cmpl $(nr_syscalls), %eax
13654 jnae syscall_call
13655@@ -695,6 +900,10 @@ END(syscall_exit_work)
13656
13657 RING0_INT_FRAME # can't unwind into user space anyway
13658 syscall_fault:
13659+#ifdef CONFIG_PAX_MEMORY_UDEREF
13660+ push %ss
13661+ pop %ds
13662+#endif
13663 GET_THREAD_INFO(%ebp)
13664 movl $-EFAULT,PT_EAX(%esp)
13665 jmp resume_userspace
13666@@ -726,6 +935,33 @@ PTREGSCALL(rt_sigreturn)
13667 PTREGSCALL(vm86)
13668 PTREGSCALL(vm86old)
13669
13670+ ALIGN;
13671+ENTRY(kernel_execve)
13672+ push %ebp
13673+ sub $PT_OLDSS+4,%esp
13674+ push %edi
13675+ push %ecx
13676+ push %eax
13677+ lea 3*4(%esp),%edi
13678+ mov $PT_OLDSS/4+1,%ecx
13679+ xorl %eax,%eax
13680+ rep stosl
13681+ pop %eax
13682+ pop %ecx
13683+ pop %edi
13684+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13685+ mov %eax,PT_EBX(%esp)
13686+ mov %edx,PT_ECX(%esp)
13687+ mov %ecx,PT_EDX(%esp)
13688+ mov %esp,%eax
13689+ call sys_execve
13690+ GET_THREAD_INFO(%ebp)
13691+ test %eax,%eax
13692+ jz syscall_exit
13693+ add $PT_OLDSS+4,%esp
13694+ pop %ebp
13695+ ret
13696+
13697 .macro FIXUP_ESPFIX_STACK
13698 /*
13699 * Switch back for ESPFIX stack to the normal zerobased stack
13700@@ -735,7 +971,13 @@ PTREGSCALL(vm86old)
13701 * normal stack and adjusts ESP with the matching offset.
13702 */
13703 /* fixup the stack */
13704- PER_CPU(gdt_page, %ebx)
13705+#ifdef CONFIG_SMP
13706+ movl PER_CPU_VAR(cpu_number), %ebx
13707+ shll $PAGE_SHIFT_asm, %ebx
13708+ addl $cpu_gdt_table, %ebx
13709+#else
13710+ movl $cpu_gdt_table, %ebx
13711+#endif
13712 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13713 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13714 shl $16, %eax
13715@@ -1198,7 +1440,6 @@ return_to_handler:
13716 ret
13717 #endif
13718
13719-.section .rodata,"a"
13720 #include "syscall_table_32.S"
13721
13722 syscall_table_size=(.-sys_call_table)
13723@@ -1255,9 +1496,12 @@ error_code:
13724 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13725 REG_TO_PTGS %ecx
13726 SET_KERNEL_GS %ecx
13727- movl $(__USER_DS), %ecx
13728+ movl $(__KERNEL_DS), %ecx
13729 movl %ecx, %ds
13730 movl %ecx, %es
13731+
13732+ pax_enter_kernel
13733+
13734 TRACE_IRQS_OFF
13735 movl %esp,%eax # pt_regs pointer
13736 call *%edi
13737@@ -1351,6 +1595,9 @@ nmi_stack_correct:
13738 xorl %edx,%edx # zero error code
13739 movl %esp,%eax # pt_regs pointer
13740 call do_nmi
13741+
13742+ pax_exit_kernel
13743+
13744 jmp restore_all_notrace
13745 CFI_ENDPROC
13746
13747@@ -1391,6 +1638,9 @@ nmi_espfix_stack:
13748 FIXUP_ESPFIX_STACK # %eax == %esp
13749 xorl %edx,%edx # zero error code
13750 call do_nmi
13751+
13752+ pax_exit_kernel
13753+
13754 RESTORE_REGS
13755 lss 12+4(%esp), %esp # back to espfix stack
13756 CFI_ADJUST_CFA_OFFSET -24
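
The pax_erase_kstack helper added here (and, in 64-bit form, in entry_64.S below) implements PAX_MEMORY_STACKLEAK: before returning to userspace, the portion of the kernel stack dirtied during this kernel entry is overwritten with a poison pattern (-0xBEEF) and the thread's lowest-stack mark is reset, so stale kernel data cannot later leak through uninitialized-variable bugs. The assembly finds the dirtied depth by scanning for the poison; the C model below simplifies that to an explicit low-water mark and uses invented names (use_stack, erase_kstack, STACK_WORDS):

#include <stdint.h>
#include <stdio.h>

#define STACK_WORDS 1024
#define POISON ((uint64_t)-0xBEEF)   /* the pattern the patch scans for and refills */

static uint64_t stack[STACK_WORDS];        /* models one kernel stack */
static size_t lowest_used = STACK_WORDS;   /* models TI_lowest_stack */

/* simulate a call chain that dirties the deepest `depth' words */
static void use_stack(size_t depth)
{
        size_t i;

        for (i = STACK_WORDS - depth; i < STACK_WORDS; i++)
                stack[i] = 0xdead;                 /* "sensitive" kernel data */
        if (STACK_WORDS - depth < lowest_used)
                lowest_used = STACK_WORDS - depth; /* low-water mark */
}

/* on the way back to userspace: poison everything below the live frames */
static void erase_kstack(size_t current_sp)
{
        size_t i;

        for (i = lowest_used; i < current_sp; i++)
                stack[i] = POISON;
        lowest_used = current_sp;                  /* rearm for the next entry */
}

int main(void)
{
        use_stack(600);
        erase_kstack(STACK_WORDS - 8);             /* only the top 8 words stay live */
        printf("stack[600] = %#llx\n", (unsigned long long)stack[600]);
        return 0;
}
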
13757diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_64.S linux-2.6.32.45/arch/x86/kernel/entry_64.S
13758--- linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13759+++ linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-08-23 20:24:19.000000000 -0400
13760@@ -53,6 +53,7 @@
13761 #include <asm/paravirt.h>
13762 #include <asm/ftrace.h>
13763 #include <asm/percpu.h>
13764+#include <asm/pgtable.h>
13765
13766 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13767 #include <linux/elf-em.h>
13768@@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13769 ENDPROC(native_usergs_sysret64)
13770 #endif /* CONFIG_PARAVIRT */
13771
13772+ .macro ljmpq sel, off
13773+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13774+ .byte 0x48; ljmp *1234f(%rip)
13775+ .pushsection .rodata
13776+ .align 16
13777+ 1234: .quad \off; .word \sel
13778+ .popsection
13779+#else
13780+ pushq $\sel
13781+ pushq $\off
13782+ lretq
13783+#endif
13784+ .endm
13785+
13786+ .macro pax_enter_kernel
13787+#ifdef CONFIG_PAX_KERNEXEC
13788+ call pax_enter_kernel
13789+#endif
13790+ .endm
13791+
13792+ .macro pax_exit_kernel
13793+#ifdef CONFIG_PAX_KERNEXEC
13794+ call pax_exit_kernel
13795+#endif
13796+ .endm
13797+
13798+#ifdef CONFIG_PAX_KERNEXEC
13799+ENTRY(pax_enter_kernel)
13800+ pushq %rdi
13801+
13802+#ifdef CONFIG_PARAVIRT
13803+ PV_SAVE_REGS(CLBR_RDI)
13804+#endif
13805+
13806+ GET_CR0_INTO_RDI
13807+ bts $16,%rdi
13808+ jnc 1f
13809+ mov %cs,%edi
13810+ cmp $__KERNEL_CS,%edi
13811+ jz 3f
13812+ ljmpq __KERNEL_CS,3f
13813+1: ljmpq __KERNEXEC_KERNEL_CS,2f
13814+2: SET_RDI_INTO_CR0
13815+3:
13816+
13817+#ifdef CONFIG_PARAVIRT
13818+ PV_RESTORE_REGS(CLBR_RDI)
13819+#endif
13820+
13821+ popq %rdi
13822+ retq
13823+ENDPROC(pax_enter_kernel)
13824+
13825+ENTRY(pax_exit_kernel)
13826+ pushq %rdi
13827+
13828+#ifdef CONFIG_PARAVIRT
13829+ PV_SAVE_REGS(CLBR_RDI)
13830+#endif
13831+
13832+ mov %cs,%rdi
13833+ cmp $__KERNEXEC_KERNEL_CS,%edi
13834+ jnz 2f
13835+ GET_CR0_INTO_RDI
13836+ btr $16,%rdi
13837+ ljmpq __KERNEL_CS,1f
13838+1: SET_RDI_INTO_CR0
13839+2:
13840+
13841+#ifdef CONFIG_PARAVIRT
13842+ PV_RESTORE_REGS(CLBR_RDI);
13843+#endif
13844+
13845+ popq %rdi
13846+ retq
13847+ENDPROC(pax_exit_kernel)
13848+#endif
13849+
13850+ .macro pax_enter_kernel_user
13851+#ifdef CONFIG_PAX_MEMORY_UDEREF
13852+ call pax_enter_kernel_user
13853+#endif
13854+ .endm
13855+
13856+ .macro pax_exit_kernel_user
13857+#ifdef CONFIG_PAX_MEMORY_UDEREF
13858+ call pax_exit_kernel_user
13859+#endif
13860+#ifdef CONFIG_PAX_RANDKSTACK
13861+ push %rax
13862+ call pax_randomize_kstack
13863+ pop %rax
13864+#endif
13865+ pax_erase_kstack
13866+ .endm
13867+
13868+#ifdef CONFIG_PAX_MEMORY_UDEREF
13869+ENTRY(pax_enter_kernel_user)
13870+ pushq %rdi
13871+ pushq %rbx
13872+
13873+#ifdef CONFIG_PARAVIRT
13874+ PV_SAVE_REGS(CLBR_RDI)
13875+#endif
13876+
13877+ GET_CR3_INTO_RDI
13878+ mov %rdi,%rbx
13879+ add $__START_KERNEL_map,%rbx
13880+ sub phys_base(%rip),%rbx
13881+
13882+#ifdef CONFIG_PARAVIRT
13883+ pushq %rdi
13884+ cmpl $0, pv_info+PARAVIRT_enabled
13885+ jz 1f
13886+ i = 0
13887+ .rept USER_PGD_PTRS
13888+ mov i*8(%rbx),%rsi
13889+ mov $0,%sil
13890+ lea i*8(%rbx),%rdi
13891+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13892+ i = i + 1
13893+ .endr
13894+ jmp 2f
13895+1:
13896+#endif
13897+
13898+ i = 0
13899+ .rept USER_PGD_PTRS
13900+ movb $0,i*8(%rbx)
13901+ i = i + 1
13902+ .endr
13903+
13904+#ifdef CONFIG_PARAVIRT
13905+2: popq %rdi
13906+#endif
13907+ SET_RDI_INTO_CR3
13908+
13909+#ifdef CONFIG_PAX_KERNEXEC
13910+ GET_CR0_INTO_RDI
13911+ bts $16,%rdi
13912+ SET_RDI_INTO_CR0
13913+#endif
13914+
13915+#ifdef CONFIG_PARAVIRT
13916+ PV_RESTORE_REGS(CLBR_RDI)
13917+#endif
13918+
13919+ popq %rbx
13920+ popq %rdi
13921+ retq
13922+ENDPROC(pax_enter_kernel_user)
13923+
13924+ENTRY(pax_exit_kernel_user)
13925+ push %rdi
13926+
13927+#ifdef CONFIG_PARAVIRT
13928+ pushq %rbx
13929+ PV_SAVE_REGS(CLBR_RDI)
13930+#endif
13931+
13932+#ifdef CONFIG_PAX_KERNEXEC
13933+ GET_CR0_INTO_RDI
13934+ btr $16,%rdi
13935+ SET_RDI_INTO_CR0
13936+#endif
13937+
13938+ GET_CR3_INTO_RDI
13939+ add $__START_KERNEL_map,%rdi
13940+ sub phys_base(%rip),%rdi
13941+
13942+#ifdef CONFIG_PARAVIRT
13943+ cmpl $0, pv_info+PARAVIRT_enabled
13944+ jz 1f
13945+ mov %rdi,%rbx
13946+ i = 0
13947+ .rept USER_PGD_PTRS
13948+ mov i*8(%rbx),%rsi
13949+ mov $0x67,%sil
13950+ lea i*8(%rbx),%rdi
13951+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13952+ i = i + 1
13953+ .endr
13954+ jmp 2f
13955+1:
13956+#endif
13957+
13958+ i = 0
13959+ .rept USER_PGD_PTRS
13960+ movb $0x67,i*8(%rdi)
13961+ i = i + 1
13962+ .endr
13963+
13964+#ifdef CONFIG_PARAVIRT
13965+2: PV_RESTORE_REGS(CLBR_RDI)
13966+ popq %rbx
13967+#endif
13968+
13969+ popq %rdi
13970+ retq
13971+ENDPROC(pax_exit_kernel_user)
13972+#endif
13973+
13974+.macro pax_erase_kstack
13975+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13976+ call pax_erase_kstack
13977+#endif
13978+.endm
13979+
13980+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13981+/*
13982+ * r10: thread_info
13983+ * rcx, rdx: can be clobbered
13984+ */
13985+ENTRY(pax_erase_kstack)
13986+ pushq %rdi
13987+ pushq %rax
13988+
13989+ GET_THREAD_INFO(%r10)
13990+ mov TI_lowest_stack(%r10), %rdi
13991+ mov $-0xBEEF, %rax
13992+ std
13993+
13994+1: mov %edi, %ecx
13995+ and $THREAD_SIZE_asm - 1, %ecx
13996+ shr $3, %ecx
13997+ repne scasq
13998+ jecxz 2f
13999+
14000+ cmp $2*8, %ecx
14001+ jc 2f
14002+
14003+ mov $2*8, %ecx
14004+ repe scasq
14005+ jecxz 2f
14006+ jne 1b
14007+
14008+2: cld
14009+ mov %esp, %ecx
14010+ sub %edi, %ecx
14011+ shr $3, %ecx
14012+ rep stosq
14013+
14014+ mov TI_task_thread_sp0(%r10), %rdi
14015+ sub $256, %rdi
14016+ mov %rdi, TI_lowest_stack(%r10)
14017+
14018+ popq %rax
14019+ popq %rdi
14020+ ret
14021+ENDPROC(pax_erase_kstack)
14022+#endif
14023
14024 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
14025 #ifdef CONFIG_TRACE_IRQFLAGS
14026@@ -317,7 +569,7 @@ ENTRY(save_args)
14027 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
14028 movq_cfi rbp, 8 /* push %rbp */
14029 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
14030- testl $3, CS(%rdi)
14031+ testb $3, CS(%rdi)
14032 je 1f
14033 SWAPGS
14034 /*
14035@@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
14036
14037 RESTORE_REST
14038
14039- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14040+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14041 je int_ret_from_sys_call
14042
14043 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
14044@@ -455,7 +707,7 @@ END(ret_from_fork)
14045 ENTRY(system_call)
14046 CFI_STARTPROC simple
14047 CFI_SIGNAL_FRAME
14048- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14049+ CFI_DEF_CFA rsp,0
14050 CFI_REGISTER rip,rcx
14051 /*CFI_REGISTER rflags,r11*/
14052 SWAPGS_UNSAFE_STACK
14053@@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
14054
14055 movq %rsp,PER_CPU_VAR(old_rsp)
14056 movq PER_CPU_VAR(kernel_stack),%rsp
14057+ pax_enter_kernel_user
14058 /*
14059 * No need to follow this irqs off/on section - it's straight
14060 * and short:
14061 */
14062 ENABLE_INTERRUPTS(CLBR_NONE)
14063- SAVE_ARGS 8,1
14064+ SAVE_ARGS 8*6,1
14065 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14066 movq %rcx,RIP-ARGOFFSET(%rsp)
14067 CFI_REL_OFFSET rip,RIP-ARGOFFSET
14068@@ -502,6 +755,7 @@ sysret_check:
14069 andl %edi,%edx
14070 jnz sysret_careful
14071 CFI_REMEMBER_STATE
14072+ pax_exit_kernel_user
14073 /*
14074 * sysretq will re-enable interrupts:
14075 */
14076@@ -562,6 +816,9 @@ auditsys:
14077 movq %rax,%rsi /* 2nd arg: syscall number */
14078 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
14079 call audit_syscall_entry
14080+
14081+ pax_erase_kstack
14082+
14083 LOAD_ARGS 0 /* reload call-clobbered registers */
14084 jmp system_call_fastpath
14085
14086@@ -592,6 +849,9 @@ tracesys:
14087 FIXUP_TOP_OF_STACK %rdi
14088 movq %rsp,%rdi
14089 call syscall_trace_enter
14090+
14091+ pax_erase_kstack
14092+
14093 /*
14094 * Reload arg registers from stack in case ptrace changed them.
14095 * We don't reload %rax because syscall_trace_enter() returned
14096@@ -613,7 +873,7 @@ tracesys:
14097 GLOBAL(int_ret_from_sys_call)
14098 DISABLE_INTERRUPTS(CLBR_NONE)
14099 TRACE_IRQS_OFF
14100- testl $3,CS-ARGOFFSET(%rsp)
14101+ testb $3,CS-ARGOFFSET(%rsp)
14102 je retint_restore_args
14103 movl $_TIF_ALLWORK_MASK,%edi
14104 /* edi: mask to check */
14105@@ -800,6 +1060,16 @@ END(interrupt)
14106 CFI_ADJUST_CFA_OFFSET 10*8
14107 call save_args
14108 PARTIAL_FRAME 0
14109+#ifdef CONFIG_PAX_MEMORY_UDEREF
14110+ testb $3, CS(%rdi)
14111+ jnz 1f
14112+ pax_enter_kernel
14113+ jmp 2f
14114+1: pax_enter_kernel_user
14115+2:
14116+#else
14117+ pax_enter_kernel
14118+#endif
14119 call \func
14120 .endm
14121
14122@@ -822,7 +1092,7 @@ ret_from_intr:
14123 CFI_ADJUST_CFA_OFFSET -8
14124 exit_intr:
14125 GET_THREAD_INFO(%rcx)
14126- testl $3,CS-ARGOFFSET(%rsp)
14127+ testb $3,CS-ARGOFFSET(%rsp)
14128 je retint_kernel
14129
14130 /* Interrupt came from user space */
14131@@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
14132 * The iretq could re-enable interrupts:
14133 */
14134 DISABLE_INTERRUPTS(CLBR_ANY)
14135+ pax_exit_kernel_user
14136 TRACE_IRQS_IRETQ
14137 SWAPGS
14138 jmp restore_args
14139
14140 retint_restore_args: /* return to kernel space */
14141 DISABLE_INTERRUPTS(CLBR_ANY)
14142+ pax_exit_kernel
14143 /*
14144 * The iretq could re-enable interrupts:
14145 */
14146@@ -1032,6 +1304,16 @@ ENTRY(\sym)
14147 CFI_ADJUST_CFA_OFFSET 15*8
14148 call error_entry
14149 DEFAULT_FRAME 0
14150+#ifdef CONFIG_PAX_MEMORY_UDEREF
14151+ testb $3, CS(%rsp)
14152+ jnz 1f
14153+ pax_enter_kernel
14154+ jmp 2f
14155+1: pax_enter_kernel_user
14156+2:
14157+#else
14158+ pax_enter_kernel
14159+#endif
14160 movq %rsp,%rdi /* pt_regs pointer */
14161 xorl %esi,%esi /* no error code */
14162 call \do_sym
14163@@ -1049,6 +1331,16 @@ ENTRY(\sym)
14164 subq $15*8, %rsp
14165 call save_paranoid
14166 TRACE_IRQS_OFF
14167+#ifdef CONFIG_PAX_MEMORY_UDEREF
14168+ testb $3, CS(%rsp)
14169+ jnz 1f
14170+ pax_enter_kernel
14171+ jmp 2f
14172+1: pax_enter_kernel_user
14173+2:
14174+#else
14175+ pax_enter_kernel
14176+#endif
14177 movq %rsp,%rdi /* pt_regs pointer */
14178 xorl %esi,%esi /* no error code */
14179 call \do_sym
14180@@ -1066,9 +1358,24 @@ ENTRY(\sym)
14181 subq $15*8, %rsp
14182 call save_paranoid
14183 TRACE_IRQS_OFF
14184+#ifdef CONFIG_PAX_MEMORY_UDEREF
14185+ testb $3, CS(%rsp)
14186+ jnz 1f
14187+ pax_enter_kernel
14188+ jmp 2f
14189+1: pax_enter_kernel_user
14190+2:
14191+#else
14192+ pax_enter_kernel
14193+#endif
14194 movq %rsp,%rdi /* pt_regs pointer */
14195 xorl %esi,%esi /* no error code */
14196- PER_CPU(init_tss, %rbp)
14197+#ifdef CONFIG_SMP
14198+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
14199+ lea init_tss(%rbp), %rbp
14200+#else
14201+ lea init_tss(%rip), %rbp
14202+#endif
14203 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14204 call \do_sym
14205 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14206@@ -1085,6 +1392,16 @@ ENTRY(\sym)
14207 CFI_ADJUST_CFA_OFFSET 15*8
14208 call error_entry
14209 DEFAULT_FRAME 0
14210+#ifdef CONFIG_PAX_MEMORY_UDEREF
14211+ testb $3, CS(%rsp)
14212+ jnz 1f
14213+ pax_enter_kernel
14214+ jmp 2f
14215+1: pax_enter_kernel_user
14216+2:
14217+#else
14218+ pax_enter_kernel
14219+#endif
14220 movq %rsp,%rdi /* pt_regs pointer */
14221 movq ORIG_RAX(%rsp),%rsi /* get error code */
14222 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14223@@ -1104,6 +1421,16 @@ ENTRY(\sym)
14224 call save_paranoid
14225 DEFAULT_FRAME 0
14226 TRACE_IRQS_OFF
14227+#ifdef CONFIG_PAX_MEMORY_UDEREF
14228+ testb $3, CS(%rsp)
14229+ jnz 1f
14230+ pax_enter_kernel
14231+ jmp 2f
14232+1: pax_enter_kernel_user
14233+2:
14234+#else
14235+ pax_enter_kernel
14236+#endif
14237 movq %rsp,%rdi /* pt_regs pointer */
14238 movq ORIG_RAX(%rsp),%rsi /* get error code */
14239 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14240@@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
14241 TRACE_IRQS_OFF
14242 testl %ebx,%ebx /* swapgs needed? */
14243 jnz paranoid_restore
14244- testl $3,CS(%rsp)
14245+ testb $3,CS(%rsp)
14246 jnz paranoid_userspace
14247+#ifdef CONFIG_PAX_MEMORY_UDEREF
14248+ pax_exit_kernel
14249+ TRACE_IRQS_IRETQ 0
14250+ SWAPGS_UNSAFE_STACK
14251+ RESTORE_ALL 8
14252+ jmp irq_return
14253+#endif
14254 paranoid_swapgs:
14255+#ifdef CONFIG_PAX_MEMORY_UDEREF
14256+ pax_exit_kernel_user
14257+#else
14258+ pax_exit_kernel
14259+#endif
14260 TRACE_IRQS_IRETQ 0
14261 SWAPGS_UNSAFE_STACK
14262 RESTORE_ALL 8
14263 jmp irq_return
14264 paranoid_restore:
14265+ pax_exit_kernel
14266 TRACE_IRQS_IRETQ 0
14267 RESTORE_ALL 8
14268 jmp irq_return
14269@@ -1470,7 +1810,7 @@ ENTRY(error_entry)
14270 movq_cfi r14, R14+8
14271 movq_cfi r15, R15+8
14272 xorl %ebx,%ebx
14273- testl $3,CS+8(%rsp)
14274+ testb $3,CS+8(%rsp)
14275 je error_kernelspace
14276 error_swapgs:
14277 SWAPGS
14278@@ -1529,6 +1869,16 @@ ENTRY(nmi)
14279 CFI_ADJUST_CFA_OFFSET 15*8
14280 call save_paranoid
14281 DEFAULT_FRAME 0
14282+#ifdef CONFIG_PAX_MEMORY_UDEREF
14283+ testb $3, CS(%rsp)
14284+ jnz 1f
14285+ pax_enter_kernel
14286+ jmp 2f
14287+1: pax_enter_kernel_user
14288+2:
14289+#else
14290+ pax_enter_kernel
14291+#endif
14292 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14293 movq %rsp,%rdi
14294 movq $-1,%rsi
14295@@ -1539,11 +1889,25 @@ ENTRY(nmi)
14296 DISABLE_INTERRUPTS(CLBR_NONE)
14297 testl %ebx,%ebx /* swapgs needed? */
14298 jnz nmi_restore
14299- testl $3,CS(%rsp)
14300+ testb $3,CS(%rsp)
14301 jnz nmi_userspace
14302+#ifdef CONFIG_PAX_MEMORY_UDEREF
14303+ pax_exit_kernel
14304+ SWAPGS_UNSAFE_STACK
14305+ RESTORE_ALL 8
14306+ jmp irq_return
14307+#endif
14308 nmi_swapgs:
14309+#ifdef CONFIG_PAX_MEMORY_UDEREF
14310+ pax_exit_kernel_user
14311+#else
14312+ pax_exit_kernel
14313+#endif
14314 SWAPGS_UNSAFE_STACK
14315+ RESTORE_ALL 8
14316+ jmp irq_return
14317 nmi_restore:
14318+ pax_exit_kernel
14319 RESTORE_ALL 8
14320 jmp irq_return
14321 nmi_userspace:
14322diff -urNp linux-2.6.32.45/arch/x86/kernel/ftrace.c linux-2.6.32.45/arch/x86/kernel/ftrace.c
14323--- linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
14324+++ linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
14325@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
14326 static void *mod_code_newcode; /* holds the text to write to the IP */
14327
14328 static unsigned nmi_wait_count;
14329-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14330+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14331
14332 int ftrace_arch_read_dyn_info(char *buf, int size)
14333 {
14334@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
14335
14336 r = snprintf(buf, size, "%u %u",
14337 nmi_wait_count,
14338- atomic_read(&nmi_update_count));
14339+ atomic_read_unchecked(&nmi_update_count));
14340 return r;
14341 }
14342
14343@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
14344 {
14345 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14346 smp_rmb();
14347+ pax_open_kernel();
14348 ftrace_mod_code();
14349- atomic_inc(&nmi_update_count);
14350+ pax_close_kernel();
14351+ atomic_inc_unchecked(&nmi_update_count);
14352 }
14353 /* Must have previous changes seen before executions */
14354 smp_mb();
14355@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
14356
14357
14358
14359-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
14360+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
14361
14362 static unsigned char *ftrace_nop_replace(void)
14363 {
14364@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
14365 {
14366 unsigned char replaced[MCOUNT_INSN_SIZE];
14367
14368+ ip = ktla_ktva(ip);
14369+
14370 /*
14371 * Note: Due to modules and __init, code can
14372 * disappear and change, we need to protect against faulting
14373@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
14374 unsigned char old[MCOUNT_INSN_SIZE], *new;
14375 int ret;
14376
14377- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14378+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14379 new = ftrace_call_replace(ip, (unsigned long)func);
14380 ret = ftrace_modify_code(ip, old, new);
14381
14382@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
14383 switch (faulted) {
14384 case 0:
14385 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
14386- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
14387+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14388 break;
14389 case 1:
14390 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14391- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14392+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14393 break;
14394 case 2:
14395 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14396- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14397+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14398 break;
14399 }
14400
14401@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14402 {
14403 unsigned char code[MCOUNT_INSN_SIZE];
14404
14405+ ip = ktla_ktva(ip);
14406+
14407 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14408 return -EFAULT;
14409
14410diff -urNp linux-2.6.32.45/arch/x86/kernel/head32.c linux-2.6.32.45/arch/x86/kernel/head32.c
14411--- linux-2.6.32.45/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14412+++ linux-2.6.32.45/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14413@@ -16,6 +16,7 @@
14414 #include <asm/apic.h>
14415 #include <asm/io_apic.h>
14416 #include <asm/bios_ebda.h>
14417+#include <asm/boot.h>
14418
14419 static void __init i386_default_early_setup(void)
14420 {
14421@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14422 {
14423 reserve_trampoline_memory();
14424
14425- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14426+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14427
14428 #ifdef CONFIG_BLK_DEV_INITRD
14429 /* Reserve INITRD */
14430diff -urNp linux-2.6.32.45/arch/x86/kernel/head_32.S linux-2.6.32.45/arch/x86/kernel/head_32.S
14431--- linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14432+++ linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14433@@ -19,10 +19,17 @@
14434 #include <asm/setup.h>
14435 #include <asm/processor-flags.h>
14436 #include <asm/percpu.h>
14437+#include <asm/msr-index.h>
14438
14439 /* Physical address */
14440 #define pa(X) ((X) - __PAGE_OFFSET)
14441
14442+#ifdef CONFIG_PAX_KERNEXEC
14443+#define ta(X) (X)
14444+#else
14445+#define ta(X) ((X) - __PAGE_OFFSET)
14446+#endif
14447+
14448 /*
14449 * References to members of the new_cpu_data structure.
14450 */
14451@@ -52,11 +59,7 @@
14452 * and small than max_low_pfn, otherwise will waste some page table entries
14453 */
14454
14455-#if PTRS_PER_PMD > 1
14456-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14457-#else
14458-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14459-#endif
14460+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14461
14462 /* Enough space to fit pagetables for the low memory linear map */
14463 MAPPING_BEYOND_END = \
14464@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14465 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14466
14467 /*
14468+ * Real beginning of normal "text" segment
14469+ */
14470+ENTRY(stext)
14471+ENTRY(_stext)
14472+
14473+/*
14474 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14475 * %esi points to the real-mode code as a 32-bit pointer.
14476 * CS and DS must be 4 GB flat segments, but we don't depend on
14477@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14478 * can.
14479 */
14480 __HEAD
14481+
14482+#ifdef CONFIG_PAX_KERNEXEC
14483+ jmp startup_32
14484+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14485+.fill PAGE_SIZE-5,1,0xcc
14486+#endif
14487+
14488 ENTRY(startup_32)
14489+ movl pa(stack_start),%ecx
14490+
14491 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14492 us to not reload segments */
14493 testb $(1<<6), BP_loadflags(%esi)
14494@@ -95,7 +113,60 @@ ENTRY(startup_32)
14495 movl %eax,%es
14496 movl %eax,%fs
14497 movl %eax,%gs
14498+ movl %eax,%ss
14499 2:
14500+ leal -__PAGE_OFFSET(%ecx),%esp
14501+
14502+#ifdef CONFIG_SMP
14503+ movl $pa(cpu_gdt_table),%edi
14504+ movl $__per_cpu_load,%eax
14505+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14506+ rorl $16,%eax
14507+ movb %al,__KERNEL_PERCPU + 4(%edi)
14508+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14509+ movl $__per_cpu_end - 1,%eax
14510+ subl $__per_cpu_start,%eax
14511+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14512+#endif
14513+
14514+#ifdef CONFIG_PAX_MEMORY_UDEREF
14515+ movl $NR_CPUS,%ecx
14516+ movl $pa(cpu_gdt_table),%edi
14517+1:
14518+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14519+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14520+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14521+ addl $PAGE_SIZE_asm,%edi
14522+ loop 1b
14523+#endif
14524+
14525+#ifdef CONFIG_PAX_KERNEXEC
14526+ movl $pa(boot_gdt),%edi
14527+ movl $__LOAD_PHYSICAL_ADDR,%eax
14528+ movw %ax,__BOOT_CS + 2(%edi)
14529+ rorl $16,%eax
14530+ movb %al,__BOOT_CS + 4(%edi)
14531+ movb %ah,__BOOT_CS + 7(%edi)
14532+ rorl $16,%eax
14533+
14534+ ljmp $(__BOOT_CS),$1f
14535+1:
14536+
14537+ movl $NR_CPUS,%ecx
14538+ movl $pa(cpu_gdt_table),%edi
14539+ addl $__PAGE_OFFSET,%eax
14540+1:
14541+ movw %ax,__KERNEL_CS + 2(%edi)
14542+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14543+ rorl $16,%eax
14544+ movb %al,__KERNEL_CS + 4(%edi)
14545+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14546+ movb %ah,__KERNEL_CS + 7(%edi)
14547+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14548+ rorl $16,%eax
14549+ addl $PAGE_SIZE_asm,%edi
14550+ loop 1b
14551+#endif
14552
14553 /*
14554 * Clear BSS first so that there are no surprises...
14555@@ -140,9 +211,7 @@ ENTRY(startup_32)
14556 cmpl $num_subarch_entries, %eax
14557 jae bad_subarch
14558
14559- movl pa(subarch_entries)(,%eax,4), %eax
14560- subl $__PAGE_OFFSET, %eax
14561- jmp *%eax
14562+ jmp *pa(subarch_entries)(,%eax,4)
14563
14564 bad_subarch:
14565 WEAK(lguest_entry)
14566@@ -154,10 +223,10 @@ WEAK(xen_entry)
14567 __INITDATA
14568
14569 subarch_entries:
14570- .long default_entry /* normal x86/PC */
14571- .long lguest_entry /* lguest hypervisor */
14572- .long xen_entry /* Xen hypervisor */
14573- .long default_entry /* Moorestown MID */
14574+ .long ta(default_entry) /* normal x86/PC */
14575+ .long ta(lguest_entry) /* lguest hypervisor */
14576+ .long ta(xen_entry) /* Xen hypervisor */
14577+ .long ta(default_entry) /* Moorestown MID */
14578 num_subarch_entries = (. - subarch_entries) / 4
14579 .previous
14580 #endif /* CONFIG_PARAVIRT */
14581@@ -218,8 +287,11 @@ default_entry:
14582 movl %eax, pa(max_pfn_mapped)
14583
14584 /* Do early initialization of the fixmap area */
14585- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14586- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14587+#ifdef CONFIG_COMPAT_VDSO
14588+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14589+#else
14590+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14591+#endif
14592 #else /* Not PAE */
14593
14594 page_pde_offset = (__PAGE_OFFSET >> 20);
14595@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14596 movl %eax, pa(max_pfn_mapped)
14597
14598 /* Do early initialization of the fixmap area */
14599- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14600- movl %eax,pa(swapper_pg_dir+0xffc)
14601+#ifdef CONFIG_COMPAT_VDSO
14602+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14603+#else
14604+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14605+#endif
14606 #endif
14607 jmp 3f
14608 /*
14609@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14610 movl %eax,%es
14611 movl %eax,%fs
14612 movl %eax,%gs
14613+ movl pa(stack_start),%ecx
14614+ movl %eax,%ss
14615+ leal -__PAGE_OFFSET(%ecx),%esp
14616 #endif /* CONFIG_SMP */
14617 3:
14618
14619@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14620 orl %edx,%eax
14621 movl %eax,%cr4
14622
14623+#ifdef CONFIG_X86_PAE
14624 btl $5, %eax # check if PAE is enabled
14625 jnc 6f
14626
14627@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14628 cpuid
14629 cmpl $0x80000000, %eax
14630 jbe 6f
14631+
14632+ /* Clear bogus XD_DISABLE bits */
14633+ call verify_cpu
14634+
14635 mov $0x80000001, %eax
14636 cpuid
14637 /* Execute Disable bit supported? */
14638@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14639 jnc 6f
14640
14641 /* Setup EFER (Extended Feature Enable Register) */
14642- movl $0xc0000080, %ecx
14643+ movl $MSR_EFER, %ecx
14644 rdmsr
14645
14646 btsl $11, %eax
14647 /* Make changes effective */
14648 wrmsr
14649
14650+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14651+ movl $1,pa(nx_enabled)
14652+#endif
14653+
14654 6:
14655
14656 /*
14657@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14658 movl %eax,%cr0 /* ..and set paging (PG) bit */
14659 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14660 1:
14661- /* Set up the stack pointer */
14662- lss stack_start,%esp
14663+ /* Shift the stack pointer to a virtual address */
14664+ addl $__PAGE_OFFSET, %esp
14665
14666 /*
14667 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14668@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14669
14670 #ifdef CONFIG_SMP
14671 cmpb $0, ready
14672- jz 1f /* Initial CPU cleans BSS */
14673- jmp checkCPUtype
14674-1:
14675+ jnz checkCPUtype
14676 #endif /* CONFIG_SMP */
14677
14678 /*
14679@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14680 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14681 movl %eax,%ss # after changing gdt.
14682
14683- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14684+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14685 movl %eax,%ds
14686 movl %eax,%es
14687
14688@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14689 */
14690 cmpb $0,ready
14691 jne 1f
14692- movl $per_cpu__gdt_page,%eax
14693+ movl $cpu_gdt_table,%eax
14694 movl $per_cpu__stack_canary,%ecx
14695+#ifdef CONFIG_SMP
14696+ addl $__per_cpu_load,%ecx
14697+#endif
14698 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14699 shrl $16, %ecx
14700 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14701 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14702 1:
14703-#endif
14704 movl $(__KERNEL_STACK_CANARY),%eax
14705+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14706+ movl $(__USER_DS),%eax
14707+#else
14708+ xorl %eax,%eax
14709+#endif
14710 movl %eax,%gs
14711
14712 xorl %eax,%eax # Clear LDT
14713@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14714
14715 cld # gcc2 wants the direction flag cleared at all times
14716 pushl $0 # fake return address for unwinder
14717-#ifdef CONFIG_SMP
14718- movb ready, %cl
14719 movb $1, ready
14720- cmpb $0,%cl # the first CPU calls start_kernel
14721- je 1f
14722- movl (stack_start), %esp
14723-1:
14724-#endif /* CONFIG_SMP */
14725 jmp *(initial_code)
14726
14727 /*
14728@@ -546,22 +631,22 @@ early_page_fault:
14729 jmp early_fault
14730
14731 early_fault:
14732- cld
14733 #ifdef CONFIG_PRINTK
14734+ cmpl $1,%ss:early_recursion_flag
14735+ je hlt_loop
14736+ incl %ss:early_recursion_flag
14737+ cld
14738 pusha
14739 movl $(__KERNEL_DS),%eax
14740 movl %eax,%ds
14741 movl %eax,%es
14742- cmpl $2,early_recursion_flag
14743- je hlt_loop
14744- incl early_recursion_flag
14745 movl %cr2,%eax
14746 pushl %eax
14747 pushl %edx /* trapno */
14748 pushl $fault_msg
14749 call printk
14750+; call dump_stack
14751 #endif
14752- call dump_stack
14753 hlt_loop:
14754 hlt
14755 jmp hlt_loop
14756@@ -569,8 +654,11 @@ hlt_loop:
14757 /* This is the default interrupt "handler" :-) */
14758 ALIGN
14759 ignore_int:
14760- cld
14761 #ifdef CONFIG_PRINTK
14762+ cmpl $2,%ss:early_recursion_flag
14763+ je hlt_loop
14764+ incl %ss:early_recursion_flag
14765+ cld
14766 pushl %eax
14767 pushl %ecx
14768 pushl %edx
14769@@ -579,9 +667,6 @@ ignore_int:
14770 movl $(__KERNEL_DS),%eax
14771 movl %eax,%ds
14772 movl %eax,%es
14773- cmpl $2,early_recursion_flag
14774- je hlt_loop
14775- incl early_recursion_flag
14776 pushl 16(%esp)
14777 pushl 24(%esp)
14778 pushl 32(%esp)
14779@@ -600,6 +685,8 @@ ignore_int:
14780 #endif
14781 iret
14782
14783+#include "verify_cpu.S"
14784+
14785 __REFDATA
14786 .align 4
14787 ENTRY(initial_code)
14788@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14789 /*
14790 * BSS section
14791 */
14792-__PAGE_ALIGNED_BSS
14793- .align PAGE_SIZE_asm
14794 #ifdef CONFIG_X86_PAE
14795+.section .swapper_pg_pmd,"a",@progbits
14796 swapper_pg_pmd:
14797 .fill 1024*KPMDS,4,0
14798 #else
14799+.section .swapper_pg_dir,"a",@progbits
14800 ENTRY(swapper_pg_dir)
14801 .fill 1024,4,0
14802 #endif
14803+.section .swapper_pg_fixmap,"a",@progbits
14804 swapper_pg_fixmap:
14805 .fill 1024,4,0
14806 #ifdef CONFIG_X86_TRAMPOLINE
14807+.section .trampoline_pg_dir,"a",@progbits
14808 ENTRY(trampoline_pg_dir)
14809+#ifdef CONFIG_X86_PAE
14810+ .fill 4,8,0
14811+#else
14812 .fill 1024,4,0
14813 #endif
14814+#endif
14815+
14816+.section .empty_zero_page,"a",@progbits
14817 ENTRY(empty_zero_page)
14818 .fill 4096,1,0
14819
14820 /*
14821+ * The IDT has to be page-aligned to simplify the Pentium
14822+ * F0 0F bug workaround.. We have a special link segment
14823+ * for this.
14824+ */
14825+.section .idt,"a",@progbits
14826+ENTRY(idt_table)
14827+ .fill 256,8,0
14828+
14829+/*
14830 * This starts the data section.
14831 */
14832 #ifdef CONFIG_X86_PAE
14833-__PAGE_ALIGNED_DATA
14834- /* Page-aligned for the benefit of paravirt? */
14835- .align PAGE_SIZE_asm
14836+.section .swapper_pg_dir,"a",@progbits
14837+
14838 ENTRY(swapper_pg_dir)
14839 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14840 # if KPMDS == 3
14841@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14842 # error "Kernel PMDs should be 1, 2 or 3"
14843 # endif
14844 .align PAGE_SIZE_asm /* needs to be page-sized too */
14845+
14846+#ifdef CONFIG_PAX_PER_CPU_PGD
14847+ENTRY(cpu_pgd)
14848+ .rept NR_CPUS
14849+ .fill 4,8,0
14850+ .endr
14851+#endif
14852+
14853 #endif
14854
14855 .data
14856+.balign 4
14857 ENTRY(stack_start)
14858- .long init_thread_union+THREAD_SIZE
14859- .long __BOOT_DS
14860+ .long init_thread_union+THREAD_SIZE-8
14861
14862 ready: .byte 0
14863
14864+.section .rodata,"a",@progbits
14865 early_recursion_flag:
14866 .long 0
14867
14868@@ -697,7 +809,7 @@ fault_msg:
14869 .word 0 # 32 bit align gdt_desc.address
14870 boot_gdt_descr:
14871 .word __BOOT_DS+7
14872- .long boot_gdt - __PAGE_OFFSET
14873+ .long pa(boot_gdt)
14874
14875 .word 0 # 32-bit align idt_desc.address
14876 idt_descr:
14877@@ -708,7 +820,7 @@ idt_descr:
14878 .word 0 # 32 bit align gdt_desc.address
14879 ENTRY(early_gdt_descr)
14880 .word GDT_ENTRIES*8-1
14881- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14882+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14883
14884 /*
14885 * The boot_gdt must mirror the equivalent in setup.S and is
14886@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14887 .align L1_CACHE_BYTES
14888 ENTRY(boot_gdt)
14889 .fill GDT_ENTRY_BOOT_CS,8,0
14890- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14891- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14892+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14893+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14894+
14895+ .align PAGE_SIZE_asm
14896+ENTRY(cpu_gdt_table)
14897+ .rept NR_CPUS
14898+ .quad 0x0000000000000000 /* NULL descriptor */
14899+ .quad 0x0000000000000000 /* 0x0b reserved */
14900+ .quad 0x0000000000000000 /* 0x13 reserved */
14901+ .quad 0x0000000000000000 /* 0x1b reserved */
14902+
14903+#ifdef CONFIG_PAX_KERNEXEC
14904+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14905+#else
14906+ .quad 0x0000000000000000 /* 0x20 unused */
14907+#endif
14908+
14909+ .quad 0x0000000000000000 /* 0x28 unused */
14910+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14911+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14912+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14913+ .quad 0x0000000000000000 /* 0x4b reserved */
14914+ .quad 0x0000000000000000 /* 0x53 reserved */
14915+ .quad 0x0000000000000000 /* 0x5b reserved */
14916+
14917+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14918+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14919+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14920+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14921+
14922+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14923+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14924+
14925+ /*
14926+ * Segments used for calling PnP BIOS have byte granularity.
14927+ * The code segments and data segments have fixed 64k limits,
14928+ * the transfer segment sizes are set at run time.
14929+ */
14930+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14931+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14932+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14933+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14934+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14935+
14936+ /*
14937+ * The APM segments have byte granularity and their bases
14938+ * are set at run time. All have 64k limits.
14939+ */
14940+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14941+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14942+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14943+
14944+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14945+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14946+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14947+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14948+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14949+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14950+
14951+ /* Be sure this is zeroed to avoid false validations in Xen */
14952+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14953+ .endr
14954diff -urNp linux-2.6.32.45/arch/x86/kernel/head_64.S linux-2.6.32.45/arch/x86/kernel/head_64.S
14955--- linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14956+++ linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14957@@ -19,6 +19,7 @@
14958 #include <asm/cache.h>
14959 #include <asm/processor-flags.h>
14960 #include <asm/percpu.h>
14961+#include <asm/cpufeature.h>
14962
14963 #ifdef CONFIG_PARAVIRT
14964 #include <asm/asm-offsets.h>
14965@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14966 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14967 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14968 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14969+L4_VMALLOC_START = pgd_index(VMALLOC_START)
14970+L3_VMALLOC_START = pud_index(VMALLOC_START)
14971+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14972+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14973
14974 .text
14975 __HEAD
14976@@ -85,35 +90,22 @@ startup_64:
14977 */
14978 addq %rbp, init_level4_pgt + 0(%rip)
14979 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14980+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14981+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14982 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14983
14984 addq %rbp, level3_ident_pgt + 0(%rip)
14985+#ifndef CONFIG_XEN
14986+ addq %rbp, level3_ident_pgt + 8(%rip)
14987+#endif
14988
14989- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14990- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14991+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14992
14993- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14994+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14995+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14996
14997- /* Add an Identity mapping if I am above 1G */
14998- leaq _text(%rip), %rdi
14999- andq $PMD_PAGE_MASK, %rdi
15000-
15001- movq %rdi, %rax
15002- shrq $PUD_SHIFT, %rax
15003- andq $(PTRS_PER_PUD - 1), %rax
15004- jz ident_complete
15005-
15006- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15007- leaq level3_ident_pgt(%rip), %rbx
15008- movq %rdx, 0(%rbx, %rax, 8)
15009-
15010- movq %rdi, %rax
15011- shrq $PMD_SHIFT, %rax
15012- andq $(PTRS_PER_PMD - 1), %rax
15013- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15014- leaq level2_spare_pgt(%rip), %rbx
15015- movq %rdx, 0(%rbx, %rax, 8)
15016-ident_complete:
15017+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15018+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15019
15020 /*
15021 * Fixup the kernel text+data virtual addresses. Note that
15022@@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
15023 * after the boot processor executes this code.
15024 */
15025
15026- /* Enable PAE mode and PGE */
15027- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15028+ /* Enable PAE mode and PSE/PGE */
15029+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15030 movq %rax, %cr4
15031
15032 /* Setup early boot stage 4 level pagetables. */
15033@@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
15034 movl $MSR_EFER, %ecx
15035 rdmsr
15036 btsl $_EFER_SCE, %eax /* Enable System Call */
15037- btl $20,%edi /* No Execute supported? */
15038+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15039 jnc 1f
15040 btsl $_EFER_NX, %eax
15041+ leaq init_level4_pgt(%rip), %rdi
15042+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15043+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15044+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15045 1: wrmsr /* Make changes effective */
15046
15047 /* Setup cr0 */
15048@@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
15049 .quad x86_64_start_kernel
15050 ENTRY(initial_gs)
15051 .quad INIT_PER_CPU_VAR(irq_stack_union)
15052- __FINITDATA
15053
15054 ENTRY(stack_start)
15055 .quad init_thread_union+THREAD_SIZE-8
15056 .word 0
15057+ __FINITDATA
15058
15059 bad_address:
15060 jmp bad_address
15061
15062- .section ".init.text","ax"
15063+ __INIT
15064 #ifdef CONFIG_EARLY_PRINTK
15065 .globl early_idt_handlers
15066 early_idt_handlers:
15067@@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
15068 #endif /* EARLY_PRINTK */
15069 1: hlt
15070 jmp 1b
15071+ .previous
15072
15073 #ifdef CONFIG_EARLY_PRINTK
15074+ __INITDATA
15075 early_recursion_flag:
15076 .long 0
15077+ .previous
15078
15079+ .section .rodata,"a",@progbits
15080 early_idt_msg:
15081 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15082 early_idt_ripmsg:
15083 .asciz "RIP %s\n"
15084-#endif /* CONFIG_EARLY_PRINTK */
15085 .previous
15086+#endif /* CONFIG_EARLY_PRINTK */
15087
15088+ .section .rodata,"a",@progbits
15089 #define NEXT_PAGE(name) \
15090 .balign PAGE_SIZE; \
15091 ENTRY(name)
15092@@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
15093 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15094 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15095 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15096+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
15097+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
15098+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15099+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15100 .org init_level4_pgt + L4_START_KERNEL*8, 0
15101 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15102 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15103
15104+#ifdef CONFIG_PAX_PER_CPU_PGD
15105+NEXT_PAGE(cpu_pgd)
15106+ .rept NR_CPUS
15107+ .fill 512,8,0
15108+ .endr
15109+#endif
15110+
15111 NEXT_PAGE(level3_ident_pgt)
15112 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15113+#ifdef CONFIG_XEN
15114 .fill 511,8,0
15115+#else
15116+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15117+ .fill 510,8,0
15118+#endif
15119+
15120+NEXT_PAGE(level3_vmalloc_pgt)
15121+ .fill 512,8,0
15122+
15123+NEXT_PAGE(level3_vmemmap_pgt)
15124+ .fill L3_VMEMMAP_START,8,0
15125+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15126
15127 NEXT_PAGE(level3_kernel_pgt)
15128 .fill L3_START_KERNEL,8,0
15129@@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
15130 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15131 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15132
15133+NEXT_PAGE(level2_vmemmap_pgt)
15134+ .fill 512,8,0
15135+
15136 NEXT_PAGE(level2_fixmap_pgt)
15137- .fill 506,8,0
15138- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15139- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15140- .fill 5,8,0
15141+ .fill 507,8,0
15142+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15143+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15144+ .fill 4,8,0
15145
15146-NEXT_PAGE(level1_fixmap_pgt)
15147+NEXT_PAGE(level1_vsyscall_pgt)
15148 .fill 512,8,0
15149
15150-NEXT_PAGE(level2_ident_pgt)
15151- /* Since I easily can, map the first 1G.
15152+ /* Since I easily can, map the first 2G.
15153 * Don't set NX because code runs from these pages.
15154 */
15155- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15156+NEXT_PAGE(level2_ident_pgt)
15157+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15158
15159 NEXT_PAGE(level2_kernel_pgt)
15160 /*
15161@@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
15162 * If you want to increase this then increase MODULES_VADDR
15163 * too.)
15164 */
15165- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15166- KERNEL_IMAGE_SIZE/PMD_SIZE)
15167-
15168-NEXT_PAGE(level2_spare_pgt)
15169- .fill 512, 8, 0
15170+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15171
15172 #undef PMDS
15173 #undef NEXT_PAGE
15174
15175- .data
15176+ .align PAGE_SIZE
15177+ENTRY(cpu_gdt_table)
15178+ .rept NR_CPUS
15179+ .quad 0x0000000000000000 /* NULL descriptor */
15180+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15181+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15182+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15183+ .quad 0x00cffb000000ffff /* __USER32_CS */
15184+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15185+ .quad 0x00affb000000ffff /* __USER_CS */
15186+
15187+#ifdef CONFIG_PAX_KERNEXEC
15188+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15189+#else
15190+ .quad 0x0 /* unused */
15191+#endif
15192+
15193+ .quad 0,0 /* TSS */
15194+ .quad 0,0 /* LDT */
15195+ .quad 0,0,0 /* three TLS descriptors */
15196+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15197+ /* asm/segment.h:GDT_ENTRIES must match this */
15198+
15199+ /* zero the remaining page */
15200+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15201+ .endr
15202+
15203 .align 16
15204 .globl early_gdt_descr
15205 early_gdt_descr:
15206 .word GDT_ENTRIES*8-1
15207 early_gdt_descr_base:
15208- .quad INIT_PER_CPU_VAR(gdt_page)
15209+ .quad cpu_gdt_table
15210
15211 ENTRY(phys_base)
15212 /* This must match the first entry in level2_kernel_pgt */
15213 .quad 0x0000000000000000
15214
15215 #include "../../x86/xen/xen-head.S"
15216-
15217- .section .bss, "aw", @nobits
15218+
15219+ .section .rodata,"a",@progbits
15220 .align L1_CACHE_BYTES
15221 ENTRY(idt_table)
15222- .skip IDT_ENTRIES * 16
15223+ .fill 512,8,0
15224
15225 __PAGE_ALIGNED_BSS
15226 .align PAGE_SIZE
15227diff -urNp linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c
15228--- linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
15229+++ linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
15230@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15231 EXPORT_SYMBOL(cmpxchg8b_emu);
15232 #endif
15233
15234+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15235+
15236 /* Networking helper routines. */
15237 EXPORT_SYMBOL(csum_partial_copy_generic);
15238+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15239+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15240
15241 EXPORT_SYMBOL(__get_user_1);
15242 EXPORT_SYMBOL(__get_user_2);
15243@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15244
15245 EXPORT_SYMBOL(csum_partial);
15246 EXPORT_SYMBOL(empty_zero_page);
15247+
15248+#ifdef CONFIG_PAX_KERNEXEC
15249+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15250+#endif
15251diff -urNp linux-2.6.32.45/arch/x86/kernel/i8259.c linux-2.6.32.45/arch/x86/kernel/i8259.c
15252--- linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
15253+++ linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
15254@@ -208,7 +208,7 @@ spurious_8259A_irq:
15255 "spurious 8259A interrupt: IRQ%d.\n", irq);
15256 spurious_irq_mask |= irqmask;
15257 }
15258- atomic_inc(&irq_err_count);
15259+ atomic_inc_unchecked(&irq_err_count);
15260 /*
15261 * Theoretically we do not have to handle this IRQ,
15262 * but in Linux this does not cause problems and is
15263diff -urNp linux-2.6.32.45/arch/x86/kernel/init_task.c linux-2.6.32.45/arch/x86/kernel/init_task.c
15264--- linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
15265+++ linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
15266@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
15267 * way process stacks are handled. This is done by having a special
15268 * "init_task" linker map entry..
15269 */
15270-union thread_union init_thread_union __init_task_data =
15271- { INIT_THREAD_INFO(init_task) };
15272+union thread_union init_thread_union __init_task_data;
15273
15274 /*
15275 * Initial task structure.
15276@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15277 * section. Since TSS's are completely CPU-local, we want them
15278 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15279 */
15280-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15281-
15282+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15283+EXPORT_SYMBOL(init_tss);
15284diff -urNp linux-2.6.32.45/arch/x86/kernel/ioport.c linux-2.6.32.45/arch/x86/kernel/ioport.c
15285--- linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
15286+++ linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
15287@@ -6,6 +6,7 @@
15288 #include <linux/sched.h>
15289 #include <linux/kernel.h>
15290 #include <linux/capability.h>
15291+#include <linux/security.h>
15292 #include <linux/errno.h>
15293 #include <linux/types.h>
15294 #include <linux/ioport.h>
15295@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
15296
15297 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15298 return -EINVAL;
15299+#ifdef CONFIG_GRKERNSEC_IO
15300+ if (turn_on && grsec_disable_privio) {
15301+ gr_handle_ioperm();
15302+ return -EPERM;
15303+ }
15304+#endif
15305 if (turn_on && !capable(CAP_SYS_RAWIO))
15306 return -EPERM;
15307
15308@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
15309 * because the ->io_bitmap_max value must match the bitmap
15310 * contents:
15311 */
15312- tss = &per_cpu(init_tss, get_cpu());
15313+ tss = init_tss + get_cpu();
15314
15315 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
15316
15317@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
15318 return -EINVAL;
15319 /* Trying to gain more privileges? */
15320 if (level > old) {
15321+#ifdef CONFIG_GRKERNSEC_IO
15322+ if (grsec_disable_privio) {
15323+ gr_handle_iopl();
15324+ return -EPERM;
15325+ }
15326+#endif
15327 if (!capable(CAP_SYS_RAWIO))
15328 return -EPERM;
15329 }
15330diff -urNp linux-2.6.32.45/arch/x86/kernel/irq_32.c linux-2.6.32.45/arch/x86/kernel/irq_32.c
15331--- linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
15332+++ linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
15333@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
15334 __asm__ __volatile__("andl %%esp,%0" :
15335 "=r" (sp) : "0" (THREAD_SIZE - 1));
15336
15337- return sp < (sizeof(struct thread_info) + STACK_WARN);
15338+ return sp < STACK_WARN;
15339 }
15340
15341 static void print_stack_overflow(void)
15342@@ -54,9 +54,9 @@ static inline void print_stack_overflow(
15343 * per-CPU IRQ handling contexts (thread information and stack)
15344 */
15345 union irq_ctx {
15346- struct thread_info tinfo;
15347- u32 stack[THREAD_SIZE/sizeof(u32)];
15348-} __attribute__((aligned(PAGE_SIZE)));
15349+ unsigned long previous_esp;
15350+ u32 stack[THREAD_SIZE/sizeof(u32)];
15351+} __attribute__((aligned(THREAD_SIZE)));
15352
15353 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15354 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
15355@@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
15356 static inline int
15357 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15358 {
15359- union irq_ctx *curctx, *irqctx;
15360+ union irq_ctx *irqctx;
15361 u32 *isp, arg1, arg2;
15362
15363- curctx = (union irq_ctx *) current_thread_info();
15364 irqctx = __get_cpu_var(hardirq_ctx);
15365
15366 /*
15367@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
15368 * handler) we can't do that and just have to keep using the
15369 * current stack (which is the irq stack already after all)
15370 */
15371- if (unlikely(curctx == irqctx))
15372+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15373 return 0;
15374
15375 /* build the stack frame on the IRQ stack */
15376- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15377- irqctx->tinfo.task = curctx->tinfo.task;
15378- irqctx->tinfo.previous_esp = current_stack_pointer;
15379+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15380+ irqctx->previous_esp = current_stack_pointer;
15381
15382- /*
15383- * Copy the softirq bits in preempt_count so that the
15384- * softirq checks work in the hardirq context.
15385- */
15386- irqctx->tinfo.preempt_count =
15387- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15388- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15389+#ifdef CONFIG_PAX_MEMORY_UDEREF
15390+ __set_fs(MAKE_MM_SEG(0));
15391+#endif
15392
15393 if (unlikely(overflow))
15394 call_on_stack(print_stack_overflow, isp);
15395@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15396 : "0" (irq), "1" (desc), "2" (isp),
15397 "D" (desc->handle_irq)
15398 : "memory", "cc", "ecx");
15399+
15400+#ifdef CONFIG_PAX_MEMORY_UDEREF
15401+ __set_fs(current_thread_info()->addr_limit);
15402+#endif
15403+
15404 return 1;
15405 }
15406
15407@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15408 */
15409 void __cpuinit irq_ctx_init(int cpu)
15410 {
15411- union irq_ctx *irqctx;
15412-
15413 if (per_cpu(hardirq_ctx, cpu))
15414 return;
15415
15416- irqctx = &per_cpu(hardirq_stack, cpu);
15417- irqctx->tinfo.task = NULL;
15418- irqctx->tinfo.exec_domain = NULL;
15419- irqctx->tinfo.cpu = cpu;
15420- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15421- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15422-
15423- per_cpu(hardirq_ctx, cpu) = irqctx;
15424-
15425- irqctx = &per_cpu(softirq_stack, cpu);
15426- irqctx->tinfo.task = NULL;
15427- irqctx->tinfo.exec_domain = NULL;
15428- irqctx->tinfo.cpu = cpu;
15429- irqctx->tinfo.preempt_count = 0;
15430- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15431-
15432- per_cpu(softirq_ctx, cpu) = irqctx;
15433+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15434+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15435
15436 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15437 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15438@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15439 asmlinkage void do_softirq(void)
15440 {
15441 unsigned long flags;
15442- struct thread_info *curctx;
15443 union irq_ctx *irqctx;
15444 u32 *isp;
15445
15446@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15447 local_irq_save(flags);
15448
15449 if (local_softirq_pending()) {
15450- curctx = current_thread_info();
15451 irqctx = __get_cpu_var(softirq_ctx);
15452- irqctx->tinfo.task = curctx->task;
15453- irqctx->tinfo.previous_esp = current_stack_pointer;
15454+ irqctx->previous_esp = current_stack_pointer;
15455
15456 /* build the stack frame on the softirq stack */
15457- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15458+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15459+
15460+#ifdef CONFIG_PAX_MEMORY_UDEREF
15461+ __set_fs(MAKE_MM_SEG(0));
15462+#endif
15463
15464 call_on_stack(__do_softirq, isp);
15465+
15466+#ifdef CONFIG_PAX_MEMORY_UDEREF
15467+ __set_fs(current_thread_info()->addr_limit);
15468+#endif
15469+
15470 /*
15471 * Shouldnt happen, we returned above if in_interrupt():
15472 */
15473diff -urNp linux-2.6.32.45/arch/x86/kernel/irq.c linux-2.6.32.45/arch/x86/kernel/irq.c
15474--- linux-2.6.32.45/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15475+++ linux-2.6.32.45/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15476@@ -15,7 +15,7 @@
15477 #include <asm/mce.h>
15478 #include <asm/hw_irq.h>
15479
15480-atomic_t irq_err_count;
15481+atomic_unchecked_t irq_err_count;
15482
15483 /* Function pointer for generic interrupt vector handling */
15484 void (*generic_interrupt_extension)(void) = NULL;
15485@@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15486 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15487 seq_printf(p, " Machine check polls\n");
15488 #endif
15489- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15490+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15491 #if defined(CONFIG_X86_IO_APIC)
15492- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15493+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15494 #endif
15495 return 0;
15496 }
15497@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15498
15499 u64 arch_irq_stat(void)
15500 {
15501- u64 sum = atomic_read(&irq_err_count);
15502+ u64 sum = atomic_read_unchecked(&irq_err_count);
15503
15504 #ifdef CONFIG_X86_IO_APIC
15505- sum += atomic_read(&irq_mis_count);
15506+ sum += atomic_read_unchecked(&irq_mis_count);
15507 #endif
15508 return sum;
15509 }
15510diff -urNp linux-2.6.32.45/arch/x86/kernel/kgdb.c linux-2.6.32.45/arch/x86/kernel/kgdb.c
15511--- linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15512+++ linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15513@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15514
15515 /* clear the trace bit */
15516 linux_regs->flags &= ~X86_EFLAGS_TF;
15517- atomic_set(&kgdb_cpu_doing_single_step, -1);
15518+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15519
15520 /* set the trace bit if we're stepping */
15521 if (remcomInBuffer[0] == 's') {
15522 linux_regs->flags |= X86_EFLAGS_TF;
15523 kgdb_single_step = 1;
15524- atomic_set(&kgdb_cpu_doing_single_step,
15525+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15526 raw_smp_processor_id());
15527 }
15528
15529@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15530 break;
15531
15532 case DIE_DEBUG:
15533- if (atomic_read(&kgdb_cpu_doing_single_step) ==
15534+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15535 raw_smp_processor_id()) {
15536 if (user_mode(regs))
15537 return single_step_cont(regs, args);
15538@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15539 return instruction_pointer(regs);
15540 }
15541
15542-struct kgdb_arch arch_kgdb_ops = {
15543+const struct kgdb_arch arch_kgdb_ops = {
15544 /* Breakpoint instruction: */
15545 .gdb_bpt_instr = { 0xcc },
15546 .flags = KGDB_HW_BREAKPOINT,
15547diff -urNp linux-2.6.32.45/arch/x86/kernel/kprobes.c linux-2.6.32.45/arch/x86/kernel/kprobes.c
15548--- linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15549+++ linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15550@@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15551 char op;
15552 s32 raddr;
15553 } __attribute__((packed)) * jop;
15554- jop = (struct __arch_jmp_op *)from;
15555+
15556+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15557+
15558+ pax_open_kernel();
15559 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15560 jop->op = RELATIVEJUMP_INSTRUCTION;
15561+ pax_close_kernel();
15562 }
15563
15564 /*
15565@@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15566 kprobe_opcode_t opcode;
15567 kprobe_opcode_t *orig_opcodes = opcodes;
15568
15569- if (search_exception_tables((unsigned long)opcodes))
15570+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15571 return 0; /* Page fault may occur on this address. */
15572
15573 retry:
15574@@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15575 disp = (u8 *) p->addr + *((s32 *) insn) -
15576 (u8 *) p->ainsn.insn;
15577 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15578+ pax_open_kernel();
15579 *(s32 *)insn = (s32) disp;
15580+ pax_close_kernel();
15581 }
15582 }
15583 #endif
15584@@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15585
15586 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15587 {
15588- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15589+ pax_open_kernel();
15590+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15591+ pax_close_kernel();
15592
15593 fix_riprel(p);
15594
15595- if (can_boost(p->addr))
15596+ if (can_boost(ktla_ktva(p->addr)))
15597 p->ainsn.boostable = 0;
15598 else
15599 p->ainsn.boostable = -1;
15600
15601- p->opcode = *p->addr;
15602+ p->opcode = *(ktla_ktva(p->addr));
15603 }
15604
15605 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15606@@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15607 if (p->opcode == BREAKPOINT_INSTRUCTION)
15608 regs->ip = (unsigned long)p->addr;
15609 else
15610- regs->ip = (unsigned long)p->ainsn.insn;
15611+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15612 }
15613
15614 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15615@@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15616 if (p->ainsn.boostable == 1 && !p->post_handler) {
15617 /* Boost up -- we can execute copied instructions directly */
15618 reset_current_kprobe();
15619- regs->ip = (unsigned long)p->ainsn.insn;
15620+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15621 preempt_enable_no_resched();
15622 return;
15623 }
15624@@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15625 struct kprobe_ctlblk *kcb;
15626
15627 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15628- if (*addr != BREAKPOINT_INSTRUCTION) {
15629+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15630 /*
15631 * The breakpoint instruction was removed right
15632 * after we hit it. Another cpu has removed
15633@@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15634 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15635 {
15636 unsigned long *tos = stack_addr(regs);
15637- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15638+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15639 unsigned long orig_ip = (unsigned long)p->addr;
15640 kprobe_opcode_t *insn = p->ainsn.insn;
15641
15642@@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15643 struct die_args *args = data;
15644 int ret = NOTIFY_DONE;
15645
15646- if (args->regs && user_mode_vm(args->regs))
15647+ if (args->regs && user_mode(args->regs))
15648 return ret;
15649
15650 switch (val) {
15651diff -urNp linux-2.6.32.45/arch/x86/kernel/kvm.c linux-2.6.32.45/arch/x86/kernel/kvm.c
15652--- linux-2.6.32.45/arch/x86/kernel/kvm.c 2011-03-27 14:31:47.000000000 -0400
15653+++ linux-2.6.32.45/arch/x86/kernel/kvm.c 2011-08-24 18:35:52.000000000 -0400
15654@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(vo
15655 pv_mmu_ops.set_pud = kvm_set_pud;
15656 #if PAGETABLE_LEVELS == 4
15657 pv_mmu_ops.set_pgd = kvm_set_pgd;
15658+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15659 #endif
15660 #endif
15661 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15662diff -urNp linux-2.6.32.45/arch/x86/kernel/ldt.c linux-2.6.32.45/arch/x86/kernel/ldt.c
15663--- linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15664+++ linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15665@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15666 if (reload) {
15667 #ifdef CONFIG_SMP
15668 preempt_disable();
15669- load_LDT(pc);
15670+ load_LDT_nolock(pc);
15671 if (!cpumask_equal(mm_cpumask(current->mm),
15672 cpumask_of(smp_processor_id())))
15673 smp_call_function(flush_ldt, current->mm, 1);
15674 preempt_enable();
15675 #else
15676- load_LDT(pc);
15677+ load_LDT_nolock(pc);
15678 #endif
15679 }
15680 if (oldsize) {
15681@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15682 return err;
15683
15684 for (i = 0; i < old->size; i++)
15685- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15686+ write_ldt_entry(new->ldt, i, old->ldt + i);
15687 return 0;
15688 }
15689
15690@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15691 retval = copy_ldt(&mm->context, &old_mm->context);
15692 mutex_unlock(&old_mm->context.lock);
15693 }
15694+
15695+ if (tsk == current) {
15696+ mm->context.vdso = 0;
15697+
15698+#ifdef CONFIG_X86_32
15699+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15700+ mm->context.user_cs_base = 0UL;
15701+ mm->context.user_cs_limit = ~0UL;
15702+
15703+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15704+ cpus_clear(mm->context.cpu_user_cs_mask);
15705+#endif
15706+
15707+#endif
15708+#endif
15709+
15710+ }
15711+
15712 return retval;
15713 }
15714
15715@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15716 }
15717 }
15718
15719+#ifdef CONFIG_PAX_SEGMEXEC
15720+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15721+ error = -EINVAL;
15722+ goto out_unlock;
15723+ }
15724+#endif
15725+
15726 fill_ldt(&ldt, &ldt_info);
15727 if (oldmode)
15728 ldt.avl = 0;
15729diff -urNp linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c
15730--- linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15731+++ linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15732@@ -26,7 +26,7 @@
15733 #include <asm/system.h>
15734 #include <asm/cacheflush.h>
15735
15736-static void set_idt(void *newidt, __u16 limit)
15737+static void set_idt(struct desc_struct *newidt, __u16 limit)
15738 {
15739 struct desc_ptr curidt;
15740
15741@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15742 }
15743
15744
15745-static void set_gdt(void *newgdt, __u16 limit)
15746+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15747 {
15748 struct desc_ptr curgdt;
15749
15750@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15751 }
15752
15753 control_page = page_address(image->control_code_page);
15754- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15755+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15756
15757 relocate_kernel_ptr = control_page;
15758 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15759diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_amd.c linux-2.6.32.45/arch/x86/kernel/microcode_amd.c
15760--- linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15761+++ linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15762@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15763 uci->mc = NULL;
15764 }
15765
15766-static struct microcode_ops microcode_amd_ops = {
15767+static const struct microcode_ops microcode_amd_ops = {
15768 .request_microcode_user = request_microcode_user,
15769 .request_microcode_fw = request_microcode_fw,
15770 .collect_cpu_info = collect_cpu_info_amd,
15771@@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15772 .microcode_fini_cpu = microcode_fini_cpu_amd,
15773 };
15774
15775-struct microcode_ops * __init init_amd_microcode(void)
15776+const struct microcode_ops * __init init_amd_microcode(void)
15777 {
15778 return &microcode_amd_ops;
15779 }
15780diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_core.c linux-2.6.32.45/arch/x86/kernel/microcode_core.c
15781--- linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15782+++ linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15783@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15784
15785 #define MICROCODE_VERSION "2.00"
15786
15787-static struct microcode_ops *microcode_ops;
15788+static const struct microcode_ops *microcode_ops;
15789
15790 /*
15791 * Synchronization.
15792diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_intel.c linux-2.6.32.45/arch/x86/kernel/microcode_intel.c
15793--- linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15794+++ linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15795@@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15796
15797 static int get_ucode_user(void *to, const void *from, size_t n)
15798 {
15799- return copy_from_user(to, from, n);
15800+ return copy_from_user(to, (__force const void __user *)from, n);
15801 }
15802
15803 static enum ucode_state
15804 request_microcode_user(int cpu, const void __user *buf, size_t size)
15805 {
15806- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15807+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15808 }
15809
15810 static void microcode_fini_cpu(int cpu)
15811@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15812 uci->mc = NULL;
15813 }
15814
15815-static struct microcode_ops microcode_intel_ops = {
15816+static const struct microcode_ops microcode_intel_ops = {
15817 .request_microcode_user = request_microcode_user,
15818 .request_microcode_fw = request_microcode_fw,
15819 .collect_cpu_info = collect_cpu_info,
15820@@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15821 .microcode_fini_cpu = microcode_fini_cpu,
15822 };
15823
15824-struct microcode_ops * __init init_intel_microcode(void)
15825+const struct microcode_ops * __init init_intel_microcode(void)
15826 {
15827 return &microcode_intel_ops;
15828 }
15829diff -urNp linux-2.6.32.45/arch/x86/kernel/module.c linux-2.6.32.45/arch/x86/kernel/module.c
15830--- linux-2.6.32.45/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15831+++ linux-2.6.32.45/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15832@@ -34,7 +34,7 @@
15833 #define DEBUGP(fmt...)
15834 #endif
15835
15836-void *module_alloc(unsigned long size)
15837+static void *__module_alloc(unsigned long size, pgprot_t prot)
15838 {
15839 struct vm_struct *area;
15840
15841@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15842 if (!area)
15843 return NULL;
15844
15845- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15846- PAGE_KERNEL_EXEC);
15847+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15848+}
15849+
15850+void *module_alloc(unsigned long size)
15851+{
15852+
15853+#ifdef CONFIG_PAX_KERNEXEC
15854+ return __module_alloc(size, PAGE_KERNEL);
15855+#else
15856+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15857+#endif
15858+
15859 }
15860
15861 /* Free memory returned from module_alloc */
15862@@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15863 vfree(module_region);
15864 }
15865
15866+#ifdef CONFIG_PAX_KERNEXEC
15867+#ifdef CONFIG_X86_32
15868+void *module_alloc_exec(unsigned long size)
15869+{
15870+ struct vm_struct *area;
15871+
15872+ if (size == 0)
15873+ return NULL;
15874+
15875+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15876+ return area ? area->addr : NULL;
15877+}
15878+EXPORT_SYMBOL(module_alloc_exec);
15879+
15880+void module_free_exec(struct module *mod, void *module_region)
15881+{
15882+ vunmap(module_region);
15883+}
15884+EXPORT_SYMBOL(module_free_exec);
15885+#else
15886+void module_free_exec(struct module *mod, void *module_region)
15887+{
15888+ module_free(mod, module_region);
15889+}
15890+EXPORT_SYMBOL(module_free_exec);
15891+
15892+void *module_alloc_exec(unsigned long size)
15893+{
15894+ return __module_alloc(size, PAGE_KERNEL_RX);
15895+}
15896+EXPORT_SYMBOL(module_alloc_exec);
15897+#endif
15898+#endif
15899+
15900 /* We don't need anything special. */
15901 int module_frob_arch_sections(Elf_Ehdr *hdr,
15902 Elf_Shdr *sechdrs,
15903@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15904 unsigned int i;
15905 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15906 Elf32_Sym *sym;
15907- uint32_t *location;
15908+ uint32_t *plocation, location;
15909
15910 DEBUGP("Applying relocate section %u to %u\n", relsec,
15911 sechdrs[relsec].sh_info);
15912 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15913 /* This is where to make the change */
15914- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15915- + rel[i].r_offset;
15916+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15917+ location = (uint32_t)plocation;
15918+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15919+ plocation = ktla_ktva((void *)plocation);
15920 /* This is the symbol it is referring to. Note that all
15921 undefined symbols have been resolved. */
15922 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15923@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15924 switch (ELF32_R_TYPE(rel[i].r_info)) {
15925 case R_386_32:
15926 /* We add the value into the location given */
15927- *location += sym->st_value;
15928+ pax_open_kernel();
15929+ *plocation += sym->st_value;
15930+ pax_close_kernel();
15931 break;
15932 case R_386_PC32:
15933 			/* Add the value, subtract its position */
15934- *location += sym->st_value - (uint32_t)location;
15935+ pax_open_kernel();
15936+ *plocation += sym->st_value - location;
15937+ pax_close_kernel();
15938 break;
15939 default:
15940 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15941@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15942 case R_X86_64_NONE:
15943 break;
15944 case R_X86_64_64:
15945+ pax_open_kernel();
15946 *(u64 *)loc = val;
15947+ pax_close_kernel();
15948 break;
15949 case R_X86_64_32:
15950+ pax_open_kernel();
15951 *(u32 *)loc = val;
15952+ pax_close_kernel();
15953 if (val != *(u32 *)loc)
15954 goto overflow;
15955 break;
15956 case R_X86_64_32S:
15957+ pax_open_kernel();
15958 *(s32 *)loc = val;
15959+ pax_close_kernel();
15960 if ((s64)val != *(s32 *)loc)
15961 goto overflow;
15962 break;
15963 case R_X86_64_PC32:
15964 val -= (u64)loc;
15965+ pax_open_kernel();
15966 *(u32 *)loc = val;
15967+ pax_close_kernel();
15968+
15969 #if 0
15970 if ((s64)val != *(s32 *)loc)
15971 goto overflow;
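
The module.c hunks above split module_alloc() into a prot-parameterised __module_alloc() so that, under CONFIG_PAX_KERNEXEC, memory returned by module_alloc() stays PAGE_KERNEL (non-executable) and the executable image goes through module_alloc_exec() instead (PAGE_KERNEL_RX on 64-bit, a dedicated MODULES_EXEC_VADDR window on 32-bit). A minimal userspace sketch of the same W^X discipline, using mmap()/mprotect() rather than the kernel's vmalloc interfaces — the program and its tiny add routine are illustrative, not part of the patch:

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <sys/mman.h>

/* x86-64 machine code for: mov eax, edi; add eax, esi; ret */
static const uint8_t add_code[] = { 0x89, 0xf8, 0x01, 0xf0, 0xc3 };

int main(void)
{
	/* Stage 1: writable but not executable, like PAGE_KERNEL for module data. */
	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	memcpy(buf, add_code, sizeof(add_code));

	/* Stage 2: flip to read+execute, like PAGE_KERNEL_RX for module text.
	 * At no point is the region writable and executable at the same time. */
	if (mprotect(buf, 4096, PROT_READ | PROT_EXEC) != 0)
		return 1;

	int (*add)(int, int) = (int (*)(int, int))buf;
	printf("2 + 3 = %d\n", add(2, 3));
	return 0;
}
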
15972diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt.c linux-2.6.32.45/arch/x86/kernel/paravirt.c
15973--- linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15974+++ linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-08-23 20:24:19.000000000 -0400
15975@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15976 {
15977 return x;
15978 }
15979+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15980+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15981+#endif
15982
15983 void __init default_banner(void)
15984 {
15985@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15986 * corresponding structure. */
15987 static void *get_call_destination(u8 type)
15988 {
15989- struct paravirt_patch_template tmpl = {
15990+ const struct paravirt_patch_template tmpl = {
15991 .pv_init_ops = pv_init_ops,
15992 .pv_time_ops = pv_time_ops,
15993 .pv_cpu_ops = pv_cpu_ops,
15994@@ -133,6 +136,8 @@ static void *get_call_destination(u8 typ
15995 .pv_lock_ops = pv_lock_ops,
15996 #endif
15997 };
15998+
15999+ pax_track_stack();
16000 return *((void **)&tmpl + type);
16001 }
16002
16003@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
16004 if (opfunc == NULL)
16005 /* If there's no function, patch it with a ud2a (BUG) */
16006 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16007- else if (opfunc == _paravirt_nop)
16008+ else if (opfunc == (void *)_paravirt_nop)
16009 /* If the operation is a nop, then nop the callsite */
16010 ret = paravirt_patch_nop();
16011
16012 /* identity functions just return their single argument */
16013- else if (opfunc == _paravirt_ident_32)
16014+ else if (opfunc == (void *)_paravirt_ident_32)
16015 ret = paravirt_patch_ident_32(insnbuf, len);
16016- else if (opfunc == _paravirt_ident_64)
16017+ else if (opfunc == (void *)_paravirt_ident_64)
16018+ ret = paravirt_patch_ident_64(insnbuf, len);
16019+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16020+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16021 ret = paravirt_patch_ident_64(insnbuf, len);
16022+#endif
16023
16024 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16025 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16026@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
16027 if (insn_len > len || start == NULL)
16028 insn_len = len;
16029 else
16030- memcpy(insnbuf, start, insn_len);
16031+ memcpy(insnbuf, ktla_ktva(start), insn_len);
16032
16033 return insn_len;
16034 }
16035@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
16036 preempt_enable();
16037 }
16038
16039-struct pv_info pv_info = {
16040+struct pv_info pv_info __read_only = {
16041 .name = "bare hardware",
16042 .paravirt_enabled = 0,
16043 .kernel_rpl = 0,
16044 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
16045 };
16046
16047-struct pv_init_ops pv_init_ops = {
16048+struct pv_init_ops pv_init_ops __read_only = {
16049 .patch = native_patch,
16050 };
16051
16052-struct pv_time_ops pv_time_ops = {
16053+struct pv_time_ops pv_time_ops __read_only = {
16054 .sched_clock = native_sched_clock,
16055 };
16056
16057-struct pv_irq_ops pv_irq_ops = {
16058+struct pv_irq_ops pv_irq_ops __read_only = {
16059 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16060 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16061 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16062@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
16063 #endif
16064 };
16065
16066-struct pv_cpu_ops pv_cpu_ops = {
16067+struct pv_cpu_ops pv_cpu_ops __read_only = {
16068 .cpuid = native_cpuid,
16069 .get_debugreg = native_get_debugreg,
16070 .set_debugreg = native_set_debugreg,
16071@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16072 .end_context_switch = paravirt_nop,
16073 };
16074
16075-struct pv_apic_ops pv_apic_ops = {
16076+struct pv_apic_ops pv_apic_ops __read_only = {
16077 #ifdef CONFIG_X86_LOCAL_APIC
16078 .startup_ipi_hook = paravirt_nop,
16079 #endif
16080 };
16081
16082-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16083+#ifdef CONFIG_X86_32
16084+#ifdef CONFIG_X86_PAE
16085+/* 64-bit pagetable entries */
16086+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16087+#else
16088 /* 32-bit pagetable entries */
16089 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16090+#endif
16091 #else
16092 /* 64-bit pagetable entries */
16093 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16094 #endif
16095
16096-struct pv_mmu_ops pv_mmu_ops = {
16097+struct pv_mmu_ops pv_mmu_ops __read_only = {
16098
16099 .read_cr2 = native_read_cr2,
16100 .write_cr2 = native_write_cr2,
16101@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16102 .make_pud = PTE_IDENT,
16103
16104 .set_pgd = native_set_pgd,
16105+ .set_pgd_batched = native_set_pgd_batched,
16106 #endif
16107 #endif /* PAGETABLE_LEVELS >= 3 */
16108
16109@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16110 },
16111
16112 .set_fixmap = native_set_fixmap,
16113+
16114+#ifdef CONFIG_PAX_KERNEXEC
16115+ .pax_open_kernel = native_pax_open_kernel,
16116+ .pax_close_kernel = native_pax_close_kernel,
16117+#endif
16118+
16119 };
16120
16121 EXPORT_SYMBOL_GPL(pv_time_ops);
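
The paravirt hunks mark pv_info, pv_init_ops, pv_time_ops, pv_irq_ops, pv_cpu_ops, pv_apic_ops and pv_mmu_ops __read_only, so these function-pointer tables land in a non-writable section and cannot be retargeted by a stray or malicious runtime write. The same hardening idea in plain C — qualifying an ops table const so it is placed in .rodata; the struct and functions below are invented for illustration, not kernel code:

#include <stdio.h>

struct io_ops {
	int (*open)(const char *path);
	int (*close)(int fd);
};

static int fake_open(const char *path)  { printf("open %s\n", path); return 3; }
static int fake_close(int fd)           { printf("close %d\n", fd);  return 0; }

/* const puts the table in .rodata; writing to ops.open at run time now
 * faults instead of silently redirecting every caller through it. */
static const struct io_ops ops = {
	.open  = fake_open,
	.close = fake_close,
};

int main(void)
{
	int fd = ops.open("/tmp/demo");
	return ops.close(fd);
}

In the kernel, __read_only plays the role of const here, with pax_open_kernel()/pax_close_kernel() (used in the relocation and smpboot hunks) providing an explicit, short-lived window for the rare legitimate update.
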
16122diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c
16123--- linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
16124+++ linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
16125@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
16126 __raw_spin_lock(lock);
16127 }
16128
16129-struct pv_lock_ops pv_lock_ops = {
16130+struct pv_lock_ops pv_lock_ops __read_only = {
16131 #ifdef CONFIG_SMP
16132 .spin_is_locked = __ticket_spin_is_locked,
16133 .spin_is_contended = __ticket_spin_is_contended,
16134diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c
16135--- linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
16136+++ linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
16137@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
16138 free_pages((unsigned long)vaddr, get_order(size));
16139 }
16140
16141-static struct dma_map_ops calgary_dma_ops = {
16142+static const struct dma_map_ops calgary_dma_ops = {
16143 .alloc_coherent = calgary_alloc_coherent,
16144 .free_coherent = calgary_free_coherent,
16145 .map_sg = calgary_map_sg,
16146diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-dma.c linux-2.6.32.45/arch/x86/kernel/pci-dma.c
16147--- linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
16148+++ linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
16149@@ -14,7 +14,7 @@
16150
16151 static int forbid_dac __read_mostly;
16152
16153-struct dma_map_ops *dma_ops;
16154+const struct dma_map_ops *dma_ops;
16155 EXPORT_SYMBOL(dma_ops);
16156
16157 static int iommu_sac_force __read_mostly;
16158@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
16159
16160 int dma_supported(struct device *dev, u64 mask)
16161 {
16162- struct dma_map_ops *ops = get_dma_ops(dev);
16163+ const struct dma_map_ops *ops = get_dma_ops(dev);
16164
16165 #ifdef CONFIG_PCI
16166 if (mask > 0xffffffff && forbid_dac > 0) {
16167diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c
16168--- linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
16169+++ linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
16170@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
16171 return -1;
16172 }
16173
16174-static struct dma_map_ops gart_dma_ops = {
16175+static const struct dma_map_ops gart_dma_ops = {
16176 .map_sg = gart_map_sg,
16177 .unmap_sg = gart_unmap_sg,
16178 .map_page = gart_map_page,
16179diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-nommu.c linux-2.6.32.45/arch/x86/kernel/pci-nommu.c
16180--- linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
16181+++ linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
16182@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
16183 flush_write_buffers();
16184 }
16185
16186-struct dma_map_ops nommu_dma_ops = {
16187+const struct dma_map_ops nommu_dma_ops = {
16188 .alloc_coherent = dma_generic_alloc_coherent,
16189 .free_coherent = nommu_free_coherent,
16190 .map_sg = nommu_map_sg,
16191diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c
16192--- linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
16193+++ linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
16194@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
16195 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
16196 }
16197
16198-static struct dma_map_ops swiotlb_dma_ops = {
16199+static const struct dma_map_ops swiotlb_dma_ops = {
16200 .mapping_error = swiotlb_dma_mapping_error,
16201 .alloc_coherent = x86_swiotlb_alloc_coherent,
16202 .free_coherent = swiotlb_free_coherent,
16203diff -urNp linux-2.6.32.45/arch/x86/kernel/process_32.c linux-2.6.32.45/arch/x86/kernel/process_32.c
16204--- linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
16205+++ linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
16206@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
16207 unsigned long thread_saved_pc(struct task_struct *tsk)
16208 {
16209 return ((unsigned long *)tsk->thread.sp)[3];
16210+//XXX return tsk->thread.eip;
16211 }
16212
16213 #ifndef CONFIG_SMP
16214@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
16215 unsigned short ss, gs;
16216 const char *board;
16217
16218- if (user_mode_vm(regs)) {
16219+ if (user_mode(regs)) {
16220 sp = regs->sp;
16221 ss = regs->ss & 0xffff;
16222- gs = get_user_gs(regs);
16223 } else {
16224 sp = (unsigned long) (&regs->sp);
16225 savesegment(ss, ss);
16226- savesegment(gs, gs);
16227 }
16228+ gs = get_user_gs(regs);
16229
16230 printk("\n");
16231
16232@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
16233 regs.bx = (unsigned long) fn;
16234 regs.dx = (unsigned long) arg;
16235
16236- regs.ds = __USER_DS;
16237- regs.es = __USER_DS;
16238+ regs.ds = __KERNEL_DS;
16239+ regs.es = __KERNEL_DS;
16240 regs.fs = __KERNEL_PERCPU;
16241- regs.gs = __KERNEL_STACK_CANARY;
16242+ savesegment(gs, regs.gs);
16243 regs.orig_ax = -1;
16244 regs.ip = (unsigned long) kernel_thread_helper;
16245 regs.cs = __KERNEL_CS | get_kernel_rpl();
16246@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
16247 struct task_struct *tsk;
16248 int err;
16249
16250- childregs = task_pt_regs(p);
16251+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16252 *childregs = *regs;
16253 childregs->ax = 0;
16254 childregs->sp = sp;
16255
16256 p->thread.sp = (unsigned long) childregs;
16257 p->thread.sp0 = (unsigned long) (childregs+1);
16258+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16259
16260 p->thread.ip = (unsigned long) ret_from_fork;
16261
16262@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
16263 struct thread_struct *prev = &prev_p->thread,
16264 *next = &next_p->thread;
16265 int cpu = smp_processor_id();
16266- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16267+ struct tss_struct *tss = init_tss + cpu;
16268 bool preload_fpu;
16269
16270 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16271@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
16272 */
16273 lazy_save_gs(prev->gs);
16274
16275+#ifdef CONFIG_PAX_MEMORY_UDEREF
16276+ __set_fs(task_thread_info(next_p)->addr_limit);
16277+#endif
16278+
16279 /*
16280 * Load the per-thread Thread-Local Storage descriptor.
16281 */
16282@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
16283 */
16284 arch_end_context_switch(next_p);
16285
16286+ percpu_write(current_task, next_p);
16287+ percpu_write(current_tinfo, &next_p->tinfo);
16288+
16289 if (preload_fpu)
16290 __math_state_restore();
16291
16292@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
16293 if (prev->gs | next->gs)
16294 lazy_load_gs(next->gs);
16295
16296- percpu_write(current_task, next_p);
16297-
16298 return prev_p;
16299 }
16300
16301@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
16302 } while (count++ < 16);
16303 return 0;
16304 }
16305-
16306diff -urNp linux-2.6.32.45/arch/x86/kernel/process_64.c linux-2.6.32.45/arch/x86/kernel/process_64.c
16307--- linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
16308+++ linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
16309@@ -91,7 +91,7 @@ static void __exit_idle(void)
16310 void exit_idle(void)
16311 {
16312 /* idle loop has pid 0 */
16313- if (current->pid)
16314+ if (task_pid_nr(current))
16315 return;
16316 __exit_idle();
16317 }
16318@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
16319 if (!board)
16320 board = "";
16321 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
16322- current->pid, current->comm, print_tainted(),
16323+ task_pid_nr(current), current->comm, print_tainted(),
16324 init_utsname()->release,
16325 (int)strcspn(init_utsname()->version, " "),
16326 init_utsname()->version, board);
16327@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
16328 struct pt_regs *childregs;
16329 struct task_struct *me = current;
16330
16331- childregs = ((struct pt_regs *)
16332- (THREAD_SIZE + task_stack_page(p))) - 1;
16333+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16334 *childregs = *regs;
16335
16336 childregs->ax = 0;
16337@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
16338 p->thread.sp = (unsigned long) childregs;
16339 p->thread.sp0 = (unsigned long) (childregs+1);
16340 p->thread.usersp = me->thread.usersp;
16341+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16342
16343 set_tsk_thread_flag(p, TIF_FORK);
16344
16345@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
16346 struct thread_struct *prev = &prev_p->thread;
16347 struct thread_struct *next = &next_p->thread;
16348 int cpu = smp_processor_id();
16349- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16350+ struct tss_struct *tss = init_tss + cpu;
16351 unsigned fsindex, gsindex;
16352 bool preload_fpu;
16353
16354@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
16355 prev->usersp = percpu_read(old_rsp);
16356 percpu_write(old_rsp, next->usersp);
16357 percpu_write(current_task, next_p);
16358+ percpu_write(current_tinfo, &next_p->tinfo);
16359
16360- percpu_write(kernel_stack,
16361- (unsigned long)task_stack_page(next_p) +
16362- THREAD_SIZE - KERNEL_STACK_OFFSET);
16363+ percpu_write(kernel_stack, next->sp0);
16364
16365 /*
16366 * Now maybe reload the debug registers and handle I/O bitmaps
16367@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
16368 if (!p || p == current || p->state == TASK_RUNNING)
16369 return 0;
16370 stack = (unsigned long)task_stack_page(p);
16371- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16372+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16373 return 0;
16374 fp = *(u64 *)(p->thread.sp);
16375 do {
16376- if (fp < (unsigned long)stack ||
16377- fp >= (unsigned long)stack+THREAD_SIZE)
16378+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16379 return 0;
16380 ip = *(u64 *)(fp+8);
16381 if (!in_sched_functions(ip))
16382diff -urNp linux-2.6.32.45/arch/x86/kernel/process.c linux-2.6.32.45/arch/x86/kernel/process.c
16383--- linux-2.6.32.45/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
16384+++ linux-2.6.32.45/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
16385@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
16386
16387 void free_thread_info(struct thread_info *ti)
16388 {
16389- free_thread_xstate(ti->task);
16390 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16391 }
16392
16393+static struct kmem_cache *task_struct_cachep;
16394+
16395 void arch_task_cache_init(void)
16396 {
16397- task_xstate_cachep =
16398- kmem_cache_create("task_xstate", xstate_size,
16399+ /* create a slab on which task_structs can be allocated */
16400+ task_struct_cachep =
16401+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16402+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16403+
16404+ task_xstate_cachep =
16405+ kmem_cache_create("task_xstate", xstate_size,
16406 __alignof__(union thread_xstate),
16407- SLAB_PANIC | SLAB_NOTRACK, NULL);
16408+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16409+}
16410+
16411+struct task_struct *alloc_task_struct(void)
16412+{
16413+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16414+}
16415+
16416+void free_task_struct(struct task_struct *task)
16417+{
16418+ free_thread_xstate(task);
16419+ kmem_cache_free(task_struct_cachep, task);
16420 }
16421
16422 /*
16423@@ -73,7 +90,7 @@ void exit_thread(void)
16424 unsigned long *bp = t->io_bitmap_ptr;
16425
16426 if (bp) {
16427- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16428+ struct tss_struct *tss = init_tss + get_cpu();
16429
16430 t->io_bitmap_ptr = NULL;
16431 clear_thread_flag(TIF_IO_BITMAP);
16432@@ -93,6 +110,9 @@ void flush_thread(void)
16433
16434 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16435
16436+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16437+ loadsegment(gs, 0);
16438+#endif
16439 tsk->thread.debugreg0 = 0;
16440 tsk->thread.debugreg1 = 0;
16441 tsk->thread.debugreg2 = 0;
16442@@ -307,7 +327,7 @@ void default_idle(void)
16443 EXPORT_SYMBOL(default_idle);
16444 #endif
16445
16446-void stop_this_cpu(void *dummy)
16447+__noreturn void stop_this_cpu(void *dummy)
16448 {
16449 local_irq_disable();
16450 /*
16451@@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16452 }
16453 early_param("idle", idle_setup);
16454
16455-unsigned long arch_align_stack(unsigned long sp)
16456+#ifdef CONFIG_PAX_RANDKSTACK
16457+asmlinkage void pax_randomize_kstack(void)
16458 {
16459- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16460- sp -= get_random_int() % 8192;
16461- return sp & ~0xf;
16462-}
16463+ struct thread_struct *thread = &current->thread;
16464+ unsigned long time;
16465
16466-unsigned long arch_randomize_brk(struct mm_struct *mm)
16467-{
16468- unsigned long range_end = mm->brk + 0x02000000;
16469- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16470+ if (!randomize_va_space)
16471+ return;
16472+
16473+ rdtscl(time);
16474+
16475+ /* P4 seems to return a 0 LSB, ignore it */
16476+#ifdef CONFIG_MPENTIUM4
16477+ time &= 0x3EUL;
16478+ time <<= 2;
16479+#elif defined(CONFIG_X86_64)
16480+ time &= 0xFUL;
16481+ time <<= 4;
16482+#else
16483+ time &= 0x1FUL;
16484+ time <<= 3;
16485+#endif
16486+
16487+ thread->sp0 ^= time;
16488+ load_sp0(init_tss + smp_processor_id(), thread);
16489+
16490+#ifdef CONFIG_X86_64
16491+ percpu_write(kernel_stack, thread->sp0);
16492+#endif
16493 }
16494+#endif
16495
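
Under CONFIG_PAX_RANDKSTACK the process.c hunk replaces the arch_align_stack()/arch_randomize_brk() definitions in this file with pax_randomize_kstack(), which XORs a few low TSC bits into thread->sp0 so the kernel stack top varies over time (the function is asmlinkage so it can be called from the entry assembly). A userspace sketch of just the entropy-extraction step, reusing the masks from the hunk; the P4 special case is omitted and the function name is illustrative:

#include <stdio.h>

/* Mirror the masking in pax_randomize_kstack(): keep a handful of TSC bits
 * and shift them so the resulting offset stays suitably aligned. */
static unsigned long kstack_offset(void)
{
	unsigned int lo, hi;
	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
#if defined(__x86_64__)
	return (unsigned long)(lo & 0xFUL) << 4;   /* 16 slots, 16-byte aligned */
#else
	return (unsigned long)(lo & 0x1FUL) << 3;  /* 32 slots, 8-byte aligned  */
#endif
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("offset %lu\n", kstack_offset());
	return 0;
}
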
16496diff -urNp linux-2.6.32.45/arch/x86/kernel/ptrace.c linux-2.6.32.45/arch/x86/kernel/ptrace.c
16497--- linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16498+++ linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16499@@ -925,7 +925,7 @@ static const struct user_regset_view use
16500 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16501 {
16502 int ret;
16503- unsigned long __user *datap = (unsigned long __user *)data;
16504+ unsigned long __user *datap = (__force unsigned long __user *)data;
16505
16506 switch (request) {
16507 /* read the word at location addr in the USER area. */
16508@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16509 if (addr < 0)
16510 return -EIO;
16511 ret = do_get_thread_area(child, addr,
16512- (struct user_desc __user *) data);
16513+ (__force struct user_desc __user *) data);
16514 break;
16515
16516 case PTRACE_SET_THREAD_AREA:
16517 if (addr < 0)
16518 return -EIO;
16519 ret = do_set_thread_area(child, addr,
16520- (struct user_desc __user *) data, 0);
16521+ (__force struct user_desc __user *) data, 0);
16522 break;
16523 #endif
16524
16525@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16526 #ifdef CONFIG_X86_PTRACE_BTS
16527 case PTRACE_BTS_CONFIG:
16528 ret = ptrace_bts_config
16529- (child, data, (struct ptrace_bts_config __user *)addr);
16530+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16531 break;
16532
16533 case PTRACE_BTS_STATUS:
16534 ret = ptrace_bts_status
16535- (child, data, (struct ptrace_bts_config __user *)addr);
16536+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16537 break;
16538
16539 case PTRACE_BTS_SIZE:
16540@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16541
16542 case PTRACE_BTS_GET:
16543 ret = ptrace_bts_read_record
16544- (child, data, (struct bts_struct __user *) addr);
16545+ (child, data, (__force struct bts_struct __user *) addr);
16546 break;
16547
16548 case PTRACE_BTS_CLEAR:
16549@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16550
16551 case PTRACE_BTS_DRAIN:
16552 ret = ptrace_bts_drain
16553- (child, data, (struct bts_struct __user *) addr);
16554+ (child, data, (__force struct bts_struct __user *) addr);
16555 break;
16556 #endif /* CONFIG_X86_PTRACE_BTS */
16557
16558@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16559 info.si_code = si_code;
16560
16561 /* User-mode ip? */
16562- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16563+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16564
16565 /* Send us the fake SIGTRAP */
16566 force_sig_info(SIGTRAP, &info, tsk);
16567@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16568 * We must return the syscall number to actually look up in the table.
16569 * This can be -1L to skip running any syscall at all.
16570 */
16571-asmregparm long syscall_trace_enter(struct pt_regs *regs)
16572+long syscall_trace_enter(struct pt_regs *regs)
16573 {
16574 long ret = 0;
16575
16576@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16577 return ret ?: regs->orig_ax;
16578 }
16579
16580-asmregparm void syscall_trace_leave(struct pt_regs *regs)
16581+void syscall_trace_leave(struct pt_regs *regs)
16582 {
16583 if (unlikely(current->audit_context))
16584 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16585diff -urNp linux-2.6.32.45/arch/x86/kernel/reboot.c linux-2.6.32.45/arch/x86/kernel/reboot.c
16586--- linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:35:28.000000000 -0400
16587+++ linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:33:59.000000000 -0400
16588@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16589 EXPORT_SYMBOL(pm_power_off);
16590
16591 static const struct desc_ptr no_idt = {};
16592-static int reboot_mode;
16593+static unsigned short reboot_mode;
16594 enum reboot_type reboot_type = BOOT_KBD;
16595 int reboot_force;
16596
16597@@ -292,12 +292,12 @@ core_initcall(reboot_init);
16598 controller to pulse the CPU reset line, which is more thorough, but
16599 doesn't work with at least one type of 486 motherboard. It is easy
16600 to stop this code working; hence the copious comments. */
16601-static const unsigned long long
16602-real_mode_gdt_entries [3] =
16603+static struct desc_struct
16604+real_mode_gdt_entries [3] __read_only =
16605 {
16606- 0x0000000000000000ULL, /* Null descriptor */
16607- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16608- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16609+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16610+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16611+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16612 };
16613
16614 static const struct desc_ptr
16615@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16616 * specified by the code and length parameters.
16617  * We assume that length will always be less than 100!
16618 */
16619-void machine_real_restart(const unsigned char *code, int length)
16620+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16621 {
16622 local_irq_disable();
16623
16624@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16625 /* Remap the kernel at virtual address zero, as well as offset zero
16626 from the kernel segment. This assumes the kernel segment starts at
16627 virtual address PAGE_OFFSET. */
16628- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16629- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16630+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16631+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16632
16633 /*
16634 * Use `swapper_pg_dir' as our page directory.
16635@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16636 boot)". This seems like a fairly standard thing that gets set by
16637 REBOOT.COM programs, and the previous reset routine did this
16638 too. */
16639- *((unsigned short *)0x472) = reboot_mode;
16640+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16641
16642 /* For the switch to real mode, copy some code to low memory. It has
16643 to be in the first 64k because it is running in 16-bit mode, and it
16644 has to have the same physical and virtual address, because it turns
16645 off paging. Copy it near the end of the first page, out of the way
16646 of BIOS variables. */
16647- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16648- real_mode_switch, sizeof (real_mode_switch));
16649- memcpy((void *)(0x1000 - 100), code, length);
16650+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16651+ memcpy(__va(0x1000 - 100), code, length);
16652
16653 /* Set up the IDT for real mode. */
16654 load_idt(&real_mode_idt);
16655@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16656 __asm__ __volatile__ ("ljmp $0x0008,%0"
16657 :
16658 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16659+ do { } while (1);
16660 }
16661 #ifdef CONFIG_APM_MODULE
16662 EXPORT_SYMBOL(machine_real_restart);
16663@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_f
16664 {
16665 }
16666
16667-static void native_machine_emergency_restart(void)
16668+__noreturn static void native_machine_emergency_restart(void)
16669 {
16670 int i;
16671
16672@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
16673 #endif
16674 }
16675
16676-static void __machine_emergency_restart(int emergency)
16677+static __noreturn void __machine_emergency_restart(int emergency)
16678 {
16679 reboot_emergency = emergency;
16680 machine_ops.emergency_restart();
16681 }
16682
16683-static void native_machine_restart(char *__unused)
16684+static __noreturn void native_machine_restart(char *__unused)
16685 {
16686 printk("machine restart\n");
16687
16688@@ -674,7 +674,7 @@ static void native_machine_restart(char
16689 __machine_emergency_restart(0);
16690 }
16691
16692-static void native_machine_halt(void)
16693+static __noreturn void native_machine_halt(void)
16694 {
16695 /* stop other cpus and apics */
16696 machine_shutdown();
16697@@ -685,7 +685,7 @@ static void native_machine_halt(void)
16698 stop_this_cpu(NULL);
16699 }
16700
16701-static void native_machine_power_off(void)
16702+__noreturn static void native_machine_power_off(void)
16703 {
16704 if (pm_power_off) {
16705 if (!reboot_force)
16706@@ -694,6 +694,7 @@ static void native_machine_power_off(voi
16707 }
16708 /* a fallback in case there is no PM info available */
16709 tboot_shutdown(TB_SHUTDOWN_HALT);
16710+ do { } while (1);
16711 }
16712
16713 struct machine_ops machine_ops = {
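
The reboot.c hunk swaps the hand-written 64-bit descriptor constants for GDT_ENTRY_INIT(flags, base, limit), which packs the same fields, and the __read_only annotation keeps the real-mode GDT out of writable memory. A quick way to convince yourself the two encodings agree is to pack the fields by hand; pack_descriptor() below is a hypothetical helper that follows the standard x86 descriptor layout, not the kernel macro itself:

#include <stdint.h>
#include <stdio.h>

/* Pack an x86 segment descriptor: flags carries the access byte in its low
 * eight bits and the AVL/L/D-B/G bits in bits 12-15, as GDT_ENTRY_INIT() expects. */
static uint64_t pack_descriptor(uint16_t flags, uint32_t base, uint32_t limit)
{
	uint64_t d;

	d  = (uint64_t)(limit & 0x0000ffffu);        /* limit 15:0          */
	d |= (uint64_t)(base  & 0x00ffffffu) << 16;  /* base 23:0           */
	d |= (uint64_t)(flags & 0xf0ffu)     << 40;  /* access byte + G/D/L */
	d |= (uint64_t)(limit & 0x000f0000u) << 32;  /* limit 19:16         */
	d |= (uint64_t)(base  & 0xff000000u) << 32;  /* base 31:24          */
	return d;
}

int main(void)
{
	/* Matches the two literals removed by the hunk above. */
	printf("%#018llx\n", (unsigned long long)pack_descriptor(0x9b, 0x00000000, 0xffff));
	printf("%#018llx\n", (unsigned long long)pack_descriptor(0x93, 0x00000100, 0xffff));
	return 0;
}

Expected output is 0x00009b000000ffff and 0x000093000100ffff, i.e. the real-mode code and data descriptors the hunk rewrites.
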
16714diff -urNp linux-2.6.32.45/arch/x86/kernel/setup.c linux-2.6.32.45/arch/x86/kernel/setup.c
16715--- linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16716+++ linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16717@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16718
16719 if (!boot_params.hdr.root_flags)
16720 root_mountflags &= ~MS_RDONLY;
16721- init_mm.start_code = (unsigned long) _text;
16722- init_mm.end_code = (unsigned long) _etext;
16723+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16724+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16725 init_mm.end_data = (unsigned long) _edata;
16726 init_mm.brk = _brk_end;
16727
16728- code_resource.start = virt_to_phys(_text);
16729- code_resource.end = virt_to_phys(_etext)-1;
16730- data_resource.start = virt_to_phys(_etext);
16731+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16732+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16733+ data_resource.start = virt_to_phys(_sdata);
16734 data_resource.end = virt_to_phys(_edata)-1;
16735 bss_resource.start = virt_to_phys(&__bss_start);
16736 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16737diff -urNp linux-2.6.32.45/arch/x86/kernel/setup_percpu.c linux-2.6.32.45/arch/x86/kernel/setup_percpu.c
16738--- linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16739+++ linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16740@@ -25,19 +25,17 @@
16741 # define DBG(x...)
16742 #endif
16743
16744-DEFINE_PER_CPU(int, cpu_number);
16745+#ifdef CONFIG_SMP
16746+DEFINE_PER_CPU(unsigned int, cpu_number);
16747 EXPORT_PER_CPU_SYMBOL(cpu_number);
16748+#endif
16749
16750-#ifdef CONFIG_X86_64
16751 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16752-#else
16753-#define BOOT_PERCPU_OFFSET 0
16754-#endif
16755
16756 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16757 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16758
16759-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16760+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16761 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16762 };
16763 EXPORT_SYMBOL(__per_cpu_offset);
16764@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16765 {
16766 #ifdef CONFIG_X86_32
16767 struct desc_struct gdt;
16768+ unsigned long base = per_cpu_offset(cpu);
16769
16770- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16771- 0x2 | DESCTYPE_S, 0x8);
16772- gdt.s = 1;
16773+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16774+ 0x83 | DESCTYPE_S, 0xC);
16775 write_gdt_entry(get_cpu_gdt_table(cpu),
16776 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16777 #endif
16778@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16779 /* alrighty, percpu areas up and running */
16780 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16781 for_each_possible_cpu(cpu) {
16782+#ifdef CONFIG_CC_STACKPROTECTOR
16783+#ifdef CONFIG_X86_32
16784+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16785+#endif
16786+#endif
16787 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16788 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16789 per_cpu(cpu_number, cpu) = cpu;
16790@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16791 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16792 #endif
16793 #endif
16794+#ifdef CONFIG_CC_STACKPROTECTOR
16795+#ifdef CONFIG_X86_32
16796+ if (!cpu)
16797+ per_cpu(stack_canary.canary, cpu) = canary;
16798+#endif
16799+#endif
16800 /*
16801 * Up to this point, the boot CPU has been using .data.init
16802 * area. Reload any changed state for the boot CPU.
16803diff -urNp linux-2.6.32.45/arch/x86/kernel/signal.c linux-2.6.32.45/arch/x86/kernel/signal.c
16804--- linux-2.6.32.45/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16805+++ linux-2.6.32.45/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16806@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16807 * Align the stack pointer according to the i386 ABI,
16808 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16809 */
16810- sp = ((sp + 4) & -16ul) - 4;
16811+ sp = ((sp - 12) & -16ul) - 4;
16812 #else /* !CONFIG_X86_32 */
16813 sp = round_down(sp, 16) - 8;
16814 #endif
16815@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16816 * Return an always-bogus address instead so we will die with SIGSEGV.
16817 */
16818 if (onsigstack && !likely(on_sig_stack(sp)))
16819- return (void __user *)-1L;
16820+ return (__force void __user *)-1L;
16821
16822 /* save i387 state */
16823 if (used_math() && save_i387_xstate(*fpstate) < 0)
16824- return (void __user *)-1L;
16825+ return (__force void __user *)-1L;
16826
16827 return (void __user *)sp;
16828 }
16829@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16830 }
16831
16832 if (current->mm->context.vdso)
16833- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16834+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16835 else
16836- restorer = &frame->retcode;
16837+ restorer = (void __user *)&frame->retcode;
16838 if (ka->sa.sa_flags & SA_RESTORER)
16839 restorer = ka->sa.sa_restorer;
16840
16841@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16842 * reasons and because gdb uses it as a signature to notice
16843 * signal handler stack frames.
16844 */
16845- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16846+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16847
16848 if (err)
16849 return -EFAULT;
16850@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16851 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16852
16853 /* Set up to return from userspace. */
16854- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16855+ if (current->mm->context.vdso)
16856+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16857+ else
16858+ restorer = (void __user *)&frame->retcode;
16859 if (ka->sa.sa_flags & SA_RESTORER)
16860 restorer = ka->sa.sa_restorer;
16861 put_user_ex(restorer, &frame->pretcode);
16862@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16863 * reasons and because gdb uses it as a signature to notice
16864 * signal handler stack frames.
16865 */
16866- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16867+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16868 } put_user_catch(err);
16869
16870 if (err)
16871@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16872 int signr;
16873 sigset_t *oldset;
16874
16875+ pax_track_stack();
16876+
16877 /*
16878 * We want the common case to go fast, which is why we may in certain
16879 * cases get here from kernel mode. Just return without doing anything
16880@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16881 * X86_32: vm86 regs switched out by assembly code before reaching
16882 * here, so testing against kernel CS suffices.
16883 */
16884- if (!user_mode(regs))
16885+ if (!user_mode_novm(regs))
16886 return;
16887
16888 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16889diff -urNp linux-2.6.32.45/arch/x86/kernel/smpboot.c linux-2.6.32.45/arch/x86/kernel/smpboot.c
16890--- linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16891+++ linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16892@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16893 */
16894 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16895
16896-void cpu_hotplug_driver_lock()
16897+void cpu_hotplug_driver_lock(void)
16898 {
16899- mutex_lock(&x86_cpu_hotplug_driver_mutex);
16900+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
16901 }
16902
16903-void cpu_hotplug_driver_unlock()
16904+void cpu_hotplug_driver_unlock(void)
16905 {
16906- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16907+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16908 }
16909
16910 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16911@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16912 * target processor state.
16913 */
16914 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16915- (unsigned long)stack_start.sp);
16916+ stack_start);
16917
16918 /*
16919 * Run STARTUP IPI loop.
16920@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16921 set_idle_for_cpu(cpu, c_idle.idle);
16922 do_rest:
16923 per_cpu(current_task, cpu) = c_idle.idle;
16924+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16925 #ifdef CONFIG_X86_32
16926 /* Stack for startup_32 can be just as for start_secondary onwards */
16927 irq_ctx_init(cpu);
16928@@ -750,13 +751,15 @@ do_rest:
16929 #else
16930 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16931 initial_gs = per_cpu_offset(cpu);
16932- per_cpu(kernel_stack, cpu) =
16933- (unsigned long)task_stack_page(c_idle.idle) -
16934- KERNEL_STACK_OFFSET + THREAD_SIZE;
16935+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16936 #endif
16937+
16938+ pax_open_kernel();
16939 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16940+ pax_close_kernel();
16941+
16942 initial_code = (unsigned long)start_secondary;
16943- stack_start.sp = (void *) c_idle.idle->thread.sp;
16944+ stack_start = c_idle.idle->thread.sp;
16945
16946 /* start_ip had better be page-aligned! */
16947 start_ip = setup_trampoline();
16948@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16949
16950 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16951
16952+#ifdef CONFIG_PAX_PER_CPU_PGD
16953+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16954+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16955+ KERNEL_PGD_PTRS);
16956+#endif
16957+
16958 err = do_boot_cpu(apicid, cpu);
16959
16960 if (err) {
16961diff -urNp linux-2.6.32.45/arch/x86/kernel/step.c linux-2.6.32.45/arch/x86/kernel/step.c
16962--- linux-2.6.32.45/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16963+++ linux-2.6.32.45/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16964@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16965 struct desc_struct *desc;
16966 unsigned long base;
16967
16968- seg &= ~7UL;
16969+ seg >>= 3;
16970
16971 mutex_lock(&child->mm->context.lock);
16972- if (unlikely((seg >> 3) >= child->mm->context.size))
16973+ if (unlikely(seg >= child->mm->context.size))
16974 addr = -1L; /* bogus selector, access would fault */
16975 else {
16976 desc = child->mm->context.ldt + seg;
16977@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16978 addr += base;
16979 }
16980 mutex_unlock(&child->mm->context.lock);
16981- }
16982+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16983+ addr = ktla_ktva(addr);
16984
16985 return addr;
16986 }
16987@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16988 unsigned char opcode[15];
16989 unsigned long addr = convert_ip_to_linear(child, regs);
16990
16991+ if (addr == -EINVAL)
16992+ return 0;
16993+
16994 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16995 for (i = 0; i < copied; i++) {
16996 switch (opcode[i]) {
16997@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16998
16999 #ifdef CONFIG_X86_64
17000 case 0x40 ... 0x4f:
17001- if (regs->cs != __USER_CS)
17002+ if ((regs->cs & 0xffff) != __USER_CS)
17003 /* 32-bit mode: register increment */
17004 return 0;
17005 /* 64-bit mode: REX prefix */
17006diff -urNp linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S
17007--- linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
17008+++ linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
17009@@ -1,3 +1,4 @@
17010+.section .rodata,"a",@progbits
17011 ENTRY(sys_call_table)
17012 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17013 .long sys_exit
17014diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c
17015--- linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
17016+++ linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
17017@@ -24,6 +24,21 @@
17018
17019 #include <asm/syscalls.h>
17020
17021+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17022+{
17023+ unsigned long pax_task_size = TASK_SIZE;
17024+
17025+#ifdef CONFIG_PAX_SEGMEXEC
17026+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17027+ pax_task_size = SEGMEXEC_TASK_SIZE;
17028+#endif
17029+
17030+ if (len > pax_task_size || addr > pax_task_size - len)
17031+ return -EINVAL;
17032+
17033+ return 0;
17034+}
17035+
17036 /*
17037 * Perform the select(nd, in, out, ex, tv) and mmap() system
17038 * calls. Linux/i386 didn't use to be able to handle more than
17039@@ -58,6 +73,212 @@ out:
17040 return err;
17041 }
17042
17043+unsigned long
17044+arch_get_unmapped_area(struct file *filp, unsigned long addr,
17045+ unsigned long len, unsigned long pgoff, unsigned long flags)
17046+{
17047+ struct mm_struct *mm = current->mm;
17048+ struct vm_area_struct *vma;
17049+ unsigned long start_addr, pax_task_size = TASK_SIZE;
17050+
17051+#ifdef CONFIG_PAX_SEGMEXEC
17052+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17053+ pax_task_size = SEGMEXEC_TASK_SIZE;
17054+#endif
17055+
17056+ pax_task_size -= PAGE_SIZE;
17057+
17058+ if (len > pax_task_size)
17059+ return -ENOMEM;
17060+
17061+ if (flags & MAP_FIXED)
17062+ return addr;
17063+
17064+#ifdef CONFIG_PAX_RANDMMAP
17065+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17066+#endif
17067+
17068+ if (addr) {
17069+ addr = PAGE_ALIGN(addr);
17070+ if (pax_task_size - len >= addr) {
17071+ vma = find_vma(mm, addr);
17072+ if (check_heap_stack_gap(vma, addr, len))
17073+ return addr;
17074+ }
17075+ }
17076+ if (len > mm->cached_hole_size) {
17077+ start_addr = addr = mm->free_area_cache;
17078+ } else {
17079+ start_addr = addr = mm->mmap_base;
17080+ mm->cached_hole_size = 0;
17081+ }
17082+
17083+#ifdef CONFIG_PAX_PAGEEXEC
17084+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17085+ start_addr = 0x00110000UL;
17086+
17087+#ifdef CONFIG_PAX_RANDMMAP
17088+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17089+ start_addr += mm->delta_mmap & 0x03FFF000UL;
17090+#endif
17091+
17092+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17093+ start_addr = addr = mm->mmap_base;
17094+ else
17095+ addr = start_addr;
17096+ }
17097+#endif
17098+
17099+full_search:
17100+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17101+ /* At this point: (!vma || addr < vma->vm_end). */
17102+ if (pax_task_size - len < addr) {
17103+ /*
17104+ * Start a new search - just in case we missed
17105+ * some holes.
17106+ */
17107+ if (start_addr != mm->mmap_base) {
17108+ start_addr = addr = mm->mmap_base;
17109+ mm->cached_hole_size = 0;
17110+ goto full_search;
17111+ }
17112+ return -ENOMEM;
17113+ }
17114+ if (check_heap_stack_gap(vma, addr, len))
17115+ break;
17116+ if (addr + mm->cached_hole_size < vma->vm_start)
17117+ mm->cached_hole_size = vma->vm_start - addr;
17118+ addr = vma->vm_end;
17119+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
17120+ start_addr = addr = mm->mmap_base;
17121+ mm->cached_hole_size = 0;
17122+ goto full_search;
17123+ }
17124+ }
17125+
17126+ /*
17127+ * Remember the place where we stopped the search:
17128+ */
17129+ mm->free_area_cache = addr + len;
17130+ return addr;
17131+}
17132+
17133+unsigned long
17134+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17135+ const unsigned long len, const unsigned long pgoff,
17136+ const unsigned long flags)
17137+{
17138+ struct vm_area_struct *vma;
17139+ struct mm_struct *mm = current->mm;
17140+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17141+
17142+#ifdef CONFIG_PAX_SEGMEXEC
17143+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17144+ pax_task_size = SEGMEXEC_TASK_SIZE;
17145+#endif
17146+
17147+ pax_task_size -= PAGE_SIZE;
17148+
17149+ /* requested length too big for entire address space */
17150+ if (len > pax_task_size)
17151+ return -ENOMEM;
17152+
17153+ if (flags & MAP_FIXED)
17154+ return addr;
17155+
17156+#ifdef CONFIG_PAX_PAGEEXEC
17157+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17158+ goto bottomup;
17159+#endif
17160+
17161+#ifdef CONFIG_PAX_RANDMMAP
17162+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17163+#endif
17164+
17165+ /* requesting a specific address */
17166+ if (addr) {
17167+ addr = PAGE_ALIGN(addr);
17168+ if (pax_task_size - len >= addr) {
17169+ vma = find_vma(mm, addr);
17170+ if (check_heap_stack_gap(vma, addr, len))
17171+ return addr;
17172+ }
17173+ }
17174+
17175+ /* check if free_area_cache is useful for us */
17176+ if (len <= mm->cached_hole_size) {
17177+ mm->cached_hole_size = 0;
17178+ mm->free_area_cache = mm->mmap_base;
17179+ }
17180+
17181+ /* either no address requested or can't fit in requested address hole */
17182+ addr = mm->free_area_cache;
17183+
17184+ /* make sure it can fit in the remaining address space */
17185+ if (addr > len) {
17186+ vma = find_vma(mm, addr-len);
17187+ if (check_heap_stack_gap(vma, addr - len, len))
17188+ /* remember the address as a hint for next time */
17189+ return (mm->free_area_cache = addr-len);
17190+ }
17191+
17192+ if (mm->mmap_base < len)
17193+ goto bottomup;
17194+
17195+ addr = mm->mmap_base-len;
17196+
17197+ do {
17198+ /*
17199+ * Lookup failure means no vma is above this address,
17200+ * else if new region fits below vma->vm_start,
17201+ * return with success:
17202+ */
17203+ vma = find_vma(mm, addr);
17204+ if (check_heap_stack_gap(vma, addr, len))
17205+ /* remember the address as a hint for next time */
17206+ return (mm->free_area_cache = addr);
17207+
17208+ /* remember the largest hole we saw so far */
17209+ if (addr + mm->cached_hole_size < vma->vm_start)
17210+ mm->cached_hole_size = vma->vm_start - addr;
17211+
17212+ /* try just below the current vma->vm_start */
17213+ addr = skip_heap_stack_gap(vma, len);
17214+ } while (!IS_ERR_VALUE(addr));
17215+
17216+bottomup:
17217+ /*
17218+ * A failed mmap() very likely causes application failure,
17219+ * so fall back to the bottom-up function here. This scenario
17220+ * can happen with large stack limits and large mmap()
17221+ * allocations.
17222+ */
17223+
17224+#ifdef CONFIG_PAX_SEGMEXEC
17225+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17226+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17227+ else
17228+#endif
17229+
17230+ mm->mmap_base = TASK_UNMAPPED_BASE;
17231+
17232+#ifdef CONFIG_PAX_RANDMMAP
17233+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17234+ mm->mmap_base += mm->delta_mmap;
17235+#endif
17236+
17237+ mm->free_area_cache = mm->mmap_base;
17238+ mm->cached_hole_size = ~0UL;
17239+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17240+ /*
17241+ * Restore the topdown base:
17242+ */
17243+ mm->mmap_base = base;
17244+ mm->free_area_cache = base;
17245+ mm->cached_hole_size = ~0UL;
17246+
17247+ return addr;
17248+}
17249
17250 struct sel_arg_struct {
17251 unsigned long n;
17252@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
17253 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
17254 case SEMTIMEDOP:
17255 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
17256- (const struct timespec __user *)fifth);
17257+ (__force const struct timespec __user *)fifth);
17258
17259 case SEMGET:
17260 return sys_semget(first, second, third);
17261@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
17262 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
17263 if (ret)
17264 return ret;
17265- return put_user(raddr, (ulong __user *) third);
17266+ return put_user(raddr, (__force ulong __user *) third);
17267 }
17268 case 1: /* iBCS2 emulator entry point */
17269 if (!segment_eq(get_fs(), get_ds()))
17270@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
17271
17272 return error;
17273 }
17274-
17275-
17276-/*
17277- * Do a system call from kernel instead of calling sys_execve so we
17278- * end up with proper pt_regs.
17279- */
17280-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
17281-{
17282- long __res;
17283- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
17284- : "=a" (__res)
17285- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
17286- return __res;
17287-}
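
Both the i386 allocators added above and the x86_64 ones patched below replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), which additionally refuses to place a new mapping flush against a downward-growing stack. A self-contained sketch of that policy over a plain array of ranges — the helper name, the struct and the fixed gap size are illustrative, not the kernel's implementation:

#include <stdbool.h>
#include <stdio.h>

#define GUARD_GAP (64UL * 1024)   /* illustrative cushion below a stack VMA */

struct range { unsigned long start, end; bool grows_down; };

/* 'next' is the first existing range that ends above addr, or NULL if none. */
static bool fits_with_gap(const struct range *next, unsigned long addr, unsigned long len)
{
	if (!next)
		return true;                                   /* nothing above: fits */
	if (next->grows_down)
		return addr + len + GUARD_GAP <= next->start;  /* keep a cushion      */
	return addr + len <= next->start;                      /* plain VMA: no overlap */
}

int main(void)
{
	struct range stack = { 0x7f0000, 0x800000, true };

	printf("%d\n", fits_with_gap(&stack, 0x700000, 0x10000)); /* 1: well below    */
	printf("%d\n", fits_with_gap(&stack, 0x7e0000, 0x10000)); /* 0: inside cushion */
	printf("%d\n", fits_with_gap(NULL,   0x700000, 0x10000)); /* 1: no neighbour  */
	return 0;
}
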
17288diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c
17289--- linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
17290+++ linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
17291@@ -32,8 +32,8 @@ out:
17292 return error;
17293 }
17294
17295-static void find_start_end(unsigned long flags, unsigned long *begin,
17296- unsigned long *end)
17297+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17298+ unsigned long *begin, unsigned long *end)
17299 {
17300 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17301 unsigned long new_begin;
17302@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
17303 *begin = new_begin;
17304 }
17305 } else {
17306- *begin = TASK_UNMAPPED_BASE;
17307+ *begin = mm->mmap_base;
17308 *end = TASK_SIZE;
17309 }
17310 }
17311@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
17312 if (flags & MAP_FIXED)
17313 return addr;
17314
17315- find_start_end(flags, &begin, &end);
17316+ find_start_end(mm, flags, &begin, &end);
17317
17318 if (len > end)
17319 return -ENOMEM;
17320
17321+#ifdef CONFIG_PAX_RANDMMAP
17322+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17323+#endif
17324+
17325 if (addr) {
17326 addr = PAGE_ALIGN(addr);
17327 vma = find_vma(mm, addr);
17328- if (end - len >= addr &&
17329- (!vma || addr + len <= vma->vm_start))
17330+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17331 return addr;
17332 }
17333 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17334@@ -106,7 +109,7 @@ full_search:
17335 }
17336 return -ENOMEM;
17337 }
17338- if (!vma || addr + len <= vma->vm_start) {
17339+ if (check_heap_stack_gap(vma, addr, len)) {
17340 /*
17341 * Remember the place where we stopped the search:
17342 */
17343@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
17344 {
17345 struct vm_area_struct *vma;
17346 struct mm_struct *mm = current->mm;
17347- unsigned long addr = addr0;
17348+ unsigned long base = mm->mmap_base, addr = addr0;
17349
17350 /* requested length too big for entire address space */
17351 if (len > TASK_SIZE)
17352@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
17353 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17354 goto bottomup;
17355
17356+#ifdef CONFIG_PAX_RANDMMAP
17357+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17358+#endif
17359+
17360 /* requesting a specific address */
17361 if (addr) {
17362 addr = PAGE_ALIGN(addr);
17363- vma = find_vma(mm, addr);
17364- if (TASK_SIZE - len >= addr &&
17365- (!vma || addr + len <= vma->vm_start))
17366- return addr;
17367+ if (TASK_SIZE - len >= addr) {
17368+ vma = find_vma(mm, addr);
17369+ if (check_heap_stack_gap(vma, addr, len))
17370+ return addr;
17371+ }
17372 }
17373
17374 /* check if free_area_cache is useful for us */
17375@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
17376 /* make sure it can fit in the remaining address space */
17377 if (addr > len) {
17378 vma = find_vma(mm, addr-len);
17379- if (!vma || addr <= vma->vm_start)
17380+ if (check_heap_stack_gap(vma, addr - len, len))
17381 /* remember the address as a hint for next time */
17382 return mm->free_area_cache = addr-len;
17383 }
17384@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
17385 * return with success:
17386 */
17387 vma = find_vma(mm, addr);
17388- if (!vma || addr+len <= vma->vm_start)
17389+ if (check_heap_stack_gap(vma, addr, len))
17390 /* remember the address as a hint for next time */
17391 return mm->free_area_cache = addr;
17392
17393@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
17394 mm->cached_hole_size = vma->vm_start - addr;
17395
17396 /* try just below the current vma->vm_start */
17397- addr = vma->vm_start-len;
17398- } while (len < vma->vm_start);
17399+ addr = skip_heap_stack_gap(vma, len);
17400+ } while (!IS_ERR_VALUE(addr));
17401
17402 bottomup:
17403 /*
17404@@ -198,13 +206,21 @@ bottomup:
17405 * can happen with large stack limits and large mmap()
17406 * allocations.
17407 */
17408+ mm->mmap_base = TASK_UNMAPPED_BASE;
17409+
17410+#ifdef CONFIG_PAX_RANDMMAP
17411+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17412+ mm->mmap_base += mm->delta_mmap;
17413+#endif
17414+
17415+ mm->free_area_cache = mm->mmap_base;
17416 mm->cached_hole_size = ~0UL;
17417- mm->free_area_cache = TASK_UNMAPPED_BASE;
17418 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17419 /*
17420 * Restore the topdown base:
17421 */
17422- mm->free_area_cache = mm->mmap_base;
17423+ mm->mmap_base = base;
17424+ mm->free_area_cache = base;
17425 mm->cached_hole_size = ~0UL;
17426
17427 return addr;
17428diff -urNp linux-2.6.32.45/arch/x86/kernel/tboot.c linux-2.6.32.45/arch/x86/kernel/tboot.c
17429--- linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
17430+++ linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
17431@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17432
17433 void tboot_shutdown(u32 shutdown_type)
17434 {
17435- void (*shutdown)(void);
17436+ void (* __noreturn shutdown)(void);
17437
17438 if (!tboot_enabled())
17439 return;
17440@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17441
17442 switch_to_tboot_pt();
17443
17444- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17445+ shutdown = (void *)tboot->shutdown_entry;
17446 shutdown();
17447
17448 /* should not reach here */
17449@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17450 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17451 }
17452
17453-static atomic_t ap_wfs_count;
17454+static atomic_unchecked_t ap_wfs_count;
17455
17456 static int tboot_wait_for_aps(int num_aps)
17457 {
17458@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17459 {
17460 switch (action) {
17461 case CPU_DYING:
17462- atomic_inc(&ap_wfs_count);
17463+ atomic_inc_unchecked(&ap_wfs_count);
17464 if (num_online_cpus() == 1)
17465- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17466+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17467 return NOTIFY_BAD;
17468 break;
17469 }
17470@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17471
17472 tboot_create_trampoline();
17473
17474- atomic_set(&ap_wfs_count, 0);
17475+ atomic_set_unchecked(&ap_wfs_count, 0);
17476 register_hotcpu_notifier(&tboot_cpu_notifier);
17477 return 0;
17478 }
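
The tboot hunk converts ap_wfs_count from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, ordinary atomic_t increments are overflow-checked; counters that are not reference counts and may legitimately grow or wrap are renamed to the unchecked variant so they bypass the check. A rough C11 illustration of the distinction — the checked/unchecked split here is a sketch, not PaX's assembly-level implementation:

#include <stdatomic.h>
#include <limits.h>
#include <stdio.h>

/* "Checked" increment: refuses to wrap past INT_MAX and reports the attempt. */
static int checked_inc(atomic_int *v)
{
	int old = atomic_load(v);
	do {
		if (old == INT_MAX)
			return 0;        /* caller can log or kill, as PAX_REFCOUNT does */
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));
	return 1;
}

/* "Unchecked" increment: plain wrap-around semantics, like atomic_inc_unchecked(). */
static void unchecked_inc(atomic_int *v)
{
	atomic_fetch_add(v, 1);
}

int main(void)
{
	atomic_int refs  = INT_MAX;
	atomic_int ticks = 0;

	printf("checked_inc at INT_MAX -> %d\n", checked_inc(&refs));  /* 0 */
	unchecked_inc(&ticks);
	printf("ticks = %d\n", atomic_load(&ticks));                   /* 1 */
	return 0;
}
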
17479diff -urNp linux-2.6.32.45/arch/x86/kernel/time.c linux-2.6.32.45/arch/x86/kernel/time.c
17480--- linux-2.6.32.45/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17481+++ linux-2.6.32.45/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17482@@ -26,17 +26,13 @@
17483 int timer_ack;
17484 #endif
17485
17486-#ifdef CONFIG_X86_64
17487-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17488-#endif
17489-
17490 unsigned long profile_pc(struct pt_regs *regs)
17491 {
17492 unsigned long pc = instruction_pointer(regs);
17493
17494- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17495+ if (!user_mode(regs) && in_lock_functions(pc)) {
17496 #ifdef CONFIG_FRAME_POINTER
17497- return *(unsigned long *)(regs->bp + sizeof(long));
17498+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17499 #else
17500 unsigned long *sp =
17501 (unsigned long *)kernel_stack_pointer(regs);
17502@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17503 * or above a saved flags. Eflags has bits 22-31 zero,
17504 * kernel addresses don't.
17505 */
17506+
17507+#ifdef CONFIG_PAX_KERNEXEC
17508+ return ktla_ktva(sp[0]);
17509+#else
17510 if (sp[0] >> 22)
17511 return sp[0];
17512 if (sp[1] >> 22)
17513 return sp[1];
17514 #endif
17515+
17516+#endif
17517 }
17518 return pc;
17519 }
17520diff -urNp linux-2.6.32.45/arch/x86/kernel/tls.c linux-2.6.32.45/arch/x86/kernel/tls.c
17521--- linux-2.6.32.45/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17522+++ linux-2.6.32.45/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17523@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17524 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17525 return -EINVAL;
17526
17527+#ifdef CONFIG_PAX_SEGMEXEC
17528+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17529+ return -EINVAL;
17530+#endif
17531+
17532 set_tls_desc(p, idx, &info, 1);
17533
17534 return 0;
17535diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_32.S linux-2.6.32.45/arch/x86/kernel/trampoline_32.S
17536--- linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17537+++ linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17538@@ -32,6 +32,12 @@
17539 #include <asm/segment.h>
17540 #include <asm/page_types.h>
17541
17542+#ifdef CONFIG_PAX_KERNEXEC
17543+#define ta(X) (X)
17544+#else
17545+#define ta(X) ((X) - __PAGE_OFFSET)
17546+#endif
17547+
17548 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17549 __CPUINITRODATA
17550 .code16
17551@@ -60,7 +66,7 @@ r_base = .
17552 inc %ax # protected mode (PE) bit
17553 lmsw %ax # into protected mode
17554 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17555- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17556+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17557
17558 # These need to be in the same 64K segment as the above;
17559 # hence we don't use the boot_gdt_descr defined in head.S
17560diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_64.S linux-2.6.32.45/arch/x86/kernel/trampoline_64.S
17561--- linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17562+++ linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17563@@ -91,7 +91,7 @@ startup_32:
17564 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17565 movl %eax, %ds
17566
17567- movl $X86_CR4_PAE, %eax
17568+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17569 movl %eax, %cr4 # Enable PAE mode
17570
17571 # Setup trampoline 4 level pagetables
17572@@ -127,7 +127,7 @@ startup_64:
17573 no_longmode:
17574 hlt
17575 jmp no_longmode
17576-#include "verify_cpu_64.S"
17577+#include "verify_cpu.S"
17578
17579 # Careful these need to be in the same 64K segment as the above;
17580 tidt:
17581@@ -138,7 +138,7 @@ tidt:
17582 # so the kernel can live anywhere
17583 .balign 4
17584 tgdt:
17585- .short tgdt_end - tgdt # gdt limit
17586+ .short tgdt_end - tgdt - 1 # gdt limit
17587 .long tgdt - r_base
17588 .short 0
17589 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17590diff -urNp linux-2.6.32.45/arch/x86/kernel/traps.c linux-2.6.32.45/arch/x86/kernel/traps.c
17591--- linux-2.6.32.45/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17592+++ linux-2.6.32.45/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17593@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17594
17595 /* Do we ignore FPU interrupts ? */
17596 char ignore_fpu_irq;
17597-
17598-/*
17599- * The IDT has to be page-aligned to simplify the Pentium
17600- * F0 0F bug workaround.
17601- */
17602-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17603 #endif
17604
17605 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17606@@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17607 static inline void
17608 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17609 {
17610- if (!user_mode_vm(regs))
17611+ if (!user_mode(regs))
17612 die(str, regs, err);
17613 }
17614 #endif
17615
17616 static void __kprobes
17617-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17618+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17619 long error_code, siginfo_t *info)
17620 {
17621 struct task_struct *tsk = current;
17622
17623 #ifdef CONFIG_X86_32
17624- if (regs->flags & X86_VM_MASK) {
17625+ if (v8086_mode(regs)) {
17626 /*
17627 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17628 * On nmi (interrupt 2), do_trap should not be called.
17629@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17630 }
17631 #endif
17632
17633- if (!user_mode(regs))
17634+ if (!user_mode_novm(regs))
17635 goto kernel_trap;
17636
17637 #ifdef CONFIG_X86_32
17638@@ -158,7 +152,7 @@ trap_signal:
17639 printk_ratelimit()) {
17640 printk(KERN_INFO
17641 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17642- tsk->comm, tsk->pid, str,
17643+ tsk->comm, task_pid_nr(tsk), str,
17644 regs->ip, regs->sp, error_code);
17645 print_vma_addr(" in ", regs->ip);
17646 printk("\n");
17647@@ -175,8 +169,20 @@ kernel_trap:
17648 if (!fixup_exception(regs)) {
17649 tsk->thread.error_code = error_code;
17650 tsk->thread.trap_no = trapnr;
17651+
17652+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17653+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17654+ str = "PAX: suspicious stack segment fault";
17655+#endif
17656+
17657 die(str, regs, error_code);
17658 }
17659+
17660+#ifdef CONFIG_PAX_REFCOUNT
17661+ if (trapnr == 4)
17662+ pax_report_refcount_overflow(regs);
17663+#endif
17664+
17665 return;
17666
17667 #ifdef CONFIG_X86_32
17668@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17669 conditional_sti(regs);
17670
17671 #ifdef CONFIG_X86_32
17672- if (regs->flags & X86_VM_MASK)
17673+ if (v8086_mode(regs))
17674 goto gp_in_vm86;
17675 #endif
17676
17677 tsk = current;
17678- if (!user_mode(regs))
17679+ if (!user_mode_novm(regs))
17680 goto gp_in_kernel;
17681
17682+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17683+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17684+ struct mm_struct *mm = tsk->mm;
17685+ unsigned long limit;
17686+
17687+ down_write(&mm->mmap_sem);
17688+ limit = mm->context.user_cs_limit;
17689+ if (limit < TASK_SIZE) {
17690+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17691+ up_write(&mm->mmap_sem);
17692+ return;
17693+ }
17694+ up_write(&mm->mmap_sem);
17695+ }
17696+#endif
17697+
17698 tsk->thread.error_code = error_code;
17699 tsk->thread.trap_no = 13;
17700
17701@@ -305,6 +327,13 @@ gp_in_kernel:
17702 if (notify_die(DIE_GPF, "general protection fault", regs,
17703 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17704 return;
17705+
17706+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17707+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17708+ die("PAX: suspicious general protection fault", regs, error_code);
17709+ else
17710+#endif
17711+
17712 die("general protection fault", regs, error_code);
17713 }
17714
17715@@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17716 dotraplinkage notrace __kprobes void
17717 do_nmi(struct pt_regs *regs, long error_code)
17718 {
17719+
17720+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17721+ if (!user_mode(regs)) {
17722+ unsigned long cs = regs->cs & 0xFFFF;
17723+ unsigned long ip = ktva_ktla(regs->ip);
17724+
17725+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17726+ regs->ip = ip;
17727+ }
17728+#endif
17729+
17730 nmi_enter();
17731
17732 inc_irq_stat(__nmi_count);
17733@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17734 }
17735
17736 #ifdef CONFIG_X86_32
17737- if (regs->flags & X86_VM_MASK)
17738+ if (v8086_mode(regs))
17739 goto debug_vm86;
17740 #endif
17741
17742@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17743 * kernel space (but re-enable TF when returning to user mode).
17744 */
17745 if (condition & DR_STEP) {
17746- if (!user_mode(regs))
17747+ if (!user_mode_novm(regs))
17748 goto clear_TF_reenable;
17749 }
17750
17751@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17752 * Handle strange cache flush from user space exception
17753 * in all other cases. This is undocumented behaviour.
17754 */
17755- if (regs->flags & X86_VM_MASK) {
17756+ if (v8086_mode(regs)) {
17757 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17758 return;
17759 }
17760@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17761 void __math_state_restore(void)
17762 {
17763 struct thread_info *thread = current_thread_info();
17764- struct task_struct *tsk = thread->task;
17765+ struct task_struct *tsk = current;
17766
17767 /*
17768 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17769@@ -825,8 +865,7 @@ void __math_state_restore(void)
17770 */
17771 asmlinkage void math_state_restore(void)
17772 {
17773- struct thread_info *thread = current_thread_info();
17774- struct task_struct *tsk = thread->task;
17775+ struct task_struct *tsk = current;
17776
17777 if (!tsk_used_math(tsk)) {
17778 local_irq_enable();
17779diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S
17780--- linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17781+++ linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17782@@ -1,105 +0,0 @@
17783-/*
17784- *
17785- * verify_cpu.S - Code for cpu long mode and SSE verification. This
17786- * code has been borrowed from boot/setup.S and was introduced by
17787- * Andi Kleen.
17788- *
17789- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17790- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17791- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17792- *
17793- * This source code is licensed under the GNU General Public License,
17794- * Version 2. See the file COPYING for more details.
17795- *
17796- * This is a common code for verification whether CPU supports
17797- * long mode and SSE or not. It is not called directly instead this
17798- * file is included at various places and compiled in that context.
17799- * Following are the current usage.
17800- *
17801- * This file is included by both 16bit and 32bit code.
17802- *
17803- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17804- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17805- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17806- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17807- *
17808- * verify_cpu, returns the status of cpu check in register %eax.
17809- * 0: Success 1: Failure
17810- *
17811- * The caller needs to check for the error code and take the action
17812- * appropriately. Either display a message or halt.
17813- */
17814-
17815-#include <asm/cpufeature.h>
17816-
17817-verify_cpu:
17818- pushfl # Save caller passed flags
17819- pushl $0 # Kill any dangerous flags
17820- popfl
17821-
17822- pushfl # standard way to check for cpuid
17823- popl %eax
17824- movl %eax,%ebx
17825- xorl $0x200000,%eax
17826- pushl %eax
17827- popfl
17828- pushfl
17829- popl %eax
17830- cmpl %eax,%ebx
17831- jz verify_cpu_no_longmode # cpu has no cpuid
17832-
17833- movl $0x0,%eax # See if cpuid 1 is implemented
17834- cpuid
17835- cmpl $0x1,%eax
17836- jb verify_cpu_no_longmode # no cpuid 1
17837-
17838- xor %di,%di
17839- cmpl $0x68747541,%ebx # AuthenticAMD
17840- jnz verify_cpu_noamd
17841- cmpl $0x69746e65,%edx
17842- jnz verify_cpu_noamd
17843- cmpl $0x444d4163,%ecx
17844- jnz verify_cpu_noamd
17845- mov $1,%di # cpu is from AMD
17846-
17847-verify_cpu_noamd:
17848- movl $0x1,%eax # Does the cpu have what it takes
17849- cpuid
17850- andl $REQUIRED_MASK0,%edx
17851- xorl $REQUIRED_MASK0,%edx
17852- jnz verify_cpu_no_longmode
17853-
17854- movl $0x80000000,%eax # See if extended cpuid is implemented
17855- cpuid
17856- cmpl $0x80000001,%eax
17857- jb verify_cpu_no_longmode # no extended cpuid
17858-
17859- movl $0x80000001,%eax # Does the cpu have what it takes
17860- cpuid
17861- andl $REQUIRED_MASK1,%edx
17862- xorl $REQUIRED_MASK1,%edx
17863- jnz verify_cpu_no_longmode
17864-
17865-verify_cpu_sse_test:
17866- movl $1,%eax
17867- cpuid
17868- andl $SSE_MASK,%edx
17869- cmpl $SSE_MASK,%edx
17870- je verify_cpu_sse_ok
17871- test %di,%di
17872- jz verify_cpu_no_longmode # only try to force SSE on AMD
17873- movl $0xc0010015,%ecx # HWCR
17874- rdmsr
17875- btr $15,%eax # enable SSE
17876- wrmsr
17877- xor %di,%di # don't loop
17878- jmp verify_cpu_sse_test # try again
17879-
17880-verify_cpu_no_longmode:
17881- popfl # Restore caller passed flags
17882- movl $1,%eax
17883- ret
17884-verify_cpu_sse_ok:
17885- popfl # Restore caller passed flags
17886- xorl %eax, %eax
17887- ret
17888diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu.S linux-2.6.32.45/arch/x86/kernel/verify_cpu.S
17889--- linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17890+++ linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17891@@ -0,0 +1,140 @@
17892+/*
17893+ *
17894+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
17895+ * code has been borrowed from boot/setup.S and was introduced by
17896+ * Andi Kleen.
17897+ *
17898+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17899+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17900+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17901+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17902+ *
17903+ * This source code is licensed under the GNU General Public License,
17904+ * Version 2. See the file COPYING for more details.
17905+ *
17906+ * This is a common code for verification whether CPU supports
17907+ * long mode and SSE or not. It is not called directly instead this
17908+ * file is included at various places and compiled in that context.
17909+ * This file is expected to run in 32bit code. Currently:
17910+ *
17911+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17912+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
17913+ * arch/x86/kernel/head_32.S: processor startup
17914+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17915+ *
17916+ * verify_cpu, returns the status of longmode and SSE in register %eax.
17917+ * 0: Success 1: Failure
17918+ *
17919+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17920+ *
17921+ * The caller needs to check for the error code and take the action
17922+ * appropriately. Either display a message or halt.
17923+ */
17924+
17925+#include <asm/cpufeature.h>
17926+#include <asm/msr-index.h>
17927+
17928+verify_cpu:
17929+ pushfl # Save caller passed flags
17930+ pushl $0 # Kill any dangerous flags
17931+ popfl
17932+
17933+ pushfl # standard way to check for cpuid
17934+ popl %eax
17935+ movl %eax,%ebx
17936+ xorl $0x200000,%eax
17937+ pushl %eax
17938+ popfl
17939+ pushfl
17940+ popl %eax
17941+ cmpl %eax,%ebx
17942+ jz verify_cpu_no_longmode # cpu has no cpuid
17943+
17944+ movl $0x0,%eax # See if cpuid 1 is implemented
17945+ cpuid
17946+ cmpl $0x1,%eax
17947+ jb verify_cpu_no_longmode # no cpuid 1
17948+
17949+ xor %di,%di
17950+ cmpl $0x68747541,%ebx # AuthenticAMD
17951+ jnz verify_cpu_noamd
17952+ cmpl $0x69746e65,%edx
17953+ jnz verify_cpu_noamd
17954+ cmpl $0x444d4163,%ecx
17955+ jnz verify_cpu_noamd
17956+ mov $1,%di # cpu is from AMD
17957+ jmp verify_cpu_check
17958+
17959+verify_cpu_noamd:
17960+ cmpl $0x756e6547,%ebx # GenuineIntel?
17961+ jnz verify_cpu_check
17962+ cmpl $0x49656e69,%edx
17963+ jnz verify_cpu_check
17964+ cmpl $0x6c65746e,%ecx
17965+ jnz verify_cpu_check
17966+
17967+ # only call IA32_MISC_ENABLE when:
17968+ # family > 6 || (family == 6 && model >= 0xd)
17969+ movl $0x1, %eax # check CPU family and model
17970+ cpuid
17971+ movl %eax, %ecx
17972+
17973+ andl $0x0ff00f00, %eax # mask family and extended family
17974+ shrl $8, %eax
17975+ cmpl $6, %eax
17976+ ja verify_cpu_clear_xd # family > 6, ok
17977+ jb verify_cpu_check # family < 6, skip
17978+
17979+ andl $0x000f00f0, %ecx # mask model and extended model
17980+ shrl $4, %ecx
17981+ cmpl $0xd, %ecx
17982+ jb verify_cpu_check # family == 6, model < 0xd, skip
17983+
17984+verify_cpu_clear_xd:
17985+ movl $MSR_IA32_MISC_ENABLE, %ecx
17986+ rdmsr
17987+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17988+ jnc verify_cpu_check # only write MSR if bit was changed
17989+ wrmsr
17990+
17991+verify_cpu_check:
17992+ movl $0x1,%eax # Does the cpu have what it takes
17993+ cpuid
17994+ andl $REQUIRED_MASK0,%edx
17995+ xorl $REQUIRED_MASK0,%edx
17996+ jnz verify_cpu_no_longmode
17997+
17998+ movl $0x80000000,%eax # See if extended cpuid is implemented
17999+ cpuid
18000+ cmpl $0x80000001,%eax
18001+ jb verify_cpu_no_longmode # no extended cpuid
18002+
18003+ movl $0x80000001,%eax # Does the cpu have what it takes
18004+ cpuid
18005+ andl $REQUIRED_MASK1,%edx
18006+ xorl $REQUIRED_MASK1,%edx
18007+ jnz verify_cpu_no_longmode
18008+
18009+verify_cpu_sse_test:
18010+ movl $1,%eax
18011+ cpuid
18012+ andl $SSE_MASK,%edx
18013+ cmpl $SSE_MASK,%edx
18014+ je verify_cpu_sse_ok
18015+ test %di,%di
18016+ jz verify_cpu_no_longmode # only try to force SSE on AMD
18017+ movl $MSR_K7_HWCR,%ecx
18018+ rdmsr
18019+ btr $15,%eax # enable SSE
18020+ wrmsr
18021+ xor %di,%di # don't loop
18022+ jmp verify_cpu_sse_test # try again
18023+
18024+verify_cpu_no_longmode:
18025+ popfl # Restore caller passed flags
18026+ movl $1,%eax
18027+ ret
18028+verify_cpu_sse_ok:
18029+ popfl # Restore caller passed flags
18030+ xorl %eax, %eax
18031+ ret
18032diff -urNp linux-2.6.32.45/arch/x86/kernel/vm86_32.c linux-2.6.32.45/arch/x86/kernel/vm86_32.c
18033--- linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
18034+++ linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
18035@@ -41,6 +41,7 @@
18036 #include <linux/ptrace.h>
18037 #include <linux/audit.h>
18038 #include <linux/stddef.h>
18039+#include <linux/grsecurity.h>
18040
18041 #include <asm/uaccess.h>
18042 #include <asm/io.h>
18043@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
18044 do_exit(SIGSEGV);
18045 }
18046
18047- tss = &per_cpu(init_tss, get_cpu());
18048+ tss = init_tss + get_cpu();
18049 current->thread.sp0 = current->thread.saved_sp0;
18050 current->thread.sysenter_cs = __KERNEL_CS;
18051 load_sp0(tss, &current->thread);
18052@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
18053 struct task_struct *tsk;
18054 int tmp, ret = -EPERM;
18055
18056+#ifdef CONFIG_GRKERNSEC_VM86
18057+ if (!capable(CAP_SYS_RAWIO)) {
18058+ gr_handle_vm86();
18059+ goto out;
18060+ }
18061+#endif
18062+
18063 tsk = current;
18064 if (tsk->thread.saved_sp0)
18065 goto out;
18066@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
18067 int tmp, ret;
18068 struct vm86plus_struct __user *v86;
18069
18070+#ifdef CONFIG_GRKERNSEC_VM86
18071+ if (!capable(CAP_SYS_RAWIO)) {
18072+ gr_handle_vm86();
18073+ ret = -EPERM;
18074+ goto out;
18075+ }
18076+#endif
18077+
18078 tsk = current;
18079 switch (regs->bx) {
18080 case VM86_REQUEST_IRQ:
18081@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
18082 tsk->thread.saved_fs = info->regs32->fs;
18083 tsk->thread.saved_gs = get_user_gs(info->regs32);
18084
18085- tss = &per_cpu(init_tss, get_cpu());
18086+ tss = init_tss + get_cpu();
18087 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
18088 if (cpu_has_sep)
18089 tsk->thread.sysenter_cs = 0;
18090@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
18091 goto cannot_handle;
18092 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
18093 goto cannot_handle;
18094- intr_ptr = (unsigned long __user *) (i << 2);
18095+ intr_ptr = (__force unsigned long __user *) (i << 2);
18096 if (get_user(segoffs, intr_ptr))
18097 goto cannot_handle;
18098 if ((segoffs >> 16) == BIOSSEG)
18099diff -urNp linux-2.6.32.45/arch/x86/kernel/vmi_32.c linux-2.6.32.45/arch/x86/kernel/vmi_32.c
18100--- linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
18101+++ linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-08-05 20:33:55.000000000 -0400
18102@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
18103 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
18104
18105 #define call_vrom_func(rom,func) \
18106- (((VROMFUNC *)(rom->func))())
18107+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
18108
18109 #define call_vrom_long_func(rom,func,arg) \
18110- (((VROMLONGFUNC *)(rom->func)) (arg))
18111+({\
18112+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
18113+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
18114+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
18115+ __reloc;\
18116+})
18117
18118-static struct vrom_header *vmi_rom;
18119+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
18120 static int disable_pge;
18121 static int disable_pse;
18122 static int disable_sep;
18123@@ -76,10 +81,10 @@ static struct {
18124 void (*set_initial_ap_state)(int, int);
18125 void (*halt)(void);
18126 void (*set_lazy_mode)(int mode);
18127-} vmi_ops;
18128+} __no_const vmi_ops __read_only;
18129
18130 /* Cached VMI operations */
18131-struct vmi_timer_ops vmi_timer_ops;
18132+struct vmi_timer_ops vmi_timer_ops __read_only;
18133
18134 /*
18135 * VMI patching routines.
18136@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
18137 static inline void patch_offset(void *insnbuf,
18138 unsigned long ip, unsigned long dest)
18139 {
18140- *(unsigned long *)(insnbuf+1) = dest-ip-5;
18141+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
18142 }
18143
18144 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
18145@@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
18146 {
18147 u64 reloc;
18148 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
18149+
18150 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
18151 switch(rel->type) {
18152 case VMI_RELOCATION_CALL_REL:
18153@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
18154
18155 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
18156 {
18157- const pte_t pte = { .pte = 0 };
18158+ const pte_t pte = __pte(0ULL);
18159 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
18160 }
18161
18162 static void vmi_pmd_clear(pmd_t *pmd)
18163 {
18164- const pte_t pte = { .pte = 0 };
18165+ const pte_t pte = __pte(0ULL);
18166 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
18167 }
18168 #endif
18169@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
18170 ap.ss = __KERNEL_DS;
18171 ap.esp = (unsigned long) start_esp;
18172
18173- ap.ds = __USER_DS;
18174- ap.es = __USER_DS;
18175+ ap.ds = __KERNEL_DS;
18176+ ap.es = __KERNEL_DS;
18177 ap.fs = __KERNEL_PERCPU;
18178- ap.gs = __KERNEL_STACK_CANARY;
18179+ savesegment(gs, ap.gs);
18180
18181 ap.eflags = 0;
18182
18183@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
18184 paravirt_leave_lazy_mmu();
18185 }
18186
18187+#ifdef CONFIG_PAX_KERNEXEC
18188+static unsigned long vmi_pax_open_kernel(void)
18189+{
18190+ return 0;
18191+}
18192+
18193+static unsigned long vmi_pax_close_kernel(void)
18194+{
18195+ return 0;
18196+}
18197+#endif
18198+
18199 static inline int __init check_vmi_rom(struct vrom_header *rom)
18200 {
18201 struct pci_header *pci;
18202@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
18203 return 0;
18204 if (rom->vrom_signature != VMI_SIGNATURE)
18205 return 0;
18206+ if (rom->rom_length * 512 > sizeof(*rom)) {
18207+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
18208+ return 0;
18209+ }
18210 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
18211 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
18212 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
18213@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
18214 struct vrom_header *romstart;
18215 romstart = (struct vrom_header *)isa_bus_to_virt(base);
18216 if (check_vmi_rom(romstart)) {
18217- vmi_rom = romstart;
18218+ vmi_rom = *romstart;
18219 return 1;
18220 }
18221 }
18222@@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
18223
18224 para_fill(pv_irq_ops.safe_halt, Halt);
18225
18226+#ifdef CONFIG_PAX_KERNEXEC
18227+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
18228+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
18229+#endif
18230+
18231 /*
18232 * Alternative instruction rewriting doesn't happen soon enough
18233 * to convert VMI_IRET to a call instead of a jump; so we have
18234@@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
18235
18236 void __init vmi_init(void)
18237 {
18238- if (!vmi_rom)
18239+ if (!vmi_rom.rom_signature)
18240 probe_vmi_rom();
18241 else
18242- check_vmi_rom(vmi_rom);
18243+ check_vmi_rom(&vmi_rom);
18244
18245 /* In case probing for or validating the ROM failed, basil */
18246- if (!vmi_rom)
18247+ if (!vmi_rom.rom_signature)
18248 return;
18249
18250- reserve_top_address(-vmi_rom->virtual_top);
18251+ reserve_top_address(-vmi_rom.virtual_top);
18252
18253 #ifdef CONFIG_X86_IO_APIC
18254 /* This is virtual hardware; timer routing is wired correctly */
18255@@ -874,7 +901,7 @@ void __init vmi_activate(void)
18256 {
18257 unsigned long flags;
18258
18259- if (!vmi_rom)
18260+ if (!vmi_rom.rom_signature)
18261 return;
18262
18263 local_irq_save(flags);
18264diff -urNp linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S
18265--- linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
18266+++ linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
18267@@ -26,6 +26,13 @@
18268 #include <asm/page_types.h>
18269 #include <asm/cache.h>
18270 #include <asm/boot.h>
18271+#include <asm/segment.h>
18272+
18273+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18274+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18275+#else
18276+#define __KERNEL_TEXT_OFFSET 0
18277+#endif
18278
18279 #undef i386 /* in case the preprocessor is a 32bit one */
18280
18281@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
18282 #ifdef CONFIG_X86_32
18283 OUTPUT_ARCH(i386)
18284 ENTRY(phys_startup_32)
18285-jiffies = jiffies_64;
18286 #else
18287 OUTPUT_ARCH(i386:x86-64)
18288 ENTRY(phys_startup_64)
18289-jiffies_64 = jiffies;
18290 #endif
18291
18292 PHDRS {
18293 text PT_LOAD FLAGS(5); /* R_E */
18294- data PT_LOAD FLAGS(7); /* RWE */
18295+#ifdef CONFIG_X86_32
18296+ module PT_LOAD FLAGS(5); /* R_E */
18297+#endif
18298+#ifdef CONFIG_XEN
18299+ rodata PT_LOAD FLAGS(5); /* R_E */
18300+#else
18301+ rodata PT_LOAD FLAGS(4); /* R__ */
18302+#endif
18303+ data PT_LOAD FLAGS(6); /* RW_ */
18304 #ifdef CONFIG_X86_64
18305 user PT_LOAD FLAGS(5); /* R_E */
18306+#endif
18307+ init.begin PT_LOAD FLAGS(6); /* RW_ */
18308 #ifdef CONFIG_SMP
18309 percpu PT_LOAD FLAGS(6); /* RW_ */
18310 #endif
18311+ text.init PT_LOAD FLAGS(5); /* R_E */
18312+ text.exit PT_LOAD FLAGS(5); /* R_E */
18313 init PT_LOAD FLAGS(7); /* RWE */
18314-#endif
18315 note PT_NOTE FLAGS(0); /* ___ */
18316 }
18317
18318 SECTIONS
18319 {
18320 #ifdef CONFIG_X86_32
18321- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18322- phys_startup_32 = startup_32 - LOAD_OFFSET;
18323+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18324 #else
18325- . = __START_KERNEL;
18326- phys_startup_64 = startup_64 - LOAD_OFFSET;
18327+ . = __START_KERNEL;
18328 #endif
18329
18330 /* Text and read-only data */
18331- .text : AT(ADDR(.text) - LOAD_OFFSET) {
18332- _text = .;
18333+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18334 /* bootstrapping code */
18335+#ifdef CONFIG_X86_32
18336+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18337+#else
18338+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18339+#endif
18340+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18341+ _text = .;
18342 HEAD_TEXT
18343 #ifdef CONFIG_X86_32
18344 . = ALIGN(PAGE_SIZE);
18345@@ -82,28 +102,71 @@ SECTIONS
18346 IRQENTRY_TEXT
18347 *(.fixup)
18348 *(.gnu.warning)
18349- /* End of text section */
18350- _etext = .;
18351 } :text = 0x9090
18352
18353- NOTES :text :note
18354+ . += __KERNEL_TEXT_OFFSET;
18355+
18356+#ifdef CONFIG_X86_32
18357+ . = ALIGN(PAGE_SIZE);
18358+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
18359+ *(.vmi.rom)
18360+ } :module
18361+
18362+ . = ALIGN(PAGE_SIZE);
18363+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18364+
18365+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18366+ MODULES_EXEC_VADDR = .;
18367+ BYTE(0)
18368+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18369+ . = ALIGN(HPAGE_SIZE);
18370+ MODULES_EXEC_END = . - 1;
18371+#endif
18372+
18373+ } :module
18374+#endif
18375
18376- EXCEPTION_TABLE(16) :text = 0x9090
18377+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18378+ /* End of text section */
18379+ _etext = . - __KERNEL_TEXT_OFFSET;
18380+ }
18381+
18382+#ifdef CONFIG_X86_32
18383+ . = ALIGN(PAGE_SIZE);
18384+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18385+ *(.idt)
18386+ . = ALIGN(PAGE_SIZE);
18387+ *(.empty_zero_page)
18388+ *(.swapper_pg_fixmap)
18389+ *(.swapper_pg_pmd)
18390+ *(.swapper_pg_dir)
18391+ *(.trampoline_pg_dir)
18392+ } :rodata
18393+#endif
18394+
18395+ . = ALIGN(PAGE_SIZE);
18396+ NOTES :rodata :note
18397+
18398+ EXCEPTION_TABLE(16) :rodata
18399
18400 RO_DATA(PAGE_SIZE)
18401
18402 /* Data */
18403 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18404+
18405+#ifdef CONFIG_PAX_KERNEXEC
18406+ . = ALIGN(HPAGE_SIZE);
18407+#else
18408+ . = ALIGN(PAGE_SIZE);
18409+#endif
18410+
18411 /* Start of data section */
18412 _sdata = .;
18413
18414 /* init_task */
18415 INIT_TASK_DATA(THREAD_SIZE)
18416
18417-#ifdef CONFIG_X86_32
18418- /* 32 bit has nosave before _edata */
18419 NOSAVE_DATA
18420-#endif
18421
18422 PAGE_ALIGNED_DATA(PAGE_SIZE)
18423
18424@@ -112,6 +175,8 @@ SECTIONS
18425 DATA_DATA
18426 CONSTRUCTORS
18427
18428+ jiffies = jiffies_64;
18429+
18430 /* rarely changed data like cpu maps */
18431 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18432
18433@@ -166,12 +231,6 @@ SECTIONS
18434 }
18435 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18436
18437- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18438- .jiffies : AT(VLOAD(.jiffies)) {
18439- *(.jiffies)
18440- }
18441- jiffies = VVIRT(.jiffies);
18442-
18443 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18444 *(.vsyscall_3)
18445 }
18446@@ -187,12 +246,19 @@ SECTIONS
18447 #endif /* CONFIG_X86_64 */
18448
18449 /* Init code and data - will be freed after init */
18450- . = ALIGN(PAGE_SIZE);
18451 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18452+ BYTE(0)
18453+
18454+#ifdef CONFIG_PAX_KERNEXEC
18455+ . = ALIGN(HPAGE_SIZE);
18456+#else
18457+ . = ALIGN(PAGE_SIZE);
18458+#endif
18459+
18460 __init_begin = .; /* paired with __init_end */
18461- }
18462+ } :init.begin
18463
18464-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18465+#ifdef CONFIG_SMP
18466 /*
18467 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18468 * output PHDR, so the next output section - .init.text - should
18469@@ -201,12 +267,27 @@ SECTIONS
18470 PERCPU_VADDR(0, :percpu)
18471 #endif
18472
18473- INIT_TEXT_SECTION(PAGE_SIZE)
18474-#ifdef CONFIG_X86_64
18475- :init
18476-#endif
18477+ . = ALIGN(PAGE_SIZE);
18478+ init_begin = .;
18479+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18480+ VMLINUX_SYMBOL(_sinittext) = .;
18481+ INIT_TEXT
18482+ VMLINUX_SYMBOL(_einittext) = .;
18483+ . = ALIGN(PAGE_SIZE);
18484+ } :text.init
18485
18486- INIT_DATA_SECTION(16)
18487+ /*
18488+	 * .exit.text is discarded at runtime, not link time, to deal with
18489+ * references from .altinstructions and .eh_frame
18490+ */
18491+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18492+ EXIT_TEXT
18493+ . = ALIGN(16);
18494+ } :text.exit
18495+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18496+
18497+ . = ALIGN(PAGE_SIZE);
18498+ INIT_DATA_SECTION(16) :init
18499
18500 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18501 __x86_cpu_dev_start = .;
18502@@ -232,19 +313,11 @@ SECTIONS
18503 *(.altinstr_replacement)
18504 }
18505
18506- /*
18507- * .exit.text is discard at runtime, not link time, to deal with
18508- * references from .altinstructions and .eh_frame
18509- */
18510- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18511- EXIT_TEXT
18512- }
18513-
18514 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18515 EXIT_DATA
18516 }
18517
18518-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18519+#ifndef CONFIG_SMP
18520 PERCPU(PAGE_SIZE)
18521 #endif
18522
18523@@ -267,12 +340,6 @@ SECTIONS
18524 . = ALIGN(PAGE_SIZE);
18525 }
18526
18527-#ifdef CONFIG_X86_64
18528- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18529- NOSAVE_DATA
18530- }
18531-#endif
18532-
18533 /* BSS */
18534 . = ALIGN(PAGE_SIZE);
18535 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18536@@ -288,6 +355,7 @@ SECTIONS
18537 __brk_base = .;
18538 . += 64 * 1024; /* 64k alignment slop space */
18539 *(.brk_reservation) /* areas brk users have reserved */
18540+ . = ALIGN(HPAGE_SIZE);
18541 __brk_limit = .;
18542 }
18543
18544@@ -316,13 +384,12 @@ SECTIONS
18545 * for the boot processor.
18546 */
18547 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18548-INIT_PER_CPU(gdt_page);
18549 INIT_PER_CPU(irq_stack_union);
18550
18551 /*
18552 * Build-time check on the image size:
18553 */
18554-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18555+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18556 "kernel image bigger than KERNEL_IMAGE_SIZE");
18557
18558 #ifdef CONFIG_SMP
18559diff -urNp linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c
18560--- linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18561+++ linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18562@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18563
18564 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18565 /* copy vsyscall data */
18566+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18567 vsyscall_gtod_data.clock.vread = clock->vread;
18568 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18569 vsyscall_gtod_data.clock.mask = clock->mask;
18570@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18571 We do this here because otherwise user space would do it on
18572 its own in a likely inferior way (no access to jiffies).
18573 If you don't like it pass NULL. */
18574- if (tcache && tcache->blob[0] == (j = __jiffies)) {
18575+ if (tcache && tcache->blob[0] == (j = jiffies)) {
18576 p = tcache->blob[1];
18577 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18578 /* Load per CPU data from RDTSCP */
18579diff -urNp linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c
18580--- linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18581+++ linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18582@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18583
18584 EXPORT_SYMBOL(copy_user_generic);
18585 EXPORT_SYMBOL(__copy_user_nocache);
18586-EXPORT_SYMBOL(copy_from_user);
18587-EXPORT_SYMBOL(copy_to_user);
18588 EXPORT_SYMBOL(__copy_from_user_inatomic);
18589
18590 EXPORT_SYMBOL(copy_page);
18591diff -urNp linux-2.6.32.45/arch/x86/kernel/xsave.c linux-2.6.32.45/arch/x86/kernel/xsave.c
18592--- linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18593+++ linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18594@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18595 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18596 return -1;
18597
18598- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18599+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18600 fx_sw_user->extended_size -
18601 FP_XSTATE_MAGIC2_SIZE));
18602 /*
18603@@ -196,7 +196,7 @@ fx_only:
18604 * the other extended state.
18605 */
18606 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18607- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18608+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18609 }
18610
18611 /*
18612@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18613 if (task_thread_info(tsk)->status & TS_XSAVE)
18614 err = restore_user_xstate(buf);
18615 else
18616- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18617+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
18618 buf);
18619 if (unlikely(err)) {
18620 /*
18621diff -urNp linux-2.6.32.45/arch/x86/kvm/emulate.c linux-2.6.32.45/arch/x86/kvm/emulate.c
18622--- linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18623+++ linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18624@@ -81,8 +81,8 @@
18625 #define Src2CL (1<<29)
18626 #define Src2ImmByte (2<<29)
18627 #define Src2One (3<<29)
18628-#define Src2Imm16 (4<<29)
18629-#define Src2Mask (7<<29)
18630+#define Src2Imm16 (4U<<29)
18631+#define Src2Mask (7U<<29)
18632
18633 enum {
18634 Group1_80, Group1_81, Group1_82, Group1_83,
18635@@ -411,6 +411,7 @@ static u32 group2_table[] = {
18636
18637 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18638 do { \
18639+ unsigned long _tmp; \
18640 __asm__ __volatile__ ( \
18641 _PRE_EFLAGS("0", "4", "2") \
18642 _op _suffix " %"_x"3,%1; " \
18643@@ -424,8 +425,6 @@ static u32 group2_table[] = {
18644 /* Raw emulation: instruction has two explicit operands. */
18645 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18646 do { \
18647- unsigned long _tmp; \
18648- \
18649 switch ((_dst).bytes) { \
18650 case 2: \
18651 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18652@@ -441,7 +440,6 @@ static u32 group2_table[] = {
18653
18654 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18655 do { \
18656- unsigned long _tmp; \
18657 switch ((_dst).bytes) { \
18658 case 1: \
18659 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
18660diff -urNp linux-2.6.32.45/arch/x86/kvm/lapic.c linux-2.6.32.45/arch/x86/kvm/lapic.c
18661--- linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18662+++ linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18663@@ -52,7 +52,7 @@
18664 #define APIC_BUS_CYCLE_NS 1
18665
18666 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18667-#define apic_debug(fmt, arg...)
18668+#define apic_debug(fmt, arg...) do {} while (0)
18669
18670 #define APIC_LVT_NUM 6
18671 /* 14 is the version for Xeon and Pentium 8.4.8*/
18672diff -urNp linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h
18673--- linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18674+++ linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18675@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18676 int level = PT_PAGE_TABLE_LEVEL;
18677 unsigned long mmu_seq;
18678
18679+ pax_track_stack();
18680+
18681 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18682 kvm_mmu_audit(vcpu, "pre page fault");
18683
18684diff -urNp linux-2.6.32.45/arch/x86/kvm/svm.c linux-2.6.32.45/arch/x86/kvm/svm.c
18685--- linux-2.6.32.45/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18686+++ linux-2.6.32.45/arch/x86/kvm/svm.c 2011-08-05 20:33:55.000000000 -0400
18687@@ -2485,7 +2485,11 @@ static void reload_tss(struct kvm_vcpu *
18688 int cpu = raw_smp_processor_id();
18689
18690 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18691+
18692+ pax_open_kernel();
18693 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18694+ pax_close_kernel();
18695+
18696 load_TR_desc();
18697 }
18698
18699@@ -2946,7 +2950,7 @@ static bool svm_gb_page_enable(void)
18700 return true;
18701 }
18702
18703-static struct kvm_x86_ops svm_x86_ops = {
18704+static const struct kvm_x86_ops svm_x86_ops = {
18705 .cpu_has_kvm_support = has_svm,
18706 .disabled_by_bios = is_disabled,
18707 .hardware_setup = svm_hardware_setup,
18708diff -urNp linux-2.6.32.45/arch/x86/kvm/vmx.c linux-2.6.32.45/arch/x86/kvm/vmx.c
18709--- linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18710+++ linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18711@@ -570,7 +570,11 @@ static void reload_tss(void)
18712
18713 kvm_get_gdt(&gdt);
18714 descs = (void *)gdt.base;
18715+
18716+ pax_open_kernel();
18717 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18718+ pax_close_kernel();
18719+
18720 load_TR_desc();
18721 }
18722
18723@@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18724 if (!cpu_has_vmx_flexpriority())
18725 flexpriority_enabled = 0;
18726
18727- if (!cpu_has_vmx_tpr_shadow())
18728- kvm_x86_ops->update_cr8_intercept = NULL;
18729+ if (!cpu_has_vmx_tpr_shadow()) {
18730+ pax_open_kernel();
18731+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18732+ pax_close_kernel();
18733+ }
18734
18735 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18736 kvm_disable_largepages();
18737@@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18738 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18739
18740 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18741- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18742+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18743 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18744 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18745 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18746@@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18747 "jmp .Lkvm_vmx_return \n\t"
18748 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18749 ".Lkvm_vmx_return: "
18750+
18751+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18752+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18753+ ".Lkvm_vmx_return2: "
18754+#endif
18755+
18756 /* Save guest registers, load host registers, keep flags */
18757 "xchg %0, (%%"R"sp) \n\t"
18758 "mov %%"R"ax, %c[rax](%0) \n\t"
18759@@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18760 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18761 #endif
18762 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18763+
18764+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18765+ ,[cs]"i"(__KERNEL_CS)
18766+#endif
18767+
18768 : "cc", "memory"
18769- , R"bx", R"di", R"si"
18770+ , R"ax", R"bx", R"di", R"si"
18771 #ifdef CONFIG_X86_64
18772 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18773 #endif
18774@@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18775 if (vmx->rmode.irq.pending)
18776 fixup_rmode_irq(vmx);
18777
18778- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18779+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18780+
18781+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18782+ loadsegment(fs, __KERNEL_PERCPU);
18783+#endif
18784+
18785+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18786+ __set_fs(current_thread_info()->addr_limit);
18787+#endif
18788+
18789 vmx->launched = 1;
18790
18791 vmx_complete_interrupts(vmx);
18792@@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18793 return false;
18794 }
18795
18796-static struct kvm_x86_ops vmx_x86_ops = {
18797+static const struct kvm_x86_ops vmx_x86_ops = {
18798 .cpu_has_kvm_support = cpu_has_kvm_support,
18799 .disabled_by_bios = vmx_disabled_by_bios,
18800 .hardware_setup = hardware_setup,
18801diff -urNp linux-2.6.32.45/arch/x86/kvm/x86.c linux-2.6.32.45/arch/x86/kvm/x86.c
18802--- linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18803+++ linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18804@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18805 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18806 struct kvm_cpuid_entry2 __user *entries);
18807
18808-struct kvm_x86_ops *kvm_x86_ops;
18809+const struct kvm_x86_ops *kvm_x86_ops;
18810 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18811
18812 int ignore_msrs = 0;
18813@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18814 struct kvm_cpuid2 *cpuid,
18815 struct kvm_cpuid_entry2 __user *entries)
18816 {
18817- int r;
18818+ int r, i;
18819
18820 r = -E2BIG;
18821 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18822 goto out;
18823 r = -EFAULT;
18824- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18825- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18826+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18827 goto out;
18828+ for (i = 0; i < cpuid->nent; ++i) {
18829+ struct kvm_cpuid_entry2 cpuid_entry;
18830+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18831+ goto out;
18832+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18833+ }
18834 vcpu->arch.cpuid_nent = cpuid->nent;
18835 kvm_apic_set_version(vcpu);
18836 return 0;
18837@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18838 struct kvm_cpuid2 *cpuid,
18839 struct kvm_cpuid_entry2 __user *entries)
18840 {
18841- int r;
18842+ int r, i;
18843
18844 vcpu_load(vcpu);
18845 r = -E2BIG;
18846 if (cpuid->nent < vcpu->arch.cpuid_nent)
18847 goto out;
18848 r = -EFAULT;
18849- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18850- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18851+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18852 goto out;
18853+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18854+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18855+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18856+ goto out;
18857+ }
18858 return 0;
18859
18860 out:
18861@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18862 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18863 struct kvm_interrupt *irq)
18864 {
18865- if (irq->irq < 0 || irq->irq >= 256)
18866+ if (irq->irq >= 256)
18867 return -EINVAL;
18868 if (irqchip_in_kernel(vcpu->kvm))
18869 return -ENXIO;
18870@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18871 .notifier_call = kvmclock_cpufreq_notifier
18872 };
18873
18874-int kvm_arch_init(void *opaque)
18875+int kvm_arch_init(const void *opaque)
18876 {
18877 int r, cpu;
18878- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18879+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18880
18881 if (kvm_x86_ops) {
18882 printk(KERN_ERR "kvm: already loaded the other module\n");
18883diff -urNp linux-2.6.32.45/arch/x86/lguest/boot.c linux-2.6.32.45/arch/x86/lguest/boot.c
18884--- linux-2.6.32.45/arch/x86/lguest/boot.c 2011-03-27 14:31:47.000000000 -0400
18885+++ linux-2.6.32.45/arch/x86/lguest/boot.c 2011-08-05 20:33:55.000000000 -0400
18886@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vt
18887 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18888 * Launcher to reboot us.
18889 */
18890-static void lguest_restart(char *reason)
18891+static __noreturn void lguest_restart(char *reason)
18892 {
18893 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
18894+ BUG();
18895 }
18896
18897 /*G:050
18898diff -urNp linux-2.6.32.45/arch/x86/lib/atomic64_32.c linux-2.6.32.45/arch/x86/lib/atomic64_32.c
18899--- linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18900+++ linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18901@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18902 }
18903 EXPORT_SYMBOL(atomic64_cmpxchg);
18904
18905+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18906+{
18907+ return cmpxchg8b(&ptr->counter, old_val, new_val);
18908+}
18909+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18910+
18911 /**
18912 * atomic64_xchg - xchg atomic64 variable
18913 * @ptr: pointer to type atomic64_t
18914@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18915 EXPORT_SYMBOL(atomic64_xchg);
18916
18917 /**
18918+ * atomic64_xchg_unchecked - xchg atomic64 variable
18919+ * @ptr: pointer to type atomic64_unchecked_t
18920+ * @new_val: value to assign
18921+ *
18922+ * Atomically xchgs the value of @ptr to @new_val and returns
18923+ * the old value.
18924+ */
18925+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18926+{
18927+ /*
18928+ * Try first with a (possibly incorrect) assumption about
18929+ * what we have there. We'll do two loops most likely,
18930+ * but we'll get an ownership MESI transaction straight away
18931+ * instead of a read transaction followed by a
18932+ * flush-for-ownership transaction:
18933+ */
18934+ u64 old_val, real_val = 0;
18935+
18936+ do {
18937+ old_val = real_val;
18938+
18939+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18940+
18941+ } while (real_val != old_val);
18942+
18943+ return old_val;
18944+}
18945+EXPORT_SYMBOL(atomic64_xchg_unchecked);
18946+
18947+/**
18948 * atomic64_set - set atomic64 variable
18949 * @ptr: pointer to type atomic64_t
18950 * @new_val: value to assign
18951@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18952 EXPORT_SYMBOL(atomic64_set);
18953
18954 /**
18955-EXPORT_SYMBOL(atomic64_read);
18956+ * atomic64_set_unchecked - set atomic64 variable
18957+ * @ptr: pointer to type atomic64_unchecked_t
18958+ * @new_val: value to assign
18959+ *
18960+ * Atomically sets the value of @ptr to @new_val.
18961+ */
18962+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18963+{
18964+ atomic64_xchg_unchecked(ptr, new_val);
18965+}
18966+EXPORT_SYMBOL(atomic64_set_unchecked);
18967+
18968+/**
18969 * atomic64_add_return - add and return
18970 * @delta: integer value to add
18971 * @ptr: pointer to type atomic64_t
18972@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18973 }
18974 EXPORT_SYMBOL(atomic64_add_return);
18975
18976+/**
18977+ * atomic64_add_return_unchecked - add and return
18978+ * @delta: integer value to add
18979+ * @ptr: pointer to type atomic64_unchecked_t
18980+ *
18981+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
18982+ */
18983+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18984+{
18985+ /*
18986+ * Try first with a (possibly incorrect) assumption about
18987+ * what we have there. We'll do two loops most likely,
18988+ * but we'll get an ownership MESI transaction straight away
18989+ * instead of a read transaction followed by a
18990+ * flush-for-ownership transaction:
18991+ */
18992+ u64 old_val, new_val, real_val = 0;
18993+
18994+ do {
18995+ old_val = real_val;
18996+ new_val = old_val + delta;
18997+
18998+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18999+
19000+ } while (real_val != old_val);
19001+
19002+ return new_val;
19003+}
19004+EXPORT_SYMBOL(atomic64_add_return_unchecked);
19005+
19006 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
19007 {
19008 return atomic64_add_return(-delta, ptr);
19009 }
19010 EXPORT_SYMBOL(atomic64_sub_return);
19011
19012+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19013+{
19014+ return atomic64_add_return_unchecked(-delta, ptr);
19015+}
19016+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
19017+
19018 u64 atomic64_inc_return(atomic64_t *ptr)
19019 {
19020 return atomic64_add_return(1, ptr);
19021 }
19022 EXPORT_SYMBOL(atomic64_inc_return);
19023
19024+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
19025+{
19026+ return atomic64_add_return_unchecked(1, ptr);
19027+}
19028+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
19029+
19030 u64 atomic64_dec_return(atomic64_t *ptr)
19031 {
19032 return atomic64_sub_return(1, ptr);
19033 }
19034 EXPORT_SYMBOL(atomic64_dec_return);
19035
19036+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
19037+{
19038+ return atomic64_sub_return_unchecked(1, ptr);
19039+}
19040+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
19041+
19042 /**
19043 * atomic64_add - add integer to atomic64 variable
19044 * @delta: integer value to add
19045@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
19046 EXPORT_SYMBOL(atomic64_add);
19047
19048 /**
19049+ * atomic64_add_unchecked - add integer to atomic64 variable
19050+ * @delta: integer value to add
19051+ * @ptr: pointer to type atomic64_unchecked_t
19052+ *
19053+ * Atomically adds @delta to @ptr.
19054+ */
19055+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19056+{
19057+ atomic64_add_return_unchecked(delta, ptr);
19058+}
19059+EXPORT_SYMBOL(atomic64_add_unchecked);
19060+
19061+/**
19062 * atomic64_sub - subtract the atomic64 variable
19063 * @delta: integer value to subtract
19064 * @ptr: pointer to type atomic64_t
19065@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
19066 EXPORT_SYMBOL(atomic64_sub);
19067
19068 /**
19069+ * atomic64_sub_unchecked - subtract the atomic64 variable
19070+ * @delta: integer value to subtract
19071+ * @ptr: pointer to type atomic64_unchecked_t
19072+ *
19073+ * Atomically subtracts @delta from @ptr.
19074+ */
19075+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19076+{
19077+ atomic64_add_unchecked(-delta, ptr);
19078+}
19079+EXPORT_SYMBOL(atomic64_sub_unchecked);
19080+
19081+/**
19082 * atomic64_sub_and_test - subtract value from variable and test result
19083 * @delta: integer value to subtract
19084 * @ptr: pointer to type atomic64_t
19085@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
19086 EXPORT_SYMBOL(atomic64_inc);
19087
19088 /**
19089+ * atomic64_inc_unchecked - increment atomic64 variable
19090+ * @ptr: pointer to type atomic64_unchecked_t
19091+ *
19092+ * Atomically increments @ptr by 1.
19093+ */
19094+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
19095+{
19096+ atomic64_add_unchecked(1, ptr);
19097+}
19098+EXPORT_SYMBOL(atomic64_inc_unchecked);
19099+
19100+/**
19101 * atomic64_dec - decrement atomic64 variable
19102 * @ptr: pointer to type atomic64_t
19103 *
19104@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
19105 EXPORT_SYMBOL(atomic64_dec);
19106
19107 /**
19108+ * atomic64_dec_unchecked - decrement atomic64 variable
19109+ * @ptr: pointer to type atomic64_unchecked_t
19110+ *
19111+ * Atomically decrements @ptr by 1.
19112+ */
19113+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
19114+{
19115+ atomic64_sub_unchecked(1, ptr);
19116+}
19117+EXPORT_SYMBOL(atomic64_dec_unchecked);
19118+
19119+/**
19120 * atomic64_dec_and_test - decrement and test
19121 * @ptr: pointer to type atomic64_t
19122 *
19123diff -urNp linux-2.6.32.45/arch/x86/lib/checksum_32.S linux-2.6.32.45/arch/x86/lib/checksum_32.S
19124--- linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
19125+++ linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
19126@@ -28,7 +28,8 @@
19127 #include <linux/linkage.h>
19128 #include <asm/dwarf2.h>
19129 #include <asm/errno.h>
19130-
19131+#include <asm/segment.h>
19132+
19133 /*
19134 * computes a partial checksum, e.g. for TCP/UDP fragments
19135 */
19136@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
19137
19138 #define ARGBASE 16
19139 #define FP 12
19140-
19141-ENTRY(csum_partial_copy_generic)
19142+
19143+ENTRY(csum_partial_copy_generic_to_user)
19144 CFI_STARTPROC
19145+
19146+#ifdef CONFIG_PAX_MEMORY_UDEREF
19147+ pushl %gs
19148+ CFI_ADJUST_CFA_OFFSET 4
19149+ popl %es
19150+ CFI_ADJUST_CFA_OFFSET -4
19151+ jmp csum_partial_copy_generic
19152+#endif
19153+
19154+ENTRY(csum_partial_copy_generic_from_user)
19155+
19156+#ifdef CONFIG_PAX_MEMORY_UDEREF
19157+ pushl %gs
19158+ CFI_ADJUST_CFA_OFFSET 4
19159+ popl %ds
19160+ CFI_ADJUST_CFA_OFFSET -4
19161+#endif
19162+
19163+ENTRY(csum_partial_copy_generic)
19164 subl $4,%esp
19165 CFI_ADJUST_CFA_OFFSET 4
19166 pushl %edi
19167@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
19168 jmp 4f
19169 SRC(1: movw (%esi), %bx )
19170 addl $2, %esi
19171-DST( movw %bx, (%edi) )
19172+DST( movw %bx, %es:(%edi) )
19173 addl $2, %edi
19174 addw %bx, %ax
19175 adcl $0, %eax
19176@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
19177 SRC(1: movl (%esi), %ebx )
19178 SRC( movl 4(%esi), %edx )
19179 adcl %ebx, %eax
19180-DST( movl %ebx, (%edi) )
19181+DST( movl %ebx, %es:(%edi) )
19182 adcl %edx, %eax
19183-DST( movl %edx, 4(%edi) )
19184+DST( movl %edx, %es:4(%edi) )
19185
19186 SRC( movl 8(%esi), %ebx )
19187 SRC( movl 12(%esi), %edx )
19188 adcl %ebx, %eax
19189-DST( movl %ebx, 8(%edi) )
19190+DST( movl %ebx, %es:8(%edi) )
19191 adcl %edx, %eax
19192-DST( movl %edx, 12(%edi) )
19193+DST( movl %edx, %es:12(%edi) )
19194
19195 SRC( movl 16(%esi), %ebx )
19196 SRC( movl 20(%esi), %edx )
19197 adcl %ebx, %eax
19198-DST( movl %ebx, 16(%edi) )
19199+DST( movl %ebx, %es:16(%edi) )
19200 adcl %edx, %eax
19201-DST( movl %edx, 20(%edi) )
19202+DST( movl %edx, %es:20(%edi) )
19203
19204 SRC( movl 24(%esi), %ebx )
19205 SRC( movl 28(%esi), %edx )
19206 adcl %ebx, %eax
19207-DST( movl %ebx, 24(%edi) )
19208+DST( movl %ebx, %es:24(%edi) )
19209 adcl %edx, %eax
19210-DST( movl %edx, 28(%edi) )
19211+DST( movl %edx, %es:28(%edi) )
19212
19213 lea 32(%esi), %esi
19214 lea 32(%edi), %edi
19215@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
19216 shrl $2, %edx # This clears CF
19217 SRC(3: movl (%esi), %ebx )
19218 adcl %ebx, %eax
19219-DST( movl %ebx, (%edi) )
19220+DST( movl %ebx, %es:(%edi) )
19221 lea 4(%esi), %esi
19222 lea 4(%edi), %edi
19223 dec %edx
19224@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
19225 jb 5f
19226 SRC( movw (%esi), %cx )
19227 leal 2(%esi), %esi
19228-DST( movw %cx, (%edi) )
19229+DST( movw %cx, %es:(%edi) )
19230 leal 2(%edi), %edi
19231 je 6f
19232 shll $16,%ecx
19233 SRC(5: movb (%esi), %cl )
19234-DST( movb %cl, (%edi) )
19235+DST( movb %cl, %es:(%edi) )
19236 6: addl %ecx, %eax
19237 adcl $0, %eax
19238 7:
19239@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
19240
19241 6001:
19242 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19243- movl $-EFAULT, (%ebx)
19244+ movl $-EFAULT, %ss:(%ebx)
19245
19246 # zero the complete destination - computing the rest
19247 # is too much work
19248@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
19249
19250 6002:
19251 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19252- movl $-EFAULT,(%ebx)
19253+ movl $-EFAULT,%ss:(%ebx)
19254 jmp 5000b
19255
19256 .previous
19257
19258+ pushl %ss
19259+ CFI_ADJUST_CFA_OFFSET 4
19260+ popl %ds
19261+ CFI_ADJUST_CFA_OFFSET -4
19262+ pushl %ss
19263+ CFI_ADJUST_CFA_OFFSET 4
19264+ popl %es
19265+ CFI_ADJUST_CFA_OFFSET -4
19266 popl %ebx
19267 CFI_ADJUST_CFA_OFFSET -4
19268 CFI_RESTORE ebx
19269@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
19270 CFI_ADJUST_CFA_OFFSET -4
19271 ret
19272 CFI_ENDPROC
19273-ENDPROC(csum_partial_copy_generic)
19274+ENDPROC(csum_partial_copy_generic_to_user)
19275
19276 #else
19277
19278 /* Version for PentiumII/PPro */
19279
19280 #define ROUND1(x) \
19281+ nop; nop; nop; \
19282 SRC(movl x(%esi), %ebx ) ; \
19283 addl %ebx, %eax ; \
19284- DST(movl %ebx, x(%edi) ) ;
19285+ DST(movl %ebx, %es:x(%edi)) ;
19286
19287 #define ROUND(x) \
19288+ nop; nop; nop; \
19289 SRC(movl x(%esi), %ebx ) ; \
19290 adcl %ebx, %eax ; \
19291- DST(movl %ebx, x(%edi) ) ;
19292+ DST(movl %ebx, %es:x(%edi)) ;
19293
19294 #define ARGBASE 12
19295-
19296-ENTRY(csum_partial_copy_generic)
19297+
19298+ENTRY(csum_partial_copy_generic_to_user)
19299 CFI_STARTPROC
19300+
19301+#ifdef CONFIG_PAX_MEMORY_UDEREF
19302+ pushl %gs
19303+ CFI_ADJUST_CFA_OFFSET 4
19304+ popl %es
19305+ CFI_ADJUST_CFA_OFFSET -4
19306+ jmp csum_partial_copy_generic
19307+#endif
19308+
19309+ENTRY(csum_partial_copy_generic_from_user)
19310+
19311+#ifdef CONFIG_PAX_MEMORY_UDEREF
19312+ pushl %gs
19313+ CFI_ADJUST_CFA_OFFSET 4
19314+ popl %ds
19315+ CFI_ADJUST_CFA_OFFSET -4
19316+#endif
19317+
19318+ENTRY(csum_partial_copy_generic)
19319 pushl %ebx
19320 CFI_ADJUST_CFA_OFFSET 4
19321 CFI_REL_OFFSET ebx, 0
19322@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
19323 subl %ebx, %edi
19324 lea -1(%esi),%edx
19325 andl $-32,%edx
19326- lea 3f(%ebx,%ebx), %ebx
19327+ lea 3f(%ebx,%ebx,2), %ebx
19328 testl %esi, %esi
19329 jmp *%ebx
19330 1: addl $64,%esi
19331@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
19332 jb 5f
19333 SRC( movw (%esi), %dx )
19334 leal 2(%esi), %esi
19335-DST( movw %dx, (%edi) )
19336+DST( movw %dx, %es:(%edi) )
19337 leal 2(%edi), %edi
19338 je 6f
19339 shll $16,%edx
19340 5:
19341 SRC( movb (%esi), %dl )
19342-DST( movb %dl, (%edi) )
19343+DST( movb %dl, %es:(%edi) )
19344 6: addl %edx, %eax
19345 adcl $0, %eax
19346 7:
19347 .section .fixup, "ax"
19348 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19349- movl $-EFAULT, (%ebx)
19350+ movl $-EFAULT, %ss:(%ebx)
19351 # zero the complete destination (computing the rest is too much work)
19352 movl ARGBASE+8(%esp),%edi # dst
19353 movl ARGBASE+12(%esp),%ecx # len
19354@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
19355 rep; stosb
19356 jmp 7b
19357 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19358- movl $-EFAULT, (%ebx)
19359+ movl $-EFAULT, %ss:(%ebx)
19360 jmp 7b
19361 .previous
19362
19363+#ifdef CONFIG_PAX_MEMORY_UDEREF
19364+ pushl %ss
19365+ CFI_ADJUST_CFA_OFFSET 4
19366+ popl %ds
19367+ CFI_ADJUST_CFA_OFFSET -4
19368+ pushl %ss
19369+ CFI_ADJUST_CFA_OFFSET 4
19370+ popl %es
19371+ CFI_ADJUST_CFA_OFFSET -4
19372+#endif
19373+
19374 popl %esi
19375 CFI_ADJUST_CFA_OFFSET -4
19376 CFI_RESTORE esi
19377@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
19378 CFI_RESTORE ebx
19379 ret
19380 CFI_ENDPROC
19381-ENDPROC(csum_partial_copy_generic)
19382+ENDPROC(csum_partial_copy_generic_to_user)
19383
19384 #undef ROUND
19385 #undef ROUND1
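
Under CONFIG_PAX_MEMORY_UDEREF the checksum-copy routine above no longer has a single entry point: callers go through csum_partial_copy_generic_to_user or _from_user stubs that copy %gs (which UDEREF uses to reach userland) into %es or %ds, so only the user-side pointer of the copy is dereferenced through the user segment while the kernel-side pointer stays in the normal address space. A rough userspace model of that "only one side is user memory" discipline is sketched below; the window bounds and helper names are invented for illustration and are not kernel APIs:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pretend this buffer is the userland "segment"; everything else is kernel. */
static unsigned char user_space[4096];

static int in_user_window(const void *p, size_t len)
{
	uintptr_t a = (uintptr_t)p, s = (uintptr_t)user_space;
	return a >= s && len <= sizeof(user_space) && a - s <= sizeof(user_space) - len;
}

/* to_user: only the destination must lie in the user window. */
static int copy_to_user_model(void *udst, const void *ksrc, size_t len)
{
	if (!in_user_window(udst, len))
		return -1;
	memcpy(udst, ksrc, len);
	return 0;
}

/* from_user: only the source must lie in the user window. */
static int copy_from_user_model(void *kdst, const void *usrc, size_t len)
{
	if (!in_user_window(usrc, len))
		return -1;
	memcpy(kdst, usrc, len);
	return 0;
}

int main(void)
{
	char kbuf[16] = "kernel data";
	printf("to user:   %d\n", copy_to_user_model(user_space, kbuf, sizeof(kbuf)));
	printf("from user: %d\n", copy_from_user_model(kbuf, user_space, sizeof(kbuf)));
	printf("bad dest:  %d\n", copy_to_user_model(kbuf, kbuf, sizeof(kbuf))); /* rejected */
	return 0;
}
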
19386diff -urNp linux-2.6.32.45/arch/x86/lib/clear_page_64.S linux-2.6.32.45/arch/x86/lib/clear_page_64.S
19387--- linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
19388+++ linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
19389@@ -43,7 +43,7 @@ ENDPROC(clear_page)
19390
19391 #include <asm/cpufeature.h>
19392
19393- .section .altinstr_replacement,"ax"
19394+ .section .altinstr_replacement,"a"
19395 1: .byte 0xeb /* jmp <disp8> */
19396 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19397 2:
19398diff -urNp linux-2.6.32.45/arch/x86/lib/copy_page_64.S linux-2.6.32.45/arch/x86/lib/copy_page_64.S
19399--- linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
19400+++ linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
19401@@ -104,7 +104,7 @@ ENDPROC(copy_page)
19402
19403 #include <asm/cpufeature.h>
19404
19405- .section .altinstr_replacement,"ax"
19406+ .section .altinstr_replacement,"a"
19407 1: .byte 0xeb /* jmp <disp8> */
19408 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19409 2:
19410diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_64.S linux-2.6.32.45/arch/x86/lib/copy_user_64.S
19411--- linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
19412+++ linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
19413@@ -15,13 +15,14 @@
19414 #include <asm/asm-offsets.h>
19415 #include <asm/thread_info.h>
19416 #include <asm/cpufeature.h>
19417+#include <asm/pgtable.h>
19418
19419 .macro ALTERNATIVE_JUMP feature,orig,alt
19420 0:
19421 .byte 0xe9 /* 32bit jump */
19422 .long \orig-1f /* by default jump to orig */
19423 1:
19424- .section .altinstr_replacement,"ax"
19425+ .section .altinstr_replacement,"a"
19426 2: .byte 0xe9 /* near jump with 32bit immediate */
19427 .long \alt-1b /* offset */ /* or alternatively to alt */
19428 .previous
19429@@ -64,49 +65,19 @@
19430 #endif
19431 .endm
19432
19433-/* Standard copy_to_user with segment limit checking */
19434-ENTRY(copy_to_user)
19435- CFI_STARTPROC
19436- GET_THREAD_INFO(%rax)
19437- movq %rdi,%rcx
19438- addq %rdx,%rcx
19439- jc bad_to_user
19440- cmpq TI_addr_limit(%rax),%rcx
19441- ja bad_to_user
19442- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19443- CFI_ENDPROC
19444-ENDPROC(copy_to_user)
19445-
19446-/* Standard copy_from_user with segment limit checking */
19447-ENTRY(copy_from_user)
19448- CFI_STARTPROC
19449- GET_THREAD_INFO(%rax)
19450- movq %rsi,%rcx
19451- addq %rdx,%rcx
19452- jc bad_from_user
19453- cmpq TI_addr_limit(%rax),%rcx
19454- ja bad_from_user
19455- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19456- CFI_ENDPROC
19457-ENDPROC(copy_from_user)
19458-
19459 ENTRY(copy_user_generic)
19460 CFI_STARTPROC
19461 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19462 CFI_ENDPROC
19463 ENDPROC(copy_user_generic)
19464
19465-ENTRY(__copy_from_user_inatomic)
19466- CFI_STARTPROC
19467- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19468- CFI_ENDPROC
19469-ENDPROC(__copy_from_user_inatomic)
19470-
19471 .section .fixup,"ax"
19472 /* must zero dest */
19473 ENTRY(bad_from_user)
19474 bad_from_user:
19475 CFI_STARTPROC
19476+ testl %edx,%edx
19477+ js bad_to_user
19478 movl %edx,%ecx
19479 xorl %eax,%eax
19480 rep
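
The extra `testl %edx,%edx; js bad_to_user` added to the fixup above bails out before the zero-fill when the remaining byte count is negative, since the following `rep; stosb` would otherwise treat it as a huge unsigned count. The same pitfall in plain C (the function name is made up for the example):

#include <stdio.h>
#include <string.h>

/* Zero the tail of a destination buffer after a failed copy. */
static void zero_tail_model(void *dst, long remaining)
{
	if (remaining < 0) {             /* the patch adds exactly this kind of guard */
		fprintf(stderr, "bogus length %ld, not zeroing\n", remaining);
		return;
	}
	/* Without the guard, (size_t)-1 would ask memset to wipe an absurd amount. */
	memset(dst, 0, (size_t)remaining);
}

int main(void)
{
	char buf[32];
	zero_tail_model(buf, 16);   /* fine   */
	zero_tail_model(buf, -1);   /* caught */
	return 0;
}
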
19481diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S
19482--- linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19483+++ linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19484@@ -14,6 +14,7 @@
19485 #include <asm/current.h>
19486 #include <asm/asm-offsets.h>
19487 #include <asm/thread_info.h>
19488+#include <asm/pgtable.h>
19489
19490 .macro ALIGN_DESTINATION
19491 #ifdef FIX_ALIGNMENT
19492@@ -50,6 +51,15 @@
19493 */
19494 ENTRY(__copy_user_nocache)
19495 CFI_STARTPROC
19496+
19497+#ifdef CONFIG_PAX_MEMORY_UDEREF
19498+ mov $PAX_USER_SHADOW_BASE,%rcx
19499+ cmp %rcx,%rsi
19500+ jae 1f
19501+ add %rcx,%rsi
19502+1:
19503+#endif
19504+
19505 cmpl $8,%edx
19506 jb 20f /* less then 8 bytes, go to byte copy loop */
19507 ALIGN_DESTINATION
19508diff -urNp linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c
19509--- linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19510+++ linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19511@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19512 len -= 2;
19513 }
19514 }
19515+
19516+#ifdef CONFIG_PAX_MEMORY_UDEREF
19517+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19518+ src += PAX_USER_SHADOW_BASE;
19519+#endif
19520+
19521 isum = csum_partial_copy_generic((__force const void *)src,
19522 dst, len, isum, errp, NULL);
19523 if (unlikely(*errp))
19524@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19525 }
19526
19527 *errp = 0;
19528+
19529+#ifdef CONFIG_PAX_MEMORY_UDEREF
19530+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19531+ dst += PAX_USER_SHADOW_BASE;
19532+#endif
19533+
19534 return csum_partial_copy_generic(src, (void __force *)dst,
19535 len, isum, NULL, errp);
19536 }
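
Both checksum wrappers above apply the same amd64 UDEREF idiom also seen in __copy_user_nocache: if a __user pointer still refers to the low, direct userland mapping (below PAX_USER_SHADOW_BASE), it is shifted up into the shadow mapping before the copy routine dereferences it. A standalone sketch of that rebasing rule, with a made-up constant standing in for PAX_USER_SHADOW_BASE (the real value depends on the kernel's address-space layout):

#include <stdint.h>
#include <stdio.h>

/* Illustrative constant only; not the kernel's PAX_USER_SHADOW_BASE. */
#define SHADOW_BASE_MODEL 0x0000100000000000ULL

/* Rebase a "user" address into the shadow mapping if it is still below it. */
static uint64_t uderef_rebase(uint64_t uaddr)
{
	if (uaddr < SHADOW_BASE_MODEL)
		uaddr += SHADOW_BASE_MODEL;
	return uaddr;
}

int main(void)
{
	uint64_t low  = 0x00007f0000001000ULL;  /* classic userland address   */
	uint64_t high = uderef_rebase(low);     /* moved into the shadow area */

	printf("%#llx -> %#llx\n", (unsigned long long)low, (unsigned long long)high);
	/* Already-rebased addresses are left alone, mirroring the "< base" test. */
	printf("idempotent: %#llx\n", (unsigned long long)uderef_rebase(high));
	return 0;
}
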
19537diff -urNp linux-2.6.32.45/arch/x86/lib/getuser.S linux-2.6.32.45/arch/x86/lib/getuser.S
19538--- linux-2.6.32.45/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19539+++ linux-2.6.32.45/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19540@@ -33,14 +33,35 @@
19541 #include <asm/asm-offsets.h>
19542 #include <asm/thread_info.h>
19543 #include <asm/asm.h>
19544+#include <asm/segment.h>
19545+#include <asm/pgtable.h>
19546+
19547+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19548+#define __copyuser_seg gs;
19549+#else
19550+#define __copyuser_seg
19551+#endif
19552
19553 .text
19554 ENTRY(__get_user_1)
19555 CFI_STARTPROC
19556+
19557+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19558 GET_THREAD_INFO(%_ASM_DX)
19559 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19560 jae bad_get_user
19561-1: movzb (%_ASM_AX),%edx
19562+
19563+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19564+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19565+ cmp %_ASM_DX,%_ASM_AX
19566+ jae 1234f
19567+ add %_ASM_DX,%_ASM_AX
19568+1234:
19569+#endif
19570+
19571+#endif
19572+
19573+1: __copyuser_seg movzb (%_ASM_AX),%edx
19574 xor %eax,%eax
19575 ret
19576 CFI_ENDPROC
19577@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19578 ENTRY(__get_user_2)
19579 CFI_STARTPROC
19580 add $1,%_ASM_AX
19581+
19582+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19583 jc bad_get_user
19584 GET_THREAD_INFO(%_ASM_DX)
19585 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19586 jae bad_get_user
19587-2: movzwl -1(%_ASM_AX),%edx
19588+
19589+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19590+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19591+ cmp %_ASM_DX,%_ASM_AX
19592+ jae 1234f
19593+ add %_ASM_DX,%_ASM_AX
19594+1234:
19595+#endif
19596+
19597+#endif
19598+
19599+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19600 xor %eax,%eax
19601 ret
19602 CFI_ENDPROC
19603@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19604 ENTRY(__get_user_4)
19605 CFI_STARTPROC
19606 add $3,%_ASM_AX
19607+
19608+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19609 jc bad_get_user
19610 GET_THREAD_INFO(%_ASM_DX)
19611 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19612 jae bad_get_user
19613-3: mov -3(%_ASM_AX),%edx
19614+
19615+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19616+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19617+ cmp %_ASM_DX,%_ASM_AX
19618+ jae 1234f
19619+ add %_ASM_DX,%_ASM_AX
19620+1234:
19621+#endif
19622+
19623+#endif
19624+
19625+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19626 xor %eax,%eax
19627 ret
19628 CFI_ENDPROC
19629@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19630 GET_THREAD_INFO(%_ASM_DX)
19631 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19632 jae bad_get_user
19633+
19634+#ifdef CONFIG_PAX_MEMORY_UDEREF
19635+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19636+ cmp %_ASM_DX,%_ASM_AX
19637+ jae 1234f
19638+ add %_ASM_DX,%_ASM_AX
19639+1234:
19640+#endif
19641+
19642 4: movq -7(%_ASM_AX),%_ASM_DX
19643 xor %eax,%eax
19644 ret
19645diff -urNp linux-2.6.32.45/arch/x86/lib/memcpy_64.S linux-2.6.32.45/arch/x86/lib/memcpy_64.S
19646--- linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19647+++ linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19648@@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19649 * It is also a lot simpler. Use this when possible:
19650 */
19651
19652- .section .altinstr_replacement, "ax"
19653+ .section .altinstr_replacement, "a"
19654 1: .byte 0xeb /* jmp <disp8> */
19655 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19656 2:
19657diff -urNp linux-2.6.32.45/arch/x86/lib/memset_64.S linux-2.6.32.45/arch/x86/lib/memset_64.S
19658--- linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19659+++ linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19660@@ -118,7 +118,7 @@ ENDPROC(__memset)
19661
19662 #include <asm/cpufeature.h>
19663
19664- .section .altinstr_replacement,"ax"
19665+ .section .altinstr_replacement,"a"
19666 1: .byte 0xeb /* jmp <disp8> */
19667 .byte (memset_c - memset) - (2f - 1b) /* offset */
19668 2:
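
The repeated one-character change in the hunks above ("ax" to "a") drops the executable flag from .altinstr_replacement: the replacement instructions are only ever copied over the original code during alternatives patching, so the section holding them needs to be readable but, presumably, not mapped executable under KERNEXEC's stricter section permissions. The mechanism reduced to a toy model (byte arrays instead of real code; names invented):

#include <stdio.h>
#include <string.h>

/* "Live" code the CPU would execute (here just a byte buffer). */
static unsigned char live_code[4] = { 0x90, 0x90, 0x90, 0x90 };       /* nops */

/* Replacement bytes: const/read-only is enough, they are never executed in place. */
static const unsigned char replacement[4] = { 0xf3, 0x90, 0xf3, 0x90 };

static void apply_alternative(void)
{
	/* Alternatives patching is just a copy out of the replacement section. */
	memcpy(live_code, replacement, sizeof(replacement));
}

int main(void)
{
	apply_alternative();
	for (size_t i = 0; i < sizeof(live_code); i++)
		printf("%02x ", live_code[i]);
	printf("\n");
	return 0;
}
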
19669diff -urNp linux-2.6.32.45/arch/x86/lib/mmx_32.c linux-2.6.32.45/arch/x86/lib/mmx_32.c
19670--- linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19671+++ linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19672@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19673 {
19674 void *p;
19675 int i;
19676+ unsigned long cr0;
19677
19678 if (unlikely(in_interrupt()))
19679 return __memcpy(to, from, len);
19680@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19681 kernel_fpu_begin();
19682
19683 __asm__ __volatile__ (
19684- "1: prefetch (%0)\n" /* This set is 28 bytes */
19685- " prefetch 64(%0)\n"
19686- " prefetch 128(%0)\n"
19687- " prefetch 192(%0)\n"
19688- " prefetch 256(%0)\n"
19689+ "1: prefetch (%1)\n" /* This set is 28 bytes */
19690+ " prefetch 64(%1)\n"
19691+ " prefetch 128(%1)\n"
19692+ " prefetch 192(%1)\n"
19693+ " prefetch 256(%1)\n"
19694 "2: \n"
19695 ".section .fixup, \"ax\"\n"
19696- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19697+ "3: \n"
19698+
19699+#ifdef CONFIG_PAX_KERNEXEC
19700+ " movl %%cr0, %0\n"
19701+ " movl %0, %%eax\n"
19702+ " andl $0xFFFEFFFF, %%eax\n"
19703+ " movl %%eax, %%cr0\n"
19704+#endif
19705+
19706+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19707+
19708+#ifdef CONFIG_PAX_KERNEXEC
19709+ " movl %0, %%cr0\n"
19710+#endif
19711+
19712 " jmp 2b\n"
19713 ".previous\n"
19714 _ASM_EXTABLE(1b, 3b)
19715- : : "r" (from));
19716+ : "=&r" (cr0) : "r" (from) : "ax");
19717
19718 for ( ; i > 5; i--) {
19719 __asm__ __volatile__ (
19720- "1: prefetch 320(%0)\n"
19721- "2: movq (%0), %%mm0\n"
19722- " movq 8(%0), %%mm1\n"
19723- " movq 16(%0), %%mm2\n"
19724- " movq 24(%0), %%mm3\n"
19725- " movq %%mm0, (%1)\n"
19726- " movq %%mm1, 8(%1)\n"
19727- " movq %%mm2, 16(%1)\n"
19728- " movq %%mm3, 24(%1)\n"
19729- " movq 32(%0), %%mm0\n"
19730- " movq 40(%0), %%mm1\n"
19731- " movq 48(%0), %%mm2\n"
19732- " movq 56(%0), %%mm3\n"
19733- " movq %%mm0, 32(%1)\n"
19734- " movq %%mm1, 40(%1)\n"
19735- " movq %%mm2, 48(%1)\n"
19736- " movq %%mm3, 56(%1)\n"
19737+ "1: prefetch 320(%1)\n"
19738+ "2: movq (%1), %%mm0\n"
19739+ " movq 8(%1), %%mm1\n"
19740+ " movq 16(%1), %%mm2\n"
19741+ " movq 24(%1), %%mm3\n"
19742+ " movq %%mm0, (%2)\n"
19743+ " movq %%mm1, 8(%2)\n"
19744+ " movq %%mm2, 16(%2)\n"
19745+ " movq %%mm3, 24(%2)\n"
19746+ " movq 32(%1), %%mm0\n"
19747+ " movq 40(%1), %%mm1\n"
19748+ " movq 48(%1), %%mm2\n"
19749+ " movq 56(%1), %%mm3\n"
19750+ " movq %%mm0, 32(%2)\n"
19751+ " movq %%mm1, 40(%2)\n"
19752+ " movq %%mm2, 48(%2)\n"
19753+ " movq %%mm3, 56(%2)\n"
19754 ".section .fixup, \"ax\"\n"
19755- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19756+ "3:\n"
19757+
19758+#ifdef CONFIG_PAX_KERNEXEC
19759+ " movl %%cr0, %0\n"
19760+ " movl %0, %%eax\n"
19761+ " andl $0xFFFEFFFF, %%eax\n"
19762+ " movl %%eax, %%cr0\n"
19763+#endif
19764+
19765+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19766+
19767+#ifdef CONFIG_PAX_KERNEXEC
19768+ " movl %0, %%cr0\n"
19769+#endif
19770+
19771 " jmp 2b\n"
19772 ".previous\n"
19773 _ASM_EXTABLE(1b, 3b)
19774- : : "r" (from), "r" (to) : "memory");
19775+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19776
19777 from += 64;
19778 to += 64;
19779@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19780 static void fast_copy_page(void *to, void *from)
19781 {
19782 int i;
19783+ unsigned long cr0;
19784
19785 kernel_fpu_begin();
19786
19787@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19788 * but that is for later. -AV
19789 */
19790 __asm__ __volatile__(
19791- "1: prefetch (%0)\n"
19792- " prefetch 64(%0)\n"
19793- " prefetch 128(%0)\n"
19794- " prefetch 192(%0)\n"
19795- " prefetch 256(%0)\n"
19796+ "1: prefetch (%1)\n"
19797+ " prefetch 64(%1)\n"
19798+ " prefetch 128(%1)\n"
19799+ " prefetch 192(%1)\n"
19800+ " prefetch 256(%1)\n"
19801 "2: \n"
19802 ".section .fixup, \"ax\"\n"
19803- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19804+ "3: \n"
19805+
19806+#ifdef CONFIG_PAX_KERNEXEC
19807+ " movl %%cr0, %0\n"
19808+ " movl %0, %%eax\n"
19809+ " andl $0xFFFEFFFF, %%eax\n"
19810+ " movl %%eax, %%cr0\n"
19811+#endif
19812+
19813+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19814+
19815+#ifdef CONFIG_PAX_KERNEXEC
19816+ " movl %0, %%cr0\n"
19817+#endif
19818+
19819 " jmp 2b\n"
19820 ".previous\n"
19821- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19822+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19823
19824 for (i = 0; i < (4096-320)/64; i++) {
19825 __asm__ __volatile__ (
19826- "1: prefetch 320(%0)\n"
19827- "2: movq (%0), %%mm0\n"
19828- " movntq %%mm0, (%1)\n"
19829- " movq 8(%0), %%mm1\n"
19830- " movntq %%mm1, 8(%1)\n"
19831- " movq 16(%0), %%mm2\n"
19832- " movntq %%mm2, 16(%1)\n"
19833- " movq 24(%0), %%mm3\n"
19834- " movntq %%mm3, 24(%1)\n"
19835- " movq 32(%0), %%mm4\n"
19836- " movntq %%mm4, 32(%1)\n"
19837- " movq 40(%0), %%mm5\n"
19838- " movntq %%mm5, 40(%1)\n"
19839- " movq 48(%0), %%mm6\n"
19840- " movntq %%mm6, 48(%1)\n"
19841- " movq 56(%0), %%mm7\n"
19842- " movntq %%mm7, 56(%1)\n"
19843+ "1: prefetch 320(%1)\n"
19844+ "2: movq (%1), %%mm0\n"
19845+ " movntq %%mm0, (%2)\n"
19846+ " movq 8(%1), %%mm1\n"
19847+ " movntq %%mm1, 8(%2)\n"
19848+ " movq 16(%1), %%mm2\n"
19849+ " movntq %%mm2, 16(%2)\n"
19850+ " movq 24(%1), %%mm3\n"
19851+ " movntq %%mm3, 24(%2)\n"
19852+ " movq 32(%1), %%mm4\n"
19853+ " movntq %%mm4, 32(%2)\n"
19854+ " movq 40(%1), %%mm5\n"
19855+ " movntq %%mm5, 40(%2)\n"
19856+ " movq 48(%1), %%mm6\n"
19857+ " movntq %%mm6, 48(%2)\n"
19858+ " movq 56(%1), %%mm7\n"
19859+ " movntq %%mm7, 56(%2)\n"
19860 ".section .fixup, \"ax\"\n"
19861- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19862+ "3:\n"
19863+
19864+#ifdef CONFIG_PAX_KERNEXEC
19865+ " movl %%cr0, %0\n"
19866+ " movl %0, %%eax\n"
19867+ " andl $0xFFFEFFFF, %%eax\n"
19868+ " movl %%eax, %%cr0\n"
19869+#endif
19870+
19871+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19872+
19873+#ifdef CONFIG_PAX_KERNEXEC
19874+ " movl %0, %%cr0\n"
19875+#endif
19876+
19877 " jmp 2b\n"
19878 ".previous\n"
19879- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19880+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19881
19882 from += 64;
19883 to += 64;
19884@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19885 static void fast_copy_page(void *to, void *from)
19886 {
19887 int i;
19888+ unsigned long cr0;
19889
19890 kernel_fpu_begin();
19891
19892 __asm__ __volatile__ (
19893- "1: prefetch (%0)\n"
19894- " prefetch 64(%0)\n"
19895- " prefetch 128(%0)\n"
19896- " prefetch 192(%0)\n"
19897- " prefetch 256(%0)\n"
19898+ "1: prefetch (%1)\n"
19899+ " prefetch 64(%1)\n"
19900+ " prefetch 128(%1)\n"
19901+ " prefetch 192(%1)\n"
19902+ " prefetch 256(%1)\n"
19903 "2: \n"
19904 ".section .fixup, \"ax\"\n"
19905- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19906+ "3: \n"
19907+
19908+#ifdef CONFIG_PAX_KERNEXEC
19909+ " movl %%cr0, %0\n"
19910+ " movl %0, %%eax\n"
19911+ " andl $0xFFFEFFFF, %%eax\n"
19912+ " movl %%eax, %%cr0\n"
19913+#endif
19914+
19915+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19916+
19917+#ifdef CONFIG_PAX_KERNEXEC
19918+ " movl %0, %%cr0\n"
19919+#endif
19920+
19921 " jmp 2b\n"
19922 ".previous\n"
19923- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19924+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19925
19926 for (i = 0; i < 4096/64; i++) {
19927 __asm__ __volatile__ (
19928- "1: prefetch 320(%0)\n"
19929- "2: movq (%0), %%mm0\n"
19930- " movq 8(%0), %%mm1\n"
19931- " movq 16(%0), %%mm2\n"
19932- " movq 24(%0), %%mm3\n"
19933- " movq %%mm0, (%1)\n"
19934- " movq %%mm1, 8(%1)\n"
19935- " movq %%mm2, 16(%1)\n"
19936- " movq %%mm3, 24(%1)\n"
19937- " movq 32(%0), %%mm0\n"
19938- " movq 40(%0), %%mm1\n"
19939- " movq 48(%0), %%mm2\n"
19940- " movq 56(%0), %%mm3\n"
19941- " movq %%mm0, 32(%1)\n"
19942- " movq %%mm1, 40(%1)\n"
19943- " movq %%mm2, 48(%1)\n"
19944- " movq %%mm3, 56(%1)\n"
19945+ "1: prefetch 320(%1)\n"
19946+ "2: movq (%1), %%mm0\n"
19947+ " movq 8(%1), %%mm1\n"
19948+ " movq 16(%1), %%mm2\n"
19949+ " movq 24(%1), %%mm3\n"
19950+ " movq %%mm0, (%2)\n"
19951+ " movq %%mm1, 8(%2)\n"
19952+ " movq %%mm2, 16(%2)\n"
19953+ " movq %%mm3, 24(%2)\n"
19954+ " movq 32(%1), %%mm0\n"
19955+ " movq 40(%1), %%mm1\n"
19956+ " movq 48(%1), %%mm2\n"
19957+ " movq 56(%1), %%mm3\n"
19958+ " movq %%mm0, 32(%2)\n"
19959+ " movq %%mm1, 40(%2)\n"
19960+ " movq %%mm2, 48(%2)\n"
19961+ " movq %%mm3, 56(%2)\n"
19962 ".section .fixup, \"ax\"\n"
19963- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19964+ "3:\n"
19965+
19966+#ifdef CONFIG_PAX_KERNEXEC
19967+ " movl %%cr0, %0\n"
19968+ " movl %0, %%eax\n"
19969+ " andl $0xFFFEFFFF, %%eax\n"
19970+ " movl %%eax, %%cr0\n"
19971+#endif
19972+
19973+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19974+
19975+#ifdef CONFIG_PAX_KERNEXEC
19976+ " movl %0, %%cr0\n"
19977+#endif
19978+
19979 " jmp 2b\n"
19980 ".previous\n"
19981 _ASM_EXTABLE(1b, 3b)
19982- : : "r" (from), "r" (to) : "memory");
19983+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19984
19985 from += 64;
19986 to += 64;
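
The KERNEXEC additions above wrap each self-patching `movw $0x1AEB, 1b` fixup with a save/clear/restore of CR0: `andl $0xFFFEFFFF` clears bit 16 (CR0.WP) so the kernel may briefly write to otherwise read-only code, and the saved value is written back immediately afterwards. The bit arithmetic in isolation, on an ordinary variable rather than the real control register (the sample CR0 value is made up):

#include <stdio.h>

#define CR0_WP_BIT (1UL << 16)   /* write-protect enable; the bit 0xFFFEFFFF masks off */

int main(void)
{
	unsigned long cr0 = 0x80050033UL;      /* a plausible-looking CR0 value, made up */
	unsigned long saved = cr0;

	cr0 &= 0xFFFEFFFFUL;                   /* clear WP: kernel writes to RO pages allowed */
	printf("patched with cr0=%#lx (WP %s)\n", cr0, (cr0 & CR0_WP_BIT) ? "on" : "off");

	cr0 = saved;                           /* restore the original value right away */
	printf("restored   cr0=%#lx (WP %s)\n", cr0, (cr0 & CR0_WP_BIT) ? "on" : "off");
	return 0;
}
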
19987diff -urNp linux-2.6.32.45/arch/x86/lib/putuser.S linux-2.6.32.45/arch/x86/lib/putuser.S
19988--- linux-2.6.32.45/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19989+++ linux-2.6.32.45/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19990@@ -15,7 +15,8 @@
19991 #include <asm/thread_info.h>
19992 #include <asm/errno.h>
19993 #include <asm/asm.h>
19994-
19995+#include <asm/segment.h>
19996+#include <asm/pgtable.h>
19997
19998 /*
19999 * __put_user_X
20000@@ -29,52 +30,119 @@
20001 * as they get called from within inline assembly.
20002 */
20003
20004-#define ENTER CFI_STARTPROC ; \
20005- GET_THREAD_INFO(%_ASM_BX)
20006+#define ENTER CFI_STARTPROC
20007 #define EXIT ret ; \
20008 CFI_ENDPROC
20009
20010+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20011+#define _DEST %_ASM_CX,%_ASM_BX
20012+#else
20013+#define _DEST %_ASM_CX
20014+#endif
20015+
20016+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20017+#define __copyuser_seg gs;
20018+#else
20019+#define __copyuser_seg
20020+#endif
20021+
20022 .text
20023 ENTRY(__put_user_1)
20024 ENTER
20025+
20026+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20027+ GET_THREAD_INFO(%_ASM_BX)
20028 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20029 jae bad_put_user
20030-1: movb %al,(%_ASM_CX)
20031+
20032+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20033+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20034+ cmp %_ASM_BX,%_ASM_CX
20035+ jb 1234f
20036+ xor %ebx,%ebx
20037+1234:
20038+#endif
20039+
20040+#endif
20041+
20042+1: __copyuser_seg movb %al,(_DEST)
20043 xor %eax,%eax
20044 EXIT
20045 ENDPROC(__put_user_1)
20046
20047 ENTRY(__put_user_2)
20048 ENTER
20049+
20050+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20051+ GET_THREAD_INFO(%_ASM_BX)
20052 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20053 sub $1,%_ASM_BX
20054 cmp %_ASM_BX,%_ASM_CX
20055 jae bad_put_user
20056-2: movw %ax,(%_ASM_CX)
20057+
20058+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20059+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20060+ cmp %_ASM_BX,%_ASM_CX
20061+ jb 1234f
20062+ xor %ebx,%ebx
20063+1234:
20064+#endif
20065+
20066+#endif
20067+
20068+2: __copyuser_seg movw %ax,(_DEST)
20069 xor %eax,%eax
20070 EXIT
20071 ENDPROC(__put_user_2)
20072
20073 ENTRY(__put_user_4)
20074 ENTER
20075+
20076+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20077+ GET_THREAD_INFO(%_ASM_BX)
20078 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20079 sub $3,%_ASM_BX
20080 cmp %_ASM_BX,%_ASM_CX
20081 jae bad_put_user
20082-3: movl %eax,(%_ASM_CX)
20083+
20084+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20085+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20086+ cmp %_ASM_BX,%_ASM_CX
20087+ jb 1234f
20088+ xor %ebx,%ebx
20089+1234:
20090+#endif
20091+
20092+#endif
20093+
20094+3: __copyuser_seg movl %eax,(_DEST)
20095 xor %eax,%eax
20096 EXIT
20097 ENDPROC(__put_user_4)
20098
20099 ENTRY(__put_user_8)
20100 ENTER
20101+
20102+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20103+ GET_THREAD_INFO(%_ASM_BX)
20104 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20105 sub $7,%_ASM_BX
20106 cmp %_ASM_BX,%_ASM_CX
20107 jae bad_put_user
20108-4: mov %_ASM_AX,(%_ASM_CX)
20109+
20110+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20111+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20112+ cmp %_ASM_BX,%_ASM_CX
20113+ jb 1234f
20114+ xor %ebx,%ebx
20115+1234:
20116+#endif
20117+
20118+#endif
20119+
20120+4: __copyuser_seg mov %_ASM_AX,(_DEST)
20121 #ifdef CONFIG_X86_32
20122-5: movl %edx,4(%_ASM_CX)
20123+5: __copyuser_seg movl %edx,4(_DEST)
20124 #endif
20125 xor %eax,%eax
20126 EXIT
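
In the __put_user paths above, the amd64 UDEREF case does not rewrite the destination register at all: %_ASM_BX is loaded with PAX_USER_SHADOW_BASE when the target is still below the shadow area and zeroed otherwise, and the store uses the two-register form behind _DEST (`%_ASM_CX,%_ASM_BX`), so the rebase is folded into the addressing mode. A C rendering of that "pointer plus selectable offset" idea (constant and names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define SHADOW_BASE_MODEL 0x0000100000000000ULL   /* stand-in for PAX_USER_SHADOW_BASE */

/* Pick the extra base: the shadow offset for low addresses, 0 for rebased ones. */
static uint64_t shadow_offset(uint64_t uaddr)
{
	return (uaddr < SHADOW_BASE_MODEL) ? SHADOW_BASE_MODEL : 0;
}

int main(void)
{
	uint64_t dst = 0x00007fffffffe000ULL;          /* pretend user destination */
	uint64_t off = shadow_offset(dst);

	/* The store then targets dst + off, i.e. (%rcx,%rbx) in the assembly. */
	printf("store goes to %#llx\n", (unsigned long long)(dst + off));
	printf("already high: off=%#llx\n", (unsigned long long)shadow_offset(dst + off));
	return 0;
}
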
20127diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_32.c linux-2.6.32.45/arch/x86/lib/usercopy_32.c
20128--- linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
20129+++ linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
20130@@ -43,7 +43,7 @@ do { \
20131 __asm__ __volatile__( \
20132 " testl %1,%1\n" \
20133 " jz 2f\n" \
20134- "0: lodsb\n" \
20135+ "0: "__copyuser_seg"lodsb\n" \
20136 " stosb\n" \
20137 " testb %%al,%%al\n" \
20138 " jz 1f\n" \
20139@@ -128,10 +128,12 @@ do { \
20140 int __d0; \
20141 might_fault(); \
20142 __asm__ __volatile__( \
20143+ __COPYUSER_SET_ES \
20144 "0: rep; stosl\n" \
20145 " movl %2,%0\n" \
20146 "1: rep; stosb\n" \
20147 "2:\n" \
20148+ __COPYUSER_RESTORE_ES \
20149 ".section .fixup,\"ax\"\n" \
20150 "3: lea 0(%2,%0,4),%0\n" \
20151 " jmp 2b\n" \
20152@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
20153 might_fault();
20154
20155 __asm__ __volatile__(
20156+ __COPYUSER_SET_ES
20157 " testl %0, %0\n"
20158 " jz 3f\n"
20159 " andl %0,%%ecx\n"
20160@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
20161 " subl %%ecx,%0\n"
20162 " addl %0,%%eax\n"
20163 "1:\n"
20164+ __COPYUSER_RESTORE_ES
20165 ".section .fixup,\"ax\"\n"
20166 "2: xorl %%eax,%%eax\n"
20167 " jmp 1b\n"
20168@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20169
20170 #ifdef CONFIG_X86_INTEL_USERCOPY
20171 static unsigned long
20172-__copy_user_intel(void __user *to, const void *from, unsigned long size)
20173+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20174 {
20175 int d0, d1;
20176 __asm__ __volatile__(
20177@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
20178 " .align 2,0x90\n"
20179 "3: movl 0(%4), %%eax\n"
20180 "4: movl 4(%4), %%edx\n"
20181- "5: movl %%eax, 0(%3)\n"
20182- "6: movl %%edx, 4(%3)\n"
20183+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20184+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20185 "7: movl 8(%4), %%eax\n"
20186 "8: movl 12(%4),%%edx\n"
20187- "9: movl %%eax, 8(%3)\n"
20188- "10: movl %%edx, 12(%3)\n"
20189+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20190+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20191 "11: movl 16(%4), %%eax\n"
20192 "12: movl 20(%4), %%edx\n"
20193- "13: movl %%eax, 16(%3)\n"
20194- "14: movl %%edx, 20(%3)\n"
20195+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20196+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20197 "15: movl 24(%4), %%eax\n"
20198 "16: movl 28(%4), %%edx\n"
20199- "17: movl %%eax, 24(%3)\n"
20200- "18: movl %%edx, 28(%3)\n"
20201+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20202+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20203 "19: movl 32(%4), %%eax\n"
20204 "20: movl 36(%4), %%edx\n"
20205- "21: movl %%eax, 32(%3)\n"
20206- "22: movl %%edx, 36(%3)\n"
20207+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20208+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20209 "23: movl 40(%4), %%eax\n"
20210 "24: movl 44(%4), %%edx\n"
20211- "25: movl %%eax, 40(%3)\n"
20212- "26: movl %%edx, 44(%3)\n"
20213+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20214+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20215 "27: movl 48(%4), %%eax\n"
20216 "28: movl 52(%4), %%edx\n"
20217- "29: movl %%eax, 48(%3)\n"
20218- "30: movl %%edx, 52(%3)\n"
20219+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20220+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20221 "31: movl 56(%4), %%eax\n"
20222 "32: movl 60(%4), %%edx\n"
20223- "33: movl %%eax, 56(%3)\n"
20224- "34: movl %%edx, 60(%3)\n"
20225+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20226+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20227 " addl $-64, %0\n"
20228 " addl $64, %4\n"
20229 " addl $64, %3\n"
20230@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
20231 " shrl $2, %0\n"
20232 " andl $3, %%eax\n"
20233 " cld\n"
20234+ __COPYUSER_SET_ES
20235 "99: rep; movsl\n"
20236 "36: movl %%eax, %0\n"
20237 "37: rep; movsb\n"
20238 "100:\n"
20239+ __COPYUSER_RESTORE_ES
20240+ ".section .fixup,\"ax\"\n"
20241+ "101: lea 0(%%eax,%0,4),%0\n"
20242+ " jmp 100b\n"
20243+ ".previous\n"
20244+ ".section __ex_table,\"a\"\n"
20245+ " .align 4\n"
20246+ " .long 1b,100b\n"
20247+ " .long 2b,100b\n"
20248+ " .long 3b,100b\n"
20249+ " .long 4b,100b\n"
20250+ " .long 5b,100b\n"
20251+ " .long 6b,100b\n"
20252+ " .long 7b,100b\n"
20253+ " .long 8b,100b\n"
20254+ " .long 9b,100b\n"
20255+ " .long 10b,100b\n"
20256+ " .long 11b,100b\n"
20257+ " .long 12b,100b\n"
20258+ " .long 13b,100b\n"
20259+ " .long 14b,100b\n"
20260+ " .long 15b,100b\n"
20261+ " .long 16b,100b\n"
20262+ " .long 17b,100b\n"
20263+ " .long 18b,100b\n"
20264+ " .long 19b,100b\n"
20265+ " .long 20b,100b\n"
20266+ " .long 21b,100b\n"
20267+ " .long 22b,100b\n"
20268+ " .long 23b,100b\n"
20269+ " .long 24b,100b\n"
20270+ " .long 25b,100b\n"
20271+ " .long 26b,100b\n"
20272+ " .long 27b,100b\n"
20273+ " .long 28b,100b\n"
20274+ " .long 29b,100b\n"
20275+ " .long 30b,100b\n"
20276+ " .long 31b,100b\n"
20277+ " .long 32b,100b\n"
20278+ " .long 33b,100b\n"
20279+ " .long 34b,100b\n"
20280+ " .long 35b,100b\n"
20281+ " .long 36b,100b\n"
20282+ " .long 37b,100b\n"
20283+ " .long 99b,101b\n"
20284+ ".previous"
20285+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20286+ : "1"(to), "2"(from), "0"(size)
20287+ : "eax", "edx", "memory");
20288+ return size;
20289+}
20290+
20291+static unsigned long
20292+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20293+{
20294+ int d0, d1;
20295+ __asm__ __volatile__(
20296+ " .align 2,0x90\n"
20297+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20298+ " cmpl $67, %0\n"
20299+ " jbe 3f\n"
20300+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20301+ " .align 2,0x90\n"
20302+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20303+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20304+ "5: movl %%eax, 0(%3)\n"
20305+ "6: movl %%edx, 4(%3)\n"
20306+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20307+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20308+ "9: movl %%eax, 8(%3)\n"
20309+ "10: movl %%edx, 12(%3)\n"
20310+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20311+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20312+ "13: movl %%eax, 16(%3)\n"
20313+ "14: movl %%edx, 20(%3)\n"
20314+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20315+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20316+ "17: movl %%eax, 24(%3)\n"
20317+ "18: movl %%edx, 28(%3)\n"
20318+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20319+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20320+ "21: movl %%eax, 32(%3)\n"
20321+ "22: movl %%edx, 36(%3)\n"
20322+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20323+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20324+ "25: movl %%eax, 40(%3)\n"
20325+ "26: movl %%edx, 44(%3)\n"
20326+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20327+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20328+ "29: movl %%eax, 48(%3)\n"
20329+ "30: movl %%edx, 52(%3)\n"
20330+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20331+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20332+ "33: movl %%eax, 56(%3)\n"
20333+ "34: movl %%edx, 60(%3)\n"
20334+ " addl $-64, %0\n"
20335+ " addl $64, %4\n"
20336+ " addl $64, %3\n"
20337+ " cmpl $63, %0\n"
20338+ " ja 1b\n"
20339+ "35: movl %0, %%eax\n"
20340+ " shrl $2, %0\n"
20341+ " andl $3, %%eax\n"
20342+ " cld\n"
20343+ "99: rep; "__copyuser_seg" movsl\n"
20344+ "36: movl %%eax, %0\n"
20345+ "37: rep; "__copyuser_seg" movsb\n"
20346+ "100:\n"
20347 ".section .fixup,\"ax\"\n"
20348 "101: lea 0(%%eax,%0,4),%0\n"
20349 " jmp 100b\n"
20350@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
20351 int d0, d1;
20352 __asm__ __volatile__(
20353 " .align 2,0x90\n"
20354- "0: movl 32(%4), %%eax\n"
20355+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20356 " cmpl $67, %0\n"
20357 " jbe 2f\n"
20358- "1: movl 64(%4), %%eax\n"
20359+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20360 " .align 2,0x90\n"
20361- "2: movl 0(%4), %%eax\n"
20362- "21: movl 4(%4), %%edx\n"
20363+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20364+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20365 " movl %%eax, 0(%3)\n"
20366 " movl %%edx, 4(%3)\n"
20367- "3: movl 8(%4), %%eax\n"
20368- "31: movl 12(%4),%%edx\n"
20369+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20370+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20371 " movl %%eax, 8(%3)\n"
20372 " movl %%edx, 12(%3)\n"
20373- "4: movl 16(%4), %%eax\n"
20374- "41: movl 20(%4), %%edx\n"
20375+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20376+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20377 " movl %%eax, 16(%3)\n"
20378 " movl %%edx, 20(%3)\n"
20379- "10: movl 24(%4), %%eax\n"
20380- "51: movl 28(%4), %%edx\n"
20381+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20382+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20383 " movl %%eax, 24(%3)\n"
20384 " movl %%edx, 28(%3)\n"
20385- "11: movl 32(%4), %%eax\n"
20386- "61: movl 36(%4), %%edx\n"
20387+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20388+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20389 " movl %%eax, 32(%3)\n"
20390 " movl %%edx, 36(%3)\n"
20391- "12: movl 40(%4), %%eax\n"
20392- "71: movl 44(%4), %%edx\n"
20393+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20394+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20395 " movl %%eax, 40(%3)\n"
20396 " movl %%edx, 44(%3)\n"
20397- "13: movl 48(%4), %%eax\n"
20398- "81: movl 52(%4), %%edx\n"
20399+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20400+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20401 " movl %%eax, 48(%3)\n"
20402 " movl %%edx, 52(%3)\n"
20403- "14: movl 56(%4), %%eax\n"
20404- "91: movl 60(%4), %%edx\n"
20405+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20406+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20407 " movl %%eax, 56(%3)\n"
20408 " movl %%edx, 60(%3)\n"
20409 " addl $-64, %0\n"
20410@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
20411 " shrl $2, %0\n"
20412 " andl $3, %%eax\n"
20413 " cld\n"
20414- "6: rep; movsl\n"
20415+ "6: rep; "__copyuser_seg" movsl\n"
20416 " movl %%eax,%0\n"
20417- "7: rep; movsb\n"
20418+ "7: rep; "__copyuser_seg" movsb\n"
20419 "8:\n"
20420 ".section .fixup,\"ax\"\n"
20421 "9: lea 0(%%eax,%0,4),%0\n"
20422@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20423
20424 __asm__ __volatile__(
20425 " .align 2,0x90\n"
20426- "0: movl 32(%4), %%eax\n"
20427+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20428 " cmpl $67, %0\n"
20429 " jbe 2f\n"
20430- "1: movl 64(%4), %%eax\n"
20431+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20432 " .align 2,0x90\n"
20433- "2: movl 0(%4), %%eax\n"
20434- "21: movl 4(%4), %%edx\n"
20435+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20436+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20437 " movnti %%eax, 0(%3)\n"
20438 " movnti %%edx, 4(%3)\n"
20439- "3: movl 8(%4), %%eax\n"
20440- "31: movl 12(%4),%%edx\n"
20441+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20442+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20443 " movnti %%eax, 8(%3)\n"
20444 " movnti %%edx, 12(%3)\n"
20445- "4: movl 16(%4), %%eax\n"
20446- "41: movl 20(%4), %%edx\n"
20447+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20448+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20449 " movnti %%eax, 16(%3)\n"
20450 " movnti %%edx, 20(%3)\n"
20451- "10: movl 24(%4), %%eax\n"
20452- "51: movl 28(%4), %%edx\n"
20453+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20454+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20455 " movnti %%eax, 24(%3)\n"
20456 " movnti %%edx, 28(%3)\n"
20457- "11: movl 32(%4), %%eax\n"
20458- "61: movl 36(%4), %%edx\n"
20459+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20460+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20461 " movnti %%eax, 32(%3)\n"
20462 " movnti %%edx, 36(%3)\n"
20463- "12: movl 40(%4), %%eax\n"
20464- "71: movl 44(%4), %%edx\n"
20465+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20466+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20467 " movnti %%eax, 40(%3)\n"
20468 " movnti %%edx, 44(%3)\n"
20469- "13: movl 48(%4), %%eax\n"
20470- "81: movl 52(%4), %%edx\n"
20471+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20472+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20473 " movnti %%eax, 48(%3)\n"
20474 " movnti %%edx, 52(%3)\n"
20475- "14: movl 56(%4), %%eax\n"
20476- "91: movl 60(%4), %%edx\n"
20477+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20478+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20479 " movnti %%eax, 56(%3)\n"
20480 " movnti %%edx, 60(%3)\n"
20481 " addl $-64, %0\n"
20482@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20483 " shrl $2, %0\n"
20484 " andl $3, %%eax\n"
20485 " cld\n"
20486- "6: rep; movsl\n"
20487+ "6: rep; "__copyuser_seg" movsl\n"
20488 " movl %%eax,%0\n"
20489- "7: rep; movsb\n"
20490+ "7: rep; "__copyuser_seg" movsb\n"
20491 "8:\n"
20492 ".section .fixup,\"ax\"\n"
20493 "9: lea 0(%%eax,%0,4),%0\n"
20494@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20495
20496 __asm__ __volatile__(
20497 " .align 2,0x90\n"
20498- "0: movl 32(%4), %%eax\n"
20499+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20500 " cmpl $67, %0\n"
20501 " jbe 2f\n"
20502- "1: movl 64(%4), %%eax\n"
20503+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20504 " .align 2,0x90\n"
20505- "2: movl 0(%4), %%eax\n"
20506- "21: movl 4(%4), %%edx\n"
20507+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20508+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20509 " movnti %%eax, 0(%3)\n"
20510 " movnti %%edx, 4(%3)\n"
20511- "3: movl 8(%4), %%eax\n"
20512- "31: movl 12(%4),%%edx\n"
20513+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20514+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20515 " movnti %%eax, 8(%3)\n"
20516 " movnti %%edx, 12(%3)\n"
20517- "4: movl 16(%4), %%eax\n"
20518- "41: movl 20(%4), %%edx\n"
20519+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20520+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20521 " movnti %%eax, 16(%3)\n"
20522 " movnti %%edx, 20(%3)\n"
20523- "10: movl 24(%4), %%eax\n"
20524- "51: movl 28(%4), %%edx\n"
20525+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20526+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20527 " movnti %%eax, 24(%3)\n"
20528 " movnti %%edx, 28(%3)\n"
20529- "11: movl 32(%4), %%eax\n"
20530- "61: movl 36(%4), %%edx\n"
20531+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20532+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20533 " movnti %%eax, 32(%3)\n"
20534 " movnti %%edx, 36(%3)\n"
20535- "12: movl 40(%4), %%eax\n"
20536- "71: movl 44(%4), %%edx\n"
20537+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20538+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20539 " movnti %%eax, 40(%3)\n"
20540 " movnti %%edx, 44(%3)\n"
20541- "13: movl 48(%4), %%eax\n"
20542- "81: movl 52(%4), %%edx\n"
20543+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20544+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20545 " movnti %%eax, 48(%3)\n"
20546 " movnti %%edx, 52(%3)\n"
20547- "14: movl 56(%4), %%eax\n"
20548- "91: movl 60(%4), %%edx\n"
20549+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20550+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20551 " movnti %%eax, 56(%3)\n"
20552 " movnti %%edx, 60(%3)\n"
20553 " addl $-64, %0\n"
20554@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20555 " shrl $2, %0\n"
20556 " andl $3, %%eax\n"
20557 " cld\n"
20558- "6: rep; movsl\n"
20559+ "6: rep; "__copyuser_seg" movsl\n"
20560 " movl %%eax,%0\n"
20561- "7: rep; movsb\n"
20562+ "7: rep; "__copyuser_seg" movsb\n"
20563 "8:\n"
20564 ".section .fixup,\"ax\"\n"
20565 "9: lea 0(%%eax,%0,4),%0\n"
20566@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20567 */
20568 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20569 unsigned long size);
20570-unsigned long __copy_user_intel(void __user *to, const void *from,
20571+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20572+ unsigned long size);
20573+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20574 unsigned long size);
20575 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20576 const void __user *from, unsigned long size);
20577 #endif /* CONFIG_X86_INTEL_USERCOPY */
20578
20579 /* Generic arbitrary sized copy. */
20580-#define __copy_user(to, from, size) \
20581+#define __copy_user(to, from, size, prefix, set, restore) \
20582 do { \
20583 int __d0, __d1, __d2; \
20584 __asm__ __volatile__( \
20585+ set \
20586 " cmp $7,%0\n" \
20587 " jbe 1f\n" \
20588 " movl %1,%0\n" \
20589 " negl %0\n" \
20590 " andl $7,%0\n" \
20591 " subl %0,%3\n" \
20592- "4: rep; movsb\n" \
20593+ "4: rep; "prefix"movsb\n" \
20594 " movl %3,%0\n" \
20595 " shrl $2,%0\n" \
20596 " andl $3,%3\n" \
20597 " .align 2,0x90\n" \
20598- "0: rep; movsl\n" \
20599+ "0: rep; "prefix"movsl\n" \
20600 " movl %3,%0\n" \
20601- "1: rep; movsb\n" \
20602+ "1: rep; "prefix"movsb\n" \
20603 "2:\n" \
20604+ restore \
20605 ".section .fixup,\"ax\"\n" \
20606 "5: addl %3,%0\n" \
20607 " jmp 2b\n" \
20608@@ -682,14 +799,14 @@ do { \
20609 " negl %0\n" \
20610 " andl $7,%0\n" \
20611 " subl %0,%3\n" \
20612- "4: rep; movsb\n" \
20613+ "4: rep; "__copyuser_seg"movsb\n" \
20614 " movl %3,%0\n" \
20615 " shrl $2,%0\n" \
20616 " andl $3,%3\n" \
20617 " .align 2,0x90\n" \
20618- "0: rep; movsl\n" \
20619+ "0: rep; "__copyuser_seg"movsl\n" \
20620 " movl %3,%0\n" \
20621- "1: rep; movsb\n" \
20622+ "1: rep; "__copyuser_seg"movsb\n" \
20623 "2:\n" \
20624 ".section .fixup,\"ax\"\n" \
20625 "5: addl %3,%0\n" \
20626@@ -775,9 +892,9 @@ survive:
20627 }
20628 #endif
20629 if (movsl_is_ok(to, from, n))
20630- __copy_user(to, from, n);
20631+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20632 else
20633- n = __copy_user_intel(to, from, n);
20634+ n = __generic_copy_to_user_intel(to, from, n);
20635 return n;
20636 }
20637 EXPORT_SYMBOL(__copy_to_user_ll);
20638@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20639 unsigned long n)
20640 {
20641 if (movsl_is_ok(to, from, n))
20642- __copy_user(to, from, n);
20643+ __copy_user(to, from, n, __copyuser_seg, "", "");
20644 else
20645- n = __copy_user_intel((void __user *)to,
20646- (const void *)from, n);
20647+ n = __generic_copy_from_user_intel(to, from, n);
20648 return n;
20649 }
20650 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20651@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20652 if (n > 64 && cpu_has_xmm2)
20653 n = __copy_user_intel_nocache(to, from, n);
20654 else
20655- __copy_user(to, from, n);
20656+ __copy_user(to, from, n, __copyuser_seg, "", "");
20657 #else
20658- __copy_user(to, from, n);
20659+ __copy_user(to, from, n, __copyuser_seg, "", "");
20660 #endif
20661 return n;
20662 }
20663 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20664
20665-/**
20666- * copy_to_user: - Copy a block of data into user space.
20667- * @to: Destination address, in user space.
20668- * @from: Source address, in kernel space.
20669- * @n: Number of bytes to copy.
20670- *
20671- * Context: User context only. This function may sleep.
20672- *
20673- * Copy data from kernel space to user space.
20674- *
20675- * Returns number of bytes that could not be copied.
20676- * On success, this will be zero.
20677- */
20678-unsigned long
20679-copy_to_user(void __user *to, const void *from, unsigned long n)
20680+#ifdef CONFIG_PAX_MEMORY_UDEREF
20681+void __set_fs(mm_segment_t x)
20682 {
20683- if (access_ok(VERIFY_WRITE, to, n))
20684- n = __copy_to_user(to, from, n);
20685- return n;
20686+ switch (x.seg) {
20687+ case 0:
20688+ loadsegment(gs, 0);
20689+ break;
20690+ case TASK_SIZE_MAX:
20691+ loadsegment(gs, __USER_DS);
20692+ break;
20693+ case -1UL:
20694+ loadsegment(gs, __KERNEL_DS);
20695+ break;
20696+ default:
20697+ BUG();
20698+ }
20699+ return;
20700 }
20701-EXPORT_SYMBOL(copy_to_user);
20702+EXPORT_SYMBOL(__set_fs);
20703
20704-/**
20705- * copy_from_user: - Copy a block of data from user space.
20706- * @to: Destination address, in kernel space.
20707- * @from: Source address, in user space.
20708- * @n: Number of bytes to copy.
20709- *
20710- * Context: User context only. This function may sleep.
20711- *
20712- * Copy data from user space to kernel space.
20713- *
20714- * Returns number of bytes that could not be copied.
20715- * On success, this will be zero.
20716- *
20717- * If some data could not be copied, this function will pad the copied
20718- * data to the requested size using zero bytes.
20719- */
20720-unsigned long
20721-copy_from_user(void *to, const void __user *from, unsigned long n)
20722+void set_fs(mm_segment_t x)
20723 {
20724- if (access_ok(VERIFY_READ, from, n))
20725- n = __copy_from_user(to, from, n);
20726- else
20727- memset(to, 0, n);
20728- return n;
20729+ current_thread_info()->addr_limit = x;
20730+ __set_fs(x);
20731 }
20732-EXPORT_SYMBOL(copy_from_user);
20733+EXPORT_SYMBOL(set_fs);
20734+#endif
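
With UDEREF on i386, the hunk above drops the exported copy_to_user/copy_from_user bodies from this file and adds set_fs()/__set_fs(): the address limit now also selects which selector gets loaded into %gs (a null selector for a zero limit, __USER_DS for the normal user limit, __KERNEL_DS for KERNEL_DS), with anything else hitting BUG(). A compact model of that limit-to-segment mapping; the limits and selector values below are placeholders, not the kernel's:

#include <stdio.h>

#define TASK_SIZE_MAX_MODEL 0xC0000000UL   /* illustrative 3GB user limit */
#define SEG_NULL        0x00
#define SEG_USER_DS     0x7B               /* placeholder selector values */
#define SEG_KERNEL_DS   0x68

/* Map an address limit to the %gs selector the copy routines would use. */
static int seg_for_limit(unsigned long limit)
{
	switch (limit) {
	case 0:                   return SEG_NULL;      /* no user access allowed */
	case TASK_SIZE_MAX_MODEL: return SEG_USER_DS;   /* normal USER_DS         */
	case (unsigned long)-1:   return SEG_KERNEL_DS; /* set_fs(KERNEL_DS)      */
	default:
		fprintf(stderr, "unexpected limit %#lx\n", limit);
		return -1;                                  /* the kernel BUG()s here */
	}
}

int main(void)
{
	printf("USER_DS   -> %#x\n", seg_for_limit(TASK_SIZE_MAX_MODEL));
	printf("KERNEL_DS -> %#x\n", seg_for_limit((unsigned long)-1));
	printf("zero      -> %#x\n", seg_for_limit(0));
	return 0;
}
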
20735diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_64.c linux-2.6.32.45/arch/x86/lib/usercopy_64.c
20736--- linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20737+++ linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20738@@ -42,6 +42,12 @@ long
20739 __strncpy_from_user(char *dst, const char __user *src, long count)
20740 {
20741 long res;
20742+
20743+#ifdef CONFIG_PAX_MEMORY_UDEREF
20744+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20745+ src += PAX_USER_SHADOW_BASE;
20746+#endif
20747+
20748 __do_strncpy_from_user(dst, src, count, res);
20749 return res;
20750 }
20751@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20752 {
20753 long __d0;
20754 might_fault();
20755+
20756+#ifdef CONFIG_PAX_MEMORY_UDEREF
20757+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20758+ addr += PAX_USER_SHADOW_BASE;
20759+#endif
20760+
20761 /* no memory constraint because it doesn't change any memory gcc knows
20762 about */
20763 asm volatile(
20764@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20765
20766 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20767 {
20768- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20769+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20770+
20771+#ifdef CONFIG_PAX_MEMORY_UDEREF
20772+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20773+ to += PAX_USER_SHADOW_BASE;
20774+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20775+ from += PAX_USER_SHADOW_BASE;
20776+#endif
20777+
20778 return copy_user_generic((__force void *)to, (__force void *)from, len);
20779- }
20780- return len;
20781+ }
20782+ return len;
20783 }
20784 EXPORT_SYMBOL(copy_in_user);
20785
20786diff -urNp linux-2.6.32.45/arch/x86/Makefile linux-2.6.32.45/arch/x86/Makefile
20787--- linux-2.6.32.45/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20788+++ linux-2.6.32.45/arch/x86/Makefile 2011-07-19 18:16:02.000000000 -0400
20789@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
20790 else
20791 BITS := 64
20792 UTS_MACHINE := x86_64
20793+ biarch := $(call cc-option,-m64)
20794 CHECKFLAGS += -D__x86_64__ -m64
20795
20796 KBUILD_AFLAGS += -m64
20797@@ -189,3 +190,12 @@ define archhelp
20798 echo ' FDARGS="..." arguments for the booted kernel'
20799 echo ' FDINITRD=file initrd for the booted kernel'
20800 endef
20801+
20802+define OLD_LD
20803+
20804+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20805+*** Please upgrade your binutils to 2.18 or newer
20806+endef
20807+
20808+archprepare:
20809+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20810diff -urNp linux-2.6.32.45/arch/x86/mm/extable.c linux-2.6.32.45/arch/x86/mm/extable.c
20811--- linux-2.6.32.45/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20812+++ linux-2.6.32.45/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20813@@ -1,14 +1,71 @@
20814 #include <linux/module.h>
20815 #include <linux/spinlock.h>
20816+#include <linux/sort.h>
20817 #include <asm/uaccess.h>
20818+#include <asm/pgtable.h>
20819
20820+/*
20821+ * The exception table needs to be sorted so that the binary
20822+ * search that we use to find entries in it works properly.
20823+ * This is used both for the kernel exception table and for
20824+ * the exception tables of modules that get loaded.
20825+ */
20826+static int cmp_ex(const void *a, const void *b)
20827+{
20828+ const struct exception_table_entry *x = a, *y = b;
20829+
20830+ /* avoid overflow */
20831+ if (x->insn > y->insn)
20832+ return 1;
20833+ if (x->insn < y->insn)
20834+ return -1;
20835+ return 0;
20836+}
20837+
20838+static void swap_ex(void *a, void *b, int size)
20839+{
20840+ struct exception_table_entry t, *x = a, *y = b;
20841+
20842+ t = *x;
20843+
20844+ pax_open_kernel();
20845+ *x = *y;
20846+ *y = t;
20847+ pax_close_kernel();
20848+}
20849+
20850+void sort_extable(struct exception_table_entry *start,
20851+ struct exception_table_entry *finish)
20852+{
20853+ sort(start, finish - start, sizeof(struct exception_table_entry),
20854+ cmp_ex, swap_ex);
20855+}
20856+
20857+#ifdef CONFIG_MODULES
20858+/*
20859+ * If the exception table is sorted, any referring to the module init
20860+ * will be at the beginning or the end.
20861+ */
20862+void trim_init_extable(struct module *m)
20863+{
20864+ /*trim the beginning*/
20865+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20866+ m->extable++;
20867+ m->num_exentries--;
20868+ }
20869+ /*trim the end*/
20870+ while (m->num_exentries &&
20871+ within_module_init(m->extable[m->num_exentries-1].insn, m))
20872+ m->num_exentries--;
20873+}
20874+#endif /* CONFIG_MODULES */
20875
20876 int fixup_exception(struct pt_regs *regs)
20877 {
20878 const struct exception_table_entry *fixup;
20879
20880 #ifdef CONFIG_PNPBIOS
20881- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20882+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20883 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20884 extern u32 pnp_bios_is_utter_crap;
20885 pnp_bios_is_utter_crap = 1;
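
The new sort_extable() above orders exception-table entries by faulting-instruction address with an explicit compare/swap pair (the swap goes through pax_open_kernel()/pax_close_kernel() since the table may be read-only under KERNEXEC). The same ordering expressed with the C library's qsort on a toy entry type; the struct layout and addresses are invented for the example:

#include <stdio.h>
#include <stdlib.h>

/* Toy counterpart of struct exception_table_entry. */
struct extable_entry_model {
	unsigned long insn;    /* address of the faulting instruction */
	unsigned long fixup;   /* address of its recovery code        */
};

/* Compare by insn address; written like the patch's cmp_ex, with explicit
 * comparisons rather than a subtraction, to avoid overflow on large values. */
static int cmp_ex_model(const void *a, const void *b)
{
	const struct extable_entry_model *x = a, *y = b;

	if (x->insn > y->insn)
		return 1;
	if (x->insn < y->insn)
		return -1;
	return 0;
}

int main(void)
{
	struct extable_entry_model table[] = {
		{ 0x3120, 0x7010 },
		{ 0x1a80, 0x7000 },
		{ 0x2c40, 0x7020 },
	};
	size_t n = sizeof(table) / sizeof(table[0]);

	qsort(table, n, sizeof(table[0]), cmp_ex_model);  /* binary search now works */

	for (size_t i = 0; i < n; i++)
		printf("%#lx -> %#lx\n", table[i].insn, table[i].fixup);
	return 0;
}
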
20886diff -urNp linux-2.6.32.45/arch/x86/mm/fault.c linux-2.6.32.45/arch/x86/mm/fault.c
20887--- linux-2.6.32.45/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20888+++ linux-2.6.32.45/arch/x86/mm/fault.c 2011-08-17 20:06:44.000000000 -0400
20889@@ -11,10 +11,19 @@
20890 #include <linux/kprobes.h> /* __kprobes, ... */
20891 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20892 #include <linux/perf_event.h> /* perf_sw_event */
20893+#include <linux/unistd.h>
20894+#include <linux/compiler.h>
20895
20896 #include <asm/traps.h> /* dotraplinkage, ... */
20897 #include <asm/pgalloc.h> /* pgd_*(), ... */
20898 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20899+#include <asm/vsyscall.h>
20900+#include <asm/tlbflush.h>
20901+
20902+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20903+#include <asm/stacktrace.h>
20904+#include "../kernel/dumpstack.h"
20905+#endif
20906
20907 /*
20908 * Page fault error code bits:
20909@@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20910 int ret = 0;
20911
20912 /* kprobe_running() needs smp_processor_id() */
20913- if (kprobes_built_in() && !user_mode_vm(regs)) {
20914+ if (kprobes_built_in() && !user_mode(regs)) {
20915 preempt_disable();
20916 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20917 ret = 1;
20918@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20919 return !instr_lo || (instr_lo>>1) == 1;
20920 case 0x00:
20921 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20922- if (probe_kernel_address(instr, opcode))
20923+ if (user_mode(regs)) {
20924+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20925+ return 0;
20926+ } else if (probe_kernel_address(instr, opcode))
20927 return 0;
20928
20929 *prefetch = (instr_lo == 0xF) &&
20930@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20931 while (instr < max_instr) {
20932 unsigned char opcode;
20933
20934- if (probe_kernel_address(instr, opcode))
20935+ if (user_mode(regs)) {
20936+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20937+ break;
20938+ } else if (probe_kernel_address(instr, opcode))
20939 break;
20940
20941 instr++;
20942@@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20943 force_sig_info(si_signo, &info, tsk);
20944 }
20945
20946+#ifdef CONFIG_PAX_EMUTRAMP
20947+static int pax_handle_fetch_fault(struct pt_regs *regs);
20948+#endif
20949+
20950+#ifdef CONFIG_PAX_PAGEEXEC
20951+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20952+{
20953+ pgd_t *pgd;
20954+ pud_t *pud;
20955+ pmd_t *pmd;
20956+
20957+ pgd = pgd_offset(mm, address);
20958+ if (!pgd_present(*pgd))
20959+ return NULL;
20960+ pud = pud_offset(pgd, address);
20961+ if (!pud_present(*pud))
20962+ return NULL;
20963+ pmd = pmd_offset(pud, address);
20964+ if (!pmd_present(*pmd))
20965+ return NULL;
20966+ return pmd;
20967+}
20968+#endif
20969+
20970 DEFINE_SPINLOCK(pgd_lock);
20971 LIST_HEAD(pgd_list);
20972
20973@@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20974 address += PMD_SIZE) {
20975
20976 unsigned long flags;
20977+
20978+#ifdef CONFIG_PAX_PER_CPU_PGD
20979+ unsigned long cpu;
20980+#else
20981 struct page *page;
20982+#endif
20983
20984 spin_lock_irqsave(&pgd_lock, flags);
20985+
20986+#ifdef CONFIG_PAX_PER_CPU_PGD
20987+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20988+ pgd_t *pgd = get_cpu_pgd(cpu);
20989+#else
20990 list_for_each_entry(page, &pgd_list, lru) {
20991- if (!vmalloc_sync_one(page_address(page), address))
20992+ pgd_t *pgd = page_address(page);
20993+#endif
20994+
20995+ if (!vmalloc_sync_one(pgd, address))
20996 break;
20997 }
20998 spin_unlock_irqrestore(&pgd_lock, flags);
20999@@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
21000 * an interrupt in the middle of a task switch..
21001 */
21002 pgd_paddr = read_cr3();
21003+
21004+#ifdef CONFIG_PAX_PER_CPU_PGD
21005+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21006+#endif
21007+
21008 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21009 if (!pmd_k)
21010 return -1;
21011@@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
21012
21013 const pgd_t *pgd_ref = pgd_offset_k(address);
21014 unsigned long flags;
21015+
21016+#ifdef CONFIG_PAX_PER_CPU_PGD
21017+ unsigned long cpu;
21018+#else
21019 struct page *page;
21020+#endif
21021
21022 if (pgd_none(*pgd_ref))
21023 continue;
21024
21025 spin_lock_irqsave(&pgd_lock, flags);
21026+
21027+#ifdef CONFIG_PAX_PER_CPU_PGD
21028+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21029+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
21030+#else
21031 list_for_each_entry(page, &pgd_list, lru) {
21032 pgd_t *pgd;
21033 pgd = (pgd_t *)page_address(page) + pgd_index(address);
21034+#endif
21035+
21036 if (pgd_none(*pgd))
21037 set_pgd(pgd, *pgd_ref);
21038 else
21039@@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
21040 * happen within a race in page table update. In the later
21041 * case just flush:
21042 */
21043+
21044+#ifdef CONFIG_PAX_PER_CPU_PGD
21045+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21046+ pgd = pgd_offset_cpu(smp_processor_id(), address);
21047+#else
21048 pgd = pgd_offset(current->active_mm, address);
21049+#endif
21050+
21051 pgd_ref = pgd_offset_k(address);
21052 if (pgd_none(*pgd_ref))
21053 return -1;
21054@@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
21055 static int is_errata100(struct pt_regs *regs, unsigned long address)
21056 {
21057 #ifdef CONFIG_X86_64
21058- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21059+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21060 return 1;
21061 #endif
21062 return 0;
21063@@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
21064 }
21065
21066 static const char nx_warning[] = KERN_CRIT
21067-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21068+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21069
21070 static void
21071 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21072@@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
21073 if (!oops_may_print())
21074 return;
21075
21076- if (error_code & PF_INSTR) {
21077+ if (nx_enabled && (error_code & PF_INSTR)) {
21078 unsigned int level;
21079
21080 pte_t *pte = lookup_address(address, &level);
21081
21082 if (pte && pte_present(*pte) && !pte_exec(*pte))
21083- printk(nx_warning, current_uid());
21084+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21085 }
21086
21087+#ifdef CONFIG_PAX_KERNEXEC
21088+ if (init_mm.start_code <= address && address < init_mm.end_code) {
21089+ if (current->signal->curr_ip)
21090+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21091+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21092+ else
21093+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21094+ current->comm, task_pid_nr(current), current_uid(), current_euid());
21095+ }
21096+#endif
21097+
21098 printk(KERN_ALERT "BUG: unable to handle kernel ");
21099 if (address < PAGE_SIZE)
21100 printk(KERN_CONT "NULL pointer dereference");
21101@@ -704,6 +791,70 @@ __bad_area_nosemaphore(struct pt_regs *r
21102 unsigned long address, int si_code)
21103 {
21104 struct task_struct *tsk = current;
21105+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21106+ struct mm_struct *mm = tsk->mm;
21107+#endif
21108+
21109+#ifdef CONFIG_X86_64
21110+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
21111+ if (regs->ip == (unsigned long)vgettimeofday) {
21112+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
21113+ return;
21114+ } else if (regs->ip == (unsigned long)vtime) {
21115+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
21116+ return;
21117+ } else if (regs->ip == (unsigned long)vgetcpu) {
21118+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
21119+ return;
21120+ }
21121+ }
21122+#endif
21123+
21124+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21125+ if (mm && (error_code & PF_USER)) {
21126+ unsigned long ip = regs->ip;
21127+
21128+ if (v8086_mode(regs))
21129+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21130+
21131+ /*
21132+ * It's possible to have interrupts off here:
21133+ */
21134+ local_irq_enable();
21135+
21136+#ifdef CONFIG_PAX_PAGEEXEC
21137+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
21138+ ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
21139+
21140+#ifdef CONFIG_PAX_EMUTRAMP
21141+ switch (pax_handle_fetch_fault(regs)) {
21142+ case 2:
21143+ return;
21144+ }
21145+#endif
21146+
21147+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21148+ do_group_exit(SIGKILL);
21149+ }
21150+#endif
21151+
21152+#ifdef CONFIG_PAX_SEGMEXEC
21153+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
21154+
21155+#ifdef CONFIG_PAX_EMUTRAMP
21156+ switch (pax_handle_fetch_fault(regs)) {
21157+ case 2:
21158+ return;
21159+ }
21160+#endif
21161+
21162+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21163+ do_group_exit(SIGKILL);
21164+ }
21165+#endif
21166+
21167+ }
21168+#endif
21169
21170 /* User mode accesses just cause a SIGSEGV */
21171 if (error_code & PF_USER) {
21172@@ -857,6 +1008,99 @@ static int spurious_fault_check(unsigned
21173 return 1;
21174 }
21175
21176+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21177+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21178+{
21179+ pte_t *pte;
21180+ pmd_t *pmd;
21181+ spinlock_t *ptl;
21182+ unsigned char pte_mask;
21183+
21184+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21185+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
21186+ return 0;
21187+
21188+ /* PaX: it's our fault, let's handle it if we can */
21189+
21190+ /* PaX: take a look at read faults before acquiring any locks */
21191+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21192+ /* instruction fetch attempt from a protected page in user mode */
21193+ up_read(&mm->mmap_sem);
21194+
21195+#ifdef CONFIG_PAX_EMUTRAMP
21196+ switch (pax_handle_fetch_fault(regs)) {
21197+ case 2:
21198+ return 1;
21199+ }
21200+#endif
21201+
21202+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21203+ do_group_exit(SIGKILL);
21204+ }
21205+
21206+ pmd = pax_get_pmd(mm, address);
21207+ if (unlikely(!pmd))
21208+ return 0;
21209+
21210+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21211+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21212+ pte_unmap_unlock(pte, ptl);
21213+ return 0;
21214+ }
21215+
21216+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21217+ /* write attempt to a protected page in user mode */
21218+ pte_unmap_unlock(pte, ptl);
21219+ return 0;
21220+ }
21221+
21222+#ifdef CONFIG_SMP
21223+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21224+#else
21225+ if (likely(address > get_limit(regs->cs)))
21226+#endif
21227+ {
21228+ set_pte(pte, pte_mkread(*pte));
21229+ __flush_tlb_one(address);
21230+ pte_unmap_unlock(pte, ptl);
21231+ up_read(&mm->mmap_sem);
21232+ return 1;
21233+ }
21234+
21235+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21236+
21237+ /*
21238+ * PaX: fill DTLB with user rights and retry
21239+ */
21240+ __asm__ __volatile__ (
21241+ "orb %2,(%1)\n"
21242+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21243+/*
21244+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
21245+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21246+ * page fault when examined during a TLB load attempt. this is true not only
21247+ * for PTEs holding a non-present entry but also for present entries that will
21248+ * raise a page fault (such as those set up by PaX, or the copy-on-write
21249+ * mechanism). in effect it means that we do *not* need to flush the TLBs
21250+ * for our target pages since their PTEs are simply not in the TLBs at all.
21251+ *
21252+ * the best thing about omitting it is that we gain around 15-20% speed in the
21253+ * fast path of the page fault handler and can get rid of tracing since we
21254+ * can no longer flush unintended entries.
21255+ */
21256+ "invlpg (%0)\n"
21257+#endif
21258+ __copyuser_seg"testb $0,(%0)\n"
21259+ "xorb %3,(%1)\n"
21260+ :
21261+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21262+ : "memory", "cc");
21263+ pte_unmap_unlock(pte, ptl);
21264+ up_read(&mm->mmap_sem);
21265+ return 1;
21266+}
21267+#endif
21268+
21269 /*
21270 * Handle a spurious fault caused by a stale TLB entry.
21271 *
21272@@ -923,6 +1167,9 @@ int show_unhandled_signals = 1;
21273 static inline int
21274 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
21275 {
21276+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21277+ return 1;
21278+
21279 if (write) {
21280 /* write, present and write, not present: */
21281 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21282@@ -956,17 +1203,31 @@ do_page_fault(struct pt_regs *regs, unsi
21283 {
21284 struct vm_area_struct *vma;
21285 struct task_struct *tsk;
21286- unsigned long address;
21287 struct mm_struct *mm;
21288 int write;
21289 int fault;
21290
21291+ /* Get the faulting address: */
21292+ unsigned long address = read_cr2();
21293+
21294+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21295+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21296+ if (!search_exception_tables(regs->ip)) {
21297+ bad_area_nosemaphore(regs, error_code, address);
21298+ return;
21299+ }
21300+ if (address < PAX_USER_SHADOW_BASE) {
21301+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21302+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
21303+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21304+ } else
21305+ address -= PAX_USER_SHADOW_BASE;
21306+ }
21307+#endif
21308+
21309 tsk = current;
21310 mm = tsk->mm;
21311
21312- /* Get the faulting address: */
21313- address = read_cr2();
21314-
21315 /*
21316 * Detect and handle instructions that would cause a page fault for
21317 * both a tracked kernel page and a userspace page.
21318@@ -1026,7 +1287,7 @@ do_page_fault(struct pt_regs *regs, unsi
21319 * User-mode registers count as a user access even for any
21320 * potential system fault or CPU buglet:
21321 */
21322- if (user_mode_vm(regs)) {
21323+ if (user_mode(regs)) {
21324 local_irq_enable();
21325 error_code |= PF_USER;
21326 } else {
21327@@ -1080,6 +1341,11 @@ do_page_fault(struct pt_regs *regs, unsi
21328 might_sleep();
21329 }
21330
21331+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21332+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21333+ return;
21334+#endif
21335+
21336 vma = find_vma(mm, address);
21337 if (unlikely(!vma)) {
21338 bad_area(regs, error_code, address);
21339@@ -1091,18 +1357,24 @@ do_page_fault(struct pt_regs *regs, unsi
21340 bad_area(regs, error_code, address);
21341 return;
21342 }
21343- if (error_code & PF_USER) {
21344- /*
21345- * Accessing the stack below %sp is always a bug.
21346- * The large cushion allows instructions like enter
21347- * and pusha to work. ("enter $65535, $31" pushes
21348- * 32 pointers and then decrements %sp by 65535.)
21349- */
21350- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21351- bad_area(regs, error_code, address);
21352- return;
21353- }
21354+ /*
21355+ * Accessing the stack below %sp is always a bug.
21356+ * The large cushion allows instructions like enter
21357+ * and pusha to work. ("enter $65535, $31" pushes
21358+ * 32 pointers and then decrements %sp by 65535.)
21359+ */
21360+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21361+ bad_area(regs, error_code, address);
21362+ return;
21363 }
21364+
21365+#ifdef CONFIG_PAX_SEGMEXEC
21366+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21367+ bad_area(regs, error_code, address);
21368+ return;
21369+ }
21370+#endif
21371+
21372 if (unlikely(expand_stack(vma, address))) {
21373 bad_area(regs, error_code, address);
21374 return;
21375@@ -1146,3 +1418,199 @@ good_area:
21376
21377 up_read(&mm->mmap_sem);
21378 }
21379+
21380+#ifdef CONFIG_PAX_EMUTRAMP
21381+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21382+{
21383+ int err;
21384+
21385+ do { /* PaX: gcc trampoline emulation #1 */
21386+ unsigned char mov1, mov2;
21387+ unsigned short jmp;
21388+ unsigned int addr1, addr2;
21389+
21390+#ifdef CONFIG_X86_64
21391+ if ((regs->ip + 11) >> 32)
21392+ break;
21393+#endif
21394+
21395+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21396+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21397+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21398+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21399+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21400+
21401+ if (err)
21402+ break;
21403+
21404+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21405+ regs->cx = addr1;
21406+ regs->ax = addr2;
21407+ regs->ip = addr2;
21408+ return 2;
21409+ }
21410+ } while (0);
21411+
21412+ do { /* PaX: gcc trampoline emulation #2 */
21413+ unsigned char mov, jmp;
21414+ unsigned int addr1, addr2;
21415+
21416+#ifdef CONFIG_X86_64
21417+ if ((regs->ip + 9) >> 32)
21418+ break;
21419+#endif
21420+
21421+ err = get_user(mov, (unsigned char __user *)regs->ip);
21422+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21423+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21424+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21425+
21426+ if (err)
21427+ break;
21428+
21429+ if (mov == 0xB9 && jmp == 0xE9) {
21430+ regs->cx = addr1;
21431+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21432+ return 2;
21433+ }
21434+ } while (0);
21435+
21436+ return 1; /* PaX in action */
21437+}
21438+
21439+#ifdef CONFIG_X86_64
21440+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21441+{
21442+ int err;
21443+
21444+ do { /* PaX: gcc trampoline emulation #1 */
21445+ unsigned short mov1, mov2, jmp1;
21446+ unsigned char jmp2;
21447+ unsigned int addr1;
21448+ unsigned long addr2;
21449+
21450+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21451+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21452+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21453+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21454+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21455+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21456+
21457+ if (err)
21458+ break;
21459+
21460+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21461+ regs->r11 = addr1;
21462+ regs->r10 = addr2;
21463+ regs->ip = addr1;
21464+ return 2;
21465+ }
21466+ } while (0);
21467+
21468+ do { /* PaX: gcc trampoline emulation #2 */
21469+ unsigned short mov1, mov2, jmp1;
21470+ unsigned char jmp2;
21471+ unsigned long addr1, addr2;
21472+
21473+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21474+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21475+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21476+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21477+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21478+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21479+
21480+ if (err)
21481+ break;
21482+
21483+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21484+ regs->r11 = addr1;
21485+ regs->r10 = addr2;
21486+ regs->ip = addr1;
21487+ return 2;
21488+ }
21489+ } while (0);
21490+
21491+ return 1; /* PaX in action */
21492+}
21493+#endif
21494+
21495+/*
21496+ * PaX: decide what to do with offenders (regs->ip = fault address)
21497+ *
21498+ * returns 1 when task should be killed
21499+ * 2 when gcc trampoline was detected
21500+ */
21501+static int pax_handle_fetch_fault(struct pt_regs *regs)
21502+{
21503+ if (v8086_mode(regs))
21504+ return 1;
21505+
21506+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21507+ return 1;
21508+
21509+#ifdef CONFIG_X86_32
21510+ return pax_handle_fetch_fault_32(regs);
21511+#else
21512+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21513+ return pax_handle_fetch_fault_32(regs);
21514+ else
21515+ return pax_handle_fetch_fault_64(regs);
21516+#endif
21517+}
21518+#endif
21519+
21520+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21521+void pax_report_insns(void *pc, void *sp)
21522+{
21523+ long i;
21524+
21525+ printk(KERN_ERR "PAX: bytes at PC: ");
21526+ for (i = 0; i < 20; i++) {
21527+ unsigned char c;
21528+ if (get_user(c, (__force unsigned char __user *)pc+i))
21529+ printk(KERN_CONT "?? ");
21530+ else
21531+ printk(KERN_CONT "%02x ", c);
21532+ }
21533+ printk("\n");
21534+
21535+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21536+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
21537+ unsigned long c;
21538+ if (get_user(c, (__force unsigned long __user *)sp+i))
21539+#ifdef CONFIG_X86_32
21540+ printk(KERN_CONT "???????? ");
21541+#else
21542+ printk(KERN_CONT "???????????????? ");
21543+#endif
21544+ else
21545+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21546+ }
21547+ printk("\n");
21548+}
21549+#endif
21550+
21551+/**
21552+ * probe_kernel_write(): safely attempt to write to a location
21553+ * @dst: address to write to
21554+ * @src: pointer to the data that shall be written
21555+ * @size: size of the data chunk
21556+ *
21557+ * Safely write to address @dst from the buffer at @src. If a kernel fault
21558+ * happens, handle that and return -EFAULT.
21559+ */
21560+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21561+{
21562+ long ret;
21563+ mm_segment_t old_fs = get_fs();
21564+
21565+ set_fs(KERNEL_DS);
21566+ pagefault_disable();
21567+ pax_open_kernel();
21568+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21569+ pax_close_kernel();
21570+ pagefault_enable();
21571+ set_fs(old_fs);
21572+
21573+ return ret ? -EFAULT : 0;
21574+}
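For readers of the EMUTRAMP hunks above: a minimal GNU C sketch, not part of the patch, of the kind of nested-function trampoline whose byte patterns pax_handle_fetch_fault_32() matches. The opcode layouts in the comment are reconstructed from the constants in the hunk; the function names are illustrative only.

/*
 * Pattern #1 above (mov1 == 0xB9, mov2 == 0xB8, jmp == 0xE0FF), 12 bytes:
 *
 *   b9 xx xx xx xx        mov  $static_chain, %ecx
 *   b8 yy yy yy yy        mov  $target,       %eax
 *   ff e0                 jmp  *%eax
 *
 * Pattern #2 above (mov == 0xB9, jmp == 0xE9), 10 bytes; the rel32 target is
 * resolved as regs->ip + addr2 + 10, i.e. relative to the end of the jmp:
 *
 *   b9 xx xx xx xx        mov  $static_chain, %ecx
 *   e9 zz zz zz zz        jmp  target
 *
 * GNU C code that makes gcc emit such a stack trampoline (the address of a
 * nested function that captures a local escapes to another function):
 */
static int apply(int (*cb)(int), int v)
{
	return cb(v);
}

int uses_trampoline(int base)
{
	int add_base(int x) { return x + base; }	/* nested function (GNU C) */

	return apply(add_base, 7);	/* taking &add_base builds the trampoline */
}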
21575diff -urNp linux-2.6.32.45/arch/x86/mm/gup.c linux-2.6.32.45/arch/x86/mm/gup.c
21576--- linux-2.6.32.45/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21577+++ linux-2.6.32.45/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21578@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21579 addr = start;
21580 len = (unsigned long) nr_pages << PAGE_SHIFT;
21581 end = start + len;
21582- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21583+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21584 (void __user *)start, len)))
21585 return 0;
21586
21587diff -urNp linux-2.6.32.45/arch/x86/mm/highmem_32.c linux-2.6.32.45/arch/x86/mm/highmem_32.c
21588--- linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21589+++ linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21590@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21591 idx = type + KM_TYPE_NR*smp_processor_id();
21592 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21593 BUG_ON(!pte_none(*(kmap_pte-idx)));
21594+
21595+ pax_open_kernel();
21596 set_pte(kmap_pte-idx, mk_pte(page, prot));
21597+ pax_close_kernel();
21598
21599 return (void *)vaddr;
21600 }
21601diff -urNp linux-2.6.32.45/arch/x86/mm/hugetlbpage.c linux-2.6.32.45/arch/x86/mm/hugetlbpage.c
21602--- linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21603+++ linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21604@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21605 struct hstate *h = hstate_file(file);
21606 struct mm_struct *mm = current->mm;
21607 struct vm_area_struct *vma;
21608- unsigned long start_addr;
21609+ unsigned long start_addr, pax_task_size = TASK_SIZE;
21610+
21611+#ifdef CONFIG_PAX_SEGMEXEC
21612+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21613+ pax_task_size = SEGMEXEC_TASK_SIZE;
21614+#endif
21615+
21616+ pax_task_size -= PAGE_SIZE;
21617
21618 if (len > mm->cached_hole_size) {
21619- start_addr = mm->free_area_cache;
21620+ start_addr = mm->free_area_cache;
21621 } else {
21622- start_addr = TASK_UNMAPPED_BASE;
21623- mm->cached_hole_size = 0;
21624+ start_addr = mm->mmap_base;
21625+ mm->cached_hole_size = 0;
21626 }
21627
21628 full_search:
21629@@ -281,26 +288,27 @@ full_search:
21630
21631 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21632 /* At this point: (!vma || addr < vma->vm_end). */
21633- if (TASK_SIZE - len < addr) {
21634+ if (pax_task_size - len < addr) {
21635 /*
21636 * Start a new search - just in case we missed
21637 * some holes.
21638 */
21639- if (start_addr != TASK_UNMAPPED_BASE) {
21640- start_addr = TASK_UNMAPPED_BASE;
21641+ if (start_addr != mm->mmap_base) {
21642+ start_addr = mm->mmap_base;
21643 mm->cached_hole_size = 0;
21644 goto full_search;
21645 }
21646 return -ENOMEM;
21647 }
21648- if (!vma || addr + len <= vma->vm_start) {
21649- mm->free_area_cache = addr + len;
21650- return addr;
21651- }
21652+ if (check_heap_stack_gap(vma, addr, len))
21653+ break;
21654 if (addr + mm->cached_hole_size < vma->vm_start)
21655 mm->cached_hole_size = vma->vm_start - addr;
21656 addr = ALIGN(vma->vm_end, huge_page_size(h));
21657 }
21658+
21659+ mm->free_area_cache = addr + len;
21660+ return addr;
21661 }
21662
21663 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21664@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21665 {
21666 struct hstate *h = hstate_file(file);
21667 struct mm_struct *mm = current->mm;
21668- struct vm_area_struct *vma, *prev_vma;
21669- unsigned long base = mm->mmap_base, addr = addr0;
21670+ struct vm_area_struct *vma;
21671+ unsigned long base = mm->mmap_base, addr;
21672 unsigned long largest_hole = mm->cached_hole_size;
21673- int first_time = 1;
21674
21675 /* don't allow allocations above current base */
21676 if (mm->free_area_cache > base)
21677@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21678 largest_hole = 0;
21679 mm->free_area_cache = base;
21680 }
21681-try_again:
21682+
21683 /* make sure it can fit in the remaining address space */
21684 if (mm->free_area_cache < len)
21685 goto fail;
21686
21687 /* either no address requested or cant fit in requested address hole */
21688- addr = (mm->free_area_cache - len) & huge_page_mask(h);
21689+ addr = (mm->free_area_cache - len);
21690 do {
21691+ addr &= huge_page_mask(h);
21692+ vma = find_vma(mm, addr);
21693 /*
21694 * Lookup failure means no vma is above this address,
21695 * i.e. return with success:
21696- */
21697- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21698- return addr;
21699-
21700- /*
21701 * new region fits between prev_vma->vm_end and
21702 * vma->vm_start, use it:
21703 */
21704- if (addr + len <= vma->vm_start &&
21705- (!prev_vma || (addr >= prev_vma->vm_end))) {
21706+ if (check_heap_stack_gap(vma, addr, len)) {
21707 /* remember the address as a hint for next time */
21708- mm->cached_hole_size = largest_hole;
21709- return (mm->free_area_cache = addr);
21710- } else {
21711- /* pull free_area_cache down to the first hole */
21712- if (mm->free_area_cache == vma->vm_end) {
21713- mm->free_area_cache = vma->vm_start;
21714- mm->cached_hole_size = largest_hole;
21715- }
21716+ mm->cached_hole_size = largest_hole;
21717+ return (mm->free_area_cache = addr);
21718+ }
21719+ /* pull free_area_cache down to the first hole */
21720+ if (mm->free_area_cache == vma->vm_end) {
21721+ mm->free_area_cache = vma->vm_start;
21722+ mm->cached_hole_size = largest_hole;
21723 }
21724
21725 /* remember the largest hole we saw so far */
21726 if (addr + largest_hole < vma->vm_start)
21727- largest_hole = vma->vm_start - addr;
21728+ largest_hole = vma->vm_start - addr;
21729
21730 /* try just below the current vma->vm_start */
21731- addr = (vma->vm_start - len) & huge_page_mask(h);
21732- } while (len <= vma->vm_start);
21733+ addr = skip_heap_stack_gap(vma, len);
21734+ } while (!IS_ERR_VALUE(addr));
21735
21736 fail:
21737 /*
21738- * if hint left us with no space for the requested
21739- * mapping then try again:
21740- */
21741- if (first_time) {
21742- mm->free_area_cache = base;
21743- largest_hole = 0;
21744- first_time = 0;
21745- goto try_again;
21746- }
21747- /*
21748 * A failed mmap() very likely causes application failure,
21749 * so fall back to the bottom-up function here. This scenario
21750 * can happen with large stack limits and large mmap()
21751 * allocations.
21752 */
21753- mm->free_area_cache = TASK_UNMAPPED_BASE;
21754+
21755+#ifdef CONFIG_PAX_SEGMEXEC
21756+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21757+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21758+ else
21759+#endif
21760+
21761+ mm->mmap_base = TASK_UNMAPPED_BASE;
21762+
21763+#ifdef CONFIG_PAX_RANDMMAP
21764+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21765+ mm->mmap_base += mm->delta_mmap;
21766+#endif
21767+
21768+ mm->free_area_cache = mm->mmap_base;
21769 mm->cached_hole_size = ~0UL;
21770 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21771 len, pgoff, flags);
21772@@ -387,6 +393,7 @@ fail:
21773 /*
21774 * Restore the topdown base:
21775 */
21776+ mm->mmap_base = base;
21777 mm->free_area_cache = base;
21778 mm->cached_hole_size = ~0UL;
21779
21780@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21781 struct hstate *h = hstate_file(file);
21782 struct mm_struct *mm = current->mm;
21783 struct vm_area_struct *vma;
21784+ unsigned long pax_task_size = TASK_SIZE;
21785
21786 if (len & ~huge_page_mask(h))
21787 return -EINVAL;
21788- if (len > TASK_SIZE)
21789+
21790+#ifdef CONFIG_PAX_SEGMEXEC
21791+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21792+ pax_task_size = SEGMEXEC_TASK_SIZE;
21793+#endif
21794+
21795+ pax_task_size -= PAGE_SIZE;
21796+
21797+ if (len > pax_task_size)
21798 return -ENOMEM;
21799
21800 if (flags & MAP_FIXED) {
21801@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21802 if (addr) {
21803 addr = ALIGN(addr, huge_page_size(h));
21804 vma = find_vma(mm, addr);
21805- if (TASK_SIZE - len >= addr &&
21806- (!vma || addr + len <= vma->vm_start))
21807+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21808 return addr;
21809 }
21810 if (mm->get_unmapped_area == arch_get_unmapped_area)
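The hunks above (and several earlier ones) replace the open-coded '!vma || addr + len <= vma->vm_start' test with check_heap_stack_gap(); the real helper is added elsewhere in this patch. Below is a minimal sketch of the intended semantics only, assuming the heap/stack-gap sysctl the patch introduces; it is not the patch's actual definition.

#include <linux/mm.h>

extern unsigned long sysctl_heap_stack_gap;	/* knob assumed to be added by the patch */

/* Illustrative sketch only. */
static inline int check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
					      unsigned long addr, unsigned long len)
{
	if (!vma)				/* no mapping above: the hole is free */
		return 1;
	if (addr + len > vma->vm_start)		/* candidate overlaps the next mapping */
		return 0;
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep a guard gap below a stack vma */
		return vma->vm_start - addr - len >= sysctl_heap_stack_gap;
	return 1;
}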
21811diff -urNp linux-2.6.32.45/arch/x86/mm/init_32.c linux-2.6.32.45/arch/x86/mm/init_32.c
21812--- linux-2.6.32.45/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21813+++ linux-2.6.32.45/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21814@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21815 }
21816
21817 /*
21818- * Creates a middle page table and puts a pointer to it in the
21819- * given global directory entry. This only returns the gd entry
21820- * in non-PAE compilation mode, since the middle layer is folded.
21821- */
21822-static pmd_t * __init one_md_table_init(pgd_t *pgd)
21823-{
21824- pud_t *pud;
21825- pmd_t *pmd_table;
21826-
21827-#ifdef CONFIG_X86_PAE
21828- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21829- if (after_bootmem)
21830- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21831- else
21832- pmd_table = (pmd_t *)alloc_low_page();
21833- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21834- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21835- pud = pud_offset(pgd, 0);
21836- BUG_ON(pmd_table != pmd_offset(pud, 0));
21837-
21838- return pmd_table;
21839- }
21840-#endif
21841- pud = pud_offset(pgd, 0);
21842- pmd_table = pmd_offset(pud, 0);
21843-
21844- return pmd_table;
21845-}
21846-
21847-/*
21848 * Create a page table and place a pointer to it in a middle page
21849 * directory entry:
21850 */
21851@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21852 page_table = (pte_t *)alloc_low_page();
21853
21854 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21855+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21856+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21857+#else
21858 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21859+#endif
21860 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21861 }
21862
21863 return pte_offset_kernel(pmd, 0);
21864 }
21865
21866+static pmd_t * __init one_md_table_init(pgd_t *pgd)
21867+{
21868+ pud_t *pud;
21869+ pmd_t *pmd_table;
21870+
21871+ pud = pud_offset(pgd, 0);
21872+ pmd_table = pmd_offset(pud, 0);
21873+
21874+ return pmd_table;
21875+}
21876+
21877 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21878 {
21879 int pgd_idx = pgd_index(vaddr);
21880@@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21881 int pgd_idx, pmd_idx;
21882 unsigned long vaddr;
21883 pgd_t *pgd;
21884+ pud_t *pud;
21885 pmd_t *pmd;
21886 pte_t *pte = NULL;
21887
21888@@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21889 pgd = pgd_base + pgd_idx;
21890
21891 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21892- pmd = one_md_table_init(pgd);
21893- pmd = pmd + pmd_index(vaddr);
21894+ pud = pud_offset(pgd, vaddr);
21895+ pmd = pmd_offset(pud, vaddr);
21896+
21897+#ifdef CONFIG_X86_PAE
21898+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21899+#endif
21900+
21901 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21902 pmd++, pmd_idx++) {
21903 pte = page_table_kmap_check(one_page_table_init(pmd),
21904@@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21905 }
21906 }
21907
21908-static inline int is_kernel_text(unsigned long addr)
21909+static inline int is_kernel_text(unsigned long start, unsigned long end)
21910 {
21911- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21912- return 1;
21913- return 0;
21914+ if ((start > ktla_ktva((unsigned long)_etext) ||
21915+ end <= ktla_ktva((unsigned long)_stext)) &&
21916+ (start > ktla_ktva((unsigned long)_einittext) ||
21917+ end <= ktla_ktva((unsigned long)_sinittext)) &&
21918+
21919+#ifdef CONFIG_ACPI_SLEEP
21920+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21921+#endif
21922+
21923+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21924+ return 0;
21925+ return 1;
21926 }
21927
21928 /*
21929@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21930 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21931 unsigned long start_pfn, end_pfn;
21932 pgd_t *pgd_base = swapper_pg_dir;
21933- int pgd_idx, pmd_idx, pte_ofs;
21934+ unsigned int pgd_idx, pmd_idx, pte_ofs;
21935 unsigned long pfn;
21936 pgd_t *pgd;
21937+ pud_t *pud;
21938 pmd_t *pmd;
21939 pte_t *pte;
21940 unsigned pages_2m, pages_4k;
21941@@ -278,8 +279,13 @@ repeat:
21942 pfn = start_pfn;
21943 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21944 pgd = pgd_base + pgd_idx;
21945- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21946- pmd = one_md_table_init(pgd);
21947+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21948+ pud = pud_offset(pgd, 0);
21949+ pmd = pmd_offset(pud, 0);
21950+
21951+#ifdef CONFIG_X86_PAE
21952+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21953+#endif
21954
21955 if (pfn >= end_pfn)
21956 continue;
21957@@ -291,14 +297,13 @@ repeat:
21958 #endif
21959 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21960 pmd++, pmd_idx++) {
21961- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21962+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21963
21964 /*
21965 * Map with big pages if possible, otherwise
21966 * create normal page tables:
21967 */
21968 if (use_pse) {
21969- unsigned int addr2;
21970 pgprot_t prot = PAGE_KERNEL_LARGE;
21971 /*
21972 * first pass will use the same initial
21973@@ -308,11 +313,7 @@ repeat:
21974 __pgprot(PTE_IDENT_ATTR |
21975 _PAGE_PSE);
21976
21977- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21978- PAGE_OFFSET + PAGE_SIZE-1;
21979-
21980- if (is_kernel_text(addr) ||
21981- is_kernel_text(addr2))
21982+ if (is_kernel_text(address, address + PMD_SIZE))
21983 prot = PAGE_KERNEL_LARGE_EXEC;
21984
21985 pages_2m++;
21986@@ -329,7 +330,7 @@ repeat:
21987 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21988 pte += pte_ofs;
21989 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21990- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21991+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21992 pgprot_t prot = PAGE_KERNEL;
21993 /*
21994 * first pass will use the same initial
21995@@ -337,7 +338,7 @@ repeat:
21996 */
21997 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21998
21999- if (is_kernel_text(addr))
22000+ if (is_kernel_text(address, address + PAGE_SIZE))
22001 prot = PAGE_KERNEL_EXEC;
22002
22003 pages_4k++;
22004@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
22005
22006 pud = pud_offset(pgd, va);
22007 pmd = pmd_offset(pud, va);
22008- if (!pmd_present(*pmd))
22009+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
22010 break;
22011
22012 pte = pte_offset_kernel(pmd, va);
22013@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
22014
22015 static void __init pagetable_init(void)
22016 {
22017- pgd_t *pgd_base = swapper_pg_dir;
22018-
22019- permanent_kmaps_init(pgd_base);
22020+ permanent_kmaps_init(swapper_pg_dir);
22021 }
22022
22023 #ifdef CONFIG_ACPI_SLEEP
22024@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
22025 * ACPI suspend needs this for resume, because things like the intel-agp
22026 * driver might have split up a kernel 4MB mapping.
22027 */
22028-char swsusp_pg_dir[PAGE_SIZE]
22029+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
22030 __attribute__ ((aligned(PAGE_SIZE)));
22031
22032 static inline void save_pg_dir(void)
22033 {
22034- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
22035+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
22036 }
22037 #else /* !CONFIG_ACPI_SLEEP */
22038 static inline void save_pg_dir(void)
22039@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
22040 flush_tlb_all();
22041 }
22042
22043-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22044+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22045 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22046
22047 /* user-defined highmem size */
22048@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
22049 * Initialize the boot-time allocator (with low memory only):
22050 */
22051 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
22052- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
22053+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
22054 PAGE_SIZE);
22055 if (bootmap == -1L)
22056 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
22057@@ -864,6 +863,12 @@ void __init mem_init(void)
22058
22059 pci_iommu_alloc();
22060
22061+#ifdef CONFIG_PAX_PER_CPU_PGD
22062+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22063+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22064+ KERNEL_PGD_PTRS);
22065+#endif
22066+
22067 #ifdef CONFIG_FLATMEM
22068 BUG_ON(!mem_map);
22069 #endif
22070@@ -881,7 +886,7 @@ void __init mem_init(void)
22071 set_highmem_pages_init();
22072
22073 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22074- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22075+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22076 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22077
22078 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22079@@ -923,10 +928,10 @@ void __init mem_init(void)
22080 ((unsigned long)&__init_end -
22081 (unsigned long)&__init_begin) >> 10,
22082
22083- (unsigned long)&_etext, (unsigned long)&_edata,
22084- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22085+ (unsigned long)&_sdata, (unsigned long)&_edata,
22086+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22087
22088- (unsigned long)&_text, (unsigned long)&_etext,
22089+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22090 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22091
22092 /*
22093@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
22094 if (!kernel_set_to_readonly)
22095 return;
22096
22097+ start = ktla_ktva(start);
22098 pr_debug("Set kernel text: %lx - %lx for read write\n",
22099 start, start+size);
22100
22101@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
22102 if (!kernel_set_to_readonly)
22103 return;
22104
22105+ start = ktla_ktva(start);
22106 pr_debug("Set kernel text: %lx - %lx for read only\n",
22107 start, start+size);
22108
22109@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
22110 unsigned long start = PFN_ALIGN(_text);
22111 unsigned long size = PFN_ALIGN(_etext) - start;
22112
22113+ start = ktla_ktva(start);
22114 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22115 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22116 size >> 10);
22117diff -urNp linux-2.6.32.45/arch/x86/mm/init_64.c linux-2.6.32.45/arch/x86/mm/init_64.c
22118--- linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
22119+++ linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
22120@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
22121 pmd = fill_pmd(pud, vaddr);
22122 pte = fill_pte(pmd, vaddr);
22123
22124+ pax_open_kernel();
22125 set_pte(pte, new_pte);
22126+ pax_close_kernel();
22127
22128 /*
22129 * It's enough to flush this one mapping.
22130@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
22131 pgd = pgd_offset_k((unsigned long)__va(phys));
22132 if (pgd_none(*pgd)) {
22133 pud = (pud_t *) spp_getpage();
22134- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22135- _PAGE_USER));
22136+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22137 }
22138 pud = pud_offset(pgd, (unsigned long)__va(phys));
22139 if (pud_none(*pud)) {
22140 pmd = (pmd_t *) spp_getpage();
22141- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22142- _PAGE_USER));
22143+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22144 }
22145 pmd = pmd_offset(pud, phys);
22146 BUG_ON(!pmd_none(*pmd));
22147@@ -675,6 +675,12 @@ void __init mem_init(void)
22148
22149 pci_iommu_alloc();
22150
22151+#ifdef CONFIG_PAX_PER_CPU_PGD
22152+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22153+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22154+ KERNEL_PGD_PTRS);
22155+#endif
22156+
22157 /* clear_bss() already clear the empty_zero_page */
22158
22159 reservedpages = 0;
22160@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
22161 static struct vm_area_struct gate_vma = {
22162 .vm_start = VSYSCALL_START,
22163 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22164- .vm_page_prot = PAGE_READONLY_EXEC,
22165- .vm_flags = VM_READ | VM_EXEC
22166+ .vm_page_prot = PAGE_READONLY,
22167+ .vm_flags = VM_READ
22168 };
22169
22170 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
22171@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
22172
22173 const char *arch_vma_name(struct vm_area_struct *vma)
22174 {
22175- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22176+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22177 return "[vdso]";
22178 if (vma == &gate_vma)
22179 return "[vsyscall]";
22180diff -urNp linux-2.6.32.45/arch/x86/mm/init.c linux-2.6.32.45/arch/x86/mm/init.c
22181--- linux-2.6.32.45/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
22182+++ linux-2.6.32.45/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
22183@@ -69,11 +69,7 @@ static void __init find_early_table_spac
22184 * cause a hotspot and fill up ZONE_DMA. The page tables
22185 * need roughly 0.5KB per GB.
22186 */
22187-#ifdef CONFIG_X86_32
22188- start = 0x7000;
22189-#else
22190- start = 0x8000;
22191-#endif
22192+ start = 0x100000;
22193 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
22194 tables, PAGE_SIZE);
22195 if (e820_table_start == -1UL)
22196@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
22197 #endif
22198
22199 set_nx();
22200- if (nx_enabled)
22201+ if (nx_enabled && cpu_has_nx)
22202 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
22203
22204 /* Enable PSE if available */
22205@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
22206 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
22207 * mmio resources as well as potential bios/acpi data regions.
22208 */
22209+
22210 int devmem_is_allowed(unsigned long pagenr)
22211 {
22212+#ifdef CONFIG_GRKERNSEC_KMEM
22213+ /* allow BDA */
22214+ if (!pagenr)
22215+ return 1;
22216+ /* allow EBDA */
22217+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22218+ return 1;
22219+ /* allow ISA/video mem */
22220+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22221+ return 1;
22222+ /* throw out everything else below 1MB */
22223+ if (pagenr <= 256)
22224+ return 0;
22225+#else
22226 if (pagenr <= 256)
22227 return 1;
22228+#endif
22229+
22230 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22231 return 0;
22232 if (!page_is_ram(pagenr))
22233@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
22234
22235 void free_initmem(void)
22236 {
22237+
22238+#ifdef CONFIG_PAX_KERNEXEC
22239+#ifdef CONFIG_X86_32
22240+ /* PaX: limit KERNEL_CS to actual size */
22241+ unsigned long addr, limit;
22242+ struct desc_struct d;
22243+ int cpu;
22244+
22245+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22246+ limit = (limit - 1UL) >> PAGE_SHIFT;
22247+
22248+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22249+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22250+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22251+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22252+ }
22253+
22254+ /* PaX: make KERNEL_CS read-only */
22255+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22256+ if (!paravirt_enabled())
22257+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22258+/*
22259+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22260+ pgd = pgd_offset_k(addr);
22261+ pud = pud_offset(pgd, addr);
22262+ pmd = pmd_offset(pud, addr);
22263+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22264+ }
22265+*/
22266+#ifdef CONFIG_X86_PAE
22267+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22268+/*
22269+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22270+ pgd = pgd_offset_k(addr);
22271+ pud = pud_offset(pgd, addr);
22272+ pmd = pmd_offset(pud, addr);
22273+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22274+ }
22275+*/
22276+#endif
22277+
22278+#ifdef CONFIG_MODULES
22279+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22280+#endif
22281+
22282+#else
22283+ pgd_t *pgd;
22284+ pud_t *pud;
22285+ pmd_t *pmd;
22286+ unsigned long addr, end;
22287+
22288+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22289+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22290+ pgd = pgd_offset_k(addr);
22291+ pud = pud_offset(pgd, addr);
22292+ pmd = pmd_offset(pud, addr);
22293+ if (!pmd_present(*pmd))
22294+ continue;
22295+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22296+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22297+ else
22298+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22299+ }
22300+
22301+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22302+ end = addr + KERNEL_IMAGE_SIZE;
22303+ for (; addr < end; addr += PMD_SIZE) {
22304+ pgd = pgd_offset_k(addr);
22305+ pud = pud_offset(pgd, addr);
22306+ pmd = pmd_offset(pud, addr);
22307+ if (!pmd_present(*pmd))
22308+ continue;
22309+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22310+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22311+ }
22312+#endif
22313+
22314+ flush_tlb_all();
22315+#endif
22316+
22317 free_init_pages("unused kernel memory",
22318 (unsigned long)(&__init_begin),
22319 (unsigned long)(&__init_end));
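A quick worked example of the CONFIG_GRKERNSEC_KMEM branch added to devmem_is_allowed() above, with page numbers derived directly from the hunk (PAGE_SHIFT == 12 assumed):

/*
 *   devmem_is_allowed(0x00000 >> PAGE_SHIFT) == 1   BDA (page 0)
 *   devmem_is_allowed(0x9f000 >> PAGE_SHIFT) == 1   EBDA
 *   devmem_is_allowed(0xb8000 >> PAGE_SHIFT) == 1   ISA/video window, 0xa0000-0xfffff
 *   devmem_is_allowed(0x10000 >> PAGE_SHIFT) == 0   any other page below 1MB is refused
 *
 * Pages above 1MB still fall through to the unchanged iomem_is_exclusive()
 * and page_is_ram() checks.
 */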
22320diff -urNp linux-2.6.32.45/arch/x86/mm/iomap_32.c linux-2.6.32.45/arch/x86/mm/iomap_32.c
22321--- linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
22322+++ linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
22323@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
22324 debug_kmap_atomic(type);
22325 idx = type + KM_TYPE_NR * smp_processor_id();
22326 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22327+
22328+ pax_open_kernel();
22329 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22330+ pax_close_kernel();
22331+
22332 arch_flush_lazy_mmu_mode();
22333
22334 return (void *)vaddr;
22335diff -urNp linux-2.6.32.45/arch/x86/mm/ioremap.c linux-2.6.32.45/arch/x86/mm/ioremap.c
22336--- linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
22337+++ linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
22338@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
22339 * Second special case: Some BIOSen report the PC BIOS
22340 * area (640->1Mb) as ram even though it is not.
22341 */
22342- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
22343- pagenr < (BIOS_END >> PAGE_SHIFT))
22344+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
22345+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22346 return 0;
22347
22348 for (i = 0; i < e820.nr_map; i++) {
22349@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
22350 /*
22351 * Don't allow anybody to remap normal RAM that we're using..
22352 */
22353- for (pfn = phys_addr >> PAGE_SHIFT;
22354- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
22355- pfn++) {
22356-
22357+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
22358 int is_ram = page_is_ram(pfn);
22359
22360- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22361+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22362 return NULL;
22363 WARN_ON_ONCE(is_ram);
22364 }
22365@@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
22366 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22367
22368 static __initdata int after_paging_init;
22369-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22370+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22371
22372 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22373 {
22374@@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
22375 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22376
22377 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22378- memset(bm_pte, 0, sizeof(bm_pte));
22379- pmd_populate_kernel(&init_mm, pmd, bm_pte);
22380+ pmd_populate_user(&init_mm, pmd, bm_pte);
22381
22382 /*
22383 * The boot-ioremap range spans multiple pmds, for which
22384diff -urNp linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c
22385--- linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
22386+++ linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
22387@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
22388 * memory (e.g. tracked pages)? For now, we need this to avoid
22389 * invoking kmemcheck for PnP BIOS calls.
22390 */
22391- if (regs->flags & X86_VM_MASK)
22392+ if (v8086_mode(regs))
22393 return false;
22394- if (regs->cs != __KERNEL_CS)
22395+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22396 return false;
22397
22398 pte = kmemcheck_pte_lookup(address);
22399diff -urNp linux-2.6.32.45/arch/x86/mm/mmap.c linux-2.6.32.45/arch/x86/mm/mmap.c
22400--- linux-2.6.32.45/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
22401+++ linux-2.6.32.45/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
22402@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
22403 * Leave an at least ~128 MB hole with possible stack randomization.
22404 */
22405 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22406-#define MAX_GAP (TASK_SIZE/6*5)
22407+#define MAX_GAP (pax_task_size/6*5)
22408
22409 /*
22410 * True on X86_32 or when emulating IA32 on X86_64
22411@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22412 return rnd << PAGE_SHIFT;
22413 }
22414
22415-static unsigned long mmap_base(void)
22416+static unsigned long mmap_base(struct mm_struct *mm)
22417 {
22418 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
22419+ unsigned long pax_task_size = TASK_SIZE;
22420+
22421+#ifdef CONFIG_PAX_SEGMEXEC
22422+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22423+ pax_task_size = SEGMEXEC_TASK_SIZE;
22424+#endif
22425
22426 if (gap < MIN_GAP)
22427 gap = MIN_GAP;
22428 else if (gap > MAX_GAP)
22429 gap = MAX_GAP;
22430
22431- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22432+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22433 }
22434
22435 /*
22436 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22437 * does, but not when emulating X86_32
22438 */
22439-static unsigned long mmap_legacy_base(void)
22440+static unsigned long mmap_legacy_base(struct mm_struct *mm)
22441 {
22442- if (mmap_is_ia32())
22443+ if (mmap_is_ia32()) {
22444+
22445+#ifdef CONFIG_PAX_SEGMEXEC
22446+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22447+ return SEGMEXEC_TASK_UNMAPPED_BASE;
22448+ else
22449+#endif
22450+
22451 return TASK_UNMAPPED_BASE;
22452- else
22453+ } else
22454 return TASK_UNMAPPED_BASE + mmap_rnd();
22455 }
22456
22457@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22458 void arch_pick_mmap_layout(struct mm_struct *mm)
22459 {
22460 if (mmap_is_legacy()) {
22461- mm->mmap_base = mmap_legacy_base();
22462+ mm->mmap_base = mmap_legacy_base(mm);
22463+
22464+#ifdef CONFIG_PAX_RANDMMAP
22465+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22466+ mm->mmap_base += mm->delta_mmap;
22467+#endif
22468+
22469 mm->get_unmapped_area = arch_get_unmapped_area;
22470 mm->unmap_area = arch_unmap_area;
22471 } else {
22472- mm->mmap_base = mmap_base();
22473+ mm->mmap_base = mmap_base(mm);
22474+
22475+#ifdef CONFIG_PAX_RANDMMAP
22476+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22477+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22478+#endif
22479+
22480 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22481 mm->unmap_area = arch_unmap_area_topdown;
22482 }
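Putting the arch_pick_mmap_layout() changes above together, the resulting bases look roughly as follows (32-bit, MF_PAX_RANDMMAP set, SEGMEXEC off; a sketch derived from the hunk, not text from the patch):

/*
 *   legacy (bottom-up) layout:
 *       mm->mmap_base = TASK_UNMAPPED_BASE + mm->delta_mmap;
 *
 *   default (top-down) layout:
 *       mm->mmap_base = PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd())
 *                         - (mm->delta_mmap + mm->delta_stack);
 *
 * i.e. the PaX RANDMMAP deltas push the bottom-up base up and pull the
 * top-down base down, on top of the stock gap/mmap_rnd() randomisation,
 * where gap is the stack rlimit clamped to [MIN_GAP, MAX_GAP].
 */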
22483diff -urNp linux-2.6.32.45/arch/x86/mm/mmio-mod.c linux-2.6.32.45/arch/x86/mm/mmio-mod.c
22484--- linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22485+++ linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22486@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22487 break;
22488 default:
22489 {
22490- unsigned char *ip = (unsigned char *)instptr;
22491+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22492 my_trace->opcode = MMIO_UNKNOWN_OP;
22493 my_trace->width = 0;
22494 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22495@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22496 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22497 void __iomem *addr)
22498 {
22499- static atomic_t next_id;
22500+ static atomic_unchecked_t next_id;
22501 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22502 /* These are page-unaligned. */
22503 struct mmiotrace_map map = {
22504@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22505 .private = trace
22506 },
22507 .phys = offset,
22508- .id = atomic_inc_return(&next_id)
22509+ .id = atomic_inc_return_unchecked(&next_id)
22510 };
22511 map.map_id = trace->id;
22512
22513diff -urNp linux-2.6.32.45/arch/x86/mm/numa_32.c linux-2.6.32.45/arch/x86/mm/numa_32.c
22514--- linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22515+++ linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22516@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22517 }
22518 #endif
22519
22520-extern unsigned long find_max_low_pfn(void);
22521 extern unsigned long highend_pfn, highstart_pfn;
22522
22523 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22524diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr.c linux-2.6.32.45/arch/x86/mm/pageattr.c
22525--- linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22526+++ linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22527@@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22528 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22529 */
22530 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22531- pgprot_val(forbidden) |= _PAGE_NX;
22532+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22533
22534 /*
22535 * The kernel text needs to be executable for obvious reasons
22536 * Does not cover __inittext since that is gone later on. On
22537 * 64bit we do not enforce !NX on the low mapping
22538 */
22539- if (within(address, (unsigned long)_text, (unsigned long)_etext))
22540- pgprot_val(forbidden) |= _PAGE_NX;
22541+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22542+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22543
22544+#ifdef CONFIG_DEBUG_RODATA
22545 /*
22546 * The .rodata section needs to be read-only. Using the pfn
22547 * catches all aliases.
22548@@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22549 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22550 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22551 pgprot_val(forbidden) |= _PAGE_RW;
22552+#endif
22553+
22554+#ifdef CONFIG_PAX_KERNEXEC
22555+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22556+ pgprot_val(forbidden) |= _PAGE_RW;
22557+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22558+ }
22559+#endif
22560
22561 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22562
22563@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22564 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22565 {
22566 /* change init_mm */
22567+ pax_open_kernel();
22568 set_pte_atomic(kpte, pte);
22569+
22570 #ifdef CONFIG_X86_32
22571 if (!SHARED_KERNEL_PMD) {
22572+
22573+#ifdef CONFIG_PAX_PER_CPU_PGD
22574+ unsigned long cpu;
22575+#else
22576 struct page *page;
22577+#endif
22578
22579+#ifdef CONFIG_PAX_PER_CPU_PGD
22580+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22581+ pgd_t *pgd = get_cpu_pgd(cpu);
22582+#else
22583 list_for_each_entry(page, &pgd_list, lru) {
22584- pgd_t *pgd;
22585+ pgd_t *pgd = (pgd_t *)page_address(page);
22586+#endif
22587+
22588 pud_t *pud;
22589 pmd_t *pmd;
22590
22591- pgd = (pgd_t *)page_address(page) + pgd_index(address);
22592+ pgd += pgd_index(address);
22593 pud = pud_offset(pgd, address);
22594 pmd = pmd_offset(pud, address);
22595 set_pte_atomic((pte_t *)pmd, pte);
22596 }
22597 }
22598 #endif
22599+ pax_close_kernel();
22600 }
22601
22602 static int
22603diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr-test.c linux-2.6.32.45/arch/x86/mm/pageattr-test.c
22604--- linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22605+++ linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22606@@ -36,7 +36,7 @@ enum {
22607
22608 static int pte_testbit(pte_t pte)
22609 {
22610- return pte_flags(pte) & _PAGE_UNUSED1;
22611+ return pte_flags(pte) & _PAGE_CPA_TEST;
22612 }
22613
22614 struct split_state {
22615diff -urNp linux-2.6.32.45/arch/x86/mm/pat.c linux-2.6.32.45/arch/x86/mm/pat.c
22616--- linux-2.6.32.45/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22617+++ linux-2.6.32.45/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22618@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22619
22620 conflict:
22621 printk(KERN_INFO "%s:%d conflicting memory types "
22622- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22623+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22624 new->end, cattr_name(new->type), cattr_name(entry->type));
22625 return -EBUSY;
22626 }
22627@@ -559,7 +559,7 @@ unlock_ret:
22628
22629 if (err) {
22630 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22631- current->comm, current->pid, start, end);
22632+ current->comm, task_pid_nr(current), start, end);
22633 }
22634
22635 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22636@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22637 while (cursor < to) {
22638 if (!devmem_is_allowed(pfn)) {
22639 printk(KERN_INFO
22640- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22641- current->comm, from, to);
22642+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22643+ current->comm, from, to, cursor);
22644 return 0;
22645 }
22646 cursor += PAGE_SIZE;
22647@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22648 printk(KERN_INFO
22649 "%s:%d ioremap_change_attr failed %s "
22650 "for %Lx-%Lx\n",
22651- current->comm, current->pid,
22652+ current->comm, task_pid_nr(current),
22653 cattr_name(flags),
22654 base, (unsigned long long)(base + size));
22655 return -EINVAL;
22656@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22657 free_memtype(paddr, paddr + size);
22658 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22659 " for %Lx-%Lx, got %s\n",
22660- current->comm, current->pid,
22661+ current->comm, task_pid_nr(current),
22662 cattr_name(want_flags),
22663 (unsigned long long)paddr,
22664 (unsigned long long)(paddr + size),
22665diff -urNp linux-2.6.32.45/arch/x86/mm/pf_in.c linux-2.6.32.45/arch/x86/mm/pf_in.c
22666--- linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22667+++ linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22668@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22669 int i;
22670 enum reason_type rv = OTHERS;
22671
22672- p = (unsigned char *)ins_addr;
22673+ p = (unsigned char *)ktla_ktva(ins_addr);
22674 p += skip_prefix(p, &prf);
22675 p += get_opcode(p, &opcode);
22676
22677@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22678 struct prefix_bits prf;
22679 int i;
22680
22681- p = (unsigned char *)ins_addr;
22682+ p = (unsigned char *)ktla_ktva(ins_addr);
22683 p += skip_prefix(p, &prf);
22684 p += get_opcode(p, &opcode);
22685
22686@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22687 struct prefix_bits prf;
22688 int i;
22689
22690- p = (unsigned char *)ins_addr;
22691+ p = (unsigned char *)ktla_ktva(ins_addr);
22692 p += skip_prefix(p, &prf);
22693 p += get_opcode(p, &opcode);
22694
22695@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22696 int i;
22697 unsigned long rv;
22698
22699- p = (unsigned char *)ins_addr;
22700+ p = (unsigned char *)ktla_ktva(ins_addr);
22701 p += skip_prefix(p, &prf);
22702 p += get_opcode(p, &opcode);
22703 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22704@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22705 int i;
22706 unsigned long rv;
22707
22708- p = (unsigned char *)ins_addr;
22709+ p = (unsigned char *)ktla_ktva(ins_addr);
22710 p += skip_prefix(p, &prf);
22711 p += get_opcode(p, &opcode);
22712 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22713diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable_32.c linux-2.6.32.45/arch/x86/mm/pgtable_32.c
22714--- linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22715+++ linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22716@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22717 return;
22718 }
22719 pte = pte_offset_kernel(pmd, vaddr);
22720+
22721+ pax_open_kernel();
22722 if (pte_val(pteval))
22723 set_pte_at(&init_mm, vaddr, pte, pteval);
22724 else
22725 pte_clear(&init_mm, vaddr, pte);
22726+ pax_close_kernel();
22727
22728 /*
22729 * It's enough to flush this one mapping.
22730diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable.c linux-2.6.32.45/arch/x86/mm/pgtable.c
22731--- linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22732+++ linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22733@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22734 list_del(&page->lru);
22735 }
22736
22737-#define UNSHARED_PTRS_PER_PGD \
22738- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22739+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22740+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22741
22742+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22743+{
22744+ while (count--)
22745+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22746+}
22747+#endif
22748+
22749+#ifdef CONFIG_PAX_PER_CPU_PGD
22750+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22751+{
22752+ while (count--)
22753+
22754+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22755+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22756+#else
22757+ *dst++ = *src++;
22758+#endif
22759+
22760+}
22761+#endif
22762+
22763+#ifdef CONFIG_X86_64
22764+#define pxd_t pud_t
22765+#define pyd_t pgd_t
22766+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22767+#define pxd_free(mm, pud) pud_free((mm), (pud))
22768+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22769+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22770+#define PYD_SIZE PGDIR_SIZE
22771+#else
22772+#define pxd_t pmd_t
22773+#define pyd_t pud_t
22774+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22775+#define pxd_free(mm, pud) pmd_free((mm), (pud))
22776+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22777+#define pyd_offset(mm ,address) pud_offset((mm), (address))
22778+#define PYD_SIZE PUD_SIZE
22779+#endif
22780+
22781+#ifdef CONFIG_PAX_PER_CPU_PGD
22782+static inline void pgd_ctor(pgd_t *pgd) {}
22783+static inline void pgd_dtor(pgd_t *pgd) {}
22784+#else
22785 static void pgd_ctor(pgd_t *pgd)
22786 {
22787 /* If the pgd points to a shared pagetable level (either the
22788@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22789 pgd_list_del(pgd);
22790 spin_unlock_irqrestore(&pgd_lock, flags);
22791 }
22792+#endif
22793
22794 /*
22795 * List of all pgd's needed for non-PAE so it can invalidate entries
22796@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22797 * -- wli
22798 */
22799
22800-#ifdef CONFIG_X86_PAE
22801+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22802 /*
22803 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22804 * updating the top-level pagetable entries to guarantee the
22805@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22806 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22807 * and initialize the kernel pmds here.
22808 */
22809-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22810+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22811
22812 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22813 {
22814@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22815 */
22816 flush_tlb_mm(mm);
22817 }
22818+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22819+#define PREALLOCATED_PXDS USER_PGD_PTRS
22820 #else /* !CONFIG_X86_PAE */
22821
22822 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22823-#define PREALLOCATED_PMDS 0
22824+#define PREALLOCATED_PXDS 0
22825
22826 #endif /* CONFIG_X86_PAE */
22827
22828-static void free_pmds(pmd_t *pmds[])
22829+static void free_pxds(pxd_t *pxds[])
22830 {
22831 int i;
22832
22833- for(i = 0; i < PREALLOCATED_PMDS; i++)
22834- if (pmds[i])
22835- free_page((unsigned long)pmds[i]);
22836+ for(i = 0; i < PREALLOCATED_PXDS; i++)
22837+ if (pxds[i])
22838+ free_page((unsigned long)pxds[i]);
22839 }
22840
22841-static int preallocate_pmds(pmd_t *pmds[])
22842+static int preallocate_pxds(pxd_t *pxds[])
22843 {
22844 int i;
22845 bool failed = false;
22846
22847- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22848- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22849- if (pmd == NULL)
22850+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22851+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22852+ if (pxd == NULL)
22853 failed = true;
22854- pmds[i] = pmd;
22855+ pxds[i] = pxd;
22856 }
22857
22858 if (failed) {
22859- free_pmds(pmds);
22860+ free_pxds(pxds);
22861 return -ENOMEM;
22862 }
22863
22864@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22865 * preallocate which never got a corresponding vma will need to be
22866 * freed manually.
22867 */
22868-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22869+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22870 {
22871 int i;
22872
22873- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22874+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22875 pgd_t pgd = pgdp[i];
22876
22877 if (pgd_val(pgd) != 0) {
22878- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22879+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22880
22881- pgdp[i] = native_make_pgd(0);
22882+ set_pgd(pgdp + i, native_make_pgd(0));
22883
22884- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22885- pmd_free(mm, pmd);
22886+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22887+ pxd_free(mm, pxd);
22888 }
22889 }
22890 }
22891
22892-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22893+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22894 {
22895- pud_t *pud;
22896+ pyd_t *pyd;
22897 unsigned long addr;
22898 int i;
22899
22900- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22901+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22902 return;
22903
22904- pud = pud_offset(pgd, 0);
22905+#ifdef CONFIG_X86_64
22906+ pyd = pyd_offset(mm, 0L);
22907+#else
22908+ pyd = pyd_offset(pgd, 0L);
22909+#endif
22910
22911- for (addr = i = 0; i < PREALLOCATED_PMDS;
22912- i++, pud++, addr += PUD_SIZE) {
22913- pmd_t *pmd = pmds[i];
22914+ for (addr = i = 0; i < PREALLOCATED_PXDS;
22915+ i++, pyd++, addr += PYD_SIZE) {
22916+ pxd_t *pxd = pxds[i];
22917
22918 if (i >= KERNEL_PGD_BOUNDARY)
22919- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22920- sizeof(pmd_t) * PTRS_PER_PMD);
22921+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22922+ sizeof(pxd_t) * PTRS_PER_PMD);
22923
22924- pud_populate(mm, pud, pmd);
22925+ pyd_populate(mm, pyd, pxd);
22926 }
22927 }
22928
22929 pgd_t *pgd_alloc(struct mm_struct *mm)
22930 {
22931 pgd_t *pgd;
22932- pmd_t *pmds[PREALLOCATED_PMDS];
22933+ pxd_t *pxds[PREALLOCATED_PXDS];
22934+
22935 unsigned long flags;
22936
22937 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22938@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22939
22940 mm->pgd = pgd;
22941
22942- if (preallocate_pmds(pmds) != 0)
22943+ if (preallocate_pxds(pxds) != 0)
22944 goto out_free_pgd;
22945
22946 if (paravirt_pgd_alloc(mm) != 0)
22947- goto out_free_pmds;
22948+ goto out_free_pxds;
22949
22950 /*
22951 * Make sure that pre-populating the pmds is atomic with
22952@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22953 spin_lock_irqsave(&pgd_lock, flags);
22954
22955 pgd_ctor(pgd);
22956- pgd_prepopulate_pmd(mm, pgd, pmds);
22957+ pgd_prepopulate_pxd(mm, pgd, pxds);
22958
22959 spin_unlock_irqrestore(&pgd_lock, flags);
22960
22961 return pgd;
22962
22963-out_free_pmds:
22964- free_pmds(pmds);
22965+out_free_pxds:
22966+ free_pxds(pxds);
22967 out_free_pgd:
22968 free_page((unsigned long)pgd);
22969 out:
22970@@ -287,7 +338,7 @@ out:
22971
22972 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22973 {
22974- pgd_mop_up_pmds(mm, pgd);
22975+ pgd_mop_up_pxds(mm, pgd);
22976 pgd_dtor(pgd);
22977 paravirt_pgd_free(mm, pgd);
22978 free_page((unsigned long)pgd);
22979diff -urNp linux-2.6.32.45/arch/x86/mm/setup_nx.c linux-2.6.32.45/arch/x86/mm/setup_nx.c
22980--- linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22981+++ linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22982@@ -4,11 +4,10 @@
22983
22984 #include <asm/pgtable.h>
22985
22986+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22987 int nx_enabled;
22988
22989-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22990-static int disable_nx __cpuinitdata;
22991-
22992+#ifndef CONFIG_PAX_PAGEEXEC
22993 /*
22994 * noexec = on|off
22995 *
22996@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22997 if (!str)
22998 return -EINVAL;
22999 if (!strncmp(str, "on", 2)) {
23000- __supported_pte_mask |= _PAGE_NX;
23001- disable_nx = 0;
23002+ nx_enabled = 1;
23003 } else if (!strncmp(str, "off", 3)) {
23004- disable_nx = 1;
23005- __supported_pte_mask &= ~_PAGE_NX;
23006+ nx_enabled = 0;
23007 }
23008 return 0;
23009 }
23010 early_param("noexec", noexec_setup);
23011 #endif
23012+#endif
23013
23014 #ifdef CONFIG_X86_PAE
23015 void __init set_nx(void)
23016 {
23017- unsigned int v[4], l, h;
23018+ if (!nx_enabled && cpu_has_nx) {
23019+ unsigned l, h;
23020
23021- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
23022- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
23023-
23024- if ((v[3] & (1 << 20)) && !disable_nx) {
23025- rdmsr(MSR_EFER, l, h);
23026- l |= EFER_NX;
23027- wrmsr(MSR_EFER, l, h);
23028- nx_enabled = 1;
23029- __supported_pte_mask |= _PAGE_NX;
23030- }
23031+ __supported_pte_mask &= ~_PAGE_NX;
23032+ rdmsr(MSR_EFER, l, h);
23033+ l &= ~EFER_NX;
23034+ wrmsr(MSR_EFER, l, h);
23035 }
23036 }
23037 #else
23038@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
23039 unsigned long efer;
23040
23041 rdmsrl(MSR_EFER, efer);
23042- if (!(efer & EFER_NX) || disable_nx)
23043+ if (!(efer & EFER_NX) || !nx_enabled)
23044 __supported_pte_mask &= ~_PAGE_NX;
23045 }
23046 #endif
23047diff -urNp linux-2.6.32.45/arch/x86/mm/tlb.c linux-2.6.32.45/arch/x86/mm/tlb.c
23048--- linux-2.6.32.45/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
23049+++ linux-2.6.32.45/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
23050@@ -61,7 +61,11 @@ void leave_mm(int cpu)
23051 BUG();
23052 cpumask_clear_cpu(cpu,
23053 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23054+
23055+#ifndef CONFIG_PAX_PER_CPU_PGD
23056 load_cr3(swapper_pg_dir);
23057+#endif
23058+
23059 }
23060 EXPORT_SYMBOL_GPL(leave_mm);
23061
23062diff -urNp linux-2.6.32.45/arch/x86/oprofile/backtrace.c linux-2.6.32.45/arch/x86/oprofile/backtrace.c
23063--- linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
23064+++ linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
23065@@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
23066 struct frame_head bufhead[2];
23067
23068 /* Also check accessibility of one struct frame_head beyond */
23069- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
23070+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
23071 return NULL;
23072 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
23073 return NULL;
23074@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
23075 {
23076 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
23077
23078- if (!user_mode_vm(regs)) {
23079+ if (!user_mode(regs)) {
23080 unsigned long stack = kernel_stack_pointer(regs);
23081 if (depth)
23082 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23083diff -urNp linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c
23084--- linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
23085+++ linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
23086@@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
23087 #endif
23088 }
23089
23090-static int inline addr_increment(void)
23091+static inline int addr_increment(void)
23092 {
23093 #ifdef CONFIG_SMP
23094 return smp_num_siblings == 2 ? 2 : 1;
23095diff -urNp linux-2.6.32.45/arch/x86/pci/common.c linux-2.6.32.45/arch/x86/pci/common.c
23096--- linux-2.6.32.45/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
23097+++ linux-2.6.32.45/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
23098@@ -31,8 +31,8 @@ int noioapicreroute = 1;
23099 int pcibios_last_bus = -1;
23100 unsigned long pirq_table_addr;
23101 struct pci_bus *pci_root_bus;
23102-struct pci_raw_ops *raw_pci_ops;
23103-struct pci_raw_ops *raw_pci_ext_ops;
23104+const struct pci_raw_ops *raw_pci_ops;
23105+const struct pci_raw_ops *raw_pci_ext_ops;
23106
23107 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
23108 int reg, int len, u32 *val)
23109diff -urNp linux-2.6.32.45/arch/x86/pci/direct.c linux-2.6.32.45/arch/x86/pci/direct.c
23110--- linux-2.6.32.45/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
23111+++ linux-2.6.32.45/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
23112@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
23113
23114 #undef PCI_CONF1_ADDRESS
23115
23116-struct pci_raw_ops pci_direct_conf1 = {
23117+const struct pci_raw_ops pci_direct_conf1 = {
23118 .read = pci_conf1_read,
23119 .write = pci_conf1_write,
23120 };
23121@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
23122
23123 #undef PCI_CONF2_ADDRESS
23124
23125-struct pci_raw_ops pci_direct_conf2 = {
23126+const struct pci_raw_ops pci_direct_conf2 = {
23127 .read = pci_conf2_read,
23128 .write = pci_conf2_write,
23129 };
23130@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
23131 * This should be close to trivial, but it isn't, because there are buggy
23132 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
23133 */
23134-static int __init pci_sanity_check(struct pci_raw_ops *o)
23135+static int __init pci_sanity_check(const struct pci_raw_ops *o)
23136 {
23137 u32 x = 0;
23138 int year, devfn;
23139diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_32.c linux-2.6.32.45/arch/x86/pci/mmconfig_32.c
23140--- linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
23141+++ linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
23142@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
23143 return 0;
23144 }
23145
23146-static struct pci_raw_ops pci_mmcfg = {
23147+static const struct pci_raw_ops pci_mmcfg = {
23148 .read = pci_mmcfg_read,
23149 .write = pci_mmcfg_write,
23150 };
23151diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_64.c linux-2.6.32.45/arch/x86/pci/mmconfig_64.c
23152--- linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
23153+++ linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
23154@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
23155 return 0;
23156 }
23157
23158-static struct pci_raw_ops pci_mmcfg = {
23159+static const struct pci_raw_ops pci_mmcfg = {
23160 .read = pci_mmcfg_read,
23161 .write = pci_mmcfg_write,
23162 };
23163diff -urNp linux-2.6.32.45/arch/x86/pci/numaq_32.c linux-2.6.32.45/arch/x86/pci/numaq_32.c
23164--- linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
23165+++ linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
23166@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
23167
23168 #undef PCI_CONF1_MQ_ADDRESS
23169
23170-static struct pci_raw_ops pci_direct_conf1_mq = {
23171+static const struct pci_raw_ops pci_direct_conf1_mq = {
23172 .read = pci_conf1_mq_read,
23173 .write = pci_conf1_mq_write
23174 };
23175diff -urNp linux-2.6.32.45/arch/x86/pci/olpc.c linux-2.6.32.45/arch/x86/pci/olpc.c
23176--- linux-2.6.32.45/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
23177+++ linux-2.6.32.45/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
23178@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
23179 return 0;
23180 }
23181
23182-static struct pci_raw_ops pci_olpc_conf = {
23183+static const struct pci_raw_ops pci_olpc_conf = {
23184 .read = pci_olpc_read,
23185 .write = pci_olpc_write,
23186 };
23187diff -urNp linux-2.6.32.45/arch/x86/pci/pcbios.c linux-2.6.32.45/arch/x86/pci/pcbios.c
23188--- linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
23189+++ linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
23190@@ -56,50 +56,93 @@ union bios32 {
23191 static struct {
23192 unsigned long address;
23193 unsigned short segment;
23194-} bios32_indirect = { 0, __KERNEL_CS };
23195+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23196
23197 /*
23198 * Returns the entry point for the given service, NULL on error
23199 */
23200
23201-static unsigned long bios32_service(unsigned long service)
23202+static unsigned long __devinit bios32_service(unsigned long service)
23203 {
23204 unsigned char return_code; /* %al */
23205 unsigned long address; /* %ebx */
23206 unsigned long length; /* %ecx */
23207 unsigned long entry; /* %edx */
23208 unsigned long flags;
23209+ struct desc_struct d, *gdt;
23210
23211 local_irq_save(flags);
23212- __asm__("lcall *(%%edi); cld"
23213+
23214+ gdt = get_cpu_gdt_table(smp_processor_id());
23215+
23216+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23217+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23218+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23219+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23220+
23221+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23222 : "=a" (return_code),
23223 "=b" (address),
23224 "=c" (length),
23225 "=d" (entry)
23226 : "0" (service),
23227 "1" (0),
23228- "D" (&bios32_indirect));
23229+ "D" (&bios32_indirect),
23230+ "r"(__PCIBIOS_DS)
23231+ : "memory");
23232+
23233+ pax_open_kernel();
23234+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23235+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23236+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23237+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23238+ pax_close_kernel();
23239+
23240 local_irq_restore(flags);
23241
23242 switch (return_code) {
23243- case 0:
23244- return address + entry;
23245- case 0x80: /* Not present */
23246- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23247- return 0;
23248- default: /* Shouldn't happen */
23249- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23250- service, return_code);
23251+ case 0: {
23252+ int cpu;
23253+ unsigned char flags;
23254+
23255+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23256+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23257+ printk(KERN_WARNING "bios32_service: not valid\n");
23258 return 0;
23259+ }
23260+ address = address + PAGE_OFFSET;
23261+ length += 16UL; /* some BIOSs underreport this... */
23262+ flags = 4;
23263+ if (length >= 64*1024*1024) {
23264+ length >>= PAGE_SHIFT;
23265+ flags |= 8;
23266+ }
23267+
23268+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23269+ gdt = get_cpu_gdt_table(cpu);
23270+ pack_descriptor(&d, address, length, 0x9b, flags);
23271+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23272+ pack_descriptor(&d, address, length, 0x93, flags);
23273+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23274+ }
23275+ return entry;
23276+ }
23277+ case 0x80: /* Not present */
23278+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23279+ return 0;
23280+ default: /* Shouldn't happen */
23281+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23282+ service, return_code);
23283+ return 0;
23284 }
23285 }
23286
23287 static struct {
23288 unsigned long address;
23289 unsigned short segment;
23290-} pci_indirect = { 0, __KERNEL_CS };
23291+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23292
23293-static int pci_bios_present;
23294+static int pci_bios_present __read_only;
23295
23296 static int __devinit check_pcibios(void)
23297 {
23298@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
23299 unsigned long flags, pcibios_entry;
23300
23301 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23302- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23303+ pci_indirect.address = pcibios_entry;
23304
23305 local_irq_save(flags);
23306- __asm__(
23307- "lcall *(%%edi); cld\n\t"
23308+ __asm__("movw %w6, %%ds\n\t"
23309+ "lcall *%%ss:(%%edi); cld\n\t"
23310+ "push %%ss\n\t"
23311+ "pop %%ds\n\t"
23312 "jc 1f\n\t"
23313 "xor %%ah, %%ah\n"
23314 "1:"
23315@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
23316 "=b" (ebx),
23317 "=c" (ecx)
23318 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23319- "D" (&pci_indirect)
23320+ "D" (&pci_indirect),
23321+ "r" (__PCIBIOS_DS)
23322 : "memory");
23323 local_irq_restore(flags);
23324
23325@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
23326
23327 switch (len) {
23328 case 1:
23329- __asm__("lcall *(%%esi); cld\n\t"
23330+ __asm__("movw %w6, %%ds\n\t"
23331+ "lcall *%%ss:(%%esi); cld\n\t"
23332+ "push %%ss\n\t"
23333+ "pop %%ds\n\t"
23334 "jc 1f\n\t"
23335 "xor %%ah, %%ah\n"
23336 "1:"
23337@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
23338 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23339 "b" (bx),
23340 "D" ((long)reg),
23341- "S" (&pci_indirect));
23342+ "S" (&pci_indirect),
23343+ "r" (__PCIBIOS_DS));
23344 /*
23345 * Zero-extend the result beyond 8 bits, do not trust the
23346 * BIOS having done it:
23347@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
23348 *value &= 0xff;
23349 break;
23350 case 2:
23351- __asm__("lcall *(%%esi); cld\n\t"
23352+ __asm__("movw %w6, %%ds\n\t"
23353+ "lcall *%%ss:(%%esi); cld\n\t"
23354+ "push %%ss\n\t"
23355+ "pop %%ds\n\t"
23356 "jc 1f\n\t"
23357 "xor %%ah, %%ah\n"
23358 "1:"
23359@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
23360 : "1" (PCIBIOS_READ_CONFIG_WORD),
23361 "b" (bx),
23362 "D" ((long)reg),
23363- "S" (&pci_indirect));
23364+ "S" (&pci_indirect),
23365+ "r" (__PCIBIOS_DS));
23366 /*
23367 * Zero-extend the result beyond 16 bits, do not trust the
23368 * BIOS having done it:
23369@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
23370 *value &= 0xffff;
23371 break;
23372 case 4:
23373- __asm__("lcall *(%%esi); cld\n\t"
23374+ __asm__("movw %w6, %%ds\n\t"
23375+ "lcall *%%ss:(%%esi); cld\n\t"
23376+ "push %%ss\n\t"
23377+ "pop %%ds\n\t"
23378 "jc 1f\n\t"
23379 "xor %%ah, %%ah\n"
23380 "1:"
23381@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
23382 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23383 "b" (bx),
23384 "D" ((long)reg),
23385- "S" (&pci_indirect));
23386+ "S" (&pci_indirect),
23387+ "r" (__PCIBIOS_DS));
23388 break;
23389 }
23390
23391@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
23392
23393 switch (len) {
23394 case 1:
23395- __asm__("lcall *(%%esi); cld\n\t"
23396+ __asm__("movw %w6, %%ds\n\t"
23397+ "lcall *%%ss:(%%esi); cld\n\t"
23398+ "push %%ss\n\t"
23399+ "pop %%ds\n\t"
23400 "jc 1f\n\t"
23401 "xor %%ah, %%ah\n"
23402 "1:"
23403@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
23404 "c" (value),
23405 "b" (bx),
23406 "D" ((long)reg),
23407- "S" (&pci_indirect));
23408+ "S" (&pci_indirect),
23409+ "r" (__PCIBIOS_DS));
23410 break;
23411 case 2:
23412- __asm__("lcall *(%%esi); cld\n\t"
23413+ __asm__("movw %w6, %%ds\n\t"
23414+ "lcall *%%ss:(%%esi); cld\n\t"
23415+ "push %%ss\n\t"
23416+ "pop %%ds\n\t"
23417 "jc 1f\n\t"
23418 "xor %%ah, %%ah\n"
23419 "1:"
23420@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
23421 "c" (value),
23422 "b" (bx),
23423 "D" ((long)reg),
23424- "S" (&pci_indirect));
23425+ "S" (&pci_indirect),
23426+ "r" (__PCIBIOS_DS));
23427 break;
23428 case 4:
23429- __asm__("lcall *(%%esi); cld\n\t"
23430+ __asm__("movw %w6, %%ds\n\t"
23431+ "lcall *%%ss:(%%esi); cld\n\t"
23432+ "push %%ss\n\t"
23433+ "pop %%ds\n\t"
23434 "jc 1f\n\t"
23435 "xor %%ah, %%ah\n"
23436 "1:"
23437@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
23438 "c" (value),
23439 "b" (bx),
23440 "D" ((long)reg),
23441- "S" (&pci_indirect));
23442+ "S" (&pci_indirect),
23443+ "r" (__PCIBIOS_DS));
23444 break;
23445 }
23446
23447@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
23448 * Function table for BIOS32 access
23449 */
23450
23451-static struct pci_raw_ops pci_bios_access = {
23452+static const struct pci_raw_ops pci_bios_access = {
23453 .read = pci_bios_read,
23454 .write = pci_bios_write
23455 };
23456@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23457 * Try to find PCI BIOS.
23458 */
23459
23460-static struct pci_raw_ops * __devinit pci_find_bios(void)
23461+static const struct pci_raw_ops * __devinit pci_find_bios(void)
23462 {
23463 union bios32 *check;
23464 unsigned char sum;
23465@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23466
23467 DBG("PCI: Fetching IRQ routing table... ");
23468 __asm__("push %%es\n\t"
23469+ "movw %w8, %%ds\n\t"
23470 "push %%ds\n\t"
23471 "pop %%es\n\t"
23472- "lcall *(%%esi); cld\n\t"
23473+ "lcall *%%ss:(%%esi); cld\n\t"
23474 "pop %%es\n\t"
23475+ "push %%ss\n\t"
23476+ "pop %%ds\n"
23477 "jc 1f\n\t"
23478 "xor %%ah, %%ah\n"
23479 "1:"
23480@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23481 "1" (0),
23482 "D" ((long) &opt),
23483 "S" (&pci_indirect),
23484- "m" (opt)
23485+ "m" (opt),
23486+ "r" (__PCIBIOS_DS)
23487 : "memory");
23488 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23489 if (ret & 0xff00)
23490@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23491 {
23492 int ret;
23493
23494- __asm__("lcall *(%%esi); cld\n\t"
23495+ __asm__("movw %w5, %%ds\n\t"
23496+ "lcall *%%ss:(%%esi); cld\n\t"
23497+ "push %%ss\n\t"
23498+ "pop %%ds\n"
23499 "jc 1f\n\t"
23500 "xor %%ah, %%ah\n"
23501 "1:"
23502@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23503 : "0" (PCIBIOS_SET_PCI_HW_INT),
23504 "b" ((dev->bus->number << 8) | dev->devfn),
23505 "c" ((irq << 8) | (pin + 10)),
23506- "S" (&pci_indirect));
23507+ "S" (&pci_indirect),
23508+ "r" (__PCIBIOS_DS));
23509 return !(ret & 0xff00);
23510 }
23511 EXPORT_SYMBOL(pcibios_set_irq_routing);
23512diff -urNp linux-2.6.32.45/arch/x86/power/cpu.c linux-2.6.32.45/arch/x86/power/cpu.c
23513--- linux-2.6.32.45/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23514+++ linux-2.6.32.45/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23515@@ -129,7 +129,7 @@ static void do_fpu_end(void)
23516 static void fix_processor_context(void)
23517 {
23518 int cpu = smp_processor_id();
23519- struct tss_struct *t = &per_cpu(init_tss, cpu);
23520+ struct tss_struct *t = init_tss + cpu;
23521
23522 set_tss_desc(cpu, t); /*
23523 * This just modifies memory; should not be
23524@@ -139,7 +139,9 @@ static void fix_processor_context(void)
23525 */
23526
23527 #ifdef CONFIG_X86_64
23528+ pax_open_kernel();
23529 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23530+ pax_close_kernel();
23531
23532 syscall_init(); /* This sets MSR_*STAR and related */
23533 #endif
23534diff -urNp linux-2.6.32.45/arch/x86/vdso/Makefile linux-2.6.32.45/arch/x86/vdso/Makefile
23535--- linux-2.6.32.45/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23536+++ linux-2.6.32.45/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23537@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23538 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23539 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23540
23541-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23542+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23543 GCOV_PROFILE := n
23544
23545 #
23546diff -urNp linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c
23547--- linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23548+++ linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23549@@ -22,24 +22,48 @@
23550 #include <asm/hpet.h>
23551 #include <asm/unistd.h>
23552 #include <asm/io.h>
23553+#include <asm/fixmap.h>
23554 #include "vextern.h"
23555
23556 #define gtod vdso_vsyscall_gtod_data
23557
23558+notrace noinline long __vdso_fallback_time(long *t)
23559+{
23560+ long secs;
23561+ asm volatile("syscall"
23562+ : "=a" (secs)
23563+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23564+ return secs;
23565+}
23566+
23567 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23568 {
23569 long ret;
23570 asm("syscall" : "=a" (ret) :
23571- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23572+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23573 return ret;
23574 }
23575
23576+notrace static inline cycle_t __vdso_vread_hpet(void)
23577+{
23578+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23579+}
23580+
23581+notrace static inline cycle_t __vdso_vread_tsc(void)
23582+{
23583+ cycle_t ret = (cycle_t)vget_cycles();
23584+
23585+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23586+}
23587+
23588 notrace static inline long vgetns(void)
23589 {
23590 long v;
23591- cycles_t (*vread)(void);
23592- vread = gtod->clock.vread;
23593- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23594+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23595+ v = __vdso_vread_tsc();
23596+ else
23597+ v = __vdso_vread_hpet();
23598+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23599 return (v * gtod->clock.mult) >> gtod->clock.shift;
23600 }
23601
23602@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23603
23604 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23605 {
23606- if (likely(gtod->sysctl_enabled))
23607+ if (likely(gtod->sysctl_enabled &&
23608+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23609+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23610 switch (clock) {
23611 case CLOCK_REALTIME:
23612 if (likely(gtod->clock.vread))
23613@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23614 int clock_gettime(clockid_t, struct timespec *)
23615 __attribute__((weak, alias("__vdso_clock_gettime")));
23616
23617-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23618+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23619 {
23620 long ret;
23621- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23622+ asm("syscall" : "=a" (ret) :
23623+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23624+ return ret;
23625+}
23626+
23627+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23628+{
23629+ if (likely(gtod->sysctl_enabled &&
23630+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23631+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23632+ {
23633 if (likely(tv != NULL)) {
23634 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23635 offsetof(struct timespec, tv_nsec) ||
23636@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23637 }
23638 return 0;
23639 }
23640- asm("syscall" : "=a" (ret) :
23641- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23642- return ret;
23643+ return __vdso_fallback_gettimeofday(tv, tz);
23644 }
23645 int gettimeofday(struct timeval *, struct timezone *)
23646 __attribute__((weak, alias("__vdso_gettimeofday")));
23647diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c
23648--- linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23649+++ linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23650@@ -25,6 +25,7 @@
23651 #include <asm/tlbflush.h>
23652 #include <asm/vdso.h>
23653 #include <asm/proto.h>
23654+#include <asm/mman.h>
23655
23656 enum {
23657 VDSO_DISABLED = 0,
23658@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23659 void enable_sep_cpu(void)
23660 {
23661 int cpu = get_cpu();
23662- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23663+ struct tss_struct *tss = init_tss + cpu;
23664
23665 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23666 put_cpu();
23667@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23668 gate_vma.vm_start = FIXADDR_USER_START;
23669 gate_vma.vm_end = FIXADDR_USER_END;
23670 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23671- gate_vma.vm_page_prot = __P101;
23672+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23673 /*
23674 * Make sure the vDSO gets into every core dump.
23675 * Dumping its contents makes post-mortem fully interpretable later
23676@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23677 if (compat)
23678 addr = VDSO_HIGH_BASE;
23679 else {
23680- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23681+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23682 if (IS_ERR_VALUE(addr)) {
23683 ret = addr;
23684 goto up_fail;
23685 }
23686 }
23687
23688- current->mm->context.vdso = (void *)addr;
23689+ current->mm->context.vdso = addr;
23690
23691 if (compat_uses_vma || !compat) {
23692 /*
23693@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23694 }
23695
23696 current_thread_info()->sysenter_return =
23697- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23698+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23699
23700 up_fail:
23701 if (ret)
23702- current->mm->context.vdso = NULL;
23703+ current->mm->context.vdso = 0;
23704
23705 up_write(&mm->mmap_sem);
23706
23707@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23708
23709 const char *arch_vma_name(struct vm_area_struct *vma)
23710 {
23711- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23712+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23713 return "[vdso]";
23714+
23715+#ifdef CONFIG_PAX_SEGMEXEC
23716+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23717+ return "[vdso]";
23718+#endif
23719+
23720 return NULL;
23721 }
23722
23723@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23724 struct mm_struct *mm = tsk->mm;
23725
23726 /* Check to see if this task was created in compat vdso mode */
23727- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23728+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23729 return &gate_vma;
23730 return NULL;
23731 }
23732diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso.lds.S linux-2.6.32.45/arch/x86/vdso/vdso.lds.S
23733--- linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23734+++ linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23735@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23736 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23737 #include "vextern.h"
23738 #undef VEXTERN
23739+
23740+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23741+VEXTERN(fallback_gettimeofday)
23742+VEXTERN(fallback_time)
23743+VEXTERN(getcpu)
23744+#undef VEXTERN
23745diff -urNp linux-2.6.32.45/arch/x86/vdso/vextern.h linux-2.6.32.45/arch/x86/vdso/vextern.h
23746--- linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23747+++ linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23748@@ -11,6 +11,5 @@
23749 put into vextern.h and be referenced as a pointer with vdso prefix.
23750 The main kernel later fills in the values. */
23751
23752-VEXTERN(jiffies)
23753 VEXTERN(vgetcpu_mode)
23754 VEXTERN(vsyscall_gtod_data)
23755diff -urNp linux-2.6.32.45/arch/x86/vdso/vma.c linux-2.6.32.45/arch/x86/vdso/vma.c
23756--- linux-2.6.32.45/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23757+++ linux-2.6.32.45/arch/x86/vdso/vma.c 2011-08-23 20:24:19.000000000 -0400
23758@@ -17,8 +17,6 @@
23759 #include "vextern.h" /* Just for VMAGIC. */
23760 #undef VEXTERN
23761
23762-unsigned int __read_mostly vdso_enabled = 1;
23763-
23764 extern char vdso_start[], vdso_end[];
23765 extern unsigned short vdso_sync_cpuid;
23766
23767@@ -27,10 +25,8 @@ static unsigned vdso_size;
23768
23769 static inline void *var_ref(void *p, char *name)
23770 {
23771- if (*(void **)p != (void *)VMAGIC) {
23772- printk("VDSO: variable %s broken\n", name);
23773- vdso_enabled = 0;
23774- }
23775+ if (*(void **)p != (void *)VMAGIC)
23776+ panic("VDSO: variable %s broken\n", name);
23777 return p;
23778 }
23779
23780@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
23781 if (!vbase)
23782 goto oom;
23783
23784- if (memcmp(vbase, "\177ELF", 4)) {
23785- printk("VDSO: I'm broken; not ELF\n");
23786- vdso_enabled = 0;
23787- }
23788+ if (memcmp(vbase, ELFMAG, SELFMAG))
23789+ panic("VDSO: I'm broken; not ELF\n");
23790
23791 #define VEXTERN(x) \
23792 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23793 #include "vextern.h"
23794 #undef VEXTERN
23795+ vunmap(vbase);
23796 return 0;
23797
23798 oom:
23799- printk("Cannot allocate vdso\n");
23800- vdso_enabled = 0;
23801- return -ENOMEM;
23802+ panic("Cannot allocate vdso\n");
23803 }
23804 __initcall(init_vdso_vars);
23805
23806@@ -105,9 +98,6 @@ int arch_setup_additional_pages(struct l
23807 unsigned long addr;
23808 int ret;
23809
23810- if (!vdso_enabled)
23811- return 0;
23812-
23813 down_write(&mm->mmap_sem);
23814 addr = vdso_addr(mm->start_stack, vdso_size);
23815 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
23816@@ -116,7 +106,7 @@ int arch_setup_additional_pages(struct l
23817 goto up_fail;
23818 }
23819
23820- current->mm->context.vdso = (void *)addr;
23821+ current->mm->context.vdso = addr;
23822
23823 ret = install_special_mapping(mm, addr, vdso_size,
23824 VM_READ|VM_EXEC|
23825@@ -124,7 +114,7 @@ int arch_setup_additional_pages(struct l
23826 VM_ALWAYSDUMP,
23827 vdso_pages);
23828 if (ret) {
23829- current->mm->context.vdso = NULL;
23830+ current->mm->context.vdso = 0;
23831 goto up_fail;
23832 }
23833
23834@@ -132,10 +122,3 @@ up_fail:
23835 up_write(&mm->mmap_sem);
23836 return ret;
23837 }
23838-
23839-static __init int vdso_setup(char *s)
23840-{
23841- vdso_enabled = simple_strtoul(s, NULL, 0);
23842- return 0;
23843-}
23844-__setup("vdso=", vdso_setup);
23845diff -urNp linux-2.6.32.45/arch/x86/xen/enlighten.c linux-2.6.32.45/arch/x86/xen/enlighten.c
23846--- linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23847+++ linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23848@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23849
23850 struct shared_info xen_dummy_shared_info;
23851
23852-void *xen_initial_gdt;
23853-
23854 /*
23855 * Point at some empty memory to start with. We map the real shared_info
23856 * page as soon as fixmap is up and running.
23857@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23858
23859 preempt_disable();
23860
23861- start = __get_cpu_var(idt_desc).address;
23862+ start = (unsigned long)__get_cpu_var(idt_desc).address;
23863 end = start + __get_cpu_var(idt_desc).size + 1;
23864
23865 xen_mc_flush();
23866@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23867 #endif
23868 };
23869
23870-static void xen_reboot(int reason)
23871+static __noreturn void xen_reboot(int reason)
23872 {
23873 struct sched_shutdown r = { .reason = reason };
23874
23875@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23876 BUG();
23877 }
23878
23879-static void xen_restart(char *msg)
23880+static __noreturn void xen_restart(char *msg)
23881 {
23882 xen_reboot(SHUTDOWN_reboot);
23883 }
23884
23885-static void xen_emergency_restart(void)
23886+static __noreturn void xen_emergency_restart(void)
23887 {
23888 xen_reboot(SHUTDOWN_reboot);
23889 }
23890
23891-static void xen_machine_halt(void)
23892+static __noreturn void xen_machine_halt(void)
23893 {
23894 xen_reboot(SHUTDOWN_poweroff);
23895 }
23896@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23897 */
23898 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23899
23900-#ifdef CONFIG_X86_64
23901 /* Work out if we support NX */
23902- check_efer();
23903+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23904+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23905+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23906+ unsigned l, h;
23907+
23908+#ifdef CONFIG_X86_PAE
23909+ nx_enabled = 1;
23910+#endif
23911+ __supported_pte_mask |= _PAGE_NX;
23912+ rdmsr(MSR_EFER, l, h);
23913+ l |= EFER_NX;
23914+ wrmsr(MSR_EFER, l, h);
23915+ }
23916 #endif
23917
23918 xen_setup_features();
23919@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23920
23921 machine_ops = xen_machine_ops;
23922
23923- /*
23924- * The only reliable way to retain the initial address of the
23925- * percpu gdt_page is to remember it here, so we can go and
23926- * mark it RW later, when the initial percpu area is freed.
23927- */
23928- xen_initial_gdt = &per_cpu(gdt_page, 0);
23929-
23930 xen_smp_init();
23931
23932 pgd = (pgd_t *)xen_start_info->pt_base;
23933diff -urNp linux-2.6.32.45/arch/x86/xen/mmu.c linux-2.6.32.45/arch/x86/xen/mmu.c
23934--- linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23935+++ linux-2.6.32.45/arch/x86/xen/mmu.c 2011-08-24 18:35:52.000000000 -0400
23936@@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23937 convert_pfn_mfn(init_level4_pgt);
23938 convert_pfn_mfn(level3_ident_pgt);
23939 convert_pfn_mfn(level3_kernel_pgt);
23940+ convert_pfn_mfn(level3_vmalloc_pgt);
23941+ convert_pfn_mfn(level3_vmemmap_pgt);
23942
23943 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23944 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23945@@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23946 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23947 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23948 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23949+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23950+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23951 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23952+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23953 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23954 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23955
23956@@ -1860,6 +1865,7 @@ static __init void xen_post_allocator_in
23957 pv_mmu_ops.set_pud = xen_set_pud;
23958 #if PAGETABLE_LEVELS == 4
23959 pv_mmu_ops.set_pgd = xen_set_pgd;
23960+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
23961 #endif
23962
23963 /* This will work as long as patching hasn't happened yet
23964@@ -1946,6 +1952,7 @@ static const struct pv_mmu_ops xen_mmu_o
23965 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
23966 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
23967 .set_pgd = xen_set_pgd_hyper,
23968+ .set_pgd_batched = xen_set_pgd_hyper,
23969
23970 .alloc_pud = xen_alloc_pmd_init,
23971 .release_pud = xen_release_pmd_init,
23972diff -urNp linux-2.6.32.45/arch/x86/xen/smp.c linux-2.6.32.45/arch/x86/xen/smp.c
23973--- linux-2.6.32.45/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23974+++ linux-2.6.32.45/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23975@@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23976 {
23977 BUG_ON(smp_processor_id() != 0);
23978 native_smp_prepare_boot_cpu();
23979-
23980- /* We've switched to the "real" per-cpu gdt, so make sure the
23981- old memory can be recycled */
23982- make_lowmem_page_readwrite(xen_initial_gdt);
23983-
23984 xen_setup_vcpu_info_placement();
23985 }
23986
23987@@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23988 gdt = get_cpu_gdt_table(cpu);
23989
23990 ctxt->flags = VGCF_IN_KERNEL;
23991- ctxt->user_regs.ds = __USER_DS;
23992- ctxt->user_regs.es = __USER_DS;
23993+ ctxt->user_regs.ds = __KERNEL_DS;
23994+ ctxt->user_regs.es = __KERNEL_DS;
23995 ctxt->user_regs.ss = __KERNEL_DS;
23996 #ifdef CONFIG_X86_32
23997 ctxt->user_regs.fs = __KERNEL_PERCPU;
23998- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23999+ savesegment(gs, ctxt->user_regs.gs);
24000 #else
24001 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24002 #endif
24003@@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
24004 int rc;
24005
24006 per_cpu(current_task, cpu) = idle;
24007+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
24008 #ifdef CONFIG_X86_32
24009 irq_ctx_init(cpu);
24010 #else
24011 clear_tsk_thread_flag(idle, TIF_FORK);
24012- per_cpu(kernel_stack, cpu) =
24013- (unsigned long)task_stack_page(idle) -
24014- KERNEL_STACK_OFFSET + THREAD_SIZE;
24015+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24016 #endif
24017 xen_setup_runstate_info(cpu);
24018 xen_setup_timer(cpu);
24019diff -urNp linux-2.6.32.45/arch/x86/xen/xen-asm_32.S linux-2.6.32.45/arch/x86/xen/xen-asm_32.S
24020--- linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
24021+++ linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
24022@@ -83,14 +83,14 @@ ENTRY(xen_iret)
24023 ESP_OFFSET=4 # bytes pushed onto stack
24024
24025 /*
24026- * Store vcpu_info pointer for easy access. Do it this way to
24027- * avoid having to reload %fs
24028+ * Store vcpu_info pointer for easy access.
24029 */
24030 #ifdef CONFIG_SMP
24031- GET_THREAD_INFO(%eax)
24032- movl TI_cpu(%eax), %eax
24033- movl __per_cpu_offset(,%eax,4), %eax
24034- mov per_cpu__xen_vcpu(%eax), %eax
24035+ push %fs
24036+ mov $(__KERNEL_PERCPU), %eax
24037+ mov %eax, %fs
24038+ mov PER_CPU_VAR(xen_vcpu), %eax
24039+ pop %fs
24040 #else
24041 movl per_cpu__xen_vcpu, %eax
24042 #endif
24043diff -urNp linux-2.6.32.45/arch/x86/xen/xen-head.S linux-2.6.32.45/arch/x86/xen/xen-head.S
24044--- linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
24045+++ linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
24046@@ -19,6 +19,17 @@ ENTRY(startup_xen)
24047 #ifdef CONFIG_X86_32
24048 mov %esi,xen_start_info
24049 mov $init_thread_union+THREAD_SIZE,%esp
24050+#ifdef CONFIG_SMP
24051+ movl $cpu_gdt_table,%edi
24052+ movl $__per_cpu_load,%eax
24053+ movw %ax,__KERNEL_PERCPU + 2(%edi)
24054+ rorl $16,%eax
24055+ movb %al,__KERNEL_PERCPU + 4(%edi)
24056+ movb %ah,__KERNEL_PERCPU + 7(%edi)
24057+ movl $__per_cpu_end - 1,%eax
24058+ subl $__per_cpu_start,%eax
24059+ movw %ax,__KERNEL_PERCPU + 0(%edi)
24060+#endif
24061 #else
24062 mov %rsi,xen_start_info
24063 mov $init_thread_union+THREAD_SIZE,%rsp
24064diff -urNp linux-2.6.32.45/arch/x86/xen/xen-ops.h linux-2.6.32.45/arch/x86/xen/xen-ops.h
24065--- linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
24066+++ linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
24067@@ -10,8 +10,6 @@
24068 extern const char xen_hypervisor_callback[];
24069 extern const char xen_failsafe_callback[];
24070
24071-extern void *xen_initial_gdt;
24072-
24073 struct trap_info;
24074 void xen_copy_trap_info(struct trap_info *traps);
24075
24076diff -urNp linux-2.6.32.45/block/blk-integrity.c linux-2.6.32.45/block/blk-integrity.c
24077--- linux-2.6.32.45/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
24078+++ linux-2.6.32.45/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
24079@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
24080 NULL,
24081 };
24082
24083-static struct sysfs_ops integrity_ops = {
24084+static const struct sysfs_ops integrity_ops = {
24085 .show = &integrity_attr_show,
24086 .store = &integrity_attr_store,
24087 };
24088diff -urNp linux-2.6.32.45/block/blk-iopoll.c linux-2.6.32.45/block/blk-iopoll.c
24089--- linux-2.6.32.45/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
24090+++ linux-2.6.32.45/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
24091@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
24092 }
24093 EXPORT_SYMBOL(blk_iopoll_complete);
24094
24095-static void blk_iopoll_softirq(struct softirq_action *h)
24096+static void blk_iopoll_softirq(void)
24097 {
24098 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24099 int rearm = 0, budget = blk_iopoll_budget;
24100diff -urNp linux-2.6.32.45/block/blk-map.c linux-2.6.32.45/block/blk-map.c
24101--- linux-2.6.32.45/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
24102+++ linux-2.6.32.45/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
24103@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
24104 * direct dma. else, set up kernel bounce buffers
24105 */
24106 uaddr = (unsigned long) ubuf;
24107- if (blk_rq_aligned(q, ubuf, len) && !map_data)
24108+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
24109 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
24110 else
24111 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
24112@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
24113 for (i = 0; i < iov_count; i++) {
24114 unsigned long uaddr = (unsigned long)iov[i].iov_base;
24115
24116+ if (!iov[i].iov_len)
24117+ return -EINVAL;
24118+
24119 if (uaddr & queue_dma_alignment(q)) {
24120 unaligned = 1;
24121 break;
24122 }
24123- if (!iov[i].iov_len)
24124- return -EINVAL;
24125 }
24126
24127 if (unaligned || (q->dma_pad_mask & len) || map_data)
24128@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
24129 if (!len || !kbuf)
24130 return -EINVAL;
24131
24132- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
24133+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
24134 if (do_copy)
24135 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24136 else
24137diff -urNp linux-2.6.32.45/block/blk-softirq.c linux-2.6.32.45/block/blk-softirq.c
24138--- linux-2.6.32.45/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
24139+++ linux-2.6.32.45/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
24140@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
24141 * Softirq action handler - move entries to local list and loop over them
24142 * while passing them to the queue registered handler.
24143 */
24144-static void blk_done_softirq(struct softirq_action *h)
24145+static void blk_done_softirq(void)
24146 {
24147 struct list_head *cpu_list, local_list;
24148
24149diff -urNp linux-2.6.32.45/block/blk-sysfs.c linux-2.6.32.45/block/blk-sysfs.c
24150--- linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
24151+++ linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
24152@@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
24153 kmem_cache_free(blk_requestq_cachep, q);
24154 }
24155
24156-static struct sysfs_ops queue_sysfs_ops = {
24157+static const struct sysfs_ops queue_sysfs_ops = {
24158 .show = queue_attr_show,
24159 .store = queue_attr_store,
24160 };
24161diff -urNp linux-2.6.32.45/block/bsg.c linux-2.6.32.45/block/bsg.c
24162--- linux-2.6.32.45/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
24163+++ linux-2.6.32.45/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
24164@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
24165 struct sg_io_v4 *hdr, struct bsg_device *bd,
24166 fmode_t has_write_perm)
24167 {
24168+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24169+ unsigned char *cmdptr;
24170+
24171 if (hdr->request_len > BLK_MAX_CDB) {
24172 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24173 if (!rq->cmd)
24174 return -ENOMEM;
24175- }
24176+ cmdptr = rq->cmd;
24177+ } else
24178+ cmdptr = tmpcmd;
24179
24180- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
24181+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
24182 hdr->request_len))
24183 return -EFAULT;
24184
24185+ if (cmdptr != rq->cmd)
24186+ memcpy(rq->cmd, cmdptr, hdr->request_len);
24187+
24188 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24189 if (blk_verify_command(rq->cmd, has_write_perm))
24190 return -EPERM;
24191diff -urNp linux-2.6.32.45/block/elevator.c linux-2.6.32.45/block/elevator.c
24192--- linux-2.6.32.45/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
24193+++ linux-2.6.32.45/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
24194@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
24195 return error;
24196 }
24197
24198-static struct sysfs_ops elv_sysfs_ops = {
24199+static const struct sysfs_ops elv_sysfs_ops = {
24200 .show = elv_attr_show,
24201 .store = elv_attr_store,
24202 };
24203diff -urNp linux-2.6.32.45/block/scsi_ioctl.c linux-2.6.32.45/block/scsi_ioctl.c
24204--- linux-2.6.32.45/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
24205+++ linux-2.6.32.45/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
24206@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
24207 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24208 struct sg_io_hdr *hdr, fmode_t mode)
24209 {
24210- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24211+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24212+ unsigned char *cmdptr;
24213+
24214+ if (rq->cmd != rq->__cmd)
24215+ cmdptr = rq->cmd;
24216+ else
24217+ cmdptr = tmpcmd;
24218+
24219+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24220 return -EFAULT;
24221+
24222+ if (cmdptr != rq->cmd)
24223+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24224+
24225 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24226 return -EPERM;
24227
24228@@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
24229 int err;
24230 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24231 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24232+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24233+ unsigned char *cmdptr;
24234
24235 if (!sic)
24236 return -EINVAL;
24237@@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
24238 */
24239 err = -EFAULT;
24240 rq->cmd_len = cmdlen;
24241- if (copy_from_user(rq->cmd, sic->data, cmdlen))
24242+
24243+ if (rq->cmd != rq->__cmd)
24244+ cmdptr = rq->cmd;
24245+ else
24246+ cmdptr = tmpcmd;
24247+
24248+ if (copy_from_user(cmdptr, sic->data, cmdlen))
24249 goto error;
24250
24251+ if (rq->cmd != cmdptr)
24252+ memcpy(rq->cmd, cmdptr, cmdlen);
24253+
24254 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24255 goto error;
24256
24257diff -urNp linux-2.6.32.45/crypto/cryptd.c linux-2.6.32.45/crypto/cryptd.c
24258--- linux-2.6.32.45/crypto/cryptd.c 2011-03-27 14:31:47.000000000 -0400
24259+++ linux-2.6.32.45/crypto/cryptd.c 2011-08-23 21:22:32.000000000 -0400
24260@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
24261
24262 struct cryptd_blkcipher_request_ctx {
24263 crypto_completion_t complete;
24264-};
24265+} __no_const;
24266
24267 struct cryptd_hash_ctx {
24268 struct crypto_shash *child;
24269diff -urNp linux-2.6.32.45/crypto/gf128mul.c linux-2.6.32.45/crypto/gf128mul.c
24270--- linux-2.6.32.45/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
24271+++ linux-2.6.32.45/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
24272@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
24273 for (i = 0; i < 7; ++i)
24274 gf128mul_x_lle(&p[i + 1], &p[i]);
24275
24276- memset(r, 0, sizeof(r));
24277+ memset(r, 0, sizeof(*r));
24278 for (i = 0;;) {
24279 u8 ch = ((u8 *)b)[15 - i];
24280
24281@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
24282 for (i = 0; i < 7; ++i)
24283 gf128mul_x_bbe(&p[i + 1], &p[i]);
24284
24285- memset(r, 0, sizeof(r));
24286+ memset(r, 0, sizeof(*r));
24287 for (i = 0;;) {
24288 u8 ch = ((u8 *)b)[i];
24289
24290diff -urNp linux-2.6.32.45/crypto/serpent.c linux-2.6.32.45/crypto/serpent.c
24291--- linux-2.6.32.45/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
24292+++ linux-2.6.32.45/crypto/serpent.c 2011-08-18 23:59:56.000000000 -0400
24293@@ -21,6 +21,7 @@
24294 #include <asm/byteorder.h>
24295 #include <linux/crypto.h>
24296 #include <linux/types.h>
24297+#include <linux/sched.h>
24298
24299 /* Key is padded to the maximum of 256 bits before round key generation.
24300 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
24301@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_
24302 u32 r0,r1,r2,r3,r4;
24303 int i;
24304
24305+ pax_track_stack();
24306+
24307 /* Copy key, add padding */
24308
24309 for (i = 0; i < keylen; ++i)
24310diff -urNp linux-2.6.32.45/Documentation/dontdiff linux-2.6.32.45/Documentation/dontdiff
24311--- linux-2.6.32.45/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
24312+++ linux-2.6.32.45/Documentation/dontdiff 2011-08-21 18:59:02.000000000 -0400
24313@@ -1,13 +1,16 @@
24314 *.a
24315 *.aux
24316 *.bin
24317+*.cis
24318 *.cpio
24319 *.csp
24320+*.dbg
24321 *.dsp
24322 *.dvi
24323 *.elf
24324 *.eps
24325 *.fw
24326+*.gcno
24327 *.gen.S
24328 *.gif
24329 *.grep
24330@@ -38,8 +41,10 @@
24331 *.tab.h
24332 *.tex
24333 *.ver
24334+*.vim
24335 *.xml
24336 *_MODULES
24337+*_reg_safe.h
24338 *_vga16.c
24339 *~
24340 *.9
24341@@ -49,11 +54,16 @@
24342 53c700_d.h
24343 CVS
24344 ChangeSet
24345+GPATH
24346+GRTAGS
24347+GSYMS
24348+GTAGS
24349 Image
24350 Kerntypes
24351 Module.markers
24352 Module.symvers
24353 PENDING
24354+PERF*
24355 SCCS
24356 System.map*
24357 TAGS
24358@@ -76,7 +86,11 @@ btfixupprep
24359 build
24360 bvmlinux
24361 bzImage*
24362+capability_names.h
24363+capflags.c
24364 classlist.h*
24365+clut_vga16.c
24366+common-cmds.h
24367 comp*.log
24368 compile.h*
24369 conf
24370@@ -97,19 +111,21 @@ elfconfig.h*
24371 fixdep
24372 fore200e_mkfirm
24373 fore200e_pca_fw.c*
24374+gate.lds
24375 gconf
24376 gen-devlist
24377 gen_crc32table
24378 gen_init_cpio
24379 genksyms
24380 *_gray256.c
24381+hash
24382 ihex2fw
24383 ikconfig.h*
24384 initramfs_data.cpio
24385+initramfs_data.cpio.bz2
24386 initramfs_data.cpio.gz
24387 initramfs_list
24388 kallsyms
24389-kconfig
24390 keywords.c
24391 ksym.c*
24392 ksym.h*
24393@@ -133,7 +149,9 @@ mkboot
24394 mkbugboot
24395 mkcpustr
24396 mkdep
24397+mkpiggy
24398 mkprep
24399+mkregtable
24400 mktables
24401 mktree
24402 modpost
24403@@ -149,6 +167,7 @@ patches*
24404 pca200e.bin
24405 pca200e_ecd.bin2
24406 piggy.gz
24407+piggy.S
24408 piggyback
24409 pnmtologo
24410 ppc_defs.h*
24411@@ -157,12 +176,15 @@ qconf
24412 raid6altivec*.c
24413 raid6int*.c
24414 raid6tables.c
24415+regdb.c
24416 relocs
24417+rlim_names.h
24418 series
24419 setup
24420 setup.bin
24421 setup.elf
24422 sImage
24423+slabinfo
24424 sm_tbl*
24425 split-include
24426 syscalltab.h
24427@@ -186,14 +208,20 @@ version.h*
24428 vmlinux
24429 vmlinux-*
24430 vmlinux.aout
24431+vmlinux.bin.all
24432+vmlinux.bin.bz2
24433 vmlinux.lds
24434+vmlinux.relocs
24435+voffset.h
24436 vsyscall.lds
24437 vsyscall_32.lds
24438 wanxlfw.inc
24439 uImage
24440 unifdef
24441+utsrelease.h
24442 wakeup.bin
24443 wakeup.elf
24444 wakeup.lds
24445 zImage*
24446 zconf.hash.c
24447+zoffset.h
24448diff -urNp linux-2.6.32.45/Documentation/kernel-parameters.txt linux-2.6.32.45/Documentation/kernel-parameters.txt
24449--- linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
24450+++ linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
24451@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
24452 the specified number of seconds. This is to be used if
24453 your oopses keep scrolling off the screen.
24454
24455+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
24456+ virtualization environments that don't cope well with the
24457+ expand down segment used by UDEREF on X86-32 or the frequent
24458+ page table updates on X86-64.
24459+
24460+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
24461+
24462 pcbit= [HW,ISDN]
24463
24464 pcd. [PARIDE]
24465diff -urNp linux-2.6.32.45/drivers/acpi/acpi_pad.c linux-2.6.32.45/drivers/acpi/acpi_pad.c
24466--- linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
24467+++ linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
24468@@ -30,7 +30,7 @@
24469 #include <acpi/acpi_bus.h>
24470 #include <acpi/acpi_drivers.h>
24471
24472-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
24473+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
24474 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
24475 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
24476 static DEFINE_MUTEX(isolated_cpus_lock);
24477diff -urNp linux-2.6.32.45/drivers/acpi/battery.c linux-2.6.32.45/drivers/acpi/battery.c
24478--- linux-2.6.32.45/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
24479+++ linux-2.6.32.45/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
24480@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
24481 }
24482
24483 static struct battery_file {
24484- struct file_operations ops;
24485+ const struct file_operations ops;
24486 mode_t mode;
24487 const char *name;
24488 } acpi_battery_file[] = {
24489diff -urNp linux-2.6.32.45/drivers/acpi/dock.c linux-2.6.32.45/drivers/acpi/dock.c
24490--- linux-2.6.32.45/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
24491+++ linux-2.6.32.45/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
24492@@ -77,7 +77,7 @@ struct dock_dependent_device {
24493 struct list_head list;
24494 struct list_head hotplug_list;
24495 acpi_handle handle;
24496- struct acpi_dock_ops *ops;
24497+ const struct acpi_dock_ops *ops;
24498 void *context;
24499 };
24500
24501@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
24502 * the dock driver after _DCK is executed.
24503 */
24504 int
24505-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
24506+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
24507 void *context)
24508 {
24509 struct dock_dependent_device *dd;
24510diff -urNp linux-2.6.32.45/drivers/acpi/osl.c linux-2.6.32.45/drivers/acpi/osl.c
24511--- linux-2.6.32.45/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
24512+++ linux-2.6.32.45/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
24513@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
24514 void __iomem *virt_addr;
24515
24516 virt_addr = ioremap(phys_addr, width);
24517+ if (!virt_addr)
24518+ return AE_NO_MEMORY;
24519 if (!value)
24520 value = &dummy;
24521
24522@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
24523 void __iomem *virt_addr;
24524
24525 virt_addr = ioremap(phys_addr, width);
24526+ if (!virt_addr)
24527+ return AE_NO_MEMORY;
24528
24529 switch (width) {
24530 case 8:
24531diff -urNp linux-2.6.32.45/drivers/acpi/power_meter.c linux-2.6.32.45/drivers/acpi/power_meter.c
24532--- linux-2.6.32.45/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
24533+++ linux-2.6.32.45/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
24534@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
24535 return res;
24536
24537 temp /= 1000;
24538- if (temp < 0)
24539- return -EINVAL;
24540
24541 mutex_lock(&resource->lock);
24542 resource->trip[attr->index - 7] = temp;
24543diff -urNp linux-2.6.32.45/drivers/acpi/proc.c linux-2.6.32.45/drivers/acpi/proc.c
24544--- linux-2.6.32.45/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24545+++ linux-2.6.32.45/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24546@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24547 size_t count, loff_t * ppos)
24548 {
24549 struct list_head *node, *next;
24550- char strbuf[5];
24551- char str[5] = "";
24552- unsigned int len = count;
24553+ char strbuf[5] = {0};
24554 struct acpi_device *found_dev = NULL;
24555
24556- if (len > 4)
24557- len = 4;
24558- if (len < 0)
24559- return -EFAULT;
24560+ if (count > 4)
24561+ count = 4;
24562
24563- if (copy_from_user(strbuf, buffer, len))
24564+ if (copy_from_user(strbuf, buffer, count))
24565 return -EFAULT;
24566- strbuf[len] = '\0';
24567- sscanf(strbuf, "%s", str);
24568+ strbuf[count] = '\0';
24569
24570 mutex_lock(&acpi_device_lock);
24571 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24572@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24573 if (!dev->wakeup.flags.valid)
24574 continue;
24575
24576- if (!strncmp(dev->pnp.bus_id, str, 4)) {
24577+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24578 dev->wakeup.state.enabled =
24579 dev->wakeup.state.enabled ? 0 : 1;
24580 found_dev = dev;
24581diff -urNp linux-2.6.32.45/drivers/acpi/processor_core.c linux-2.6.32.45/drivers/acpi/processor_core.c
24582--- linux-2.6.32.45/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24583+++ linux-2.6.32.45/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24584@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24585 return 0;
24586 }
24587
24588- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24589+ BUG_ON(pr->id >= nr_cpu_ids);
24590
24591 /*
24592 * Buggy BIOS check
24593diff -urNp linux-2.6.32.45/drivers/acpi/sbshc.c linux-2.6.32.45/drivers/acpi/sbshc.c
24594--- linux-2.6.32.45/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24595+++ linux-2.6.32.45/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24596@@ -17,7 +17,7 @@
24597
24598 #define PREFIX "ACPI: "
24599
24600-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24601+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24602 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24603
24604 struct acpi_smb_hc {
24605diff -urNp linux-2.6.32.45/drivers/acpi/sleep.c linux-2.6.32.45/drivers/acpi/sleep.c
24606--- linux-2.6.32.45/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24607+++ linux-2.6.32.45/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24608@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24609 }
24610 }
24611
24612-static struct platform_suspend_ops acpi_suspend_ops = {
24613+static const struct platform_suspend_ops acpi_suspend_ops = {
24614 .valid = acpi_suspend_state_valid,
24615 .begin = acpi_suspend_begin,
24616 .prepare_late = acpi_pm_prepare,
24617@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24618 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24619 * been requested.
24620 */
24621-static struct platform_suspend_ops acpi_suspend_ops_old = {
24622+static const struct platform_suspend_ops acpi_suspend_ops_old = {
24623 .valid = acpi_suspend_state_valid,
24624 .begin = acpi_suspend_begin_old,
24625 .prepare_late = acpi_pm_disable_gpes,
24626@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24627 acpi_enable_all_runtime_gpes();
24628 }
24629
24630-static struct platform_hibernation_ops acpi_hibernation_ops = {
24631+static const struct platform_hibernation_ops acpi_hibernation_ops = {
24632 .begin = acpi_hibernation_begin,
24633 .end = acpi_pm_end,
24634 .pre_snapshot = acpi_hibernation_pre_snapshot,
24635@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24636 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24637 * been requested.
24638 */
24639-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24640+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24641 .begin = acpi_hibernation_begin_old,
24642 .end = acpi_pm_end,
24643 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24644diff -urNp linux-2.6.32.45/drivers/acpi/video.c linux-2.6.32.45/drivers/acpi/video.c
24645--- linux-2.6.32.45/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24646+++ linux-2.6.32.45/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24647@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24648 vd->brightness->levels[request_level]);
24649 }
24650
24651-static struct backlight_ops acpi_backlight_ops = {
24652+static const struct backlight_ops acpi_backlight_ops = {
24653 .get_brightness = acpi_video_get_brightness,
24654 .update_status = acpi_video_set_brightness,
24655 };
24656diff -urNp linux-2.6.32.45/drivers/ata/ahci.c linux-2.6.32.45/drivers/ata/ahci.c
24657--- linux-2.6.32.45/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24658+++ linux-2.6.32.45/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24659@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24660 .sdev_attrs = ahci_sdev_attrs,
24661 };
24662
24663-static struct ata_port_operations ahci_ops = {
24664+static const struct ata_port_operations ahci_ops = {
24665 .inherits = &sata_pmp_port_ops,
24666
24667 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24668@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24669 .port_stop = ahci_port_stop,
24670 };
24671
24672-static struct ata_port_operations ahci_vt8251_ops = {
24673+static const struct ata_port_operations ahci_vt8251_ops = {
24674 .inherits = &ahci_ops,
24675 .hardreset = ahci_vt8251_hardreset,
24676 };
24677
24678-static struct ata_port_operations ahci_p5wdh_ops = {
24679+static const struct ata_port_operations ahci_p5wdh_ops = {
24680 .inherits = &ahci_ops,
24681 .hardreset = ahci_p5wdh_hardreset,
24682 };
24683
24684-static struct ata_port_operations ahci_sb600_ops = {
24685+static const struct ata_port_operations ahci_sb600_ops = {
24686 .inherits = &ahci_ops,
24687 .softreset = ahci_sb600_softreset,
24688 .pmp_softreset = ahci_sb600_softreset,
24689diff -urNp linux-2.6.32.45/drivers/ata/ata_generic.c linux-2.6.32.45/drivers/ata/ata_generic.c
24690--- linux-2.6.32.45/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24691+++ linux-2.6.32.45/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24692@@ -104,7 +104,7 @@ static struct scsi_host_template generic
24693 ATA_BMDMA_SHT(DRV_NAME),
24694 };
24695
24696-static struct ata_port_operations generic_port_ops = {
24697+static const struct ata_port_operations generic_port_ops = {
24698 .inherits = &ata_bmdma_port_ops,
24699 .cable_detect = ata_cable_unknown,
24700 .set_mode = generic_set_mode,
24701diff -urNp linux-2.6.32.45/drivers/ata/ata_piix.c linux-2.6.32.45/drivers/ata/ata_piix.c
24702--- linux-2.6.32.45/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24703+++ linux-2.6.32.45/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24704@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24705 ATA_BMDMA_SHT(DRV_NAME),
24706 };
24707
24708-static struct ata_port_operations piix_pata_ops = {
24709+static const struct ata_port_operations piix_pata_ops = {
24710 .inherits = &ata_bmdma32_port_ops,
24711 .cable_detect = ata_cable_40wire,
24712 .set_piomode = piix_set_piomode,
24713@@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24714 .prereset = piix_pata_prereset,
24715 };
24716
24717-static struct ata_port_operations piix_vmw_ops = {
24718+static const struct ata_port_operations piix_vmw_ops = {
24719 .inherits = &piix_pata_ops,
24720 .bmdma_status = piix_vmw_bmdma_status,
24721 };
24722
24723-static struct ata_port_operations ich_pata_ops = {
24724+static const struct ata_port_operations ich_pata_ops = {
24725 .inherits = &piix_pata_ops,
24726 .cable_detect = ich_pata_cable_detect,
24727 .set_dmamode = ich_set_dmamode,
24728 };
24729
24730-static struct ata_port_operations piix_sata_ops = {
24731+static const struct ata_port_operations piix_sata_ops = {
24732 .inherits = &ata_bmdma_port_ops,
24733 };
24734
24735-static struct ata_port_operations piix_sidpr_sata_ops = {
24736+static const struct ata_port_operations piix_sidpr_sata_ops = {
24737 .inherits = &piix_sata_ops,
24738 .hardreset = sata_std_hardreset,
24739 .scr_read = piix_sidpr_scr_read,
24740diff -urNp linux-2.6.32.45/drivers/ata/libata-acpi.c linux-2.6.32.45/drivers/ata/libata-acpi.c
24741--- linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24742+++ linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24743@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24744 ata_acpi_uevent(dev->link->ap, dev, event);
24745 }
24746
24747-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24748+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24749 .handler = ata_acpi_dev_notify_dock,
24750 .uevent = ata_acpi_dev_uevent,
24751 };
24752
24753-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24754+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24755 .handler = ata_acpi_ap_notify_dock,
24756 .uevent = ata_acpi_ap_uevent,
24757 };
24758diff -urNp linux-2.6.32.45/drivers/ata/libata-core.c linux-2.6.32.45/drivers/ata/libata-core.c
24759--- linux-2.6.32.45/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24760+++ linux-2.6.32.45/drivers/ata/libata-core.c 2011-08-05 20:33:55.000000000 -0400
24761@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24762 struct ata_port *ap;
24763 unsigned int tag;
24764
24765- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24766+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24767 ap = qc->ap;
24768
24769 qc->flags = 0;
24770@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24771 struct ata_port *ap;
24772 struct ata_link *link;
24773
24774- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24775+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24776 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24777 ap = qc->ap;
24778 link = qc->dev->link;
24779@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24780 * LOCKING:
24781 * None.
24782 */
24783-static void ata_finalize_port_ops(struct ata_port_operations *ops)
24784+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24785 {
24786 static DEFINE_SPINLOCK(lock);
24787 const struct ata_port_operations *cur;
24788@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24789 return;
24790
24791 spin_lock(&lock);
24792+ pax_open_kernel();
24793
24794 for (cur = ops->inherits; cur; cur = cur->inherits) {
24795 void **inherit = (void **)cur;
24796@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24797 if (IS_ERR(*pp))
24798 *pp = NULL;
24799
24800- ops->inherits = NULL;
24801+ *(struct ata_port_operations **)&ops->inherits = NULL;
24802
24803+ pax_close_kernel();
24804 spin_unlock(&lock);
24805 }
24806
24807@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24808 */
24809 /* KILLME - the only user left is ipr */
24810 void ata_host_init(struct ata_host *host, struct device *dev,
24811- unsigned long flags, struct ata_port_operations *ops)
24812+ unsigned long flags, const struct ata_port_operations *ops)
24813 {
24814 spin_lock_init(&host->lock);
24815 host->dev = dev;
24816@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24817 /* truly dummy */
24818 }
24819
24820-struct ata_port_operations ata_dummy_port_ops = {
24821+const struct ata_port_operations ata_dummy_port_ops = {
24822 .qc_prep = ata_noop_qc_prep,
24823 .qc_issue = ata_dummy_qc_issue,
24824 .error_handler = ata_dummy_error_handler,
24825diff -urNp linux-2.6.32.45/drivers/ata/libata-eh.c linux-2.6.32.45/drivers/ata/libata-eh.c
24826--- linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:35:28.000000000 -0400
24827+++ linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:33:59.000000000 -0400
24828@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24829 {
24830 struct ata_link *link;
24831
24832+ pax_track_stack();
24833+
24834 ata_for_each_link(link, ap, HOST_FIRST)
24835 ata_eh_link_report(link);
24836 }
24837@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24838 */
24839 void ata_std_error_handler(struct ata_port *ap)
24840 {
24841- struct ata_port_operations *ops = ap->ops;
24842+ const struct ata_port_operations *ops = ap->ops;
24843 ata_reset_fn_t hardreset = ops->hardreset;
24844
24845 /* ignore built-in hardreset if SCR access is not available */
24846diff -urNp linux-2.6.32.45/drivers/ata/libata-pmp.c linux-2.6.32.45/drivers/ata/libata-pmp.c
24847--- linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24848+++ linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24849@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24850 */
24851 static int sata_pmp_eh_recover(struct ata_port *ap)
24852 {
24853- struct ata_port_operations *ops = ap->ops;
24854+ const struct ata_port_operations *ops = ap->ops;
24855 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24856 struct ata_link *pmp_link = &ap->link;
24857 struct ata_device *pmp_dev = pmp_link->device;
24858diff -urNp linux-2.6.32.45/drivers/ata/pata_acpi.c linux-2.6.32.45/drivers/ata/pata_acpi.c
24859--- linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24860+++ linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24861@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24862 ATA_BMDMA_SHT(DRV_NAME),
24863 };
24864
24865-static struct ata_port_operations pacpi_ops = {
24866+static const struct ata_port_operations pacpi_ops = {
24867 .inherits = &ata_bmdma_port_ops,
24868 .qc_issue = pacpi_qc_issue,
24869 .cable_detect = pacpi_cable_detect,
24870diff -urNp linux-2.6.32.45/drivers/ata/pata_ali.c linux-2.6.32.45/drivers/ata/pata_ali.c
24871--- linux-2.6.32.45/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24872+++ linux-2.6.32.45/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24873@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24874 * Port operations for PIO only ALi
24875 */
24876
24877-static struct ata_port_operations ali_early_port_ops = {
24878+static const struct ata_port_operations ali_early_port_ops = {
24879 .inherits = &ata_sff_port_ops,
24880 .cable_detect = ata_cable_40wire,
24881 .set_piomode = ali_set_piomode,
24882@@ -382,7 +382,7 @@ static const struct ata_port_operations
24883 * Port operations for DMA capable ALi without cable
24884 * detect
24885 */
24886-static struct ata_port_operations ali_20_port_ops = {
24887+static const struct ata_port_operations ali_20_port_ops = {
24888 .inherits = &ali_dma_base_ops,
24889 .cable_detect = ata_cable_40wire,
24890 .mode_filter = ali_20_filter,
24891@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24892 /*
24893 * Port operations for DMA capable ALi with cable detect
24894 */
24895-static struct ata_port_operations ali_c2_port_ops = {
24896+static const struct ata_port_operations ali_c2_port_ops = {
24897 .inherits = &ali_dma_base_ops,
24898 .check_atapi_dma = ali_check_atapi_dma,
24899 .cable_detect = ali_c2_cable_detect,
24900@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24901 /*
24902 * Port operations for DMA capable ALi with cable detect
24903 */
24904-static struct ata_port_operations ali_c4_port_ops = {
24905+static const struct ata_port_operations ali_c4_port_ops = {
24906 .inherits = &ali_dma_base_ops,
24907 .check_atapi_dma = ali_check_atapi_dma,
24908 .cable_detect = ali_c2_cable_detect,
24909@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24910 /*
24911 * Port operations for DMA capable ALi with cable detect and LBA48
24912 */
24913-static struct ata_port_operations ali_c5_port_ops = {
24914+static const struct ata_port_operations ali_c5_port_ops = {
24915 .inherits = &ali_dma_base_ops,
24916 .check_atapi_dma = ali_check_atapi_dma,
24917 .dev_config = ali_warn_atapi_dma,
24918diff -urNp linux-2.6.32.45/drivers/ata/pata_amd.c linux-2.6.32.45/drivers/ata/pata_amd.c
24919--- linux-2.6.32.45/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24920+++ linux-2.6.32.45/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24921@@ -397,28 +397,28 @@ static const struct ata_port_operations
24922 .prereset = amd_pre_reset,
24923 };
24924
24925-static struct ata_port_operations amd33_port_ops = {
24926+static const struct ata_port_operations amd33_port_ops = {
24927 .inherits = &amd_base_port_ops,
24928 .cable_detect = ata_cable_40wire,
24929 .set_piomode = amd33_set_piomode,
24930 .set_dmamode = amd33_set_dmamode,
24931 };
24932
24933-static struct ata_port_operations amd66_port_ops = {
24934+static const struct ata_port_operations amd66_port_ops = {
24935 .inherits = &amd_base_port_ops,
24936 .cable_detect = ata_cable_unknown,
24937 .set_piomode = amd66_set_piomode,
24938 .set_dmamode = amd66_set_dmamode,
24939 };
24940
24941-static struct ata_port_operations amd100_port_ops = {
24942+static const struct ata_port_operations amd100_port_ops = {
24943 .inherits = &amd_base_port_ops,
24944 .cable_detect = ata_cable_unknown,
24945 .set_piomode = amd100_set_piomode,
24946 .set_dmamode = amd100_set_dmamode,
24947 };
24948
24949-static struct ata_port_operations amd133_port_ops = {
24950+static const struct ata_port_operations amd133_port_ops = {
24951 .inherits = &amd_base_port_ops,
24952 .cable_detect = amd_cable_detect,
24953 .set_piomode = amd133_set_piomode,
24954@@ -433,13 +433,13 @@ static const struct ata_port_operations
24955 .host_stop = nv_host_stop,
24956 };
24957
24958-static struct ata_port_operations nv100_port_ops = {
24959+static const struct ata_port_operations nv100_port_ops = {
24960 .inherits = &nv_base_port_ops,
24961 .set_piomode = nv100_set_piomode,
24962 .set_dmamode = nv100_set_dmamode,
24963 };
24964
24965-static struct ata_port_operations nv133_port_ops = {
24966+static const struct ata_port_operations nv133_port_ops = {
24967 .inherits = &nv_base_port_ops,
24968 .set_piomode = nv133_set_piomode,
24969 .set_dmamode = nv133_set_dmamode,
24970diff -urNp linux-2.6.32.45/drivers/ata/pata_artop.c linux-2.6.32.45/drivers/ata/pata_artop.c
24971--- linux-2.6.32.45/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24972+++ linux-2.6.32.45/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24973@@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24974 ATA_BMDMA_SHT(DRV_NAME),
24975 };
24976
24977-static struct ata_port_operations artop6210_ops = {
24978+static const struct ata_port_operations artop6210_ops = {
24979 .inherits = &ata_bmdma_port_ops,
24980 .cable_detect = ata_cable_40wire,
24981 .set_piomode = artop6210_set_piomode,
24982@@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24983 .qc_defer = artop6210_qc_defer,
24984 };
24985
24986-static struct ata_port_operations artop6260_ops = {
24987+static const struct ata_port_operations artop6260_ops = {
24988 .inherits = &ata_bmdma_port_ops,
24989 .cable_detect = artop6260_cable_detect,
24990 .set_piomode = artop6260_set_piomode,
24991diff -urNp linux-2.6.32.45/drivers/ata/pata_at32.c linux-2.6.32.45/drivers/ata/pata_at32.c
24992--- linux-2.6.32.45/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24993+++ linux-2.6.32.45/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24994@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24995 ATA_PIO_SHT(DRV_NAME),
24996 };
24997
24998-static struct ata_port_operations at32_port_ops = {
24999+static const struct ata_port_operations at32_port_ops = {
25000 .inherits = &ata_sff_port_ops,
25001 .cable_detect = ata_cable_40wire,
25002 .set_piomode = pata_at32_set_piomode,
25003diff -urNp linux-2.6.32.45/drivers/ata/pata_at91.c linux-2.6.32.45/drivers/ata/pata_at91.c
25004--- linux-2.6.32.45/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
25005+++ linux-2.6.32.45/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
25006@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
25007 ATA_PIO_SHT(DRV_NAME),
25008 };
25009
25010-static struct ata_port_operations pata_at91_port_ops = {
25011+static const struct ata_port_operations pata_at91_port_ops = {
25012 .inherits = &ata_sff_port_ops,
25013
25014 .sff_data_xfer = pata_at91_data_xfer_noirq,
25015diff -urNp linux-2.6.32.45/drivers/ata/pata_atiixp.c linux-2.6.32.45/drivers/ata/pata_atiixp.c
25016--- linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
25017+++ linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
25018@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
25019 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25020 };
25021
25022-static struct ata_port_operations atiixp_port_ops = {
25023+static const struct ata_port_operations atiixp_port_ops = {
25024 .inherits = &ata_bmdma_port_ops,
25025
25026 .qc_prep = ata_sff_dumb_qc_prep,
25027diff -urNp linux-2.6.32.45/drivers/ata/pata_atp867x.c linux-2.6.32.45/drivers/ata/pata_atp867x.c
25028--- linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
25029+++ linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
25030@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
25031 ATA_BMDMA_SHT(DRV_NAME),
25032 };
25033
25034-static struct ata_port_operations atp867x_ops = {
25035+static const struct ata_port_operations atp867x_ops = {
25036 .inherits = &ata_bmdma_port_ops,
25037 .cable_detect = atp867x_cable_detect,
25038 .set_piomode = atp867x_set_piomode,
25039diff -urNp linux-2.6.32.45/drivers/ata/pata_bf54x.c linux-2.6.32.45/drivers/ata/pata_bf54x.c
25040--- linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
25041+++ linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
25042@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
25043 .dma_boundary = ATA_DMA_BOUNDARY,
25044 };
25045
25046-static struct ata_port_operations bfin_pata_ops = {
25047+static const struct ata_port_operations bfin_pata_ops = {
25048 .inherits = &ata_sff_port_ops,
25049
25050 .set_piomode = bfin_set_piomode,
25051diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd640.c linux-2.6.32.45/drivers/ata/pata_cmd640.c
25052--- linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
25053+++ linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
25054@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
25055 ATA_BMDMA_SHT(DRV_NAME),
25056 };
25057
25058-static struct ata_port_operations cmd640_port_ops = {
25059+static const struct ata_port_operations cmd640_port_ops = {
25060 .inherits = &ata_bmdma_port_ops,
25061 /* In theory xfer_noirq is not needed once we kill the prefetcher */
25062 .sff_data_xfer = ata_sff_data_xfer_noirq,
25063diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd64x.c linux-2.6.32.45/drivers/ata/pata_cmd64x.c
25064--- linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
25065+++ linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
25066@@ -271,18 +271,18 @@ static const struct ata_port_operations
25067 .set_dmamode = cmd64x_set_dmamode,
25068 };
25069
25070-static struct ata_port_operations cmd64x_port_ops = {
25071+static const struct ata_port_operations cmd64x_port_ops = {
25072 .inherits = &cmd64x_base_ops,
25073 .cable_detect = ata_cable_40wire,
25074 };
25075
25076-static struct ata_port_operations cmd646r1_port_ops = {
25077+static const struct ata_port_operations cmd646r1_port_ops = {
25078 .inherits = &cmd64x_base_ops,
25079 .bmdma_stop = cmd646r1_bmdma_stop,
25080 .cable_detect = ata_cable_40wire,
25081 };
25082
25083-static struct ata_port_operations cmd648_port_ops = {
25084+static const struct ata_port_operations cmd648_port_ops = {
25085 .inherits = &cmd64x_base_ops,
25086 .bmdma_stop = cmd648_bmdma_stop,
25087 .cable_detect = cmd648_cable_detect,
25088diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5520.c linux-2.6.32.45/drivers/ata/pata_cs5520.c
25089--- linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
25090+++ linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
25091@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
25092 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25093 };
25094
25095-static struct ata_port_operations cs5520_port_ops = {
25096+static const struct ata_port_operations cs5520_port_ops = {
25097 .inherits = &ata_bmdma_port_ops,
25098 .qc_prep = ata_sff_dumb_qc_prep,
25099 .cable_detect = ata_cable_40wire,
25100diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5530.c linux-2.6.32.45/drivers/ata/pata_cs5530.c
25101--- linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
25102+++ linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
25103@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
25104 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25105 };
25106
25107-static struct ata_port_operations cs5530_port_ops = {
25108+static const struct ata_port_operations cs5530_port_ops = {
25109 .inherits = &ata_bmdma_port_ops,
25110
25111 .qc_prep = ata_sff_dumb_qc_prep,
25112diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5535.c linux-2.6.32.45/drivers/ata/pata_cs5535.c
25113--- linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
25114+++ linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
25115@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
25116 ATA_BMDMA_SHT(DRV_NAME),
25117 };
25118
25119-static struct ata_port_operations cs5535_port_ops = {
25120+static const struct ata_port_operations cs5535_port_ops = {
25121 .inherits = &ata_bmdma_port_ops,
25122 .cable_detect = cs5535_cable_detect,
25123 .set_piomode = cs5535_set_piomode,
25124diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5536.c linux-2.6.32.45/drivers/ata/pata_cs5536.c
25125--- linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
25126+++ linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
25127@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
25128 ATA_BMDMA_SHT(DRV_NAME),
25129 };
25130
25131-static struct ata_port_operations cs5536_port_ops = {
25132+static const struct ata_port_operations cs5536_port_ops = {
25133 .inherits = &ata_bmdma_port_ops,
25134 .cable_detect = cs5536_cable_detect,
25135 .set_piomode = cs5536_set_piomode,
25136diff -urNp linux-2.6.32.45/drivers/ata/pata_cypress.c linux-2.6.32.45/drivers/ata/pata_cypress.c
25137--- linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
25138+++ linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
25139@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
25140 ATA_BMDMA_SHT(DRV_NAME),
25141 };
25142
25143-static struct ata_port_operations cy82c693_port_ops = {
25144+static const struct ata_port_operations cy82c693_port_ops = {
25145 .inherits = &ata_bmdma_port_ops,
25146 .cable_detect = ata_cable_40wire,
25147 .set_piomode = cy82c693_set_piomode,
25148diff -urNp linux-2.6.32.45/drivers/ata/pata_efar.c linux-2.6.32.45/drivers/ata/pata_efar.c
25149--- linux-2.6.32.45/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
25150+++ linux-2.6.32.45/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
25151@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
25152 ATA_BMDMA_SHT(DRV_NAME),
25153 };
25154
25155-static struct ata_port_operations efar_ops = {
25156+static const struct ata_port_operations efar_ops = {
25157 .inherits = &ata_bmdma_port_ops,
25158 .cable_detect = efar_cable_detect,
25159 .set_piomode = efar_set_piomode,
25160diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt366.c linux-2.6.32.45/drivers/ata/pata_hpt366.c
25161--- linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
25162+++ linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
25163@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
25164 * Configuration for HPT366/68
25165 */
25166
25167-static struct ata_port_operations hpt366_port_ops = {
25168+static const struct ata_port_operations hpt366_port_ops = {
25169 .inherits = &ata_bmdma_port_ops,
25170 .cable_detect = hpt36x_cable_detect,
25171 .mode_filter = hpt366_filter,
25172diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt37x.c linux-2.6.32.45/drivers/ata/pata_hpt37x.c
25173--- linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
25174+++ linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
25175@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
25176 * Configuration for HPT370
25177 */
25178
25179-static struct ata_port_operations hpt370_port_ops = {
25180+static const struct ata_port_operations hpt370_port_ops = {
25181 .inherits = &ata_bmdma_port_ops,
25182
25183 .bmdma_stop = hpt370_bmdma_stop,
25184@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
25185 * Configuration for HPT370A. Close to 370 but less filters
25186 */
25187
25188-static struct ata_port_operations hpt370a_port_ops = {
25189+static const struct ata_port_operations hpt370a_port_ops = {
25190 .inherits = &hpt370_port_ops,
25191 .mode_filter = hpt370a_filter,
25192 };
25193@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
25194 * and DMA mode setting functionality.
25195 */
25196
25197-static struct ata_port_operations hpt372_port_ops = {
25198+static const struct ata_port_operations hpt372_port_ops = {
25199 .inherits = &ata_bmdma_port_ops,
25200
25201 .bmdma_stop = hpt37x_bmdma_stop,
25202@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
25203 * but we have a different cable detection procedure for function 1.
25204 */
25205
25206-static struct ata_port_operations hpt374_fn1_port_ops = {
25207+static const struct ata_port_operations hpt374_fn1_port_ops = {
25208 .inherits = &hpt372_port_ops,
25209 .prereset = hpt374_fn1_pre_reset,
25210 };
25211diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c
25212--- linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
25213+++ linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
25214@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
25215 * Configuration for HPT3x2n.
25216 */
25217
25218-static struct ata_port_operations hpt3x2n_port_ops = {
25219+static const struct ata_port_operations hpt3x2n_port_ops = {
25220 .inherits = &ata_bmdma_port_ops,
25221
25222 .bmdma_stop = hpt3x2n_bmdma_stop,
25223diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x3.c linux-2.6.32.45/drivers/ata/pata_hpt3x3.c
25224--- linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
25225+++ linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
25226@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
25227 ATA_BMDMA_SHT(DRV_NAME),
25228 };
25229
25230-static struct ata_port_operations hpt3x3_port_ops = {
25231+static const struct ata_port_operations hpt3x3_port_ops = {
25232 .inherits = &ata_bmdma_port_ops,
25233 .cable_detect = ata_cable_40wire,
25234 .set_piomode = hpt3x3_set_piomode,
25235diff -urNp linux-2.6.32.45/drivers/ata/pata_icside.c linux-2.6.32.45/drivers/ata/pata_icside.c
25236--- linux-2.6.32.45/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
25237+++ linux-2.6.32.45/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
25238@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
25239 }
25240 }
25241
25242-static struct ata_port_operations pata_icside_port_ops = {
25243+static const struct ata_port_operations pata_icside_port_ops = {
25244 .inherits = &ata_sff_port_ops,
25245 /* no need to build any PRD tables for DMA */
25246 .qc_prep = ata_noop_qc_prep,
25247diff -urNp linux-2.6.32.45/drivers/ata/pata_isapnp.c linux-2.6.32.45/drivers/ata/pata_isapnp.c
25248--- linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
25249+++ linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
25250@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
25251 ATA_PIO_SHT(DRV_NAME),
25252 };
25253
25254-static struct ata_port_operations isapnp_port_ops = {
25255+static const struct ata_port_operations isapnp_port_ops = {
25256 .inherits = &ata_sff_port_ops,
25257 .cable_detect = ata_cable_40wire,
25258 };
25259
25260-static struct ata_port_operations isapnp_noalt_port_ops = {
25261+static const struct ata_port_operations isapnp_noalt_port_ops = {
25262 .inherits = &ata_sff_port_ops,
25263 .cable_detect = ata_cable_40wire,
25264 /* No altstatus so we don't want to use the lost interrupt poll */
25265diff -urNp linux-2.6.32.45/drivers/ata/pata_it8213.c linux-2.6.32.45/drivers/ata/pata_it8213.c
25266--- linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
25267+++ linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
25268@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
25269 };
25270
25271
25272-static struct ata_port_operations it8213_ops = {
25273+static const struct ata_port_operations it8213_ops = {
25274 .inherits = &ata_bmdma_port_ops,
25275 .cable_detect = it8213_cable_detect,
25276 .set_piomode = it8213_set_piomode,
25277diff -urNp linux-2.6.32.45/drivers/ata/pata_it821x.c linux-2.6.32.45/drivers/ata/pata_it821x.c
25278--- linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
25279+++ linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
25280@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
25281 ATA_BMDMA_SHT(DRV_NAME),
25282 };
25283
25284-static struct ata_port_operations it821x_smart_port_ops = {
25285+static const struct ata_port_operations it821x_smart_port_ops = {
25286 .inherits = &ata_bmdma_port_ops,
25287
25288 .check_atapi_dma= it821x_check_atapi_dma,
25289@@ -814,7 +814,7 @@ static struct ata_port_operations it821x
25290 .port_start = it821x_port_start,
25291 };
25292
25293-static struct ata_port_operations it821x_passthru_port_ops = {
25294+static const struct ata_port_operations it821x_passthru_port_ops = {
25295 .inherits = &ata_bmdma_port_ops,
25296
25297 .check_atapi_dma= it821x_check_atapi_dma,
25298@@ -830,7 +830,7 @@ static struct ata_port_operations it821x
25299 .port_start = it821x_port_start,
25300 };
25301
25302-static struct ata_port_operations it821x_rdc_port_ops = {
25303+static const struct ata_port_operations it821x_rdc_port_ops = {
25304 .inherits = &ata_bmdma_port_ops,
25305
25306 .check_atapi_dma= it821x_check_atapi_dma,
25307diff -urNp linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c
25308--- linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
25309+++ linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
25310@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
25311 ATA_PIO_SHT(DRV_NAME),
25312 };
25313
25314-static struct ata_port_operations ixp4xx_port_ops = {
25315+static const struct ata_port_operations ixp4xx_port_ops = {
25316 .inherits = &ata_sff_port_ops,
25317 .sff_data_xfer = ixp4xx_mmio_data_xfer,
25318 .cable_detect = ata_cable_40wire,
25319diff -urNp linux-2.6.32.45/drivers/ata/pata_jmicron.c linux-2.6.32.45/drivers/ata/pata_jmicron.c
25320--- linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
25321+++ linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
25322@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
25323 ATA_BMDMA_SHT(DRV_NAME),
25324 };
25325
25326-static struct ata_port_operations jmicron_ops = {
25327+static const struct ata_port_operations jmicron_ops = {
25328 .inherits = &ata_bmdma_port_ops,
25329 .prereset = jmicron_pre_reset,
25330 };
25331diff -urNp linux-2.6.32.45/drivers/ata/pata_legacy.c linux-2.6.32.45/drivers/ata/pata_legacy.c
25332--- linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
25333+++ linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
25334@@ -106,7 +106,7 @@ struct legacy_probe {
25335
25336 struct legacy_controller {
25337 const char *name;
25338- struct ata_port_operations *ops;
25339+ const struct ata_port_operations *ops;
25340 unsigned int pio_mask;
25341 unsigned int flags;
25342 unsigned int pflags;
25343@@ -223,12 +223,12 @@ static const struct ata_port_operations
25344 * pio_mask as well.
25345 */
25346
25347-static struct ata_port_operations simple_port_ops = {
25348+static const struct ata_port_operations simple_port_ops = {
25349 .inherits = &legacy_base_port_ops,
25350 .sff_data_xfer = ata_sff_data_xfer_noirq,
25351 };
25352
25353-static struct ata_port_operations legacy_port_ops = {
25354+static const struct ata_port_operations legacy_port_ops = {
25355 .inherits = &legacy_base_port_ops,
25356 .sff_data_xfer = ata_sff_data_xfer_noirq,
25357 .set_mode = legacy_set_mode,
25358@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
25359 return buflen;
25360 }
25361
25362-static struct ata_port_operations pdc20230_port_ops = {
25363+static const struct ata_port_operations pdc20230_port_ops = {
25364 .inherits = &legacy_base_port_ops,
25365 .set_piomode = pdc20230_set_piomode,
25366 .sff_data_xfer = pdc_data_xfer_vlb,
25367@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
25368 ioread8(ap->ioaddr.status_addr);
25369 }
25370
25371-static struct ata_port_operations ht6560a_port_ops = {
25372+static const struct ata_port_operations ht6560a_port_ops = {
25373 .inherits = &legacy_base_port_ops,
25374 .set_piomode = ht6560a_set_piomode,
25375 };
25376@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
25377 ioread8(ap->ioaddr.status_addr);
25378 }
25379
25380-static struct ata_port_operations ht6560b_port_ops = {
25381+static const struct ata_port_operations ht6560b_port_ops = {
25382 .inherits = &legacy_base_port_ops,
25383 .set_piomode = ht6560b_set_piomode,
25384 };
25385@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
25386 }
25387
25388
25389-static struct ata_port_operations opti82c611a_port_ops = {
25390+static const struct ata_port_operations opti82c611a_port_ops = {
25391 .inherits = &legacy_base_port_ops,
25392 .set_piomode = opti82c611a_set_piomode,
25393 };
25394@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
25395 return ata_sff_qc_issue(qc);
25396 }
25397
25398-static struct ata_port_operations opti82c46x_port_ops = {
25399+static const struct ata_port_operations opti82c46x_port_ops = {
25400 .inherits = &legacy_base_port_ops,
25401 .set_piomode = opti82c46x_set_piomode,
25402 .qc_issue = opti82c46x_qc_issue,
25403@@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
25404 return 0;
25405 }
25406
25407-static struct ata_port_operations qdi6500_port_ops = {
25408+static const struct ata_port_operations qdi6500_port_ops = {
25409 .inherits = &legacy_base_port_ops,
25410 .set_piomode = qdi6500_set_piomode,
25411 .qc_issue = qdi_qc_issue,
25412 .sff_data_xfer = vlb32_data_xfer,
25413 };
25414
25415-static struct ata_port_operations qdi6580_port_ops = {
25416+static const struct ata_port_operations qdi6580_port_ops = {
25417 .inherits = &legacy_base_port_ops,
25418 .set_piomode = qdi6580_set_piomode,
25419 .sff_data_xfer = vlb32_data_xfer,
25420 };
25421
25422-static struct ata_port_operations qdi6580dp_port_ops = {
25423+static const struct ata_port_operations qdi6580dp_port_ops = {
25424 .inherits = &legacy_base_port_ops,
25425 .set_piomode = qdi6580dp_set_piomode,
25426 .sff_data_xfer = vlb32_data_xfer,
25427@@ -855,7 +855,7 @@ static int winbond_port(struct platform_
25428 return 0;
25429 }
25430
25431-static struct ata_port_operations winbond_port_ops = {
25432+static const struct ata_port_operations winbond_port_ops = {
25433 .inherits = &legacy_base_port_ops,
25434 .set_piomode = winbond_set_piomode,
25435 .sff_data_xfer = vlb32_data_xfer,
25436@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
25437 int pio_modes = controller->pio_mask;
25438 unsigned long io = probe->port;
25439 u32 mask = (1 << probe->slot);
25440- struct ata_port_operations *ops = controller->ops;
25441+ const struct ata_port_operations *ops = controller->ops;
25442 struct legacy_data *ld = &legacy_data[probe->slot];
25443 struct ata_host *host = NULL;
25444 struct ata_port *ap;
25445diff -urNp linux-2.6.32.45/drivers/ata/pata_marvell.c linux-2.6.32.45/drivers/ata/pata_marvell.c
25446--- linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
25447+++ linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
25448@@ -100,7 +100,7 @@ static struct scsi_host_template marvell
25449 ATA_BMDMA_SHT(DRV_NAME),
25450 };
25451
25452-static struct ata_port_operations marvell_ops = {
25453+static const struct ata_port_operations marvell_ops = {
25454 .inherits = &ata_bmdma_port_ops,
25455 .cable_detect = marvell_cable_detect,
25456 .prereset = marvell_pre_reset,
25457diff -urNp linux-2.6.32.45/drivers/ata/pata_mpc52xx.c linux-2.6.32.45/drivers/ata/pata_mpc52xx.c
25458--- linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
25459+++ linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
25460@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
25461 ATA_PIO_SHT(DRV_NAME),
25462 };
25463
25464-static struct ata_port_operations mpc52xx_ata_port_ops = {
25465+static const struct ata_port_operations mpc52xx_ata_port_ops = {
25466 .inherits = &ata_bmdma_port_ops,
25467 .sff_dev_select = mpc52xx_ata_dev_select,
25468 .set_piomode = mpc52xx_ata_set_piomode,
25469diff -urNp linux-2.6.32.45/drivers/ata/pata_mpiix.c linux-2.6.32.45/drivers/ata/pata_mpiix.c
25470--- linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
25471+++ linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
25472@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
25473 ATA_PIO_SHT(DRV_NAME),
25474 };
25475
25476-static struct ata_port_operations mpiix_port_ops = {
25477+static const struct ata_port_operations mpiix_port_ops = {
25478 .inherits = &ata_sff_port_ops,
25479 .qc_issue = mpiix_qc_issue,
25480 .cable_detect = ata_cable_40wire,
25481diff -urNp linux-2.6.32.45/drivers/ata/pata_netcell.c linux-2.6.32.45/drivers/ata/pata_netcell.c
25482--- linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
25483+++ linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
25484@@ -34,7 +34,7 @@ static struct scsi_host_template netcell
25485 ATA_BMDMA_SHT(DRV_NAME),
25486 };
25487
25488-static struct ata_port_operations netcell_ops = {
25489+static const struct ata_port_operations netcell_ops = {
25490 .inherits = &ata_bmdma_port_ops,
25491 .cable_detect = ata_cable_80wire,
25492 .read_id = netcell_read_id,
25493diff -urNp linux-2.6.32.45/drivers/ata/pata_ninja32.c linux-2.6.32.45/drivers/ata/pata_ninja32.c
25494--- linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
25495+++ linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
25496@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
25497 ATA_BMDMA_SHT(DRV_NAME),
25498 };
25499
25500-static struct ata_port_operations ninja32_port_ops = {
25501+static const struct ata_port_operations ninja32_port_ops = {
25502 .inherits = &ata_bmdma_port_ops,
25503 .sff_dev_select = ninja32_dev_select,
25504 .cable_detect = ata_cable_40wire,
25505diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87410.c linux-2.6.32.45/drivers/ata/pata_ns87410.c
25506--- linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
25507+++ linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
25508@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
25509 ATA_PIO_SHT(DRV_NAME),
25510 };
25511
25512-static struct ata_port_operations ns87410_port_ops = {
25513+static const struct ata_port_operations ns87410_port_ops = {
25514 .inherits = &ata_sff_port_ops,
25515 .qc_issue = ns87410_qc_issue,
25516 .cable_detect = ata_cable_40wire,
25517diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87415.c linux-2.6.32.45/drivers/ata/pata_ns87415.c
25518--- linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
25519+++ linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
25520@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
25521 }
25522 #endif /* 87560 SuperIO Support */
25523
25524-static struct ata_port_operations ns87415_pata_ops = {
25525+static const struct ata_port_operations ns87415_pata_ops = {
25526 .inherits = &ata_bmdma_port_ops,
25527
25528 .check_atapi_dma = ns87415_check_atapi_dma,
25529@@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
25530 };
25531
25532 #if defined(CONFIG_SUPERIO)
25533-static struct ata_port_operations ns87560_pata_ops = {
25534+static const struct ata_port_operations ns87560_pata_ops = {
25535 .inherits = &ns87415_pata_ops,
25536 .sff_tf_read = ns87560_tf_read,
25537 .sff_check_status = ns87560_check_status,
25538diff -urNp linux-2.6.32.45/drivers/ata/pata_octeon_cf.c linux-2.6.32.45/drivers/ata/pata_octeon_cf.c
25539--- linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25540+++ linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25541@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25542 return 0;
25543 }
25544
25545+/* cannot be const */
25546 static struct ata_port_operations octeon_cf_ops = {
25547 .inherits = &ata_sff_port_ops,
25548 .check_atapi_dma = octeon_cf_check_atapi_dma,
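Note on the hunk above: unlike the other libata port_ops tables in this series, octeon_cf_ops is deliberately left non-const (the patch only adds the "cannot be const" marker), presumably because the driver still writes to this table at probe time. A minimal illustrative sketch of the distinction, using hypothetical example_* names that are not taken from the patch:

#include <linux/libata.h>

/* Constified ops tables can be placed in read-only memory, which is the
 * point of this part of the patch; a table that is modified at runtime
 * must stay writable, hence the "cannot be const" annotation above.
 * Illustrative only, not code from the patch. */
static const struct ata_port_operations example_const_ops = {
	.inherits	= &ata_sff_port_ops,
	.cable_detect	= ata_cable_40wire,
};

static struct ata_port_operations example_runtime_ops = {	/* cannot be const */
	.inherits	= &ata_sff_port_ops,
};

static int __init example_fixup(void)
{
	/* A runtime assignment like this is what rules out constification. */
	example_runtime_ops.cable_detect = ata_cable_unknown;
	return 0;
}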
25549diff -urNp linux-2.6.32.45/drivers/ata/pata_oldpiix.c linux-2.6.32.45/drivers/ata/pata_oldpiix.c
25550--- linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25551+++ linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25552@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25553 ATA_BMDMA_SHT(DRV_NAME),
25554 };
25555
25556-static struct ata_port_operations oldpiix_pata_ops = {
25557+static const struct ata_port_operations oldpiix_pata_ops = {
25558 .inherits = &ata_bmdma_port_ops,
25559 .qc_issue = oldpiix_qc_issue,
25560 .cable_detect = ata_cable_40wire,
25561diff -urNp linux-2.6.32.45/drivers/ata/pata_opti.c linux-2.6.32.45/drivers/ata/pata_opti.c
25562--- linux-2.6.32.45/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25563+++ linux-2.6.32.45/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25564@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25565 ATA_PIO_SHT(DRV_NAME),
25566 };
25567
25568-static struct ata_port_operations opti_port_ops = {
25569+static const struct ata_port_operations opti_port_ops = {
25570 .inherits = &ata_sff_port_ops,
25571 .cable_detect = ata_cable_40wire,
25572 .set_piomode = opti_set_piomode,
25573diff -urNp linux-2.6.32.45/drivers/ata/pata_optidma.c linux-2.6.32.45/drivers/ata/pata_optidma.c
25574--- linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25575+++ linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25576@@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25577 ATA_BMDMA_SHT(DRV_NAME),
25578 };
25579
25580-static struct ata_port_operations optidma_port_ops = {
25581+static const struct ata_port_operations optidma_port_ops = {
25582 .inherits = &ata_bmdma_port_ops,
25583 .cable_detect = ata_cable_40wire,
25584 .set_piomode = optidma_set_pio_mode,
25585@@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25586 .prereset = optidma_pre_reset,
25587 };
25588
25589-static struct ata_port_operations optiplus_port_ops = {
25590+static const struct ata_port_operations optiplus_port_ops = {
25591 .inherits = &optidma_port_ops,
25592 .set_piomode = optiplus_set_pio_mode,
25593 .set_dmamode = optiplus_set_dma_mode,
25594diff -urNp linux-2.6.32.45/drivers/ata/pata_palmld.c linux-2.6.32.45/drivers/ata/pata_palmld.c
25595--- linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25596+++ linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25597@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25598 ATA_PIO_SHT(DRV_NAME),
25599 };
25600
25601-static struct ata_port_operations palmld_port_ops = {
25602+static const struct ata_port_operations palmld_port_ops = {
25603 .inherits = &ata_sff_port_ops,
25604 .sff_data_xfer = ata_sff_data_xfer_noirq,
25605 .cable_detect = ata_cable_40wire,
25606diff -urNp linux-2.6.32.45/drivers/ata/pata_pcmcia.c linux-2.6.32.45/drivers/ata/pata_pcmcia.c
25607--- linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25608+++ linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25609@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25610 ATA_PIO_SHT(DRV_NAME),
25611 };
25612
25613-static struct ata_port_operations pcmcia_port_ops = {
25614+static const struct ata_port_operations pcmcia_port_ops = {
25615 .inherits = &ata_sff_port_ops,
25616 .sff_data_xfer = ata_sff_data_xfer_noirq,
25617 .cable_detect = ata_cable_40wire,
25618 .set_mode = pcmcia_set_mode,
25619 };
25620
25621-static struct ata_port_operations pcmcia_8bit_port_ops = {
25622+static const struct ata_port_operations pcmcia_8bit_port_ops = {
25623 .inherits = &ata_sff_port_ops,
25624 .sff_data_xfer = ata_data_xfer_8bit,
25625 .cable_detect = ata_cable_40wire,
25626@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25627 unsigned long io_base, ctl_base;
25628 void __iomem *io_addr, *ctl_addr;
25629 int n_ports = 1;
25630- struct ata_port_operations *ops = &pcmcia_port_ops;
25631+ const struct ata_port_operations *ops = &pcmcia_port_ops;
25632
25633 info = kzalloc(sizeof(*info), GFP_KERNEL);
25634 if (info == NULL)
25635diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc2027x.c linux-2.6.32.45/drivers/ata/pata_pdc2027x.c
25636--- linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25637+++ linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25638@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25639 ATA_BMDMA_SHT(DRV_NAME),
25640 };
25641
25642-static struct ata_port_operations pdc2027x_pata100_ops = {
25643+static const struct ata_port_operations pdc2027x_pata100_ops = {
25644 .inherits = &ata_bmdma_port_ops,
25645 .check_atapi_dma = pdc2027x_check_atapi_dma,
25646 .cable_detect = pdc2027x_cable_detect,
25647 .prereset = pdc2027x_prereset,
25648 };
25649
25650-static struct ata_port_operations pdc2027x_pata133_ops = {
25651+static const struct ata_port_operations pdc2027x_pata133_ops = {
25652 .inherits = &pdc2027x_pata100_ops,
25653 .mode_filter = pdc2027x_mode_filter,
25654 .set_piomode = pdc2027x_set_piomode,
25655diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c
25656--- linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25657+++ linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25658@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25659 ATA_BMDMA_SHT(DRV_NAME),
25660 };
25661
25662-static struct ata_port_operations pdc2024x_port_ops = {
25663+static const struct ata_port_operations pdc2024x_port_ops = {
25664 .inherits = &ata_bmdma_port_ops,
25665
25666 .cable_detect = ata_cable_40wire,
25667@@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25668 .sff_exec_command = pdc202xx_exec_command,
25669 };
25670
25671-static struct ata_port_operations pdc2026x_port_ops = {
25672+static const struct ata_port_operations pdc2026x_port_ops = {
25673 .inherits = &pdc2024x_port_ops,
25674
25675 .check_atapi_dma = pdc2026x_check_atapi_dma,
25676diff -urNp linux-2.6.32.45/drivers/ata/pata_platform.c linux-2.6.32.45/drivers/ata/pata_platform.c
25677--- linux-2.6.32.45/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25678+++ linux-2.6.32.45/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25679@@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25680 ATA_PIO_SHT(DRV_NAME),
25681 };
25682
25683-static struct ata_port_operations pata_platform_port_ops = {
25684+static const struct ata_port_operations pata_platform_port_ops = {
25685 .inherits = &ata_sff_port_ops,
25686 .sff_data_xfer = ata_sff_data_xfer_noirq,
25687 .cable_detect = ata_cable_unknown,
25688diff -urNp linux-2.6.32.45/drivers/ata/pata_qdi.c linux-2.6.32.45/drivers/ata/pata_qdi.c
25689--- linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25690+++ linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25691@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25692 ATA_PIO_SHT(DRV_NAME),
25693 };
25694
25695-static struct ata_port_operations qdi6500_port_ops = {
25696+static const struct ata_port_operations qdi6500_port_ops = {
25697 .inherits = &ata_sff_port_ops,
25698 .qc_issue = qdi_qc_issue,
25699 .sff_data_xfer = qdi_data_xfer,
25700@@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25701 .set_piomode = qdi6500_set_piomode,
25702 };
25703
25704-static struct ata_port_operations qdi6580_port_ops = {
25705+static const struct ata_port_operations qdi6580_port_ops = {
25706 .inherits = &qdi6500_port_ops,
25707 .set_piomode = qdi6580_set_piomode,
25708 };
25709diff -urNp linux-2.6.32.45/drivers/ata/pata_radisys.c linux-2.6.32.45/drivers/ata/pata_radisys.c
25710--- linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25711+++ linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25712@@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25713 ATA_BMDMA_SHT(DRV_NAME),
25714 };
25715
25716-static struct ata_port_operations radisys_pata_ops = {
25717+static const struct ata_port_operations radisys_pata_ops = {
25718 .inherits = &ata_bmdma_port_ops,
25719 .qc_issue = radisys_qc_issue,
25720 .cable_detect = ata_cable_unknown,
25721diff -urNp linux-2.6.32.45/drivers/ata/pata_rb532_cf.c linux-2.6.32.45/drivers/ata/pata_rb532_cf.c
25722--- linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25723+++ linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25724@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25725 return IRQ_HANDLED;
25726 }
25727
25728-static struct ata_port_operations rb532_pata_port_ops = {
25729+static const struct ata_port_operations rb532_pata_port_ops = {
25730 .inherits = &ata_sff_port_ops,
25731 .sff_data_xfer = ata_sff_data_xfer32,
25732 };
25733diff -urNp linux-2.6.32.45/drivers/ata/pata_rdc.c linux-2.6.32.45/drivers/ata/pata_rdc.c
25734--- linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25735+++ linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25736@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25737 pci_write_config_byte(dev, 0x48, udma_enable);
25738 }
25739
25740-static struct ata_port_operations rdc_pata_ops = {
25741+static const struct ata_port_operations rdc_pata_ops = {
25742 .inherits = &ata_bmdma32_port_ops,
25743 .cable_detect = rdc_pata_cable_detect,
25744 .set_piomode = rdc_set_piomode,
25745diff -urNp linux-2.6.32.45/drivers/ata/pata_rz1000.c linux-2.6.32.45/drivers/ata/pata_rz1000.c
25746--- linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25747+++ linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25748@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25749 ATA_PIO_SHT(DRV_NAME),
25750 };
25751
25752-static struct ata_port_operations rz1000_port_ops = {
25753+static const struct ata_port_operations rz1000_port_ops = {
25754 .inherits = &ata_sff_port_ops,
25755 .cable_detect = ata_cable_40wire,
25756 .set_mode = rz1000_set_mode,
25757diff -urNp linux-2.6.32.45/drivers/ata/pata_sc1200.c linux-2.6.32.45/drivers/ata/pata_sc1200.c
25758--- linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25759+++ linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25760@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25761 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25762 };
25763
25764-static struct ata_port_operations sc1200_port_ops = {
25765+static const struct ata_port_operations sc1200_port_ops = {
25766 .inherits = &ata_bmdma_port_ops,
25767 .qc_prep = ata_sff_dumb_qc_prep,
25768 .qc_issue = sc1200_qc_issue,
25769diff -urNp linux-2.6.32.45/drivers/ata/pata_scc.c linux-2.6.32.45/drivers/ata/pata_scc.c
25770--- linux-2.6.32.45/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25771+++ linux-2.6.32.45/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25772@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25773 ATA_BMDMA_SHT(DRV_NAME),
25774 };
25775
25776-static struct ata_port_operations scc_pata_ops = {
25777+static const struct ata_port_operations scc_pata_ops = {
25778 .inherits = &ata_bmdma_port_ops,
25779
25780 .set_piomode = scc_set_piomode,
25781diff -urNp linux-2.6.32.45/drivers/ata/pata_sch.c linux-2.6.32.45/drivers/ata/pata_sch.c
25782--- linux-2.6.32.45/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25783+++ linux-2.6.32.45/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25784@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25785 ATA_BMDMA_SHT(DRV_NAME),
25786 };
25787
25788-static struct ata_port_operations sch_pata_ops = {
25789+static const struct ata_port_operations sch_pata_ops = {
25790 .inherits = &ata_bmdma_port_ops,
25791 .cable_detect = ata_cable_unknown,
25792 .set_piomode = sch_set_piomode,
25793diff -urNp linux-2.6.32.45/drivers/ata/pata_serverworks.c linux-2.6.32.45/drivers/ata/pata_serverworks.c
25794--- linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25795+++ linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25796@@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25797 ATA_BMDMA_SHT(DRV_NAME),
25798 };
25799
25800-static struct ata_port_operations serverworks_osb4_port_ops = {
25801+static const struct ata_port_operations serverworks_osb4_port_ops = {
25802 .inherits = &ata_bmdma_port_ops,
25803 .cable_detect = serverworks_cable_detect,
25804 .mode_filter = serverworks_osb4_filter,
25805@@ -307,7 +307,7 @@ static struct ata_port_operations server
25806 .set_dmamode = serverworks_set_dmamode,
25807 };
25808
25809-static struct ata_port_operations serverworks_csb_port_ops = {
25810+static const struct ata_port_operations serverworks_csb_port_ops = {
25811 .inherits = &serverworks_osb4_port_ops,
25812 .mode_filter = serverworks_csb_filter,
25813 };
25814diff -urNp linux-2.6.32.45/drivers/ata/pata_sil680.c linux-2.6.32.45/drivers/ata/pata_sil680.c
25815--- linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25816+++ linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25817@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25818 ATA_BMDMA_SHT(DRV_NAME),
25819 };
25820
25821-static struct ata_port_operations sil680_port_ops = {
25822+static const struct ata_port_operations sil680_port_ops = {
25823 .inherits = &ata_bmdma32_port_ops,
25824 .cable_detect = sil680_cable_detect,
25825 .set_piomode = sil680_set_piomode,
25826diff -urNp linux-2.6.32.45/drivers/ata/pata_sis.c linux-2.6.32.45/drivers/ata/pata_sis.c
25827--- linux-2.6.32.45/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25828+++ linux-2.6.32.45/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25829@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25830 ATA_BMDMA_SHT(DRV_NAME),
25831 };
25832
25833-static struct ata_port_operations sis_133_for_sata_ops = {
25834+static const struct ata_port_operations sis_133_for_sata_ops = {
25835 .inherits = &ata_bmdma_port_ops,
25836 .set_piomode = sis_133_set_piomode,
25837 .set_dmamode = sis_133_set_dmamode,
25838 .cable_detect = sis_133_cable_detect,
25839 };
25840
25841-static struct ata_port_operations sis_base_ops = {
25842+static const struct ata_port_operations sis_base_ops = {
25843 .inherits = &ata_bmdma_port_ops,
25844 .prereset = sis_pre_reset,
25845 };
25846
25847-static struct ata_port_operations sis_133_ops = {
25848+static const struct ata_port_operations sis_133_ops = {
25849 .inherits = &sis_base_ops,
25850 .set_piomode = sis_133_set_piomode,
25851 .set_dmamode = sis_133_set_dmamode,
25852 .cable_detect = sis_133_cable_detect,
25853 };
25854
25855-static struct ata_port_operations sis_133_early_ops = {
25856+static const struct ata_port_operations sis_133_early_ops = {
25857 .inherits = &sis_base_ops,
25858 .set_piomode = sis_100_set_piomode,
25859 .set_dmamode = sis_133_early_set_dmamode,
25860 .cable_detect = sis_66_cable_detect,
25861 };
25862
25863-static struct ata_port_operations sis_100_ops = {
25864+static const struct ata_port_operations sis_100_ops = {
25865 .inherits = &sis_base_ops,
25866 .set_piomode = sis_100_set_piomode,
25867 .set_dmamode = sis_100_set_dmamode,
25868 .cable_detect = sis_66_cable_detect,
25869 };
25870
25871-static struct ata_port_operations sis_66_ops = {
25872+static const struct ata_port_operations sis_66_ops = {
25873 .inherits = &sis_base_ops,
25874 .set_piomode = sis_old_set_piomode,
25875 .set_dmamode = sis_66_set_dmamode,
25876 .cable_detect = sis_66_cable_detect,
25877 };
25878
25879-static struct ata_port_operations sis_old_ops = {
25880+static const struct ata_port_operations sis_old_ops = {
25881 .inherits = &sis_base_ops,
25882 .set_piomode = sis_old_set_piomode,
25883 .set_dmamode = sis_old_set_dmamode,
25884diff -urNp linux-2.6.32.45/drivers/ata/pata_sl82c105.c linux-2.6.32.45/drivers/ata/pata_sl82c105.c
25885--- linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25886+++ linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25887@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25888 ATA_BMDMA_SHT(DRV_NAME),
25889 };
25890
25891-static struct ata_port_operations sl82c105_port_ops = {
25892+static const struct ata_port_operations sl82c105_port_ops = {
25893 .inherits = &ata_bmdma_port_ops,
25894 .qc_defer = sl82c105_qc_defer,
25895 .bmdma_start = sl82c105_bmdma_start,
25896diff -urNp linux-2.6.32.45/drivers/ata/pata_triflex.c linux-2.6.32.45/drivers/ata/pata_triflex.c
25897--- linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25898+++ linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25899@@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25900 ATA_BMDMA_SHT(DRV_NAME),
25901 };
25902
25903-static struct ata_port_operations triflex_port_ops = {
25904+static const struct ata_port_operations triflex_port_ops = {
25905 .inherits = &ata_bmdma_port_ops,
25906 .bmdma_start = triflex_bmdma_start,
25907 .bmdma_stop = triflex_bmdma_stop,
25908diff -urNp linux-2.6.32.45/drivers/ata/pata_via.c linux-2.6.32.45/drivers/ata/pata_via.c
25909--- linux-2.6.32.45/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25910+++ linux-2.6.32.45/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25911@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25912 ATA_BMDMA_SHT(DRV_NAME),
25913 };
25914
25915-static struct ata_port_operations via_port_ops = {
25916+static const struct ata_port_operations via_port_ops = {
25917 .inherits = &ata_bmdma_port_ops,
25918 .cable_detect = via_cable_detect,
25919 .set_piomode = via_set_piomode,
25920@@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25921 .port_start = via_port_start,
25922 };
25923
25924-static struct ata_port_operations via_port_ops_noirq = {
25925+static const struct ata_port_operations via_port_ops_noirq = {
25926 .inherits = &via_port_ops,
25927 .sff_data_xfer = ata_sff_data_xfer_noirq,
25928 };
25929diff -urNp linux-2.6.32.45/drivers/ata/pata_winbond.c linux-2.6.32.45/drivers/ata/pata_winbond.c
25930--- linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25931+++ linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25932@@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25933 ATA_PIO_SHT(DRV_NAME),
25934 };
25935
25936-static struct ata_port_operations winbond_port_ops = {
25937+static const struct ata_port_operations winbond_port_ops = {
25938 .inherits = &ata_sff_port_ops,
25939 .sff_data_xfer = winbond_data_xfer,
25940 .cable_detect = ata_cable_40wire,
25941diff -urNp linux-2.6.32.45/drivers/ata/pdc_adma.c linux-2.6.32.45/drivers/ata/pdc_adma.c
25942--- linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25943+++ linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25944@@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25945 .dma_boundary = ADMA_DMA_BOUNDARY,
25946 };
25947
25948-static struct ata_port_operations adma_ata_ops = {
25949+static const struct ata_port_operations adma_ata_ops = {
25950 .inherits = &ata_sff_port_ops,
25951
25952 .lost_interrupt = ATA_OP_NULL,
25953diff -urNp linux-2.6.32.45/drivers/ata/sata_fsl.c linux-2.6.32.45/drivers/ata/sata_fsl.c
25954--- linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25955+++ linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25956@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25957 .dma_boundary = ATA_DMA_BOUNDARY,
25958 };
25959
25960-static struct ata_port_operations sata_fsl_ops = {
25961+static const struct ata_port_operations sata_fsl_ops = {
25962 .inherits = &sata_pmp_port_ops,
25963
25964 .qc_defer = ata_std_qc_defer,
25965diff -urNp linux-2.6.32.45/drivers/ata/sata_inic162x.c linux-2.6.32.45/drivers/ata/sata_inic162x.c
25966--- linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25967+++ linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25968@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25969 return 0;
25970 }
25971
25972-static struct ata_port_operations inic_port_ops = {
25973+static const struct ata_port_operations inic_port_ops = {
25974 .inherits = &sata_port_ops,
25975
25976 .check_atapi_dma = inic_check_atapi_dma,
25977diff -urNp linux-2.6.32.45/drivers/ata/sata_mv.c linux-2.6.32.45/drivers/ata/sata_mv.c
25978--- linux-2.6.32.45/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25979+++ linux-2.6.32.45/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25980@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25981 .dma_boundary = MV_DMA_BOUNDARY,
25982 };
25983
25984-static struct ata_port_operations mv5_ops = {
25985+static const struct ata_port_operations mv5_ops = {
25986 .inherits = &ata_sff_port_ops,
25987
25988 .lost_interrupt = ATA_OP_NULL,
25989@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25990 .port_stop = mv_port_stop,
25991 };
25992
25993-static struct ata_port_operations mv6_ops = {
25994+static const struct ata_port_operations mv6_ops = {
25995 .inherits = &mv5_ops,
25996 .dev_config = mv6_dev_config,
25997 .scr_read = mv_scr_read,
25998@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25999 .bmdma_status = mv_bmdma_status,
26000 };
26001
26002-static struct ata_port_operations mv_iie_ops = {
26003+static const struct ata_port_operations mv_iie_ops = {
26004 .inherits = &mv6_ops,
26005 .dev_config = ATA_OP_NULL,
26006 .qc_prep = mv_qc_prep_iie,
26007diff -urNp linux-2.6.32.45/drivers/ata/sata_nv.c linux-2.6.32.45/drivers/ata/sata_nv.c
26008--- linux-2.6.32.45/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
26009+++ linux-2.6.32.45/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
26010@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
26011 * cases. Define nv_hardreset() which only kicks in for post-boot
26012 * probing and use it for all variants.
26013 */
26014-static struct ata_port_operations nv_generic_ops = {
26015+static const struct ata_port_operations nv_generic_ops = {
26016 .inherits = &ata_bmdma_port_ops,
26017 .lost_interrupt = ATA_OP_NULL,
26018 .scr_read = nv_scr_read,
26019@@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
26020 .hardreset = nv_hardreset,
26021 };
26022
26023-static struct ata_port_operations nv_nf2_ops = {
26024+static const struct ata_port_operations nv_nf2_ops = {
26025 .inherits = &nv_generic_ops,
26026 .freeze = nv_nf2_freeze,
26027 .thaw = nv_nf2_thaw,
26028 };
26029
26030-static struct ata_port_operations nv_ck804_ops = {
26031+static const struct ata_port_operations nv_ck804_ops = {
26032 .inherits = &nv_generic_ops,
26033 .freeze = nv_ck804_freeze,
26034 .thaw = nv_ck804_thaw,
26035 .host_stop = nv_ck804_host_stop,
26036 };
26037
26038-static struct ata_port_operations nv_adma_ops = {
26039+static const struct ata_port_operations nv_adma_ops = {
26040 .inherits = &nv_ck804_ops,
26041
26042 .check_atapi_dma = nv_adma_check_atapi_dma,
26043@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
26044 .host_stop = nv_adma_host_stop,
26045 };
26046
26047-static struct ata_port_operations nv_swncq_ops = {
26048+static const struct ata_port_operations nv_swncq_ops = {
26049 .inherits = &nv_generic_ops,
26050
26051 .qc_defer = ata_std_qc_defer,
26052diff -urNp linux-2.6.32.45/drivers/ata/sata_promise.c linux-2.6.32.45/drivers/ata/sata_promise.c
26053--- linux-2.6.32.45/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
26054+++ linux-2.6.32.45/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
26055@@ -195,7 +195,7 @@ static const struct ata_port_operations
26056 .error_handler = pdc_error_handler,
26057 };
26058
26059-static struct ata_port_operations pdc_sata_ops = {
26060+static const struct ata_port_operations pdc_sata_ops = {
26061 .inherits = &pdc_common_ops,
26062 .cable_detect = pdc_sata_cable_detect,
26063 .freeze = pdc_sata_freeze,
26064@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
26065
26066 /* First-generation chips need a more restrictive ->check_atapi_dma op,
26067 and ->freeze/thaw that ignore the hotplug controls. */
26068-static struct ata_port_operations pdc_old_sata_ops = {
26069+static const struct ata_port_operations pdc_old_sata_ops = {
26070 .inherits = &pdc_sata_ops,
26071 .freeze = pdc_freeze,
26072 .thaw = pdc_thaw,
26073 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
26074 };
26075
26076-static struct ata_port_operations pdc_pata_ops = {
26077+static const struct ata_port_operations pdc_pata_ops = {
26078 .inherits = &pdc_common_ops,
26079 .cable_detect = pdc_pata_cable_detect,
26080 .freeze = pdc_freeze,
26081diff -urNp linux-2.6.32.45/drivers/ata/sata_qstor.c linux-2.6.32.45/drivers/ata/sata_qstor.c
26082--- linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
26083+++ linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
26084@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
26085 .dma_boundary = QS_DMA_BOUNDARY,
26086 };
26087
26088-static struct ata_port_operations qs_ata_ops = {
26089+static const struct ata_port_operations qs_ata_ops = {
26090 .inherits = &ata_sff_port_ops,
26091
26092 .check_atapi_dma = qs_check_atapi_dma,
26093diff -urNp linux-2.6.32.45/drivers/ata/sata_sil24.c linux-2.6.32.45/drivers/ata/sata_sil24.c
26094--- linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
26095+++ linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
26096@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
26097 .dma_boundary = ATA_DMA_BOUNDARY,
26098 };
26099
26100-static struct ata_port_operations sil24_ops = {
26101+static const struct ata_port_operations sil24_ops = {
26102 .inherits = &sata_pmp_port_ops,
26103
26104 .qc_defer = sil24_qc_defer,
26105diff -urNp linux-2.6.32.45/drivers/ata/sata_sil.c linux-2.6.32.45/drivers/ata/sata_sil.c
26106--- linux-2.6.32.45/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
26107+++ linux-2.6.32.45/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
26108@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
26109 .sg_tablesize = ATA_MAX_PRD
26110 };
26111
26112-static struct ata_port_operations sil_ops = {
26113+static const struct ata_port_operations sil_ops = {
26114 .inherits = &ata_bmdma32_port_ops,
26115 .dev_config = sil_dev_config,
26116 .set_mode = sil_set_mode,
26117diff -urNp linux-2.6.32.45/drivers/ata/sata_sis.c linux-2.6.32.45/drivers/ata/sata_sis.c
26118--- linux-2.6.32.45/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
26119+++ linux-2.6.32.45/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
26120@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
26121 ATA_BMDMA_SHT(DRV_NAME),
26122 };
26123
26124-static struct ata_port_operations sis_ops = {
26125+static const struct ata_port_operations sis_ops = {
26126 .inherits = &ata_bmdma_port_ops,
26127 .scr_read = sis_scr_read,
26128 .scr_write = sis_scr_write,
26129diff -urNp linux-2.6.32.45/drivers/ata/sata_svw.c linux-2.6.32.45/drivers/ata/sata_svw.c
26130--- linux-2.6.32.45/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
26131+++ linux-2.6.32.45/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
26132@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
26133 };
26134
26135
26136-static struct ata_port_operations k2_sata_ops = {
26137+static const struct ata_port_operations k2_sata_ops = {
26138 .inherits = &ata_bmdma_port_ops,
26139 .sff_tf_load = k2_sata_tf_load,
26140 .sff_tf_read = k2_sata_tf_read,
26141diff -urNp linux-2.6.32.45/drivers/ata/sata_sx4.c linux-2.6.32.45/drivers/ata/sata_sx4.c
26142--- linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
26143+++ linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
26144@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
26145 };
26146
26147 /* TODO: inherit from base port_ops after converting to new EH */
26148-static struct ata_port_operations pdc_20621_ops = {
26149+static const struct ata_port_operations pdc_20621_ops = {
26150 .inherits = &ata_sff_port_ops,
26151
26152 .check_atapi_dma = pdc_check_atapi_dma,
26153diff -urNp linux-2.6.32.45/drivers/ata/sata_uli.c linux-2.6.32.45/drivers/ata/sata_uli.c
26154--- linux-2.6.32.45/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
26155+++ linux-2.6.32.45/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
26156@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
26157 ATA_BMDMA_SHT(DRV_NAME),
26158 };
26159
26160-static struct ata_port_operations uli_ops = {
26161+static const struct ata_port_operations uli_ops = {
26162 .inherits = &ata_bmdma_port_ops,
26163 .scr_read = uli_scr_read,
26164 .scr_write = uli_scr_write,
26165diff -urNp linux-2.6.32.45/drivers/ata/sata_via.c linux-2.6.32.45/drivers/ata/sata_via.c
26166--- linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
26167+++ linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
26168@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
26169 ATA_BMDMA_SHT(DRV_NAME),
26170 };
26171
26172-static struct ata_port_operations svia_base_ops = {
26173+static const struct ata_port_operations svia_base_ops = {
26174 .inherits = &ata_bmdma_port_ops,
26175 .sff_tf_load = svia_tf_load,
26176 };
26177
26178-static struct ata_port_operations vt6420_sata_ops = {
26179+static const struct ata_port_operations vt6420_sata_ops = {
26180 .inherits = &svia_base_ops,
26181 .freeze = svia_noop_freeze,
26182 .prereset = vt6420_prereset,
26183 .bmdma_start = vt6420_bmdma_start,
26184 };
26185
26186-static struct ata_port_operations vt6421_pata_ops = {
26187+static const struct ata_port_operations vt6421_pata_ops = {
26188 .inherits = &svia_base_ops,
26189 .cable_detect = vt6421_pata_cable_detect,
26190 .set_piomode = vt6421_set_pio_mode,
26191 .set_dmamode = vt6421_set_dma_mode,
26192 };
26193
26194-static struct ata_port_operations vt6421_sata_ops = {
26195+static const struct ata_port_operations vt6421_sata_ops = {
26196 .inherits = &svia_base_ops,
26197 .scr_read = svia_scr_read,
26198 .scr_write = svia_scr_write,
26199 };
26200
26201-static struct ata_port_operations vt8251_ops = {
26202+static const struct ata_port_operations vt8251_ops = {
26203 .inherits = &svia_base_ops,
26204 .hardreset = sata_std_hardreset,
26205 .scr_read = vt8251_scr_read,
26206diff -urNp linux-2.6.32.45/drivers/ata/sata_vsc.c linux-2.6.32.45/drivers/ata/sata_vsc.c
26207--- linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
26208+++ linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
26209@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
26210 };
26211
26212
26213-static struct ata_port_operations vsc_sata_ops = {
26214+static const struct ata_port_operations vsc_sata_ops = {
26215 .inherits = &ata_bmdma_port_ops,
26216 /* The IRQ handling is not quite standard SFF behaviour so we
26217 cannot use the default lost interrupt handler */
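The remaining hunks in this section move from the libata constification pattern to the ATM drivers, where every per-VCC statistics update is switched from atomic_inc()/atomic_add()/atomic_read() to the *_unchecked variants (the corresponding counter fields are retyped to atomic_unchecked_t elsewhere in this patch). Under CONFIG_PAX_REFCOUNT these counters are plain statistics that are allowed to wrap, so they are opted out of the reference-count overflow detection that the checked atomics gain. A rough sketch of the idea follows; it is not the actual PaX implementation, which is per-architecture assembly:

/* Statistics counter type mirroring atomic_t; increments may wrap
 * silently by design, with no overflow trap. */
typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);
}

static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, i);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return *(volatile const int *)&v->counter;
}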
26218diff -urNp linux-2.6.32.45/drivers/atm/adummy.c linux-2.6.32.45/drivers/atm/adummy.c
26219--- linux-2.6.32.45/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
26220+++ linux-2.6.32.45/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
26221@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
26222 vcc->pop(vcc, skb);
26223 else
26224 dev_kfree_skb_any(skb);
26225- atomic_inc(&vcc->stats->tx);
26226+ atomic_inc_unchecked(&vcc->stats->tx);
26227
26228 return 0;
26229 }
26230diff -urNp linux-2.6.32.45/drivers/atm/ambassador.c linux-2.6.32.45/drivers/atm/ambassador.c
26231--- linux-2.6.32.45/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
26232+++ linux-2.6.32.45/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
26233@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
26234 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
26235
26236 // VC layer stats
26237- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26238+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26239
26240 // free the descriptor
26241 kfree (tx_descr);
26242@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
26243 dump_skb ("<<<", vc, skb);
26244
26245 // VC layer stats
26246- atomic_inc(&atm_vcc->stats->rx);
26247+ atomic_inc_unchecked(&atm_vcc->stats->rx);
26248 __net_timestamp(skb);
26249 // end of our responsability
26250 atm_vcc->push (atm_vcc, skb);
26251@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
26252 } else {
26253 PRINTK (KERN_INFO, "dropped over-size frame");
26254 // should we count this?
26255- atomic_inc(&atm_vcc->stats->rx_drop);
26256+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26257 }
26258
26259 } else {
26260@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
26261 }
26262
26263 if (check_area (skb->data, skb->len)) {
26264- atomic_inc(&atm_vcc->stats->tx_err);
26265+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26266 return -ENOMEM; // ?
26267 }
26268
26269diff -urNp linux-2.6.32.45/drivers/atm/atmtcp.c linux-2.6.32.45/drivers/atm/atmtcp.c
26270--- linux-2.6.32.45/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
26271+++ linux-2.6.32.45/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
26272@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
26273 if (vcc->pop) vcc->pop(vcc,skb);
26274 else dev_kfree_skb(skb);
26275 if (dev_data) return 0;
26276- atomic_inc(&vcc->stats->tx_err);
26277+ atomic_inc_unchecked(&vcc->stats->tx_err);
26278 return -ENOLINK;
26279 }
26280 size = skb->len+sizeof(struct atmtcp_hdr);
26281@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
26282 if (!new_skb) {
26283 if (vcc->pop) vcc->pop(vcc,skb);
26284 else dev_kfree_skb(skb);
26285- atomic_inc(&vcc->stats->tx_err);
26286+ atomic_inc_unchecked(&vcc->stats->tx_err);
26287 return -ENOBUFS;
26288 }
26289 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26290@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
26291 if (vcc->pop) vcc->pop(vcc,skb);
26292 else dev_kfree_skb(skb);
26293 out_vcc->push(out_vcc,new_skb);
26294- atomic_inc(&vcc->stats->tx);
26295- atomic_inc(&out_vcc->stats->rx);
26296+ atomic_inc_unchecked(&vcc->stats->tx);
26297+ atomic_inc_unchecked(&out_vcc->stats->rx);
26298 return 0;
26299 }
26300
26301@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
26302 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26303 read_unlock(&vcc_sklist_lock);
26304 if (!out_vcc) {
26305- atomic_inc(&vcc->stats->tx_err);
26306+ atomic_inc_unchecked(&vcc->stats->tx_err);
26307 goto done;
26308 }
26309 skb_pull(skb,sizeof(struct atmtcp_hdr));
26310@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
26311 __net_timestamp(new_skb);
26312 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26313 out_vcc->push(out_vcc,new_skb);
26314- atomic_inc(&vcc->stats->tx);
26315- atomic_inc(&out_vcc->stats->rx);
26316+ atomic_inc_unchecked(&vcc->stats->tx);
26317+ atomic_inc_unchecked(&out_vcc->stats->rx);
26318 done:
26319 if (vcc->pop) vcc->pop(vcc,skb);
26320 else dev_kfree_skb(skb);
26321diff -urNp linux-2.6.32.45/drivers/atm/eni.c linux-2.6.32.45/drivers/atm/eni.c
26322--- linux-2.6.32.45/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
26323+++ linux-2.6.32.45/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
26324@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26325 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26326 vcc->dev->number);
26327 length = 0;
26328- atomic_inc(&vcc->stats->rx_err);
26329+ atomic_inc_unchecked(&vcc->stats->rx_err);
26330 }
26331 else {
26332 length = ATM_CELL_SIZE-1; /* no HEC */
26333@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26334 size);
26335 }
26336 eff = length = 0;
26337- atomic_inc(&vcc->stats->rx_err);
26338+ atomic_inc_unchecked(&vcc->stats->rx_err);
26339 }
26340 else {
26341 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26342@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26343 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26344 vcc->dev->number,vcc->vci,length,size << 2,descr);
26345 length = eff = 0;
26346- atomic_inc(&vcc->stats->rx_err);
26347+ atomic_inc_unchecked(&vcc->stats->rx_err);
26348 }
26349 }
26350 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26351@@ -770,7 +770,7 @@ rx_dequeued++;
26352 vcc->push(vcc,skb);
26353 pushed++;
26354 }
26355- atomic_inc(&vcc->stats->rx);
26356+ atomic_inc_unchecked(&vcc->stats->rx);
26357 }
26358 wake_up(&eni_dev->rx_wait);
26359 }
26360@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
26361 PCI_DMA_TODEVICE);
26362 if (vcc->pop) vcc->pop(vcc,skb);
26363 else dev_kfree_skb_irq(skb);
26364- atomic_inc(&vcc->stats->tx);
26365+ atomic_inc_unchecked(&vcc->stats->tx);
26366 wake_up(&eni_dev->tx_wait);
26367 dma_complete++;
26368 }
26369diff -urNp linux-2.6.32.45/drivers/atm/firestream.c linux-2.6.32.45/drivers/atm/firestream.c
26370--- linux-2.6.32.45/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
26371+++ linux-2.6.32.45/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
26372@@ -748,7 +748,7 @@ static void process_txdone_queue (struct
26373 }
26374 }
26375
26376- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26377+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26378
26379 fs_dprintk (FS_DEBUG_TXMEM, "i");
26380 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26381@@ -815,7 +815,7 @@ static void process_incoming (struct fs_
26382 #endif
26383 skb_put (skb, qe->p1 & 0xffff);
26384 ATM_SKB(skb)->vcc = atm_vcc;
26385- atomic_inc(&atm_vcc->stats->rx);
26386+ atomic_inc_unchecked(&atm_vcc->stats->rx);
26387 __net_timestamp(skb);
26388 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26389 atm_vcc->push (atm_vcc, skb);
26390@@ -836,12 +836,12 @@ static void process_incoming (struct fs_
26391 kfree (pe);
26392 }
26393 if (atm_vcc)
26394- atomic_inc(&atm_vcc->stats->rx_drop);
26395+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26396 break;
26397 case 0x1f: /* Reassembly abort: no buffers. */
26398 /* Silently increment error counter. */
26399 if (atm_vcc)
26400- atomic_inc(&atm_vcc->stats->rx_drop);
26401+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26402 break;
26403 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26404 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26405diff -urNp linux-2.6.32.45/drivers/atm/fore200e.c linux-2.6.32.45/drivers/atm/fore200e.c
26406--- linux-2.6.32.45/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
26407+++ linux-2.6.32.45/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
26408@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
26409 #endif
26410 /* check error condition */
26411 if (*entry->status & STATUS_ERROR)
26412- atomic_inc(&vcc->stats->tx_err);
26413+ atomic_inc_unchecked(&vcc->stats->tx_err);
26414 else
26415- atomic_inc(&vcc->stats->tx);
26416+ atomic_inc_unchecked(&vcc->stats->tx);
26417 }
26418 }
26419
26420@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
26421 if (skb == NULL) {
26422 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26423
26424- atomic_inc(&vcc->stats->rx_drop);
26425+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26426 return -ENOMEM;
26427 }
26428
26429@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
26430
26431 dev_kfree_skb_any(skb);
26432
26433- atomic_inc(&vcc->stats->rx_drop);
26434+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26435 return -ENOMEM;
26436 }
26437
26438 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26439
26440 vcc->push(vcc, skb);
26441- atomic_inc(&vcc->stats->rx);
26442+ atomic_inc_unchecked(&vcc->stats->rx);
26443
26444 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26445
26446@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
26447 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
26448 fore200e->atm_dev->number,
26449 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
26450- atomic_inc(&vcc->stats->rx_err);
26451+ atomic_inc_unchecked(&vcc->stats->rx_err);
26452 }
26453 }
26454
26455@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
26456 goto retry_here;
26457 }
26458
26459- atomic_inc(&vcc->stats->tx_err);
26460+ atomic_inc_unchecked(&vcc->stats->tx_err);
26461
26462 fore200e->tx_sat++;
26463 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
26464diff -urNp linux-2.6.32.45/drivers/atm/he.c linux-2.6.32.45/drivers/atm/he.c
26465--- linux-2.6.32.45/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
26466+++ linux-2.6.32.45/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
26467@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26468
26469 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
26470 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
26471- atomic_inc(&vcc->stats->rx_drop);
26472+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26473 goto return_host_buffers;
26474 }
26475
26476@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26477 RBRQ_LEN_ERR(he_dev->rbrq_head)
26478 ? "LEN_ERR" : "",
26479 vcc->vpi, vcc->vci);
26480- atomic_inc(&vcc->stats->rx_err);
26481+ atomic_inc_unchecked(&vcc->stats->rx_err);
26482 goto return_host_buffers;
26483 }
26484
26485@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26486 vcc->push(vcc, skb);
26487 spin_lock(&he_dev->global_lock);
26488
26489- atomic_inc(&vcc->stats->rx);
26490+ atomic_inc_unchecked(&vcc->stats->rx);
26491
26492 return_host_buffers:
26493 ++pdus_assembled;
26494@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
26495 tpd->vcc->pop(tpd->vcc, tpd->skb);
26496 else
26497 dev_kfree_skb_any(tpd->skb);
26498- atomic_inc(&tpd->vcc->stats->tx_err);
26499+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
26500 }
26501 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
26502 return;
26503@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26504 vcc->pop(vcc, skb);
26505 else
26506 dev_kfree_skb_any(skb);
26507- atomic_inc(&vcc->stats->tx_err);
26508+ atomic_inc_unchecked(&vcc->stats->tx_err);
26509 return -EINVAL;
26510 }
26511
26512@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26513 vcc->pop(vcc, skb);
26514 else
26515 dev_kfree_skb_any(skb);
26516- atomic_inc(&vcc->stats->tx_err);
26517+ atomic_inc_unchecked(&vcc->stats->tx_err);
26518 return -EINVAL;
26519 }
26520 #endif
26521@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26522 vcc->pop(vcc, skb);
26523 else
26524 dev_kfree_skb_any(skb);
26525- atomic_inc(&vcc->stats->tx_err);
26526+ atomic_inc_unchecked(&vcc->stats->tx_err);
26527 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26528 return -ENOMEM;
26529 }
26530@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26531 vcc->pop(vcc, skb);
26532 else
26533 dev_kfree_skb_any(skb);
26534- atomic_inc(&vcc->stats->tx_err);
26535+ atomic_inc_unchecked(&vcc->stats->tx_err);
26536 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26537 return -ENOMEM;
26538 }
26539@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26540 __enqueue_tpd(he_dev, tpd, cid);
26541 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26542
26543- atomic_inc(&vcc->stats->tx);
26544+ atomic_inc_unchecked(&vcc->stats->tx);
26545
26546 return 0;
26547 }
26548diff -urNp linux-2.6.32.45/drivers/atm/horizon.c linux-2.6.32.45/drivers/atm/horizon.c
26549--- linux-2.6.32.45/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26550+++ linux-2.6.32.45/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26551@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26552 {
26553 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26554 // VC layer stats
26555- atomic_inc(&vcc->stats->rx);
26556+ atomic_inc_unchecked(&vcc->stats->rx);
26557 __net_timestamp(skb);
26558 // end of our responsability
26559 vcc->push (vcc, skb);
26560@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26561 dev->tx_iovec = NULL;
26562
26563 // VC layer stats
26564- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26565+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26566
26567 // free the skb
26568 hrz_kfree_skb (skb);
26569diff -urNp linux-2.6.32.45/drivers/atm/idt77252.c linux-2.6.32.45/drivers/atm/idt77252.c
26570--- linux-2.6.32.45/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26571+++ linux-2.6.32.45/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26572@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26573 else
26574 dev_kfree_skb(skb);
26575
26576- atomic_inc(&vcc->stats->tx);
26577+ atomic_inc_unchecked(&vcc->stats->tx);
26578 }
26579
26580 atomic_dec(&scq->used);
26581@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26582 if ((sb = dev_alloc_skb(64)) == NULL) {
26583 printk("%s: Can't allocate buffers for aal0.\n",
26584 card->name);
26585- atomic_add(i, &vcc->stats->rx_drop);
26586+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
26587 break;
26588 }
26589 if (!atm_charge(vcc, sb->truesize)) {
26590 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26591 card->name);
26592- atomic_add(i - 1, &vcc->stats->rx_drop);
26593+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26594 dev_kfree_skb(sb);
26595 break;
26596 }
26597@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26598 ATM_SKB(sb)->vcc = vcc;
26599 __net_timestamp(sb);
26600 vcc->push(vcc, sb);
26601- atomic_inc(&vcc->stats->rx);
26602+ atomic_inc_unchecked(&vcc->stats->rx);
26603
26604 cell += ATM_CELL_PAYLOAD;
26605 }
26606@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26607 "(CDC: %08x)\n",
26608 card->name, len, rpp->len, readl(SAR_REG_CDC));
26609 recycle_rx_pool_skb(card, rpp);
26610- atomic_inc(&vcc->stats->rx_err);
26611+ atomic_inc_unchecked(&vcc->stats->rx_err);
26612 return;
26613 }
26614 if (stat & SAR_RSQE_CRC) {
26615 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26616 recycle_rx_pool_skb(card, rpp);
26617- atomic_inc(&vcc->stats->rx_err);
26618+ atomic_inc_unchecked(&vcc->stats->rx_err);
26619 return;
26620 }
26621 if (skb_queue_len(&rpp->queue) > 1) {
26622@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26623 RXPRINTK("%s: Can't alloc RX skb.\n",
26624 card->name);
26625 recycle_rx_pool_skb(card, rpp);
26626- atomic_inc(&vcc->stats->rx_err);
26627+ atomic_inc_unchecked(&vcc->stats->rx_err);
26628 return;
26629 }
26630 if (!atm_charge(vcc, skb->truesize)) {
26631@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26632 __net_timestamp(skb);
26633
26634 vcc->push(vcc, skb);
26635- atomic_inc(&vcc->stats->rx);
26636+ atomic_inc_unchecked(&vcc->stats->rx);
26637
26638 return;
26639 }
26640@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26641 __net_timestamp(skb);
26642
26643 vcc->push(vcc, skb);
26644- atomic_inc(&vcc->stats->rx);
26645+ atomic_inc_unchecked(&vcc->stats->rx);
26646
26647 if (skb->truesize > SAR_FB_SIZE_3)
26648 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26649@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26650 if (vcc->qos.aal != ATM_AAL0) {
26651 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26652 card->name, vpi, vci);
26653- atomic_inc(&vcc->stats->rx_drop);
26654+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26655 goto drop;
26656 }
26657
26658 if ((sb = dev_alloc_skb(64)) == NULL) {
26659 printk("%s: Can't allocate buffers for AAL0.\n",
26660 card->name);
26661- atomic_inc(&vcc->stats->rx_err);
26662+ atomic_inc_unchecked(&vcc->stats->rx_err);
26663 goto drop;
26664 }
26665
26666@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26667 ATM_SKB(sb)->vcc = vcc;
26668 __net_timestamp(sb);
26669 vcc->push(vcc, sb);
26670- atomic_inc(&vcc->stats->rx);
26671+ atomic_inc_unchecked(&vcc->stats->rx);
26672
26673 drop:
26674 skb_pull(queue, 64);
26675@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26676
26677 if (vc == NULL) {
26678 printk("%s: NULL connection in send().\n", card->name);
26679- atomic_inc(&vcc->stats->tx_err);
26680+ atomic_inc_unchecked(&vcc->stats->tx_err);
26681 dev_kfree_skb(skb);
26682 return -EINVAL;
26683 }
26684 if (!test_bit(VCF_TX, &vc->flags)) {
26685 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26686- atomic_inc(&vcc->stats->tx_err);
26687+ atomic_inc_unchecked(&vcc->stats->tx_err);
26688 dev_kfree_skb(skb);
26689 return -EINVAL;
26690 }
26691@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26692 break;
26693 default:
26694 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26695- atomic_inc(&vcc->stats->tx_err);
26696+ atomic_inc_unchecked(&vcc->stats->tx_err);
26697 dev_kfree_skb(skb);
26698 return -EINVAL;
26699 }
26700
26701 if (skb_shinfo(skb)->nr_frags != 0) {
26702 printk("%s: No scatter-gather yet.\n", card->name);
26703- atomic_inc(&vcc->stats->tx_err);
26704+ atomic_inc_unchecked(&vcc->stats->tx_err);
26705 dev_kfree_skb(skb);
26706 return -EINVAL;
26707 }
26708@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26709
26710 err = queue_skb(card, vc, skb, oam);
26711 if (err) {
26712- atomic_inc(&vcc->stats->tx_err);
26713+ atomic_inc_unchecked(&vcc->stats->tx_err);
26714 dev_kfree_skb(skb);
26715 return err;
26716 }
26717@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26718 skb = dev_alloc_skb(64);
26719 if (!skb) {
26720 printk("%s: Out of memory in send_oam().\n", card->name);
26721- atomic_inc(&vcc->stats->tx_err);
26722+ atomic_inc_unchecked(&vcc->stats->tx_err);
26723 return -ENOMEM;
26724 }
26725 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26726diff -urNp linux-2.6.32.45/drivers/atm/iphase.c linux-2.6.32.45/drivers/atm/iphase.c
26727--- linux-2.6.32.45/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26728+++ linux-2.6.32.45/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26729@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26730 status = (u_short) (buf_desc_ptr->desc_mode);
26731 if (status & (RX_CER | RX_PTE | RX_OFL))
26732 {
26733- atomic_inc(&vcc->stats->rx_err);
26734+ atomic_inc_unchecked(&vcc->stats->rx_err);
26735 IF_ERR(printk("IA: bad packet, dropping it");)
26736 if (status & RX_CER) {
26737 IF_ERR(printk(" cause: packet CRC error\n");)
26738@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26739 len = dma_addr - buf_addr;
26740 if (len > iadev->rx_buf_sz) {
26741 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26742- atomic_inc(&vcc->stats->rx_err);
26743+ atomic_inc_unchecked(&vcc->stats->rx_err);
26744 goto out_free_desc;
26745 }
26746
26747@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26748 ia_vcc = INPH_IA_VCC(vcc);
26749 if (ia_vcc == NULL)
26750 {
26751- atomic_inc(&vcc->stats->rx_err);
26752+ atomic_inc_unchecked(&vcc->stats->rx_err);
26753 dev_kfree_skb_any(skb);
26754 atm_return(vcc, atm_guess_pdu2truesize(len));
26755 goto INCR_DLE;
26756@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26757 if ((length > iadev->rx_buf_sz) || (length >
26758 (skb->len - sizeof(struct cpcs_trailer))))
26759 {
26760- atomic_inc(&vcc->stats->rx_err);
26761+ atomic_inc_unchecked(&vcc->stats->rx_err);
26762 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26763 length, skb->len);)
26764 dev_kfree_skb_any(skb);
26765@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26766
26767 IF_RX(printk("rx_dle_intr: skb push");)
26768 vcc->push(vcc,skb);
26769- atomic_inc(&vcc->stats->rx);
26770+ atomic_inc_unchecked(&vcc->stats->rx);
26771 iadev->rx_pkt_cnt++;
26772 }
26773 INCR_DLE:
26774@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26775 {
26776 struct k_sonet_stats *stats;
26777 stats = &PRIV(_ia_dev[board])->sonet_stats;
26778- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26779- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26780- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26781- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26782- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26783- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26784- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26785- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26786- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26787+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26788+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26789+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26790+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26791+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26792+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26793+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26794+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26795+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26796 }
26797 ia_cmds.status = 0;
26798 break;
26799@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26800 if ((desc == 0) || (desc > iadev->num_tx_desc))
26801 {
26802 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26803- atomic_inc(&vcc->stats->tx);
26804+ atomic_inc_unchecked(&vcc->stats->tx);
26805 if (vcc->pop)
26806 vcc->pop(vcc, skb);
26807 else
26808@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26809 ATM_DESC(skb) = vcc->vci;
26810 skb_queue_tail(&iadev->tx_dma_q, skb);
26811
26812- atomic_inc(&vcc->stats->tx);
26813+ atomic_inc_unchecked(&vcc->stats->tx);
26814 iadev->tx_pkt_cnt++;
26815 /* Increment transaction counter */
26816 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26817
26818 #if 0
26819 /* add flow control logic */
26820- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26821+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26822 if (iavcc->vc_desc_cnt > 10) {
26823 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26824 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26825diff -urNp linux-2.6.32.45/drivers/atm/lanai.c linux-2.6.32.45/drivers/atm/lanai.c
26826--- linux-2.6.32.45/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26827+++ linux-2.6.32.45/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26828@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26829 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26830 lanai_endtx(lanai, lvcc);
26831 lanai_free_skb(lvcc->tx.atmvcc, skb);
26832- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26833+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26834 }
26835
26836 /* Try to fill the buffer - don't call unless there is backlog */
26837@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26838 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26839 __net_timestamp(skb);
26840 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26841- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26842+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26843 out:
26844 lvcc->rx.buf.ptr = end;
26845 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26846@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26847 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26848 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26849 lanai->stats.service_rxnotaal5++;
26850- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26851+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26852 return 0;
26853 }
26854 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26855@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26856 int bytes;
26857 read_unlock(&vcc_sklist_lock);
26858 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26859- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26860+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26861 lvcc->stats.x.aal5.service_trash++;
26862 bytes = (SERVICE_GET_END(s) * 16) -
26863 (((unsigned long) lvcc->rx.buf.ptr) -
26864@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26865 }
26866 if (s & SERVICE_STREAM) {
26867 read_unlock(&vcc_sklist_lock);
26868- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26869+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26870 lvcc->stats.x.aal5.service_stream++;
26871 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26872 "PDU on VCI %d!\n", lanai->number, vci);
26873@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26874 return 0;
26875 }
26876 DPRINTK("got rx crc error on vci %d\n", vci);
26877- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26878+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26879 lvcc->stats.x.aal5.service_rxcrc++;
26880 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26881 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26882diff -urNp linux-2.6.32.45/drivers/atm/nicstar.c linux-2.6.32.45/drivers/atm/nicstar.c
26883--- linux-2.6.32.45/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26884+++ linux-2.6.32.45/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26885@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26886 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26887 {
26888 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26889- atomic_inc(&vcc->stats->tx_err);
26890+ atomic_inc_unchecked(&vcc->stats->tx_err);
26891 dev_kfree_skb_any(skb);
26892 return -EINVAL;
26893 }
26894@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26895 if (!vc->tx)
26896 {
26897 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26898- atomic_inc(&vcc->stats->tx_err);
26899+ atomic_inc_unchecked(&vcc->stats->tx_err);
26900 dev_kfree_skb_any(skb);
26901 return -EINVAL;
26902 }
26903@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26904 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26905 {
26906 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26907- atomic_inc(&vcc->stats->tx_err);
26908+ atomic_inc_unchecked(&vcc->stats->tx_err);
26909 dev_kfree_skb_any(skb);
26910 return -EINVAL;
26911 }
26912@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26913 if (skb_shinfo(skb)->nr_frags != 0)
26914 {
26915 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26916- atomic_inc(&vcc->stats->tx_err);
26917+ atomic_inc_unchecked(&vcc->stats->tx_err);
26918 dev_kfree_skb_any(skb);
26919 return -EINVAL;
26920 }
26921@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26922
26923 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26924 {
26925- atomic_inc(&vcc->stats->tx_err);
26926+ atomic_inc_unchecked(&vcc->stats->tx_err);
26927 dev_kfree_skb_any(skb);
26928 return -EIO;
26929 }
26930- atomic_inc(&vcc->stats->tx);
26931+ atomic_inc_unchecked(&vcc->stats->tx);
26932
26933 return 0;
26934 }
26935@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26936 {
26937 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26938 card->index);
26939- atomic_add(i,&vcc->stats->rx_drop);
26940+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
26941 break;
26942 }
26943 if (!atm_charge(vcc, sb->truesize))
26944 {
26945 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26946 card->index);
26947- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26948+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26949 dev_kfree_skb_any(sb);
26950 break;
26951 }
26952@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26953 ATM_SKB(sb)->vcc = vcc;
26954 __net_timestamp(sb);
26955 vcc->push(vcc, sb);
26956- atomic_inc(&vcc->stats->rx);
26957+ atomic_inc_unchecked(&vcc->stats->rx);
26958 cell += ATM_CELL_PAYLOAD;
26959 }
26960
26961@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26962 if (iovb == NULL)
26963 {
26964 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26965- atomic_inc(&vcc->stats->rx_drop);
26966+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26967 recycle_rx_buf(card, skb);
26968 return;
26969 }
26970@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26971 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26972 {
26973 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26974- atomic_inc(&vcc->stats->rx_err);
26975+ atomic_inc_unchecked(&vcc->stats->rx_err);
26976 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26977 NS_SKB(iovb)->iovcnt = 0;
26978 iovb->len = 0;
26979@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26980 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26981 card->index);
26982 which_list(card, skb);
26983- atomic_inc(&vcc->stats->rx_err);
26984+ atomic_inc_unchecked(&vcc->stats->rx_err);
26985 recycle_rx_buf(card, skb);
26986 vc->rx_iov = NULL;
26987 recycle_iov_buf(card, iovb);
26988@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26989 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26990 card->index);
26991 which_list(card, skb);
26992- atomic_inc(&vcc->stats->rx_err);
26993+ atomic_inc_unchecked(&vcc->stats->rx_err);
26994 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26995 NS_SKB(iovb)->iovcnt);
26996 vc->rx_iov = NULL;
26997@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26998 printk(" - PDU size mismatch.\n");
26999 else
27000 printk(".\n");
27001- atomic_inc(&vcc->stats->rx_err);
27002+ atomic_inc_unchecked(&vcc->stats->rx_err);
27003 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
27004 NS_SKB(iovb)->iovcnt);
27005 vc->rx_iov = NULL;
27006@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
27007 if (!atm_charge(vcc, skb->truesize))
27008 {
27009 push_rxbufs(card, skb);
27010- atomic_inc(&vcc->stats->rx_drop);
27011+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27012 }
27013 else
27014 {
27015@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
27016 ATM_SKB(skb)->vcc = vcc;
27017 __net_timestamp(skb);
27018 vcc->push(vcc, skb);
27019- atomic_inc(&vcc->stats->rx);
27020+ atomic_inc_unchecked(&vcc->stats->rx);
27021 }
27022 }
27023 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
27024@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
27025 if (!atm_charge(vcc, sb->truesize))
27026 {
27027 push_rxbufs(card, sb);
27028- atomic_inc(&vcc->stats->rx_drop);
27029+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27030 }
27031 else
27032 {
27033@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
27034 ATM_SKB(sb)->vcc = vcc;
27035 __net_timestamp(sb);
27036 vcc->push(vcc, sb);
27037- atomic_inc(&vcc->stats->rx);
27038+ atomic_inc_unchecked(&vcc->stats->rx);
27039 }
27040
27041 push_rxbufs(card, skb);
27042@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
27043 if (!atm_charge(vcc, skb->truesize))
27044 {
27045 push_rxbufs(card, skb);
27046- atomic_inc(&vcc->stats->rx_drop);
27047+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27048 }
27049 else
27050 {
27051@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
27052 ATM_SKB(skb)->vcc = vcc;
27053 __net_timestamp(skb);
27054 vcc->push(vcc, skb);
27055- atomic_inc(&vcc->stats->rx);
27056+ atomic_inc_unchecked(&vcc->stats->rx);
27057 }
27058
27059 push_rxbufs(card, sb);
27060@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
27061 if (hb == NULL)
27062 {
27063 printk("nicstar%d: Out of huge buffers.\n", card->index);
27064- atomic_inc(&vcc->stats->rx_drop);
27065+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27066 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
27067 NS_SKB(iovb)->iovcnt);
27068 vc->rx_iov = NULL;
27069@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
27070 }
27071 else
27072 dev_kfree_skb_any(hb);
27073- atomic_inc(&vcc->stats->rx_drop);
27074+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27075 }
27076 else
27077 {
27078@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
27079 #endif /* NS_USE_DESTRUCTORS */
27080 __net_timestamp(hb);
27081 vcc->push(vcc, hb);
27082- atomic_inc(&vcc->stats->rx);
27083+ atomic_inc_unchecked(&vcc->stats->rx);
27084 }
27085 }
27086
27087diff -urNp linux-2.6.32.45/drivers/atm/solos-pci.c linux-2.6.32.45/drivers/atm/solos-pci.c
27088--- linux-2.6.32.45/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
27089+++ linux-2.6.32.45/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
27090@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
27091 }
27092 atm_charge(vcc, skb->truesize);
27093 vcc->push(vcc, skb);
27094- atomic_inc(&vcc->stats->rx);
27095+ atomic_inc_unchecked(&vcc->stats->rx);
27096 break;
27097
27098 case PKT_STATUS:
27099@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
27100 char msg[500];
27101 char item[10];
27102
27103+ pax_track_stack();
27104+
27105 len = buf->len;
27106 for (i = 0; i < len; i++){
27107 if(i % 8 == 0)
27108@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
27109 vcc = SKB_CB(oldskb)->vcc;
27110
27111 if (vcc) {
27112- atomic_inc(&vcc->stats->tx);
27113+ atomic_inc_unchecked(&vcc->stats->tx);
27114 solos_pop(vcc, oldskb);
27115 } else
27116 dev_kfree_skb_irq(oldskb);
27117diff -urNp linux-2.6.32.45/drivers/atm/suni.c linux-2.6.32.45/drivers/atm/suni.c
27118--- linux-2.6.32.45/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
27119+++ linux-2.6.32.45/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
27120@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
27121
27122
27123 #define ADD_LIMITED(s,v) \
27124- atomic_add((v),&stats->s); \
27125- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
27126+ atomic_add_unchecked((v),&stats->s); \
27127+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
27128
27129
27130 static void suni_hz(unsigned long from_timer)
27131diff -urNp linux-2.6.32.45/drivers/atm/uPD98402.c linux-2.6.32.45/drivers/atm/uPD98402.c
27132--- linux-2.6.32.45/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
27133+++ linux-2.6.32.45/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
27134@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
27135 struct sonet_stats tmp;
27136 int error = 0;
27137
27138- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27139+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27140 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
27141 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
27142 if (zero && !error) {
27143@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
27144
27145
27146 #define ADD_LIMITED(s,v) \
27147- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
27148- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
27149- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27150+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
27151+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
27152+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27153
27154
27155 static void stat_event(struct atm_dev *dev)
27156@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
27157 if (reason & uPD98402_INT_PFM) stat_event(dev);
27158 if (reason & uPD98402_INT_PCO) {
27159 (void) GET(PCOCR); /* clear interrupt cause */
27160- atomic_add(GET(HECCT),
27161+ atomic_add_unchecked(GET(HECCT),
27162 &PRIV(dev)->sonet_stats.uncorr_hcs);
27163 }
27164 if ((reason & uPD98402_INT_RFO) &&
27165@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
27166 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
27167 uPD98402_INT_LOS),PIMR); /* enable them */
27168 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
27169- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27170- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
27171- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
27172+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27173+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
27174+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
27175 return 0;
27176 }
27177
27178diff -urNp linux-2.6.32.45/drivers/atm/zatm.c linux-2.6.32.45/drivers/atm/zatm.c
27179--- linux-2.6.32.45/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
27180+++ linux-2.6.32.45/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
27181@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27182 }
27183 if (!size) {
27184 dev_kfree_skb_irq(skb);
27185- if (vcc) atomic_inc(&vcc->stats->rx_err);
27186+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
27187 continue;
27188 }
27189 if (!atm_charge(vcc,skb->truesize)) {
27190@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27191 skb->len = size;
27192 ATM_SKB(skb)->vcc = vcc;
27193 vcc->push(vcc,skb);
27194- atomic_inc(&vcc->stats->rx);
27195+ atomic_inc_unchecked(&vcc->stats->rx);
27196 }
27197 zout(pos & 0xffff,MTA(mbx));
27198 #if 0 /* probably a stupid idea */
27199@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
27200 skb_queue_head(&zatm_vcc->backlog,skb);
27201 break;
27202 }
27203- atomic_inc(&vcc->stats->tx);
27204+ atomic_inc_unchecked(&vcc->stats->tx);
27205 wake_up(&zatm_vcc->tx_wait);
27206 }
27207
27208diff -urNp linux-2.6.32.45/drivers/base/bus.c linux-2.6.32.45/drivers/base/bus.c
27209--- linux-2.6.32.45/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
27210+++ linux-2.6.32.45/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
27211@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
27212 return ret;
27213 }
27214
27215-static struct sysfs_ops driver_sysfs_ops = {
27216+static const struct sysfs_ops driver_sysfs_ops = {
27217 .show = drv_attr_show,
27218 .store = drv_attr_store,
27219 };
27220@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
27221 return ret;
27222 }
27223
27224-static struct sysfs_ops bus_sysfs_ops = {
27225+static const struct sysfs_ops bus_sysfs_ops = {
27226 .show = bus_attr_show,
27227 .store = bus_attr_store,
27228 };
27229@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
27230 return 0;
27231 }
27232
27233-static struct kset_uevent_ops bus_uevent_ops = {
27234+static const struct kset_uevent_ops bus_uevent_ops = {
27235 .filter = bus_uevent_filter,
27236 };
27237
27238diff -urNp linux-2.6.32.45/drivers/base/class.c linux-2.6.32.45/drivers/base/class.c
27239--- linux-2.6.32.45/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
27240+++ linux-2.6.32.45/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
27241@@ -63,7 +63,7 @@ static void class_release(struct kobject
27242 kfree(cp);
27243 }
27244
27245-static struct sysfs_ops class_sysfs_ops = {
27246+static const struct sysfs_ops class_sysfs_ops = {
27247 .show = class_attr_show,
27248 .store = class_attr_store,
27249 };
27250diff -urNp linux-2.6.32.45/drivers/base/core.c linux-2.6.32.45/drivers/base/core.c
27251--- linux-2.6.32.45/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
27252+++ linux-2.6.32.45/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
27253@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
27254 return ret;
27255 }
27256
27257-static struct sysfs_ops dev_sysfs_ops = {
27258+static const struct sysfs_ops dev_sysfs_ops = {
27259 .show = dev_attr_show,
27260 .store = dev_attr_store,
27261 };
27262@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
27263 return retval;
27264 }
27265
27266-static struct kset_uevent_ops device_uevent_ops = {
27267+static const struct kset_uevent_ops device_uevent_ops = {
27268 .filter = dev_uevent_filter,
27269 .name = dev_uevent_name,
27270 .uevent = dev_uevent,
27271diff -urNp linux-2.6.32.45/drivers/base/memory.c linux-2.6.32.45/drivers/base/memory.c
27272--- linux-2.6.32.45/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
27273+++ linux-2.6.32.45/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
27274@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
27275 return retval;
27276 }
27277
27278-static struct kset_uevent_ops memory_uevent_ops = {
27279+static const struct kset_uevent_ops memory_uevent_ops = {
27280 .name = memory_uevent_name,
27281 .uevent = memory_uevent,
27282 };
27283diff -urNp linux-2.6.32.45/drivers/base/sys.c linux-2.6.32.45/drivers/base/sys.c
27284--- linux-2.6.32.45/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
27285+++ linux-2.6.32.45/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
27286@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
27287 return -EIO;
27288 }
27289
27290-static struct sysfs_ops sysfs_ops = {
27291+static const struct sysfs_ops sysfs_ops = {
27292 .show = sysdev_show,
27293 .store = sysdev_store,
27294 };
27295@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
27296 return -EIO;
27297 }
27298
27299-static struct sysfs_ops sysfs_class_ops = {
27300+static const struct sysfs_ops sysfs_class_ops = {
27301 .show = sysdev_class_show,
27302 .store = sysdev_class_store,
27303 };
27304diff -urNp linux-2.6.32.45/drivers/block/cciss.c linux-2.6.32.45/drivers/block/cciss.c
27305--- linux-2.6.32.45/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
27306+++ linux-2.6.32.45/drivers/block/cciss.c 2011-08-05 20:33:55.000000000 -0400
27307@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
27308 int err;
27309 u32 cp;
27310
27311+ memset(&arg64, 0, sizeof(arg64));
27312+
27313 err = 0;
27314 err |=
27315 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27316@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ct
27317 /* Wait (up to 20 seconds) for a command to complete */
27318
27319 for (i = 20 * HZ; i > 0; i--) {
27320- done = hba[ctlr]->access.command_completed(hba[ctlr]);
27321+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
27322 if (done == FIFO_EMPTY)
27323 schedule_timeout_uninterruptible(1);
27324 else
27325@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h,
27326 resend_cmd1:
27327
27328 /* Disable interrupt on the board. */
27329- h->access.set_intr_mask(h, CCISS_INTR_OFF);
27330+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
27331
27332 /* Make sure there is room in the command FIFO */
27333 /* Actually it should be completely empty at this time */
27334@@ -2884,13 +2886,13 @@ resend_cmd1:
27335 /* tape side of the driver. */
27336 for (i = 200000; i > 0; i--) {
27337 /* if fifo isn't full go */
27338- if (!(h->access.fifo_full(h)))
27339+ if (!(h->access->fifo_full(h)))
27340 break;
27341 udelay(10);
27342 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
27343 " waiting!\n", h->ctlr);
27344 }
27345- h->access.submit_command(h, c); /* Send the cmd */
27346+ h->access->submit_command(h, c); /* Send the cmd */
27347 do {
27348 complete = pollcomplete(h->ctlr);
27349
27350@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
27351 while (!hlist_empty(&h->reqQ)) {
27352 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
27353 /* can't do anything if fifo is full */
27354- if ((h->access.fifo_full(h))) {
27355+ if ((h->access->fifo_full(h))) {
27356 printk(KERN_WARNING "cciss: fifo full\n");
27357 break;
27358 }
27359@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
27360 h->Qdepth--;
27361
27362 /* Tell the controller execute command */
27363- h->access.submit_command(h, c);
27364+ h->access->submit_command(h, c);
27365
27366 /* Put job onto the completed Q */
27367 addQ(&h->cmpQ, c);
27368@@ -3393,17 +3395,17 @@ startio:
27369
27370 static inline unsigned long get_next_completion(ctlr_info_t *h)
27371 {
27372- return h->access.command_completed(h);
27373+ return h->access->command_completed(h);
27374 }
27375
27376 static inline int interrupt_pending(ctlr_info_t *h)
27377 {
27378- return h->access.intr_pending(h);
27379+ return h->access->intr_pending(h);
27380 }
27381
27382 static inline long interrupt_not_for_us(ctlr_info_t *h)
27383 {
27384- return (((h->access.intr_pending(h) == 0) ||
27385+ return (((h->access->intr_pending(h) == 0) ||
27386 (h->interrupts_enabled == 0)));
27387 }
27388
27389@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr
27390 */
27391 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
27392 c->product_name = products[prod_index].product_name;
27393- c->access = *(products[prod_index].access);
27394+ c->access = products[prod_index].access;
27395 c->nr_cmds = c->max_commands - 4;
27396 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
27397 (readb(&c->cfgtable->Signature[1]) != 'I') ||
27398@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(stru
27399 }
27400
27401 /* make sure the board interrupts are off */
27402- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
27403+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
27404 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
27405 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
27406 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
27407@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(stru
27408 cciss_scsi_setup(i);
27409
27410 /* Turn the interrupts on so we can service requests */
27411- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
27412+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
27413
27414 /* Get the firmware version */
27415 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27416diff -urNp linux-2.6.32.45/drivers/block/cciss.h linux-2.6.32.45/drivers/block/cciss.h
27417--- linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:35:28.000000000 -0400
27418+++ linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:33:59.000000000 -0400
27419@@ -90,7 +90,7 @@ struct ctlr_info
27420 // information about each logical volume
27421 drive_info_struct *drv[CISS_MAX_LUN];
27422
27423- struct access_method access;
27424+ struct access_method *access;
27425
27426 /* queue and queue Info */
27427 struct hlist_head reqQ;
27428diff -urNp linux-2.6.32.45/drivers/block/cpqarray.c linux-2.6.32.45/drivers/block/cpqarray.c
27429--- linux-2.6.32.45/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
27430+++ linux-2.6.32.45/drivers/block/cpqarray.c 2011-08-05 20:33:55.000000000 -0400
27431@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr
27432 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27433 goto Enomem4;
27434 }
27435- hba[i]->access.set_intr_mask(hba[i], 0);
27436+ hba[i]->access->set_intr_mask(hba[i], 0);
27437 if (request_irq(hba[i]->intr, do_ida_intr,
27438 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27439 {
27440@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr
27441 add_timer(&hba[i]->timer);
27442
27443 /* Enable IRQ now that spinlock and rate limit timer are set up */
27444- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27445+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27446
27447 for(j=0; j<NWD; j++) {
27448 struct gendisk *disk = ida_gendisk[i][j];
27449@@ -695,7 +695,7 @@ DBGINFO(
27450 for(i=0; i<NR_PRODUCTS; i++) {
27451 if (board_id == products[i].board_id) {
27452 c->product_name = products[i].product_name;
27453- c->access = *(products[i].access);
27454+ c->access = products[i].access;
27455 break;
27456 }
27457 }
27458@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(v
27459 hba[ctlr]->intr = intr;
27460 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27461 hba[ctlr]->product_name = products[j].product_name;
27462- hba[ctlr]->access = *(products[j].access);
27463+ hba[ctlr]->access = products[j].access;
27464 hba[ctlr]->ctlr = ctlr;
27465 hba[ctlr]->board_id = board_id;
27466 hba[ctlr]->pci_dev = NULL; /* not PCI */
27467@@ -896,6 +896,8 @@ static void do_ida_request(struct reques
27468 struct scatterlist tmp_sg[SG_MAX];
27469 int i, dir, seg;
27470
27471+ pax_track_stack();
27472+
27473 if (blk_queue_plugged(q))
27474 goto startio;
27475
27476@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
27477
27478 while((c = h->reqQ) != NULL) {
27479 /* Can't do anything if we're busy */
27480- if (h->access.fifo_full(h) == 0)
27481+ if (h->access->fifo_full(h) == 0)
27482 return;
27483
27484 /* Get the first entry from the request Q */
27485@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
27486 h->Qdepth--;
27487
27488 /* Tell the controller to do our bidding */
27489- h->access.submit_command(h, c);
27490+ h->access->submit_command(h, c);
27491
27492 /* Get onto the completion Q */
27493 addQ(&h->cmpQ, c);
27494@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq,
27495 unsigned long flags;
27496 __u32 a,a1;
27497
27498- istat = h->access.intr_pending(h);
27499+ istat = h->access->intr_pending(h);
27500 /* Is this interrupt for us? */
27501 if (istat == 0)
27502 return IRQ_NONE;
27503@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq,
27504 */
27505 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
27506 if (istat & FIFO_NOT_EMPTY) {
27507- while((a = h->access.command_completed(h))) {
27508+ while((a = h->access->command_completed(h))) {
27509 a1 = a; a &= ~3;
27510 if ((c = h->cmpQ) == NULL)
27511 {
27512@@ -1434,11 +1436,11 @@ static int sendcmd(
27513 /*
27514 * Disable interrupt
27515 */
27516- info_p->access.set_intr_mask(info_p, 0);
27517+ info_p->access->set_intr_mask(info_p, 0);
27518 /* Make sure there is room in the command FIFO */
27519 /* Actually it should be completely empty at this time. */
27520 for (i = 200000; i > 0; i--) {
27521- temp = info_p->access.fifo_full(info_p);
27522+ temp = info_p->access->fifo_full(info_p);
27523 if (temp != 0) {
27524 break;
27525 }
27526@@ -1451,7 +1453,7 @@ DBG(
27527 /*
27528 * Send the cmd
27529 */
27530- info_p->access.submit_command(info_p, c);
27531+ info_p->access->submit_command(info_p, c);
27532 complete = pollcomplete(ctlr);
27533
27534 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
27535@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t
27536 * we check the new geometry. Then turn interrupts back on when
27537 * we're done.
27538 */
27539- host->access.set_intr_mask(host, 0);
27540+ host->access->set_intr_mask(host, 0);
27541 getgeometry(ctlr);
27542- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
27543+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
27544
27545 for(i=0; i<NWD; i++) {
27546 struct gendisk *disk = ida_gendisk[ctlr][i];
27547@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
27548 /* Wait (up to 2 seconds) for a command to complete */
27549
27550 for (i = 200000; i > 0; i--) {
27551- done = hba[ctlr]->access.command_completed(hba[ctlr]);
27552+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
27553 if (done == 0) {
27554 udelay(10); /* a short fixed delay */
27555 } else
27556diff -urNp linux-2.6.32.45/drivers/block/cpqarray.h linux-2.6.32.45/drivers/block/cpqarray.h
27557--- linux-2.6.32.45/drivers/block/cpqarray.h 2011-03-27 14:31:47.000000000 -0400
27558+++ linux-2.6.32.45/drivers/block/cpqarray.h 2011-08-05 20:33:55.000000000 -0400
27559@@ -99,7 +99,7 @@ struct ctlr_info {
27560 drv_info_t drv[NWD];
27561 struct proc_dir_entry *proc;
27562
27563- struct access_method access;
27564+ struct access_method *access;
27565
27566 cmdlist_t *reqQ;
27567 cmdlist_t *cmpQ;
27568diff -urNp linux-2.6.32.45/drivers/block/DAC960.c linux-2.6.32.45/drivers/block/DAC960.c
27569--- linux-2.6.32.45/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
27570+++ linux-2.6.32.45/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
27571@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
27572 unsigned long flags;
27573 int Channel, TargetID;
27574
27575+ pax_track_stack();
27576+
27577 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
27578 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
27579 sizeof(DAC960_SCSI_Inquiry_T) +
27580diff -urNp linux-2.6.32.45/drivers/block/nbd.c linux-2.6.32.45/drivers/block/nbd.c
27581--- linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
27582+++ linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
27583@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
27584 struct kvec iov;
27585 sigset_t blocked, oldset;
27586
27587+ pax_track_stack();
27588+
27589 if (unlikely(!sock)) {
27590 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
27591 lo->disk->disk_name, (send ? "send" : "recv"));
27592@@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
27593 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
27594 unsigned int cmd, unsigned long arg)
27595 {
27596+ pax_track_stack();
27597+
27598 switch (cmd) {
27599 case NBD_DISCONNECT: {
27600 struct request sreq;
27601diff -urNp linux-2.6.32.45/drivers/block/pktcdvd.c linux-2.6.32.45/drivers/block/pktcdvd.c
27602--- linux-2.6.32.45/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
27603+++ linux-2.6.32.45/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
27604@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
27605 return len;
27606 }
27607
27608-static struct sysfs_ops kobj_pkt_ops = {
27609+static const struct sysfs_ops kobj_pkt_ops = {
27610 .show = kobj_pkt_show,
27611 .store = kobj_pkt_store
27612 };
27613diff -urNp linux-2.6.32.45/drivers/char/agp/frontend.c linux-2.6.32.45/drivers/char/agp/frontend.c
27614--- linux-2.6.32.45/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
27615+++ linux-2.6.32.45/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
27616@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
27617 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27618 return -EFAULT;
27619
27620- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27621+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27622 return -EFAULT;
27623
27624 client = agp_find_client_by_pid(reserve.pid);
27625diff -urNp linux-2.6.32.45/drivers/char/briq_panel.c linux-2.6.32.45/drivers/char/briq_panel.c
27626--- linux-2.6.32.45/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
27627+++ linux-2.6.32.45/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
27628@@ -10,6 +10,7 @@
27629 #include <linux/types.h>
27630 #include <linux/errno.h>
27631 #include <linux/tty.h>
27632+#include <linux/mutex.h>
27633 #include <linux/timer.h>
27634 #include <linux/kernel.h>
27635 #include <linux/wait.h>
27636@@ -36,6 +37,7 @@ static int vfd_is_open;
27637 static unsigned char vfd[40];
27638 static int vfd_cursor;
27639 static unsigned char ledpb, led;
27640+static DEFINE_MUTEX(vfd_mutex);
27641
27642 static void update_vfd(void)
27643 {
27644@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
27645 if (!vfd_is_open)
27646 return -EBUSY;
27647
27648+ mutex_lock(&vfd_mutex);
27649 for (;;) {
27650 char c;
27651 if (!indx)
27652 break;
27653- if (get_user(c, buf))
27654+ if (get_user(c, buf)) {
27655+ mutex_unlock(&vfd_mutex);
27656 return -EFAULT;
27657+ }
27658 if (esc) {
27659 set_led(c);
27660 esc = 0;
27661@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
27662 buf++;
27663 }
27664 update_vfd();
27665+ mutex_unlock(&vfd_mutex);
27666
27667 return len;
27668 }
27669diff -urNp linux-2.6.32.45/drivers/char/genrtc.c linux-2.6.32.45/drivers/char/genrtc.c
27670--- linux-2.6.32.45/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
27671+++ linux-2.6.32.45/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
27672@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
27673 switch (cmd) {
27674
27675 case RTC_PLL_GET:
27676+ memset(&pll, 0, sizeof(pll));
27677 if (get_rtc_pll(&pll))
27678 return -EINVAL;
27679 else
27680diff -urNp linux-2.6.32.45/drivers/char/hpet.c linux-2.6.32.45/drivers/char/hpet.c
27681--- linux-2.6.32.45/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
27682+++ linux-2.6.32.45/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
27683@@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
27684 return 0;
27685 }
27686
27687-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
27688+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
27689
27690 static int
27691 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
27692@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
27693 }
27694
27695 static int
27696-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
27697+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
27698 {
27699 struct hpet_timer __iomem *timer;
27700 struct hpet __iomem *hpet;
27701@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
27702 {
27703 struct hpet_info info;
27704
27705+ memset(&info, 0, sizeof(info));
27706+
27707 if (devp->hd_ireqfreq)
27708 info.hi_ireqfreq =
27709 hpet_time_div(hpetp, devp->hd_ireqfreq);
27710- else
27711- info.hi_ireqfreq = 0;
27712 info.hi_flags =
27713 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
27714 info.hi_hpet = hpetp->hp_which;
27715diff -urNp linux-2.6.32.45/drivers/char/hvc_beat.c linux-2.6.32.45/drivers/char/hvc_beat.c
27716--- linux-2.6.32.45/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
27717+++ linux-2.6.32.45/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
27718@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
27719 return cnt;
27720 }
27721
27722-static struct hv_ops hvc_beat_get_put_ops = {
27723+static const struct hv_ops hvc_beat_get_put_ops = {
27724 .get_chars = hvc_beat_get_chars,
27725 .put_chars = hvc_beat_put_chars,
27726 };
27727diff -urNp linux-2.6.32.45/drivers/char/hvc_console.c linux-2.6.32.45/drivers/char/hvc_console.c
27728--- linux-2.6.32.45/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
27729+++ linux-2.6.32.45/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
27730@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
27731 * console interfaces but can still be used as a tty device. This has to be
27732 * static because kmalloc will not work during early console init.
27733 */
27734-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27735+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27736 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
27737 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
27738
27739@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
27740 * vty adapters do NOT get an hvc_instantiate() callback since they
27741 * appear after early console init.
27742 */
27743-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
27744+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
27745 {
27746 struct hvc_struct *hp;
27747
27748@@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
27749 };
27750
27751 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
27752- struct hv_ops *ops, int outbuf_size)
27753+ const struct hv_ops *ops, int outbuf_size)
27754 {
27755 struct hvc_struct *hp;
27756 int i;
27757diff -urNp linux-2.6.32.45/drivers/char/hvc_console.h linux-2.6.32.45/drivers/char/hvc_console.h
27758--- linux-2.6.32.45/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
27759+++ linux-2.6.32.45/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
27760@@ -55,7 +55,7 @@ struct hvc_struct {
27761 int outbuf_size;
27762 int n_outbuf;
27763 uint32_t vtermno;
27764- struct hv_ops *ops;
27765+ const struct hv_ops *ops;
27766 int irq_requested;
27767 int data;
27768 struct winsize ws;
27769@@ -76,11 +76,11 @@ struct hv_ops {
27770 };
27771
27772 /* Register a vterm and a slot index for use as a console (console_init) */
27773-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
27774+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
27775
27776 /* register a vterm for hvc tty operation (module_init or hotplug add) */
27777 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
27778- struct hv_ops *ops, int outbuf_size);
27779+ const struct hv_ops *ops, int outbuf_size);
27780 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27781 extern int hvc_remove(struct hvc_struct *hp);
27782
27783diff -urNp linux-2.6.32.45/drivers/char/hvc_iseries.c linux-2.6.32.45/drivers/char/hvc_iseries.c
27784--- linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27785+++ linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27786@@ -197,7 +197,7 @@ done:
27787 return sent;
27788 }
27789
27790-static struct hv_ops hvc_get_put_ops = {
27791+static const struct hv_ops hvc_get_put_ops = {
27792 .get_chars = get_chars,
27793 .put_chars = put_chars,
27794 .notifier_add = notifier_add_irq,
27795diff -urNp linux-2.6.32.45/drivers/char/hvc_iucv.c linux-2.6.32.45/drivers/char/hvc_iucv.c
27796--- linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27797+++ linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27798@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27799
27800
27801 /* HVC operations */
27802-static struct hv_ops hvc_iucv_ops = {
27803+static const struct hv_ops hvc_iucv_ops = {
27804 .get_chars = hvc_iucv_get_chars,
27805 .put_chars = hvc_iucv_put_chars,
27806 .notifier_add = hvc_iucv_notifier_add,
27807diff -urNp linux-2.6.32.45/drivers/char/hvc_rtas.c linux-2.6.32.45/drivers/char/hvc_rtas.c
27808--- linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27809+++ linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27810@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27811 return i;
27812 }
27813
27814-static struct hv_ops hvc_rtas_get_put_ops = {
27815+static const struct hv_ops hvc_rtas_get_put_ops = {
27816 .get_chars = hvc_rtas_read_console,
27817 .put_chars = hvc_rtas_write_console,
27818 };
27819diff -urNp linux-2.6.32.45/drivers/char/hvcs.c linux-2.6.32.45/drivers/char/hvcs.c
27820--- linux-2.6.32.45/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27821+++ linux-2.6.32.45/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27822@@ -82,6 +82,7 @@
27823 #include <asm/hvcserver.h>
27824 #include <asm/uaccess.h>
27825 #include <asm/vio.h>
27826+#include <asm/local.h>
27827
27828 /*
27829 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27830@@ -269,7 +270,7 @@ struct hvcs_struct {
27831 unsigned int index;
27832
27833 struct tty_struct *tty;
27834- int open_count;
27835+ local_t open_count;
27836
27837 /*
27838 * Used to tell the driver kernel_thread what operations need to take
27839@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27840
27841 spin_lock_irqsave(&hvcsd->lock, flags);
27842
27843- if (hvcsd->open_count > 0) {
27844+ if (local_read(&hvcsd->open_count) > 0) {
27845 spin_unlock_irqrestore(&hvcsd->lock, flags);
27846 printk(KERN_INFO "HVCS: vterm state unchanged. "
27847 "The hvcs device node is still in use.\n");
27848@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27849 if ((retval = hvcs_partner_connect(hvcsd)))
27850 goto error_release;
27851
27852- hvcsd->open_count = 1;
27853+ local_set(&hvcsd->open_count, 1);
27854 hvcsd->tty = tty;
27855 tty->driver_data = hvcsd;
27856
27857@@ -1169,7 +1170,7 @@ fast_open:
27858
27859 spin_lock_irqsave(&hvcsd->lock, flags);
27860 kref_get(&hvcsd->kref);
27861- hvcsd->open_count++;
27862+ local_inc(&hvcsd->open_count);
27863 hvcsd->todo_mask |= HVCS_SCHED_READ;
27864 spin_unlock_irqrestore(&hvcsd->lock, flags);
27865
27866@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27867 hvcsd = tty->driver_data;
27868
27869 spin_lock_irqsave(&hvcsd->lock, flags);
27870- if (--hvcsd->open_count == 0) {
27871+ if (local_dec_and_test(&hvcsd->open_count)) {
27872
27873 vio_disable_interrupts(hvcsd->vdev);
27874
27875@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27876 free_irq(irq, hvcsd);
27877 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27878 return;
27879- } else if (hvcsd->open_count < 0) {
27880+ } else if (local_read(&hvcsd->open_count) < 0) {
27881 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27882 " is missmanaged.\n",
27883- hvcsd->vdev->unit_address, hvcsd->open_count);
27884+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27885 }
27886
27887 spin_unlock_irqrestore(&hvcsd->lock, flags);
27888@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27889
27890 spin_lock_irqsave(&hvcsd->lock, flags);
27891 /* Preserve this so that we know how many kref refs to put */
27892- temp_open_count = hvcsd->open_count;
27893+ temp_open_count = local_read(&hvcsd->open_count);
27894
27895 /*
27896 * Don't kref put inside the spinlock because the destruction
27897@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27898 hvcsd->tty->driver_data = NULL;
27899 hvcsd->tty = NULL;
27900
27901- hvcsd->open_count = 0;
27902+ local_set(&hvcsd->open_count, 0);
27903
27904 /* This will drop any buffered data on the floor which is OK in a hangup
27905 * scenario. */
27906@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27907 * the middle of a write operation? This is a crummy place to do this
27908 * but we want to keep it all in the spinlock.
27909 */
27910- if (hvcsd->open_count <= 0) {
27911+ if (local_read(&hvcsd->open_count) <= 0) {
27912 spin_unlock_irqrestore(&hvcsd->lock, flags);
27913 return -ENODEV;
27914 }
27915@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27916 {
27917 struct hvcs_struct *hvcsd = tty->driver_data;
27918
27919- if (!hvcsd || hvcsd->open_count <= 0)
27920+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27921 return 0;
27922
27923 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
27924diff -urNp linux-2.6.32.45/drivers/char/hvc_udbg.c linux-2.6.32.45/drivers/char/hvc_udbg.c
27925--- linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27926+++ linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27927@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27928 return i;
27929 }
27930
27931-static struct hv_ops hvc_udbg_ops = {
27932+static const struct hv_ops hvc_udbg_ops = {
27933 .get_chars = hvc_udbg_get,
27934 .put_chars = hvc_udbg_put,
27935 };
27936diff -urNp linux-2.6.32.45/drivers/char/hvc_vio.c linux-2.6.32.45/drivers/char/hvc_vio.c
27937--- linux-2.6.32.45/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27938+++ linux-2.6.32.45/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27939@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27940 return got;
27941 }
27942
27943-static struct hv_ops hvc_get_put_ops = {
27944+static const struct hv_ops hvc_get_put_ops = {
27945 .get_chars = filtered_get_chars,
27946 .put_chars = hvc_put_chars,
27947 .notifier_add = notifier_add_irq,
27948diff -urNp linux-2.6.32.45/drivers/char/hvc_xen.c linux-2.6.32.45/drivers/char/hvc_xen.c
27949--- linux-2.6.32.45/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27950+++ linux-2.6.32.45/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27951@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27952 return recv;
27953 }
27954
27955-static struct hv_ops hvc_ops = {
27956+static const struct hv_ops hvc_ops = {
27957 .get_chars = read_console,
27958 .put_chars = write_console,
27959 .notifier_add = notifier_add_irq,
27960diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c
27961--- linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27962+++ linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27963@@ -414,7 +414,7 @@ struct ipmi_smi {
27964 struct proc_dir_entry *proc_dir;
27965 char proc_dir_name[10];
27966
27967- atomic_t stats[IPMI_NUM_STATS];
27968+ atomic_unchecked_t stats[IPMI_NUM_STATS];
27969
27970 /*
27971 * run_to_completion duplicate of smb_info, smi_info
27972@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27973
27974
27975 #define ipmi_inc_stat(intf, stat) \
27976- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27977+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27978 #define ipmi_get_stat(intf, stat) \
27979- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27980+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27981
27982 static int is_lan_addr(struct ipmi_addr *addr)
27983 {
27984@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27985 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27986 init_waitqueue_head(&intf->waitq);
27987 for (i = 0; i < IPMI_NUM_STATS; i++)
27988- atomic_set(&intf->stats[i], 0);
27989+ atomic_set_unchecked(&intf->stats[i], 0);
27990
27991 intf->proc_dir = NULL;
27992
27993@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27994 struct ipmi_smi_msg smi_msg;
27995 struct ipmi_recv_msg recv_msg;
27996
27997+ pax_track_stack();
27998+
27999 si = (struct ipmi_system_interface_addr *) &addr;
28000 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
28001 si->channel = IPMI_BMC_CHANNEL;
28002diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c
28003--- linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
28004+++ linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
28005@@ -277,7 +277,7 @@ struct smi_info {
28006 unsigned char slave_addr;
28007
28008 /* Counters and things for the proc filesystem. */
28009- atomic_t stats[SI_NUM_STATS];
28010+ atomic_unchecked_t stats[SI_NUM_STATS];
28011
28012 struct task_struct *thread;
28013
28014@@ -285,9 +285,9 @@ struct smi_info {
28015 };
28016
28017 #define smi_inc_stat(smi, stat) \
28018- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
28019+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
28020 #define smi_get_stat(smi, stat) \
28021- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
28022+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
28023
28024 #define SI_MAX_PARMS 4
28025
28026@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
28027 atomic_set(&new_smi->req_events, 0);
28028 new_smi->run_to_completion = 0;
28029 for (i = 0; i < SI_NUM_STATS; i++)
28030- atomic_set(&new_smi->stats[i], 0);
28031+ atomic_set_unchecked(&new_smi->stats[i], 0);
28032
28033 new_smi->interrupt_disabled = 0;
28034 atomic_set(&new_smi->stop_operation, 0);
28035diff -urNp linux-2.6.32.45/drivers/char/istallion.c linux-2.6.32.45/drivers/char/istallion.c
28036--- linux-2.6.32.45/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
28037+++ linux-2.6.32.45/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
28038@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
28039 * re-used for each stats call.
28040 */
28041 static comstats_t stli_comstats;
28042-static combrd_t stli_brdstats;
28043 static struct asystats stli_cdkstats;
28044
28045 /*****************************************************************************/
28046@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
28047 {
28048 struct stlibrd *brdp;
28049 unsigned int i;
28050+ combrd_t stli_brdstats;
28051
28052 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
28053 return -EFAULT;
28054@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
28055 struct stliport stli_dummyport;
28056 struct stliport *portp;
28057
28058+ pax_track_stack();
28059+
28060 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
28061 return -EFAULT;
28062 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
28063@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
28064 struct stlibrd stli_dummybrd;
28065 struct stlibrd *brdp;
28066
28067+ pax_track_stack();
28068+
28069 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
28070 return -EFAULT;
28071 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
28072diff -urNp linux-2.6.32.45/drivers/char/Kconfig linux-2.6.32.45/drivers/char/Kconfig
28073--- linux-2.6.32.45/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
28074+++ linux-2.6.32.45/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
28075@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
28076
28077 config DEVKMEM
28078 bool "/dev/kmem virtual device support"
28079- default y
28080+ default n
28081+ depends on !GRKERNSEC_KMEM
28082 help
28083 Say Y here if you want to support the /dev/kmem device. The
28084 /dev/kmem device is rarely used, but can be used for certain
28085@@ -1114,6 +1115,7 @@ config DEVPORT
28086 bool
28087 depends on !M68K
28088 depends on ISA || PCI
28089+ depends on !GRKERNSEC_KMEM
28090 default y
28091
28092 source "drivers/s390/char/Kconfig"
28093diff -urNp linux-2.6.32.45/drivers/char/keyboard.c linux-2.6.32.45/drivers/char/keyboard.c
28094--- linux-2.6.32.45/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
28095+++ linux-2.6.32.45/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
28096@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
28097 kbd->kbdmode == VC_MEDIUMRAW) &&
28098 value != KVAL(K_SAK))
28099 return; /* SAK is allowed even in raw mode */
28100+
28101+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
28102+ {
28103+ void *func = fn_handler[value];
28104+ if (func == fn_show_state || func == fn_show_ptregs ||
28105+ func == fn_show_mem)
28106+ return;
28107+ }
28108+#endif
28109+
28110 fn_handler[value](vc);
28111 }
28112
28113@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
28114 .evbit = { BIT_MASK(EV_SND) },
28115 },
28116
28117- { }, /* Terminating entry */
28118+ { 0 }, /* Terminating entry */
28119 };
28120
28121 MODULE_DEVICE_TABLE(input, kbd_ids);
28122diff -urNp linux-2.6.32.45/drivers/char/mem.c linux-2.6.32.45/drivers/char/mem.c
28123--- linux-2.6.32.45/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
28124+++ linux-2.6.32.45/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
28125@@ -18,6 +18,7 @@
28126 #include <linux/raw.h>
28127 #include <linux/tty.h>
28128 #include <linux/capability.h>
28129+#include <linux/security.h>
28130 #include <linux/ptrace.h>
28131 #include <linux/device.h>
28132 #include <linux/highmem.h>
28133@@ -35,6 +36,10 @@
28134 # include <linux/efi.h>
28135 #endif
28136
28137+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28138+extern struct file_operations grsec_fops;
28139+#endif
28140+
28141 static inline unsigned long size_inside_page(unsigned long start,
28142 unsigned long size)
28143 {
28144@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
28145
28146 while (cursor < to) {
28147 if (!devmem_is_allowed(pfn)) {
28148+#ifdef CONFIG_GRKERNSEC_KMEM
28149+ gr_handle_mem_readwrite(from, to);
28150+#else
28151 printk(KERN_INFO
28152 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
28153 current->comm, from, to);
28154+#endif
28155 return 0;
28156 }
28157 cursor += PAGE_SIZE;
28158@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
28159 }
28160 return 1;
28161 }
28162+#elif defined(CONFIG_GRKERNSEC_KMEM)
28163+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28164+{
28165+ return 0;
28166+}
28167 #else
28168 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28169 {
28170@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
28171 #endif
28172
28173 while (count > 0) {
28174+ char *temp;
28175+
28176 /*
28177 * Handle first page in case it's not aligned
28178 */
28179@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
28180 if (!ptr)
28181 return -EFAULT;
28182
28183- if (copy_to_user(buf, ptr, sz)) {
28184+#ifdef CONFIG_PAX_USERCOPY
28185+ temp = kmalloc(sz, GFP_KERNEL);
28186+ if (!temp) {
28187+ unxlate_dev_mem_ptr(p, ptr);
28188+ return -ENOMEM;
28189+ }
28190+ memcpy(temp, ptr, sz);
28191+#else
28192+ temp = ptr;
28193+#endif
28194+
28195+ if (copy_to_user(buf, temp, sz)) {
28196+
28197+#ifdef CONFIG_PAX_USERCOPY
28198+ kfree(temp);
28199+#endif
28200+
28201 unxlate_dev_mem_ptr(p, ptr);
28202 return -EFAULT;
28203 }
28204
28205+#ifdef CONFIG_PAX_USERCOPY
28206+ kfree(temp);
28207+#endif
28208+
28209 unxlate_dev_mem_ptr(p, ptr);
28210
28211 buf += sz;
28212@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
28213 size_t count, loff_t *ppos)
28214 {
28215 unsigned long p = *ppos;
28216- ssize_t low_count, read, sz;
28217+ ssize_t low_count, read, sz, err = 0;
28218 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
28219- int err = 0;
28220
28221 read = 0;
28222 if (p < (unsigned long) high_memory) {
28223@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
28224 }
28225 #endif
28226 while (low_count > 0) {
28227+ char *temp;
28228+
28229 sz = size_inside_page(p, low_count);
28230
28231 /*
28232@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
28233 */
28234 kbuf = xlate_dev_kmem_ptr((char *)p);
28235
28236- if (copy_to_user(buf, kbuf, sz))
28237+#ifdef CONFIG_PAX_USERCOPY
28238+ temp = kmalloc(sz, GFP_KERNEL);
28239+ if (!temp)
28240+ return -ENOMEM;
28241+ memcpy(temp, kbuf, sz);
28242+#else
28243+ temp = kbuf;
28244+#endif
28245+
28246+ err = copy_to_user(buf, temp, sz);
28247+
28248+#ifdef CONFIG_PAX_USERCOPY
28249+ kfree(temp);
28250+#endif
28251+
28252+ if (err)
28253 return -EFAULT;
28254 buf += sz;
28255 p += sz;
28256@@ -889,6 +941,9 @@ static const struct memdev {
28257 #ifdef CONFIG_CRASH_DUMP
28258 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28259 #endif
28260+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28261+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28262+#endif
28263 };
28264
28265 static int memory_open(struct inode *inode, struct file *filp)
28266diff -urNp linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c
28267--- linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
28268+++ linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
28269@@ -29,6 +29,7 @@
28270 #include <linux/tty_driver.h>
28271 #include <linux/tty_flip.h>
28272 #include <linux/uaccess.h>
28273+#include <asm/local.h>
28274
28275 #include "tty.h"
28276 #include "network.h"
28277@@ -51,7 +52,7 @@ struct ipw_tty {
28278 int tty_type;
28279 struct ipw_network *network;
28280 struct tty_struct *linux_tty;
28281- int open_count;
28282+ local_t open_count;
28283 unsigned int control_lines;
28284 struct mutex ipw_tty_mutex;
28285 int tx_bytes_queued;
28286@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
28287 mutex_unlock(&tty->ipw_tty_mutex);
28288 return -ENODEV;
28289 }
28290- if (tty->open_count == 0)
28291+ if (local_read(&tty->open_count) == 0)
28292 tty->tx_bytes_queued = 0;
28293
28294- tty->open_count++;
28295+ local_inc(&tty->open_count);
28296
28297 tty->linux_tty = linux_tty;
28298 linux_tty->driver_data = tty;
28299@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
28300
28301 static void do_ipw_close(struct ipw_tty *tty)
28302 {
28303- tty->open_count--;
28304-
28305- if (tty->open_count == 0) {
28306+ if (local_dec_return(&tty->open_count) == 0) {
28307 struct tty_struct *linux_tty = tty->linux_tty;
28308
28309 if (linux_tty != NULL) {
28310@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
28311 return;
28312
28313 mutex_lock(&tty->ipw_tty_mutex);
28314- if (tty->open_count == 0) {
28315+ if (local_read(&tty->open_count) == 0) {
28316 mutex_unlock(&tty->ipw_tty_mutex);
28317 return;
28318 }
28319@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
28320 return;
28321 }
28322
28323- if (!tty->open_count) {
28324+ if (!local_read(&tty->open_count)) {
28325 mutex_unlock(&tty->ipw_tty_mutex);
28326 return;
28327 }
28328@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
28329 return -ENODEV;
28330
28331 mutex_lock(&tty->ipw_tty_mutex);
28332- if (!tty->open_count) {
28333+ if (!local_read(&tty->open_count)) {
28334 mutex_unlock(&tty->ipw_tty_mutex);
28335 return -EINVAL;
28336 }
28337@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
28338 if (!tty)
28339 return -ENODEV;
28340
28341- if (!tty->open_count)
28342+ if (!local_read(&tty->open_count))
28343 return -EINVAL;
28344
28345 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
28346@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
28347 if (!tty)
28348 return 0;
28349
28350- if (!tty->open_count)
28351+ if (!local_read(&tty->open_count))
28352 return 0;
28353
28354 return tty->tx_bytes_queued;
28355@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
28356 if (!tty)
28357 return -ENODEV;
28358
28359- if (!tty->open_count)
28360+ if (!local_read(&tty->open_count))
28361 return -EINVAL;
28362
28363 return get_control_lines(tty);
28364@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
28365 if (!tty)
28366 return -ENODEV;
28367
28368- if (!tty->open_count)
28369+ if (!local_read(&tty->open_count))
28370 return -EINVAL;
28371
28372 return set_control_lines(tty, set, clear);
28373@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
28374 if (!tty)
28375 return -ENODEV;
28376
28377- if (!tty->open_count)
28378+ if (!local_read(&tty->open_count))
28379 return -EINVAL;
28380
28381 /* FIXME: Exactly how is the tty object locked here .. */
28382@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
28383 against a parallel ioctl etc */
28384 mutex_lock(&ttyj->ipw_tty_mutex);
28385 }
28386- while (ttyj->open_count)
28387+ while (local_read(&ttyj->open_count))
28388 do_ipw_close(ttyj);
28389 ipwireless_disassociate_network_ttys(network,
28390 ttyj->channel_idx);
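The ipwireless hunks above replace the plain int open_count with a local_t and its helpers from asm/local.h, so the decrement-and-test in do_ipw_close() happens in a single primitive. A hedged sketch of the same usage in isolation; demo_open_count, demo_open and demo_close are placeholder names:

#include <linux/kernel.h>
#include <asm/local.h>

static local_t demo_open_count = LOCAL_INIT(0);

static void demo_open(void)
{
	/* tty->open_count++ becomes local_inc() */
	local_inc(&demo_open_count);
}

static void demo_close(void)
{
	/* "--count; if (count == 0)" collapses into one call, so the
	 * test always sees the value it just produced */
	if (local_dec_return(&demo_open_count) == 0)
		pr_debug("last opener gone\n");
}

The read-only call sites (ipw_write_room(), ipw_chars_in_buffer() and friends) only need local_read(), as the hunks show.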
28391diff -urNp linux-2.6.32.45/drivers/char/pty.c linux-2.6.32.45/drivers/char/pty.c
28392--- linux-2.6.32.45/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
28393+++ linux-2.6.32.45/drivers/char/pty.c 2011-08-05 20:33:55.000000000 -0400
28394@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
28395 register_sysctl_table(pty_root_table);
28396
28397 /* Now create the /dev/ptmx special device */
28398+ pax_open_kernel();
28399 tty_default_fops(&ptmx_fops);
28400- ptmx_fops.open = ptmx_open;
28401+ *(void **)&ptmx_fops.open = ptmx_open;
28402+ pax_close_kernel();
28403
28404 cdev_init(&ptmx_cdev, &ptmx_fops);
28405 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
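The ptmx_fops hunk above is one instance of the write-window idiom this patch uses wherever it must modify a structure that the constify/KERNEXEC changes place in read-only memory: open the window with pax_open_kernel(), store through a *(void **) cast, close it again. A sketch of the idiom, assuming pax_open_kernel()/pax_close_kernel() as provided elsewhere by this patch; my_fops and my_open are placeholder names:

#include <linux/fs.h>

static int my_open(struct inode *inode, struct file *filp)
{
	return 0;
}

static struct file_operations my_fops;	/* treated as read-only after setup under this patch */

static void my_fops_fixup(void)
{
	pax_open_kernel();
	*(void **)&my_fops.open = my_open;	/* write through the temporary window */
	pax_close_kernel();
}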
28406diff -urNp linux-2.6.32.45/drivers/char/random.c linux-2.6.32.45/drivers/char/random.c
28407--- linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:37:25.000000000 -0400
28408+++ linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:43:23.000000000 -0400
28409@@ -254,8 +254,13 @@
28410 /*
28411 * Configuration information
28412 */
28413+#ifdef CONFIG_GRKERNSEC_RANDNET
28414+#define INPUT_POOL_WORDS 512
28415+#define OUTPUT_POOL_WORDS 128
28416+#else
28417 #define INPUT_POOL_WORDS 128
28418 #define OUTPUT_POOL_WORDS 32
28419+#endif
28420 #define SEC_XFER_SIZE 512
28421
28422 /*
28423@@ -292,10 +297,17 @@ static struct poolinfo {
28424 int poolwords;
28425 int tap1, tap2, tap3, tap4, tap5;
28426 } poolinfo_table[] = {
28427+#ifdef CONFIG_GRKERNSEC_RANDNET
28428+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28429+ { 512, 411, 308, 208, 104, 1 },
28430+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28431+ { 128, 103, 76, 51, 25, 1 },
28432+#else
28433 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28434 { 128, 103, 76, 51, 25, 1 },
28435 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28436 { 32, 26, 20, 14, 7, 1 },
28437+#endif
28438 #if 0
28439 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28440 { 2048, 1638, 1231, 819, 411, 1 },
28441@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28442 #include <linux/sysctl.h>
28443
28444 static int min_read_thresh = 8, min_write_thresh;
28445-static int max_read_thresh = INPUT_POOL_WORDS * 32;
28446+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28447 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28448 static char sysctl_bootid[16];
28449
28450diff -urNp linux-2.6.32.45/drivers/char/rocket.c linux-2.6.32.45/drivers/char/rocket.c
28451--- linux-2.6.32.45/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
28452+++ linux-2.6.32.45/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
28453@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
28454 struct rocket_ports tmp;
28455 int board;
28456
28457+ pax_track_stack();
28458+
28459 if (!retports)
28460 return -EFAULT;
28461 memset(&tmp, 0, sizeof (tmp));
28462diff -urNp linux-2.6.32.45/drivers/char/sonypi.c linux-2.6.32.45/drivers/char/sonypi.c
28463--- linux-2.6.32.45/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
28464+++ linux-2.6.32.45/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
28465@@ -55,6 +55,7 @@
28466 #include <asm/uaccess.h>
28467 #include <asm/io.h>
28468 #include <asm/system.h>
28469+#include <asm/local.h>
28470
28471 #include <linux/sonypi.h>
28472
28473@@ -491,7 +492,7 @@ static struct sonypi_device {
28474 spinlock_t fifo_lock;
28475 wait_queue_head_t fifo_proc_list;
28476 struct fasync_struct *fifo_async;
28477- int open_count;
28478+ local_t open_count;
28479 int model;
28480 struct input_dev *input_jog_dev;
28481 struct input_dev *input_key_dev;
28482@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
28483 static int sonypi_misc_release(struct inode *inode, struct file *file)
28484 {
28485 mutex_lock(&sonypi_device.lock);
28486- sonypi_device.open_count--;
28487+ local_dec(&sonypi_device.open_count);
28488 mutex_unlock(&sonypi_device.lock);
28489 return 0;
28490 }
28491@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
28492 lock_kernel();
28493 mutex_lock(&sonypi_device.lock);
28494 /* Flush input queue on first open */
28495- if (!sonypi_device.open_count)
28496+ if (!local_read(&sonypi_device.open_count))
28497 kfifo_reset(sonypi_device.fifo);
28498- sonypi_device.open_count++;
28499+ local_inc(&sonypi_device.open_count);
28500 mutex_unlock(&sonypi_device.lock);
28501 unlock_kernel();
28502 return 0;
28503diff -urNp linux-2.6.32.45/drivers/char/stallion.c linux-2.6.32.45/drivers/char/stallion.c
28504--- linux-2.6.32.45/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
28505+++ linux-2.6.32.45/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
28506@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
28507 struct stlport stl_dummyport;
28508 struct stlport *portp;
28509
28510+ pax_track_stack();
28511+
28512 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
28513 return -EFAULT;
28514 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
28515diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm_bios.c linux-2.6.32.45/drivers/char/tpm/tpm_bios.c
28516--- linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
28517+++ linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
28518@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
28519 event = addr;
28520
28521 if ((event->event_type == 0 && event->event_size == 0) ||
28522- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28523+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28524 return NULL;
28525
28526 return addr;
28527@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
28528 return NULL;
28529
28530 if ((event->event_type == 0 && event->event_size == 0) ||
28531- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28532+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28533 return NULL;
28534
28535 (*pos)++;
28536@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
28537 int i;
28538
28539 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28540- seq_putc(m, data[i]);
28541+ if (!seq_putc(m, data[i]))
28542+ return -EFAULT;
28543
28544 return 0;
28545 }
28546@@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
28547 log->bios_event_log_end = log->bios_event_log + len;
28548
28549 virt = acpi_os_map_memory(start, len);
28550+ if (!virt) {
28551+ kfree(log->bios_event_log);
28552+ log->bios_event_log = NULL;
28553+ return -EFAULT;
28554+ }
28555
28556 memcpy(log->bios_event_log, virt, len);
28557
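The two measurement-walk hunks above turn "addr + sizeof(struct tcpa_event) + event->event_size >= limit" into "event->event_size >= limit - addr - sizeof(struct tcpa_event)". With an attacker-influenced event_size the original sum can wrap and compare as small, letting the walk run past the end of the log; keeping the untrusted value alone on the left means it is never added to anything, so nothing wraps, provided addr itself stays inside the mapped log. A standalone sketch of the difference, with made-up sizes:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t addr = 0x1000, limit = 0x2000, hdr = 32;
	size_t evil_size = (size_t)-16;		/* huge length from untrusted data */

	/* Old test: the addition wraps around, so the bogus event "fits". */
	int old_accepts = !(addr + hdr + evil_size >= limit);
	/* Patched test: the untrusted value is only compared, never added. */
	int new_accepts = !(evil_size >= limit - addr - hdr);

	printf("old check accepts bogus event: %d\n", old_accepts);	/* prints 1 */
	printf("new check accepts bogus event: %d\n", new_accepts);	/* prints 0 */
	return 0;
}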
28558diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm.c linux-2.6.32.45/drivers/char/tpm/tpm.c
28559--- linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
28560+++ linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
28561@@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
28562 chip->vendor.req_complete_val)
28563 goto out_recv;
28564
28565- if ((status == chip->vendor.req_canceled)) {
28566+ if (status == chip->vendor.req_canceled) {
28567 dev_err(chip->dev, "Operation Canceled\n");
28568 rc = -ECANCELED;
28569 goto out;
28570@@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
28571
28572 struct tpm_chip *chip = dev_get_drvdata(dev);
28573
28574+ pax_track_stack();
28575+
28576 tpm_cmd.header.in = tpm_readpubek_header;
28577 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
28578 "attempting to read the PUBEK");
28579diff -urNp linux-2.6.32.45/drivers/char/tty_io.c linux-2.6.32.45/drivers/char/tty_io.c
28580--- linux-2.6.32.45/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
28581+++ linux-2.6.32.45/drivers/char/tty_io.c 2011-08-05 20:33:55.000000000 -0400
28582@@ -2582,8 +2582,10 @@ long tty_ioctl(struct file *file, unsign
28583 return retval;
28584 }
28585
28586+EXPORT_SYMBOL(tty_ioctl);
28587+
28588 #ifdef CONFIG_COMPAT
28589-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
28590+long tty_compat_ioctl(struct file *file, unsigned int cmd,
28591 unsigned long arg)
28592 {
28593 struct inode *inode = file->f_dentry->d_inode;
28594@@ -2607,6 +2609,8 @@ static long tty_compat_ioctl(struct file
28595
28596 return retval;
28597 }
28598+
28599+EXPORT_SYMBOL(tty_compat_ioctl);
28600 #endif
28601
28602 /*
28603@@ -3052,7 +3056,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
28604
28605 void tty_default_fops(struct file_operations *fops)
28606 {
28607- *fops = tty_fops;
28608+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
28609 }
28610
28611 /*
28612diff -urNp linux-2.6.32.45/drivers/char/tty_ldisc.c linux-2.6.32.45/drivers/char/tty_ldisc.c
28613--- linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
28614+++ linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
28615@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
28616 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
28617 struct tty_ldisc_ops *ldo = ld->ops;
28618
28619- ldo->refcount--;
28620+ atomic_dec(&ldo->refcount);
28621 module_put(ldo->owner);
28622 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28623
28624@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
28625 spin_lock_irqsave(&tty_ldisc_lock, flags);
28626 tty_ldiscs[disc] = new_ldisc;
28627 new_ldisc->num = disc;
28628- new_ldisc->refcount = 0;
28629+ atomic_set(&new_ldisc->refcount, 0);
28630 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28631
28632 return ret;
28633@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
28634 return -EINVAL;
28635
28636 spin_lock_irqsave(&tty_ldisc_lock, flags);
28637- if (tty_ldiscs[disc]->refcount)
28638+ if (atomic_read(&tty_ldiscs[disc]->refcount))
28639 ret = -EBUSY;
28640 else
28641 tty_ldiscs[disc] = NULL;
28642@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
28643 if (ldops) {
28644 ret = ERR_PTR(-EAGAIN);
28645 if (try_module_get(ldops->owner)) {
28646- ldops->refcount++;
28647+ atomic_inc(&ldops->refcount);
28648 ret = ldops;
28649 }
28650 }
28651@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
28652 unsigned long flags;
28653
28654 spin_lock_irqsave(&tty_ldisc_lock, flags);
28655- ldops->refcount--;
28656+ atomic_dec(&ldops->refcount);
28657 module_put(ldops->owner);
28658 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28659 }
28660diff -urNp linux-2.6.32.45/drivers/char/virtio_console.c linux-2.6.32.45/drivers/char/virtio_console.c
28661--- linux-2.6.32.45/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28662+++ linux-2.6.32.45/drivers/char/virtio_console.c 2011-08-05 20:33:55.000000000 -0400
28663@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *
28664 * virtqueue, so we let the drivers do some boutique early-output thing. */
28665 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
28666 {
28667- virtio_cons.put_chars = put_chars;
28668+ pax_open_kernel();
28669+ *(void **)&virtio_cons.put_chars = put_chars;
28670+ pax_close_kernel();
28671 return hvc_instantiate(0, 0, &virtio_cons);
28672 }
28673
28674@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(stru
28675 out_vq = vqs[1];
28676
28677 /* Start using the new console output. */
28678- virtio_cons.get_chars = get_chars;
28679- virtio_cons.put_chars = put_chars;
28680- virtio_cons.notifier_add = notifier_add_vio;
28681- virtio_cons.notifier_del = notifier_del_vio;
28682- virtio_cons.notifier_hangup = notifier_del_vio;
28683+ pax_open_kernel();
28684+ *(void **)&virtio_cons.get_chars = get_chars;
28685+ *(void **)&virtio_cons.put_chars = put_chars;
28686+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
28687+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
28688+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
28689+ pax_close_kernel();
28690
28691 /* The first argument of hvc_alloc() is the virtual console number, so
28692 * we use zero. The second argument is the parameter for the
28693diff -urNp linux-2.6.32.45/drivers/char/vt.c linux-2.6.32.45/drivers/char/vt.c
28694--- linux-2.6.32.45/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28695+++ linux-2.6.32.45/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28696@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28697
28698 static void notify_write(struct vc_data *vc, unsigned int unicode)
28699 {
28700- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28701+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
28702 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28703 }
28704
28705diff -urNp linux-2.6.32.45/drivers/char/vt_ioctl.c linux-2.6.32.45/drivers/char/vt_ioctl.c
28706--- linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28707+++ linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28708@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28709 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28710 return -EFAULT;
28711
28712- if (!capable(CAP_SYS_TTY_CONFIG))
28713- perm = 0;
28714-
28715 switch (cmd) {
28716 case KDGKBENT:
28717 key_map = key_maps[s];
28718@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28719 val = (i ? K_HOLE : K_NOSUCHMAP);
28720 return put_user(val, &user_kbe->kb_value);
28721 case KDSKBENT:
28722+ if (!capable(CAP_SYS_TTY_CONFIG))
28723+ perm = 0;
28724+
28725 if (!perm)
28726 return -EPERM;
28727+
28728 if (!i && v == K_NOSUCHMAP) {
28729 /* deallocate map */
28730 key_map = key_maps[s];
28731@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28732 int i, j, k;
28733 int ret;
28734
28735- if (!capable(CAP_SYS_TTY_CONFIG))
28736- perm = 0;
28737-
28738 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28739 if (!kbs) {
28740 ret = -ENOMEM;
28741@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28742 kfree(kbs);
28743 return ((p && *p) ? -EOVERFLOW : 0);
28744 case KDSKBSENT:
28745+ if (!capable(CAP_SYS_TTY_CONFIG))
28746+ perm = 0;
28747+
28748 if (!perm) {
28749 ret = -EPERM;
28750 goto reterr;
28751diff -urNp linux-2.6.32.45/drivers/cpufreq/cpufreq.c linux-2.6.32.45/drivers/cpufreq/cpufreq.c
28752--- linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28753+++ linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28754@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28755 complete(&policy->kobj_unregister);
28756 }
28757
28758-static struct sysfs_ops sysfs_ops = {
28759+static const struct sysfs_ops sysfs_ops = {
28760 .show = show,
28761 .store = store,
28762 };
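The cpufreq change above, like the cpuidle, edd, efivars, ibft, memmap, edac and ioat hunks that follow, only adds const to a sysfs_ops table: the function pointers are fixed at build time, so the table can live in read-only data where a runtime write cannot redirect the handlers. The shape of such a table, with placeholder show/store handlers:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	return 0;
}

static ssize_t demo_store(struct kobject *kobj, struct attribute *attr,
			  const char *buf, size_t count)
{
	return count;
}

/* const: the table ends up in .rodata instead of writable data */
static const struct sysfs_ops demo_sysfs_ops = {
	.show	= demo_show,
	.store	= demo_store,
};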
28763diff -urNp linux-2.6.32.45/drivers/cpuidle/sysfs.c linux-2.6.32.45/drivers/cpuidle/sysfs.c
28764--- linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28765+++ linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28766@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28767 return ret;
28768 }
28769
28770-static struct sysfs_ops cpuidle_sysfs_ops = {
28771+static const struct sysfs_ops cpuidle_sysfs_ops = {
28772 .show = cpuidle_show,
28773 .store = cpuidle_store,
28774 };
28775@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28776 return ret;
28777 }
28778
28779-static struct sysfs_ops cpuidle_state_sysfs_ops = {
28780+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28781 .show = cpuidle_state_show,
28782 };
28783
28784@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28785 .release = cpuidle_state_sysfs_release,
28786 };
28787
28788-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28789+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28790 {
28791 kobject_put(&device->kobjs[i]->kobj);
28792 wait_for_completion(&device->kobjs[i]->kobj_unregister);
28793diff -urNp linux-2.6.32.45/drivers/crypto/hifn_795x.c linux-2.6.32.45/drivers/crypto/hifn_795x.c
28794--- linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28795+++ linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28796@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28797 0xCA, 0x34, 0x2B, 0x2E};
28798 struct scatterlist sg;
28799
28800+ pax_track_stack();
28801+
28802 memset(src, 0, sizeof(src));
28803 memset(ctx.key, 0, sizeof(ctx.key));
28804
28805diff -urNp linux-2.6.32.45/drivers/crypto/padlock-aes.c linux-2.6.32.45/drivers/crypto/padlock-aes.c
28806--- linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28807+++ linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28808@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28809 struct crypto_aes_ctx gen_aes;
28810 int cpu;
28811
28812+ pax_track_stack();
28813+
28814 if (key_len % 8) {
28815 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28816 return -EINVAL;
28817diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.c linux-2.6.32.45/drivers/dma/ioat/dma.c
28818--- linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28819+++ linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28820@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28821 return entry->show(&chan->common, page);
28822 }
28823
28824-struct sysfs_ops ioat_sysfs_ops = {
28825+const struct sysfs_ops ioat_sysfs_ops = {
28826 .show = ioat_attr_show,
28827 };
28828
28829diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.h linux-2.6.32.45/drivers/dma/ioat/dma.h
28830--- linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28831+++ linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28832@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28833 unsigned long *phys_complete);
28834 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28835 void ioat_kobject_del(struct ioatdma_device *device);
28836-extern struct sysfs_ops ioat_sysfs_ops;
28837+extern const struct sysfs_ops ioat_sysfs_ops;
28838 extern struct ioat_sysfs_entry ioat_version_attr;
28839 extern struct ioat_sysfs_entry ioat_cap_attr;
28840 #endif /* IOATDMA_H */
28841diff -urNp linux-2.6.32.45/drivers/edac/edac_device_sysfs.c linux-2.6.32.45/drivers/edac/edac_device_sysfs.c
28842--- linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28843+++ linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28844@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28845 }
28846
28847 /* edac_dev file operations for an 'ctl_info' */
28848-static struct sysfs_ops device_ctl_info_ops = {
28849+static const struct sysfs_ops device_ctl_info_ops = {
28850 .show = edac_dev_ctl_info_show,
28851 .store = edac_dev_ctl_info_store
28852 };
28853@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28854 }
28855
28856 /* edac_dev file operations for an 'instance' */
28857-static struct sysfs_ops device_instance_ops = {
28858+static const struct sysfs_ops device_instance_ops = {
28859 .show = edac_dev_instance_show,
28860 .store = edac_dev_instance_store
28861 };
28862@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28863 }
28864
28865 /* edac_dev file operations for a 'block' */
28866-static struct sysfs_ops device_block_ops = {
28867+static const struct sysfs_ops device_block_ops = {
28868 .show = edac_dev_block_show,
28869 .store = edac_dev_block_store
28870 };
28871diff -urNp linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c
28872--- linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28873+++ linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28874@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28875 return -EIO;
28876 }
28877
28878-static struct sysfs_ops csrowfs_ops = {
28879+static const struct sysfs_ops csrowfs_ops = {
28880 .show = csrowdev_show,
28881 .store = csrowdev_store
28882 };
28883@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28884 }
28885
28886 /* Intermediate show/store table */
28887-static struct sysfs_ops mci_ops = {
28888+static const struct sysfs_ops mci_ops = {
28889 .show = mcidev_show,
28890 .store = mcidev_store
28891 };
28892diff -urNp linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c
28893--- linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28894+++ linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28895@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28896 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28897 static int edac_pci_poll_msec = 1000; /* one second workq period */
28898
28899-static atomic_t pci_parity_count = ATOMIC_INIT(0);
28900-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28901+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28902+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28903
28904 static struct kobject *edac_pci_top_main_kobj;
28905 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28906@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28907 }
28908
28909 /* fs_ops table */
28910-static struct sysfs_ops pci_instance_ops = {
28911+static const struct sysfs_ops pci_instance_ops = {
28912 .show = edac_pci_instance_show,
28913 .store = edac_pci_instance_store
28914 };
28915@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28916 return -EIO;
28917 }
28918
28919-static struct sysfs_ops edac_pci_sysfs_ops = {
28920+static const struct sysfs_ops edac_pci_sysfs_ops = {
28921 .show = edac_pci_dev_show,
28922 .store = edac_pci_dev_store
28923 };
28924@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28925 edac_printk(KERN_CRIT, EDAC_PCI,
28926 "Signaled System Error on %s\n",
28927 pci_name(dev));
28928- atomic_inc(&pci_nonparity_count);
28929+ atomic_inc_unchecked(&pci_nonparity_count);
28930 }
28931
28932 if (status & (PCI_STATUS_PARITY)) {
28933@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28934 "Master Data Parity Error on %s\n",
28935 pci_name(dev));
28936
28937- atomic_inc(&pci_parity_count);
28938+ atomic_inc_unchecked(&pci_parity_count);
28939 }
28940
28941 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28942@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28943 "Detected Parity Error on %s\n",
28944 pci_name(dev));
28945
28946- atomic_inc(&pci_parity_count);
28947+ atomic_inc_unchecked(&pci_parity_count);
28948 }
28949 }
28950
28951@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28952 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28953 "Signaled System Error on %s\n",
28954 pci_name(dev));
28955- atomic_inc(&pci_nonparity_count);
28956+ atomic_inc_unchecked(&pci_nonparity_count);
28957 }
28958
28959 if (status & (PCI_STATUS_PARITY)) {
28960@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28961 "Master Data Parity Error on "
28962 "%s\n", pci_name(dev));
28963
28964- atomic_inc(&pci_parity_count);
28965+ atomic_inc_unchecked(&pci_parity_count);
28966 }
28967
28968 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28969@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28970 "Detected Parity Error on %s\n",
28971 pci_name(dev));
28972
28973- atomic_inc(&pci_parity_count);
28974+ atomic_inc_unchecked(&pci_parity_count);
28975 }
28976 }
28977 }
28978@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28979 if (!check_pci_errors)
28980 return;
28981
28982- before_count = atomic_read(&pci_parity_count);
28983+ before_count = atomic_read_unchecked(&pci_parity_count);
28984
28985 /* scan all PCI devices looking for a Parity Error on devices and
28986 * bridges.
28987@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28988 /* Only if operator has selected panic on PCI Error */
28989 if (edac_pci_get_panic_on_pe()) {
28990 /* If the count is different 'after' from 'before' */
28991- if (before_count != atomic_read(&pci_parity_count))
28992+ if (before_count != atomic_read_unchecked(&pci_parity_count))
28993 panic("EDAC: PCI Parity Error");
28994 }
28995 }
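The edac hunks above convert the PCI parity/non-parity counters from atomic_t to atomic_unchecked_t. Under PAX_REFCOUNT the ordinary atomic_t operations gain overflow detection aimed at reference counts, so statistics that may legitimately grow without bound are switched to the unchecked variant to avoid tripping that check. atomic_unchecked_t and the *_unchecked helpers are defined by earlier parts of this patch; the sketch below only shows the intended split, with made-up names:

#include <asm/atomic.h>

/* Reference count: keep the checked type so an overflow is caught. */
static atomic_t demo_refcount = ATOMIC_INIT(1);

/* Pure statistic that is allowed to wrap: use the unchecked variant. */
static atomic_unchecked_t demo_event_count = ATOMIC_INIT(0);

static void demo_event(void)
{
	atomic_inc(&demo_refcount);		 /* overflow-checked under PAX_REFCOUNT */
	atomic_inc_unchecked(&demo_event_count); /* plain wrapping counter */
}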
28996diff -urNp linux-2.6.32.45/drivers/firewire/core-card.c linux-2.6.32.45/drivers/firewire/core-card.c
28997--- linux-2.6.32.45/drivers/firewire/core-card.c 2011-03-27 14:31:47.000000000 -0400
28998+++ linux-2.6.32.45/drivers/firewire/core-card.c 2011-08-23 21:22:32.000000000 -0400
28999@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
29000
29001 void fw_core_remove_card(struct fw_card *card)
29002 {
29003- struct fw_card_driver dummy_driver = dummy_driver_template;
29004+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
29005
29006 card->driver->update_phy_reg(card, 4,
29007 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29008diff -urNp linux-2.6.32.45/drivers/firewire/core-cdev.c linux-2.6.32.45/drivers/firewire/core-cdev.c
29009--- linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
29010+++ linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
29011@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
29012 int ret;
29013
29014 if ((request->channels == 0 && request->bandwidth == 0) ||
29015- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29016- request->bandwidth < 0)
29017+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29018 return -EINVAL;
29019
29020 r = kmalloc(sizeof(*r), GFP_KERNEL);
29021diff -urNp linux-2.6.32.45/drivers/firewire/core.h linux-2.6.32.45/drivers/firewire/core.h
29022--- linux-2.6.32.45/drivers/firewire/core.h 2011-03-27 14:31:47.000000000 -0400
29023+++ linux-2.6.32.45/drivers/firewire/core.h 2011-08-23 20:24:26.000000000 -0400
29024@@ -86,6 +86,7 @@ struct fw_card_driver {
29025
29026 int (*stop_iso)(struct fw_iso_context *ctx);
29027 };
29028+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29029
29030 void fw_card_initialize(struct fw_card *card,
29031 const struct fw_card_driver *driver, struct device *device);
29032diff -urNp linux-2.6.32.45/drivers/firewire/core-transaction.c linux-2.6.32.45/drivers/firewire/core-transaction.c
29033--- linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
29034+++ linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
29035@@ -36,6 +36,7 @@
29036 #include <linux/string.h>
29037 #include <linux/timer.h>
29038 #include <linux/types.h>
29039+#include <linux/sched.h>
29040
29041 #include <asm/byteorder.h>
29042
29043@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
29044 struct transaction_callback_data d;
29045 struct fw_transaction t;
29046
29047+ pax_track_stack();
29048+
29049 init_completion(&d.done);
29050 d.payload = payload;
29051 fw_send_request(card, &t, tcode, destination_id, generation, speed,
29052diff -urNp linux-2.6.32.45/drivers/firmware/dmi_scan.c linux-2.6.32.45/drivers/firmware/dmi_scan.c
29053--- linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
29054+++ linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
29055@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
29056 }
29057 }
29058 else {
29059- /*
29060- * no iounmap() for that ioremap(); it would be a no-op, but
29061- * it's so early in setup that sucker gets confused into doing
29062- * what it shouldn't if we actually call it.
29063- */
29064 p = dmi_ioremap(0xF0000, 0x10000);
29065 if (p == NULL)
29066 goto error;
29067diff -urNp linux-2.6.32.45/drivers/firmware/edd.c linux-2.6.32.45/drivers/firmware/edd.c
29068--- linux-2.6.32.45/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
29069+++ linux-2.6.32.45/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
29070@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
29071 return ret;
29072 }
29073
29074-static struct sysfs_ops edd_attr_ops = {
29075+static const struct sysfs_ops edd_attr_ops = {
29076 .show = edd_attr_show,
29077 };
29078
29079diff -urNp linux-2.6.32.45/drivers/firmware/efivars.c linux-2.6.32.45/drivers/firmware/efivars.c
29080--- linux-2.6.32.45/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
29081+++ linux-2.6.32.45/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
29082@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
29083 return ret;
29084 }
29085
29086-static struct sysfs_ops efivar_attr_ops = {
29087+static const struct sysfs_ops efivar_attr_ops = {
29088 .show = efivar_attr_show,
29089 .store = efivar_attr_store,
29090 };
29091diff -urNp linux-2.6.32.45/drivers/firmware/iscsi_ibft.c linux-2.6.32.45/drivers/firmware/iscsi_ibft.c
29092--- linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
29093+++ linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
29094@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
29095 return ret;
29096 }
29097
29098-static struct sysfs_ops ibft_attr_ops = {
29099+static const struct sysfs_ops ibft_attr_ops = {
29100 .show = ibft_show_attribute,
29101 };
29102
29103diff -urNp linux-2.6.32.45/drivers/firmware/memmap.c linux-2.6.32.45/drivers/firmware/memmap.c
29104--- linux-2.6.32.45/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
29105+++ linux-2.6.32.45/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
29106@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
29107 NULL
29108 };
29109
29110-static struct sysfs_ops memmap_attr_ops = {
29111+static const struct sysfs_ops memmap_attr_ops = {
29112 .show = memmap_attr_show,
29113 };
29114
29115diff -urNp linux-2.6.32.45/drivers/gpio/vr41xx_giu.c linux-2.6.32.45/drivers/gpio/vr41xx_giu.c
29116--- linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
29117+++ linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
29118@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29119 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29120 maskl, pendl, maskh, pendh);
29121
29122- atomic_inc(&irq_err_count);
29123+ atomic_inc_unchecked(&irq_err_count);
29124
29125 return -EINVAL;
29126 }
29127diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c
29128--- linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
29129+++ linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
29130@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
29131 struct drm_crtc *tmp;
29132 int crtc_mask = 1;
29133
29134- WARN(!crtc, "checking null crtc?");
29135+ BUG_ON(!crtc);
29136
29137 dev = crtc->dev;
29138
29139@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
29140
29141 adjusted_mode = drm_mode_duplicate(dev, mode);
29142
29143+ pax_track_stack();
29144+
29145 crtc->enabled = drm_helper_crtc_in_use(crtc);
29146
29147 if (!crtc->enabled)
29148diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_drv.c linux-2.6.32.45/drivers/gpu/drm/drm_drv.c
29149--- linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
29150+++ linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
29151@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
29152 char *kdata = NULL;
29153
29154 atomic_inc(&dev->ioctl_count);
29155- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29156+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29157 ++file_priv->ioctl_count;
29158
29159 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29160diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_fops.c linux-2.6.32.45/drivers/gpu/drm/drm_fops.c
29161--- linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
29162+++ linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
29163@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
29164 }
29165
29166 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29167- atomic_set(&dev->counts[i], 0);
29168+ atomic_set_unchecked(&dev->counts[i], 0);
29169
29170 dev->sigdata.lock = NULL;
29171
29172@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
29173
29174 retcode = drm_open_helper(inode, filp, dev);
29175 if (!retcode) {
29176- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29177+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29178 spin_lock(&dev->count_lock);
29179- if (!dev->open_count++) {
29180+ if (local_inc_return(&dev->open_count) == 1) {
29181 spin_unlock(&dev->count_lock);
29182 retcode = drm_setup(dev);
29183 goto out;
29184@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
29185
29186 lock_kernel();
29187
29188- DRM_DEBUG("open_count = %d\n", dev->open_count);
29189+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
29190
29191 if (dev->driver->preclose)
29192 dev->driver->preclose(dev, file_priv);
29193@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
29194 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29195 task_pid_nr(current),
29196 (long)old_encode_dev(file_priv->minor->device),
29197- dev->open_count);
29198+ local_read(&dev->open_count));
29199
29200 /* if the master has gone away we can't do anything with the lock */
29201 if (file_priv->minor->master)
29202@@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
29203 * End inline drm_release
29204 */
29205
29206- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29207+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29208 spin_lock(&dev->count_lock);
29209- if (!--dev->open_count) {
29210+ if (local_dec_and_test(&dev->open_count)) {
29211 if (atomic_read(&dev->ioctl_count)) {
29212 DRM_ERROR("Device busy: %d\n",
29213 atomic_read(&dev->ioctl_count));
29214diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_gem.c linux-2.6.32.45/drivers/gpu/drm/drm_gem.c
29215--- linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
29216+++ linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
29217@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
29218 spin_lock_init(&dev->object_name_lock);
29219 idr_init(&dev->object_name_idr);
29220 atomic_set(&dev->object_count, 0);
29221- atomic_set(&dev->object_memory, 0);
29222+ atomic_set_unchecked(&dev->object_memory, 0);
29223 atomic_set(&dev->pin_count, 0);
29224- atomic_set(&dev->pin_memory, 0);
29225+ atomic_set_unchecked(&dev->pin_memory, 0);
29226 atomic_set(&dev->gtt_count, 0);
29227- atomic_set(&dev->gtt_memory, 0);
29228+ atomic_set_unchecked(&dev->gtt_memory, 0);
29229
29230 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
29231 if (!mm) {
29232@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
29233 goto fput;
29234 }
29235 atomic_inc(&dev->object_count);
29236- atomic_add(obj->size, &dev->object_memory);
29237+ atomic_add_unchecked(obj->size, &dev->object_memory);
29238 return obj;
29239 fput:
29240 fput(obj->filp);
29241@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
29242
29243 fput(obj->filp);
29244 atomic_dec(&dev->object_count);
29245- atomic_sub(obj->size, &dev->object_memory);
29246+ atomic_sub_unchecked(obj->size, &dev->object_memory);
29247 kfree(obj);
29248 }
29249 EXPORT_SYMBOL(drm_gem_object_free);
29250diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_info.c linux-2.6.32.45/drivers/gpu/drm/drm_info.c
29251--- linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
29252+++ linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
29253@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
29254 struct drm_local_map *map;
29255 struct drm_map_list *r_list;
29256
29257- /* Hardcoded from _DRM_FRAME_BUFFER,
29258- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29259- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29260- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29261+ static const char * const types[] = {
29262+ [_DRM_FRAME_BUFFER] = "FB",
29263+ [_DRM_REGISTERS] = "REG",
29264+ [_DRM_SHM] = "SHM",
29265+ [_DRM_AGP] = "AGP",
29266+ [_DRM_SCATTER_GATHER] = "SG",
29267+ [_DRM_CONSISTENT] = "PCI",
29268+ [_DRM_GEM] = "GEM" };
29269 const char *type;
29270 int i;
29271
29272@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
29273 map = r_list->map;
29274 if (!map)
29275 continue;
29276- if (map->type < 0 || map->type > 5)
29277+ if (map->type >= ARRAY_SIZE(types))
29278 type = "??";
29279 else
29280 type = types[map->type];
29281@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
29282 struct drm_device *dev = node->minor->dev;
29283
29284 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
29285- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
29286+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
29287 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
29288- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
29289- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
29290+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
29291+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
29292 seq_printf(m, "%d gtt total\n", dev->gtt_total);
29293 return 0;
29294 }
29295@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
29296 mutex_lock(&dev->struct_mutex);
29297 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
29298 atomic_read(&dev->vma_count),
29299+#ifdef CONFIG_GRKERNSEC_HIDESYM
29300+ NULL, 0);
29301+#else
29302 high_memory, (u64)virt_to_phys(high_memory));
29303+#endif
29304
29305 list_for_each_entry(pt, &dev->vmalist, head) {
29306 vma = pt->vma;
29307@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
29308 continue;
29309 seq_printf(m,
29310 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
29311- pt->pid, vma->vm_start, vma->vm_end,
29312+ pt->pid,
29313+#ifdef CONFIG_GRKERNSEC_HIDESYM
29314+ 0, 0,
29315+#else
29316+ vma->vm_start, vma->vm_end,
29317+#endif
29318 vma->vm_flags & VM_READ ? 'r' : '-',
29319 vma->vm_flags & VM_WRITE ? 'w' : '-',
29320 vma->vm_flags & VM_EXEC ? 'x' : '-',
29321 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29322 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29323 vma->vm_flags & VM_IO ? 'i' : '-',
29324+#ifdef CONFIG_GRKERNSEC_HIDESYM
29325+ 0);
29326+#else
29327 vma->vm_pgoff);
29328+#endif
29329
29330 #if defined(__i386__)
29331 pgprot = pgprot_val(vma->vm_page_prot);
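The drm_vm_info() hunk above replaces a comment-documented, positionally ordered string array and a hard-coded "type > 5" bound with designated initializers indexed by the _DRM_* map-type constants and an ARRAY_SIZE() check, so the table cannot silently drift out of sync with the enum. The same shape with generic names:

#include <linux/kernel.h>	/* ARRAY_SIZE */

static const char * const demo_types[] = {
	[0] = "FB",
	[1] = "REG",
	[2] = "SHM",
};

static const char *demo_type_name(unsigned int type)
{
	/* Bound the lookup by the real table size, not a magic number. */
	if (type >= ARRAY_SIZE(demo_types))
		return "??";
	return demo_types[type];
}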
29332diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c
29333--- linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
29334+++ linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
29335@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
29336 stats->data[i].value =
29337 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29338 else
29339- stats->data[i].value = atomic_read(&dev->counts[i]);
29340+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29341 stats->data[i].type = dev->types[i];
29342 }
29343
29344diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_lock.c linux-2.6.32.45/drivers/gpu/drm/drm_lock.c
29345--- linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
29346+++ linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
29347@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
29348 if (drm_lock_take(&master->lock, lock->context)) {
29349 master->lock.file_priv = file_priv;
29350 master->lock.lock_time = jiffies;
29351- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29352+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29353 break; /* Got lock */
29354 }
29355
29356@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
29357 return -EINVAL;
29358 }
29359
29360- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29361+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29362
29363 /* kernel_context_switch isn't used by any of the x86 drm
29364 * modules but is required by the Sparc driver.
29365diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c
29366--- linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
29367+++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
29368@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
29369 dma->buflist[vertex->idx],
29370 vertex->discard, vertex->used);
29371
29372- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29373- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29374+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29375+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29376 sarea_priv->last_enqueue = dev_priv->counter - 1;
29377 sarea_priv->last_dispatch = (int)hw_status[5];
29378
29379@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
29380 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29381 mc->last_render);
29382
29383- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29384- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29385+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29386+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29387 sarea_priv->last_enqueue = dev_priv->counter - 1;
29388 sarea_priv->last_dispatch = (int)hw_status[5];
29389
29390diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h
29391--- linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
29392+++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
29393@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29394 int page_flipping;
29395
29396 wait_queue_head_t irq_queue;
29397- atomic_t irq_received;
29398- atomic_t irq_emitted;
29399+ atomic_unchecked_t irq_received;
29400+ atomic_unchecked_t irq_emitted;
29401
29402 int front_offset;
29403 } drm_i810_private_t;
29404diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h
29405--- linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
29406+++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
29407@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
29408 int page_flipping;
29409
29410 wait_queue_head_t irq_queue;
29411- atomic_t irq_received;
29412- atomic_t irq_emitted;
29413+ atomic_unchecked_t irq_received;
29414+ atomic_unchecked_t irq_emitted;
29415
29416 int use_mi_batchbuffer_start;
29417
29418diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c
29419--- linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
29420+++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
29421@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
29422
29423 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
29424
29425- atomic_inc(&dev_priv->irq_received);
29426+ atomic_inc_unchecked(&dev_priv->irq_received);
29427 wake_up_interruptible(&dev_priv->irq_queue);
29428
29429 return IRQ_HANDLED;
29430@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
29431
29432 DRM_DEBUG("%s\n", __func__);
29433
29434- atomic_inc(&dev_priv->irq_emitted);
29435+ atomic_inc_unchecked(&dev_priv->irq_emitted);
29436
29437 BEGIN_LP_RING(2);
29438 OUT_RING(0);
29439 OUT_RING(GFX_OP_USER_INTERRUPT);
29440 ADVANCE_LP_RING();
29441
29442- return atomic_read(&dev_priv->irq_emitted);
29443+ return atomic_read_unchecked(&dev_priv->irq_emitted);
29444 }
29445
29446 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
29447@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
29448
29449 DRM_DEBUG("%s\n", __func__);
29450
29451- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29452+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29453 return 0;
29454
29455 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
29456@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
29457
29458 for (;;) {
29459 __set_current_state(TASK_INTERRUPTIBLE);
29460- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29461+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29462 break;
29463 if ((signed)(end - jiffies) <= 0) {
29464 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
29465@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
29466 I830_WRITE16(I830REG_HWSTAM, 0xffff);
29467 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
29468 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
29469- atomic_set(&dev_priv->irq_received, 0);
29470- atomic_set(&dev_priv->irq_emitted, 0);
29471+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29472+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
29473 init_waitqueue_head(&dev_priv->irq_queue);
29474 }
29475
29476diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c
29477--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
29478+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
29479@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
29480 }
29481 }
29482
29483-struct intel_dvo_dev_ops ch7017_ops = {
29484+const struct intel_dvo_dev_ops ch7017_ops = {
29485 .init = ch7017_init,
29486 .detect = ch7017_detect,
29487 .mode_valid = ch7017_mode_valid,
29488diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c
29489--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
29490+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
29491@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
29492 }
29493 }
29494
29495-struct intel_dvo_dev_ops ch7xxx_ops = {
29496+const struct intel_dvo_dev_ops ch7xxx_ops = {
29497 .init = ch7xxx_init,
29498 .detect = ch7xxx_detect,
29499 .mode_valid = ch7xxx_mode_valid,
29500diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h
29501--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
29502+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
29503@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
29504 *
29505 * \return singly-linked list of modes or NULL if no modes found.
29506 */
29507- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
29508+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
29509
29510 /**
29511 * Clean up driver-specific bits of the output
29512 */
29513- void (*destroy) (struct intel_dvo_device *dvo);
29514+ void (* const destroy) (struct intel_dvo_device *dvo);
29515
29516 /**
29517 * Debugging hook to dump device registers to log file
29518 */
29519- void (*dump_regs)(struct intel_dvo_device *dvo);
29520+ void (* const dump_regs)(struct intel_dvo_device *dvo);
29521 };
29522
29523-extern struct intel_dvo_dev_ops sil164_ops;
29524-extern struct intel_dvo_dev_ops ch7xxx_ops;
29525-extern struct intel_dvo_dev_ops ivch_ops;
29526-extern struct intel_dvo_dev_ops tfp410_ops;
29527-extern struct intel_dvo_dev_ops ch7017_ops;
29528+extern const struct intel_dvo_dev_ops sil164_ops;
29529+extern const struct intel_dvo_dev_ops ch7xxx_ops;
29530+extern const struct intel_dvo_dev_ops ivch_ops;
29531+extern const struct intel_dvo_dev_ops tfp410_ops;
29532+extern const struct intel_dvo_dev_ops ch7017_ops;
29533
29534 #endif /* _INTEL_DVO_H */
29535diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c
29536--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
29537+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
29538@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
29539 }
29540 }
29541
29542-struct intel_dvo_dev_ops ivch_ops= {
29543+const struct intel_dvo_dev_ops ivch_ops= {
29544 .init = ivch_init,
29545 .dpms = ivch_dpms,
29546 .save = ivch_save,
29547diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c
29548--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
29549+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
29550@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
29551 }
29552 }
29553
29554-struct intel_dvo_dev_ops sil164_ops = {
29555+const struct intel_dvo_dev_ops sil164_ops = {
29556 .init = sil164_init,
29557 .detect = sil164_detect,
29558 .mode_valid = sil164_mode_valid,
29559diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c
29560--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
29561+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
29562@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
29563 }
29564 }
29565
29566-struct intel_dvo_dev_ops tfp410_ops = {
29567+const struct intel_dvo_dev_ops tfp410_ops = {
29568 .init = tfp410_init,
29569 .detect = tfp410_detect,
29570 .mode_valid = tfp410_mode_valid,
29571diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c
29572--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
29573+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
29574@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
29575 I915_READ(GTIMR));
29576 }
29577 seq_printf(m, "Interrupts received: %d\n",
29578- atomic_read(&dev_priv->irq_received));
29579+ atomic_read_unchecked(&dev_priv->irq_received));
29580 if (dev_priv->hw_status_page != NULL) {
29581 seq_printf(m, "Current sequence: %d\n",
29582 i915_get_gem_seqno(dev));
29583diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c
29584--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
29585+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
29586@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
29587 return i915_resume(dev);
29588 }
29589
29590-static struct vm_operations_struct i915_gem_vm_ops = {
29591+static const struct vm_operations_struct i915_gem_vm_ops = {
29592 .fault = i915_gem_fault,
29593 .open = drm_gem_vm_open,
29594 .close = drm_gem_vm_close,
29595diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h
29596--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
29597+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:33:55.000000000 -0400
29598@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
29599 /* display clock increase/decrease */
29600 /* pll clock increase/decrease */
29601 /* clock gating init */
29602-};
29603+} __no_const;
29604
29605 typedef struct drm_i915_private {
29606 struct drm_device *dev;
29607@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
29608 int page_flipping;
29609
29610 wait_queue_head_t irq_queue;
29611- atomic_t irq_received;
29612+ atomic_unchecked_t irq_received;
29613 /** Protects user_irq_refcount and irq_mask_reg */
29614 spinlock_t user_irq_lock;
29615 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
29616diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c
29617--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
29618+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
29619@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
29620
29621 args->aper_size = dev->gtt_total;
29622 args->aper_available_size = (args->aper_size -
29623- atomic_read(&dev->pin_memory));
29624+ atomic_read_unchecked(&dev->pin_memory));
29625
29626 return 0;
29627 }
29628@@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
29629 return -EINVAL;
29630 }
29631
29632+ if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29633+ drm_gem_object_unreference(obj);
29634+ return -EFAULT;
29635+ }
29636+
29637 if (i915_gem_object_needs_bit17_swizzle(obj)) {
29638 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
29639 } else {
29640@@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
29641 return -EINVAL;
29642 }
29643
29644+ if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29645+ drm_gem_object_unreference(obj);
29646+ return -EFAULT;
29647+ }
29648+
29649 /* We can only do the GTT pwrite on untiled buffers, as otherwise
29650 * it would end up going through the fenced access, and we'll get
29651 * different detiling behavior between reading and writing.
29652@@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
29653
29654 if (obj_priv->gtt_space) {
29655 atomic_dec(&dev->gtt_count);
29656- atomic_sub(obj->size, &dev->gtt_memory);
29657+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
29658
29659 drm_mm_put_block(obj_priv->gtt_space);
29660 obj_priv->gtt_space = NULL;
29661@@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
29662 goto search_free;
29663 }
29664 atomic_inc(&dev->gtt_count);
29665- atomic_add(obj->size, &dev->gtt_memory);
29666+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
29667
29668 /* Assert that the object is not currently in any GPU domain. As it
29669 * wasn't in the GTT, there shouldn't be any way it could have been in
29670@@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
29671 "%d/%d gtt bytes\n",
29672 atomic_read(&dev->object_count),
29673 atomic_read(&dev->pin_count),
29674- atomic_read(&dev->object_memory),
29675- atomic_read(&dev->pin_memory),
29676- atomic_read(&dev->gtt_memory),
29677+ atomic_read_unchecked(&dev->object_memory),
29678+ atomic_read_unchecked(&dev->pin_memory),
29679+ atomic_read_unchecked(&dev->gtt_memory),
29680 dev->gtt_total);
29681 }
29682 goto err;
29683@@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
29684 */
29685 if (obj_priv->pin_count == 1) {
29686 atomic_inc(&dev->pin_count);
29687- atomic_add(obj->size, &dev->pin_memory);
29688+ atomic_add_unchecked(obj->size, &dev->pin_memory);
29689 if (!obj_priv->active &&
29690 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
29691 !list_empty(&obj_priv->list))
29692@@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
29693 list_move_tail(&obj_priv->list,
29694 &dev_priv->mm.inactive_list);
29695 atomic_dec(&dev->pin_count);
29696- atomic_sub(obj->size, &dev->pin_memory);
29697+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
29698 }
29699 i915_verify_inactive(dev, __FILE__, __LINE__);
29700 }
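The i915_gem_pread_ioctl()/i915_gem_pwrite_ioctl() hunks above validate the user-supplied data_ptr/size pair with access_ok() before the fast paths touch it, dropping the object reference and returning -EFAULT when the range is not a legitimate user-space range. A sketch of that validation step on its own; demo_copy_in and its arguments are placeholders:

#include <linux/errno.h>
#include <linux/uaccess.h>

static int demo_copy_in(void *dst, const void __user *src, unsigned long size)
{
	/* Reject ranges that do not lie entirely in user space before
	 * doing anything else with the pointer. */
	if (!access_ok(VERIFY_READ, src, size))
		return -EFAULT;

	if (copy_from_user(dst, src, size))
		return -EFAULT;

	return 0;
}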
29701diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c
29702--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
29703+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
29704@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
29705 int irq_received;
29706 int ret = IRQ_NONE;
29707
29708- atomic_inc(&dev_priv->irq_received);
29709+ atomic_inc_unchecked(&dev_priv->irq_received);
29710
29711 if (IS_IGDNG(dev))
29712 return igdng_irq_handler(dev);
29713@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29714 {
29715 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29716
29717- atomic_set(&dev_priv->irq_received, 0);
29718+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29719
29720 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29721 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29722diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c
29723--- linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-03-27 14:31:47.000000000 -0400
29724+++ linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-08-05 20:33:55.000000000 -0400
29725@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *
29726 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
29727
29728 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
29729- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29730+ pax_open_kernel();
29731+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29732+ pax_close_kernel();
29733
29734 /* Read the regs to test if we can talk to the device */
29735 for (i = 0; i < 0x40; i++) {
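
The intel_sdvo.c change shows the other recurring pattern in this patch: intel_sdvo_i2c_bit_algo is made read-only by the constification applied elsewhere in the grsecurity tree, so the one legitimate runtime write to its .functionality member is bracketed with pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection on kernel read-only data. A userspace analogue of that open-write-close discipline, using mprotect() on an ordinary page (the layout and helpers here are purely illustrative, not the kernel API):

/* Userspace analogue of the pax_open_kernel()/pax_close_kernel() pattern:
 * keep an ops structure read-only and lift the protection only around one
 * deliberate update.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops {
        int (*functionality)(void);
};

static int real_functionality(void)
{
        return 42;
}

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        struct ops *ops = mmap(NULL, page, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
                return 1;
        memset(ops, 0, sizeof(*ops));

        mprotect(ops, page, PROT_READ);               /* "constified" from here on   */

        mprotect(ops, page, PROT_READ | PROT_WRITE);  /* pax_open_kernel() analogue  */
        ops->functionality = real_functionality;      /* the single legitimate write */
        mprotect(ops, page, PROT_READ);               /* pax_close_kernel() analogue */

        printf("functionality() = %d\n", ops->functionality());
        return 0;
}
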
29736diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h
29737--- linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29738+++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29739@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29740 u32 clear_cmd;
29741 u32 maccess;
29742
29743- atomic_t vbl_received; /**< Number of vblanks received. */
29744+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29745 wait_queue_head_t fence_queue;
29746- atomic_t last_fence_retired;
29747+ atomic_unchecked_t last_fence_retired;
29748 u32 next_fence_to_post;
29749
29750 unsigned int fb_cpp;
29751diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c
29752--- linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29753+++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29754@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29755 if (crtc != 0)
29756 return 0;
29757
29758- return atomic_read(&dev_priv->vbl_received);
29759+ return atomic_read_unchecked(&dev_priv->vbl_received);
29760 }
29761
29762
29763@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29764 /* VBLANK interrupt */
29765 if (status & MGA_VLINEPEN) {
29766 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29767- atomic_inc(&dev_priv->vbl_received);
29768+ atomic_inc_unchecked(&dev_priv->vbl_received);
29769 drm_handle_vblank(dev, 0);
29770 handled = 1;
29771 }
29772@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29773 MGA_WRITE(MGA_PRIMEND, prim_end);
29774 }
29775
29776- atomic_inc(&dev_priv->last_fence_retired);
29777+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
29778 DRM_WAKEUP(&dev_priv->fence_queue);
29779 handled = 1;
29780 }
29781@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29782 * using fences.
29783 */
29784 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29785- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29786+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29787 - *sequence) <= (1 << 23)));
29788
29789 *sequence = cur_fence;
29790diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c
29791--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29792+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29793@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29794
29795 /* GH: Simple idle check.
29796 */
29797- atomic_set(&dev_priv->idle_count, 0);
29798+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29799
29800 /* We don't support anything other than bus-mastering ring mode,
29801 * but the ring can be in either AGP or PCI space for the ring
29802diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h
29803--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29804+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29805@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29806 int is_pci;
29807 unsigned long cce_buffers_offset;
29808
29809- atomic_t idle_count;
29810+ atomic_unchecked_t idle_count;
29811
29812 int page_flipping;
29813 int current_page;
29814 u32 crtc_offset;
29815 u32 crtc_offset_cntl;
29816
29817- atomic_t vbl_received;
29818+ atomic_unchecked_t vbl_received;
29819
29820 u32 color_fmt;
29821 unsigned int front_offset;
29822diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c
29823--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29824+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29825@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29826 if (crtc != 0)
29827 return 0;
29828
29829- return atomic_read(&dev_priv->vbl_received);
29830+ return atomic_read_unchecked(&dev_priv->vbl_received);
29831 }
29832
29833 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29834@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29835 /* VBLANK interrupt */
29836 if (status & R128_CRTC_VBLANK_INT) {
29837 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29838- atomic_inc(&dev_priv->vbl_received);
29839+ atomic_inc_unchecked(&dev_priv->vbl_received);
29840 drm_handle_vblank(dev, 0);
29841 return IRQ_HANDLED;
29842 }
29843diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c
29844--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29845+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29846@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29847
29848 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29849 {
29850- if (atomic_read(&dev_priv->idle_count) == 0) {
29851+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29852 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29853 } else {
29854- atomic_set(&dev_priv->idle_count, 0);
29855+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29856 }
29857 }
29858
29859diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c
29860--- linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
29861+++ linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
29862@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
29863 char name[512];
29864 int i;
29865
29866+ pax_track_stack();
29867+
29868 ctx->card = card;
29869 ctx->bios = bios;
29870
29871diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c
29872--- linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29873+++ linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29874@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29875 regex_t mask_rex;
29876 regmatch_t match[4];
29877 char buf[1024];
29878- size_t end;
29879+ long end;
29880 int len;
29881 int done = 0;
29882 int r;
29883 unsigned o;
29884 struct offset *offset;
29885 char last_reg_s[10];
29886- int last_reg;
29887+ unsigned long last_reg;
29888
29889 if (regcomp
29890 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29891diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c
29892--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29893+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29894@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29895 bool linkb;
29896 struct radeon_i2c_bus_rec ddc_bus;
29897
29898+ pax_track_stack();
29899+
29900 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29901
29902 if (data_offset == 0)
29903@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29904 }
29905 }
29906
29907-struct bios_connector {
29908+static struct bios_connector {
29909 bool valid;
29910 uint16_t line_mux;
29911 uint16_t devices;
29912 int connector_type;
29913 struct radeon_i2c_bus_rec ddc_bus;
29914-};
29915+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29916
29917 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29918 drm_device
29919@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29920 uint8_t dac;
29921 union atom_supported_devices *supported_devices;
29922 int i, j;
29923- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29924
29925 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29926
29927diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c
29928--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29929+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29930@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29931
29932 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29933 error = freq - current_freq;
29934- error = error < 0 ? 0xffffffff : error;
29935+ error = (int32_t)error < 0 ? 0xffffffff : error;
29936 } else
29937 error = abs(current_freq - freq);
29938 vco_diff = abs(vco - best_vco);
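
The radeon_display.c fix is a plain unsigned-comparison bug: error is a 32-bit unsigned quantity in radeon_compute_pll(), so "error < 0" can never be true; when freq is smaller than current_freq the subtraction wraps to a huge positive value, and the added (int32_t) cast restores the intended sign test. A minimal standalone demonstration (arbitrary values, only the types matter):

/* Why "error < 0" never fires for an unsigned value: the subtraction wraps
 * instead of going negative.
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint32_t freq = 100, current_freq = 250;
        uint32_t error = freq - current_freq;      /* wraps to 0xffffff6a */

        printf("error              = %" PRIu32 "\n", error);
        printf("error < 0          = %d (always 0 for unsigned)\n", error < 0);
        printf("(int32_t)error     = %" PRId32 "\n", (int32_t)error);
        printf("(int32_t)error < 0 = %d\n", (int32_t)error < 0);
        return 0;
}
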
29939diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h
29940--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29941+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29942@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29943
29944 /* SW interrupt */
29945 wait_queue_head_t swi_queue;
29946- atomic_t swi_emitted;
29947+ atomic_unchecked_t swi_emitted;
29948 int vblank_crtc;
29949 uint32_t irq_enable_reg;
29950 uint32_t r500_disp_irq_reg;
29951diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c
29952--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29953+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29954@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29955 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29956 return 0;
29957 }
29958- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29959+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29960 if (!rdev->cp.ready) {
29961 /* FIXME: cp is not running assume everythings is done right
29962 * away
29963@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29964 return r;
29965 }
29966 WREG32(rdev->fence_drv.scratch_reg, 0);
29967- atomic_set(&rdev->fence_drv.seq, 0);
29968+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29969 INIT_LIST_HEAD(&rdev->fence_drv.created);
29970 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29971 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29972diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h
29973--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29974+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:33:55.000000000 -0400
29975@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29976 */
29977 struct radeon_fence_driver {
29978 uint32_t scratch_reg;
29979- atomic_t seq;
29980+ atomic_unchecked_t seq;
29981 uint32_t last_seq;
29982 unsigned long count_timeout;
29983 wait_queue_head_t queue;
29984@@ -640,7 +640,7 @@ struct radeon_asic {
29985 uint32_t offset, uint32_t obj_size);
29986 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
29987 void (*bandwidth_update)(struct radeon_device *rdev);
29988-};
29989+} __no_const;
29990
29991 /*
29992 * Asic structures
29993diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c
29994--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
29995+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
29996@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
29997 request = compat_alloc_user_space(sizeof(*request));
29998 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
29999 || __put_user(req32.param, &request->param)
30000- || __put_user((void __user *)(unsigned long)req32.value,
30001+ || __put_user((unsigned long)req32.value,
30002 &request->value))
30003 return -EFAULT;
30004
30005diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c
30006--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
30007+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
30008@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
30009 unsigned int ret;
30010 RING_LOCALS;
30011
30012- atomic_inc(&dev_priv->swi_emitted);
30013- ret = atomic_read(&dev_priv->swi_emitted);
30014+ atomic_inc_unchecked(&dev_priv->swi_emitted);
30015+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30016
30017 BEGIN_RING(4);
30018 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30019@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
30020 drm_radeon_private_t *dev_priv =
30021 (drm_radeon_private_t *) dev->dev_private;
30022
30023- atomic_set(&dev_priv->swi_emitted, 0);
30024+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30025 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30026
30027 dev->max_vblank_count = 0x001fffff;
30028diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c
30029--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
30030+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
30031@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
30032 {
30033 drm_radeon_private_t *dev_priv = dev->dev_private;
30034 drm_radeon_getparam_t *param = data;
30035- int value;
30036+ int value = 0;
30037
30038 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30039
30040diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c
30041--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
30042+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
30043@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
30044 DRM_INFO("radeon: ttm finalized\n");
30045 }
30046
30047-static struct vm_operations_struct radeon_ttm_vm_ops;
30048-static const struct vm_operations_struct *ttm_vm_ops = NULL;
30049-
30050-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
30051-{
30052- struct ttm_buffer_object *bo;
30053- int r;
30054-
30055- bo = (struct ttm_buffer_object *)vma->vm_private_data;
30056- if (bo == NULL) {
30057- return VM_FAULT_NOPAGE;
30058- }
30059- r = ttm_vm_ops->fault(vma, vmf);
30060- return r;
30061-}
30062-
30063 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30064 {
30065 struct drm_file *file_priv;
30066 struct radeon_device *rdev;
30067- int r;
30068
30069 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
30070 return drm_mmap(filp, vma);
30071@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
30072
30073 file_priv = (struct drm_file *)filp->private_data;
30074 rdev = file_priv->minor->dev->dev_private;
30075- if (rdev == NULL) {
30076+ if (!rdev)
30077 return -EINVAL;
30078- }
30079- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30080- if (unlikely(r != 0)) {
30081- return r;
30082- }
30083- if (unlikely(ttm_vm_ops == NULL)) {
30084- ttm_vm_ops = vma->vm_ops;
30085- radeon_ttm_vm_ops = *ttm_vm_ops;
30086- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30087- }
30088- vma->vm_ops = &radeon_ttm_vm_ops;
30089- return 0;
30090+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30091 }
30092
30093
30094diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c
30095--- linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
30096+++ linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
30097@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
30098 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30099 rdev->pm.sideport_bandwidth.full)
30100 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30101- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
30102+ read_delay_latency.full = rfixed_const(800 * 1000);
30103 read_delay_latency.full = rfixed_div(read_delay_latency,
30104 rdev->pm.igp_sideport_mclk);
30105+ a.full = rfixed_const(370);
30106+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
30107 } else {
30108 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30109 rdev->pm.k8_bandwidth.full)
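
The rs690.c change is an overflow fix in the driver's 20.12 fixed-point arithmetic: rfixed_const(x) in this kernel stores x << 12 in a 32-bit word, and 370 * 800 * 1000 = 296,000,000 needs roughly 2^40 once shifted, so the original constant silently truncated. Building rfixed_const(800 * 1000) first (which still fits), dividing by the sideport memory clock, and only then multiplying by 370 keeps every intermediate value in range. The arithmetic, checked in a few lines of standalone C that mirror the helper:

/* The rs690 read_delay_latency overflow, checked in standalone C. */
#include <stdint.h>
#include <stdio.h>

static uint32_t rfixed_const(uint32_t a)     /* mirrors the kernel helper: A << 12 */
{
        return a << 12;
}

int main(void)
{
        uint64_t needed = ((uint64_t)370 * 800 * 1000) << 12;

        printf("needed     = %llu\n", (unsigned long long)needed);     /* ~1.21e12      */
        printf("UINT32_MAX = %u\n",   UINT32_MAX);                     /* 4,294,967,295 */
        printf("truncated  = %u\n",   rfixed_const(370 * 800 * 1000)); /* the old code  */
        printf("still fits = %u\n",   rfixed_const(800 * 1000));       /* the new code  */
        return 0;
}
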
30110diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c
30111--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
30112+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
30113@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
30114 NULL
30115 };
30116
30117-static struct sysfs_ops ttm_bo_global_ops = {
30118+static const struct sysfs_ops ttm_bo_global_ops = {
30119 .show = &ttm_bo_global_show
30120 };
30121
30122diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c
30123--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
30124+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
30125@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
30126 {
30127 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
30128 vma->vm_private_data;
30129- struct ttm_bo_device *bdev = bo->bdev;
30130+ struct ttm_bo_device *bdev;
30131 unsigned long bus_base;
30132 unsigned long bus_offset;
30133 unsigned long bus_size;
30134@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
30135 unsigned long address = (unsigned long)vmf->virtual_address;
30136 int retval = VM_FAULT_NOPAGE;
30137
30138+ if (!bo)
30139+ return VM_FAULT_NOPAGE;
30140+ bdev = bo->bdev;
30141+
30142 /*
30143 * Work around locking order reversal in fault / nopfn
30144 * between mmap_sem and bo_reserve: Perform a trylock operation
30145diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c
30146--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
30147+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
30148@@ -36,7 +36,7 @@
30149 struct ttm_global_item {
30150 struct mutex mutex;
30151 void *object;
30152- int refcount;
30153+ atomic_t refcount;
30154 };
30155
30156 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
30157@@ -49,7 +49,7 @@ void ttm_global_init(void)
30158 struct ttm_global_item *item = &glob[i];
30159 mutex_init(&item->mutex);
30160 item->object = NULL;
30161- item->refcount = 0;
30162+ atomic_set(&item->refcount, 0);
30163 }
30164 }
30165
30166@@ -59,7 +59,7 @@ void ttm_global_release(void)
30167 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
30168 struct ttm_global_item *item = &glob[i];
30169 BUG_ON(item->object != NULL);
30170- BUG_ON(item->refcount != 0);
30171+ BUG_ON(atomic_read(&item->refcount) != 0);
30172 }
30173 }
30174
30175@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
30176 void *object;
30177
30178 mutex_lock(&item->mutex);
30179- if (item->refcount == 0) {
30180+ if (atomic_read(&item->refcount) == 0) {
30181 item->object = kzalloc(ref->size, GFP_KERNEL);
30182 if (unlikely(item->object == NULL)) {
30183 ret = -ENOMEM;
30184@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
30185 goto out_err;
30186
30187 }
30188- ++item->refcount;
30189+ atomic_inc(&item->refcount);
30190 ref->object = item->object;
30191 object = item->object;
30192 mutex_unlock(&item->mutex);
30193@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
30194 struct ttm_global_item *item = &glob[ref->global_type];
30195
30196 mutex_lock(&item->mutex);
30197- BUG_ON(item->refcount == 0);
30198+ BUG_ON(atomic_read(&item->refcount) == 0);
30199 BUG_ON(ref->object != item->object);
30200- if (--item->refcount == 0) {
30201+ if (atomic_dec_and_test(&item->refcount)) {
30202 ref->release(ref);
30203 item->object = NULL;
30204 }
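
The ttm_global.c hunks convert a plain int refcount, previously manipulated only under the item mutex, into an atomic_t and rewrite the release test as atomic_dec_and_test(), the usual kernel idiom for "drop a reference and learn whether it was the last one" (and, presumably, so the counter also falls under the REFCOUNT overflow checks). A self-contained sketch of that idiom in C11 <stdatomic.h>, not the kernel API:

/* Sketch of the atomic_dec_and_test() idiom used above, written with C11
 * atomics rather than the kernel's atomic_t.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
        atomic_int refcount;
        void (*release)(struct item *);
};

static void item_get(struct item *it)
{
        atomic_fetch_add(&it->refcount, 1);
}

static void item_put(struct item *it)
{
        /* fetch_sub returns the previous value, so "previous == 1" is the
         * moral equivalent of atomic_dec_and_test() returning true. */
        if (atomic_fetch_sub(&it->refcount, 1) == 1)
                it->release(it);
}

static void item_release(struct item *it)
{
        printf("last reference dropped, releasing\n");
        free(it);
}

int main(void)
{
        struct item *it = malloc(sizeof(*it));
        if (!it)
                return 1;
        atomic_init(&it->refcount, 1);
        it->release = item_release;

        item_get(it);   /* second reference          */
        item_put(it);   /* drops to 1, nothing happens */
        item_put(it);   /* drops to 0, release runs    */
        return 0;
}
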
30205diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c
30206--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
30207+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
30208@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
30209 NULL
30210 };
30211
30212-static struct sysfs_ops ttm_mem_zone_ops = {
30213+static const struct sysfs_ops ttm_mem_zone_ops = {
30214 .show = &ttm_mem_zone_show,
30215 .store = &ttm_mem_zone_store
30216 };
30217diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h
30218--- linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
30219+++ linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
30220@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30221 typedef uint32_t maskarray_t[5];
30222
30223 typedef struct drm_via_irq {
30224- atomic_t irq_received;
30225+ atomic_unchecked_t irq_received;
30226 uint32_t pending_mask;
30227 uint32_t enable_mask;
30228 wait_queue_head_t irq_queue;
30229@@ -75,7 +75,7 @@ typedef struct drm_via_private {
30230 struct timeval last_vblank;
30231 int last_vblank_valid;
30232 unsigned usec_per_vblank;
30233- atomic_t vbl_received;
30234+ atomic_unchecked_t vbl_received;
30235 drm_via_state_t hc_state;
30236 char pci_buf[VIA_PCI_BUF_SIZE];
30237 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30238diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c
30239--- linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
30240+++ linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
30241@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
30242 if (crtc != 0)
30243 return 0;
30244
30245- return atomic_read(&dev_priv->vbl_received);
30246+ return atomic_read_unchecked(&dev_priv->vbl_received);
30247 }
30248
30249 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30250@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
30251
30252 status = VIA_READ(VIA_REG_INTERRUPT);
30253 if (status & VIA_IRQ_VBLANK_PENDING) {
30254- atomic_inc(&dev_priv->vbl_received);
30255- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30256+ atomic_inc_unchecked(&dev_priv->vbl_received);
30257+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30258 do_gettimeofday(&cur_vblank);
30259 if (dev_priv->last_vblank_valid) {
30260 dev_priv->usec_per_vblank =
30261@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30262 dev_priv->last_vblank = cur_vblank;
30263 dev_priv->last_vblank_valid = 1;
30264 }
30265- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30266+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30267 DRM_DEBUG("US per vblank is: %u\n",
30268 dev_priv->usec_per_vblank);
30269 }
30270@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30271
30272 for (i = 0; i < dev_priv->num_irqs; ++i) {
30273 if (status & cur_irq->pending_mask) {
30274- atomic_inc(&cur_irq->irq_received);
30275+ atomic_inc_unchecked(&cur_irq->irq_received);
30276 DRM_WAKEUP(&cur_irq->irq_queue);
30277 handled = 1;
30278 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
30279@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
30280 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30281 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30282 masks[irq][4]));
30283- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30284+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30285 } else {
30286 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30287 (((cur_irq_sequence =
30288- atomic_read(&cur_irq->irq_received)) -
30289+ atomic_read_unchecked(&cur_irq->irq_received)) -
30290 *sequence) <= (1 << 23)));
30291 }
30292 *sequence = cur_irq_sequence;
30293@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
30294 }
30295
30296 for (i = 0; i < dev_priv->num_irqs; ++i) {
30297- atomic_set(&cur_irq->irq_received, 0);
30298+ atomic_set_unchecked(&cur_irq->irq_received, 0);
30299 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30300 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30301 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30302@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
30303 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30304 case VIA_IRQ_RELATIVE:
30305 irqwait->request.sequence +=
30306- atomic_read(&cur_irq->irq_received);
30307+ atomic_read_unchecked(&cur_irq->irq_received);
30308 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30309 case VIA_IRQ_ABSOLUTE:
30310 break;
30311diff -urNp linux-2.6.32.45/drivers/hid/hid-core.c linux-2.6.32.45/drivers/hid/hid-core.c
30312--- linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
30313+++ linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
30314@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
30315
30316 int hid_add_device(struct hid_device *hdev)
30317 {
30318- static atomic_t id = ATOMIC_INIT(0);
30319+ static atomic_unchecked_t id = ATOMIC_INIT(0);
30320 int ret;
30321
30322 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30323@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
30324 /* XXX hack, any other cleaner solution after the driver core
30325 * is converted to allow more than 20 bytes as the device name? */
30326 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30327- hdev->vendor, hdev->product, atomic_inc_return(&id));
30328+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30329
30330 ret = device_add(&hdev->dev);
30331 if (!ret)
30332diff -urNp linux-2.6.32.45/drivers/hid/usbhid/hiddev.c linux-2.6.32.45/drivers/hid/usbhid/hiddev.c
30333--- linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
30334+++ linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
30335@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
30336 return put_user(HID_VERSION, (int __user *)arg);
30337
30338 case HIDIOCAPPLICATION:
30339- if (arg < 0 || arg >= hid->maxapplication)
30340+ if (arg >= hid->maxapplication)
30341 return -EINVAL;
30342
30343 for (i = 0; i < hid->maxcollection; i++)
30344diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.c linux-2.6.32.45/drivers/hwmon/lis3lv02d.c
30345--- linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
30346+++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
30347@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
30348 * the lid is closed. This leads to interrupts as soon as a little move
30349 * is done.
30350 */
30351- atomic_inc(&lis3_dev.count);
30352+ atomic_inc_unchecked(&lis3_dev.count);
30353
30354 wake_up_interruptible(&lis3_dev.misc_wait);
30355 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30356@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
30357 if (test_and_set_bit(0, &lis3_dev.misc_opened))
30358 return -EBUSY; /* already open */
30359
30360- atomic_set(&lis3_dev.count, 0);
30361+ atomic_set_unchecked(&lis3_dev.count, 0);
30362
30363 /*
30364 * The sensor can generate interrupts for free-fall and direction
30365@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
30366 add_wait_queue(&lis3_dev.misc_wait, &wait);
30367 while (true) {
30368 set_current_state(TASK_INTERRUPTIBLE);
30369- data = atomic_xchg(&lis3_dev.count, 0);
30370+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30371 if (data)
30372 break;
30373
30374@@ -244,7 +244,7 @@ out:
30375 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30376 {
30377 poll_wait(file, &lis3_dev.misc_wait, wait);
30378- if (atomic_read(&lis3_dev.count))
30379+ if (atomic_read_unchecked(&lis3_dev.count))
30380 return POLLIN | POLLRDNORM;
30381 return 0;
30382 }
30383diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.h linux-2.6.32.45/drivers/hwmon/lis3lv02d.h
30384--- linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
30385+++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
30386@@ -201,7 +201,7 @@ struct lis3lv02d {
30387
30388 struct input_polled_dev *idev; /* input device */
30389 struct platform_device *pdev; /* platform device */
30390- atomic_t count; /* interrupt count after last read */
30391+ atomic_unchecked_t count; /* interrupt count after last read */
30392 int xcalib; /* calibrated null value for x */
30393 int ycalib; /* calibrated null value for y */
30394 int zcalib; /* calibrated null value for z */
30395diff -urNp linux-2.6.32.45/drivers/hwmon/sht15.c linux-2.6.32.45/drivers/hwmon/sht15.c
30396--- linux-2.6.32.45/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
30397+++ linux-2.6.32.45/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
30398@@ -112,7 +112,7 @@ struct sht15_data {
30399 int supply_uV;
30400 int supply_uV_valid;
30401 struct work_struct update_supply_work;
30402- atomic_t interrupt_handled;
30403+ atomic_unchecked_t interrupt_handled;
30404 };
30405
30406 /**
30407@@ -245,13 +245,13 @@ static inline int sht15_update_single_va
30408 return ret;
30409
30410 gpio_direction_input(data->pdata->gpio_data);
30411- atomic_set(&data->interrupt_handled, 0);
30412+ atomic_set_unchecked(&data->interrupt_handled, 0);
30413
30414 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30415 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30416 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30417 /* Only relevant if the interrupt hasn't occured. */
30418- if (!atomic_read(&data->interrupt_handled))
30419+ if (!atomic_read_unchecked(&data->interrupt_handled))
30420 schedule_work(&data->read_work);
30421 }
30422 ret = wait_event_timeout(data->wait_queue,
30423@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
30424 struct sht15_data *data = d;
30425 /* First disable the interrupt */
30426 disable_irq_nosync(irq);
30427- atomic_inc(&data->interrupt_handled);
30428+ atomic_inc_unchecked(&data->interrupt_handled);
30429 /* Then schedule a reading work struct */
30430 if (data->flag != SHT15_READING_NOTHING)
30431 schedule_work(&data->read_work);
30432@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
30433 here as could have gone low in meantime so verify
30434 it hasn't!
30435 */
30436- atomic_set(&data->interrupt_handled, 0);
30437+ atomic_set_unchecked(&data->interrupt_handled, 0);
30438 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30439 /* If still not occured or another handler has been scheduled */
30440 if (gpio_get_value(data->pdata->gpio_data)
30441- || atomic_read(&data->interrupt_handled))
30442+ || atomic_read_unchecked(&data->interrupt_handled))
30443 return;
30444 }
30445 /* Read the data back from the device */
30446diff -urNp linux-2.6.32.45/drivers/hwmon/w83791d.c linux-2.6.32.45/drivers/hwmon/w83791d.c
30447--- linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
30448+++ linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
30449@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
30450 struct i2c_board_info *info);
30451 static int w83791d_remove(struct i2c_client *client);
30452
30453-static int w83791d_read(struct i2c_client *client, u8 register);
30454-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
30455+static int w83791d_read(struct i2c_client *client, u8 reg);
30456+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
30457 static struct w83791d_data *w83791d_update_device(struct device *dev);
30458
30459 #ifdef DEBUG
30460diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c
30461--- linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-03-27 14:31:47.000000000 -0400
30462+++ linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:22:32.000000000 -0400
30463@@ -43,7 +43,7 @@
30464 extern struct i2c_adapter amd756_smbus;
30465
30466 static struct i2c_adapter *s4882_adapter;
30467-static struct i2c_algorithm *s4882_algo;
30468+static i2c_algorithm_no_const *s4882_algo;
30469
30470 /* Wrapper access functions for multiplexed SMBus */
30471 static DEFINE_MUTEX(amd756_lock);
30472diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c
30473--- linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-03-27 14:31:47.000000000 -0400
30474+++ linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:22:32.000000000 -0400
30475@@ -41,7 +41,7 @@
30476 extern struct i2c_adapter *nforce2_smbus;
30477
30478 static struct i2c_adapter *s4985_adapter;
30479-static struct i2c_algorithm *s4985_algo;
30480+static i2c_algorithm_no_const *s4985_algo;
30481
30482 /* Wrapper access functions for multiplexed SMBus */
30483 static DEFINE_MUTEX(nforce2_lock);
30484diff -urNp linux-2.6.32.45/drivers/ide/ide-cd.c linux-2.6.32.45/drivers/ide/ide-cd.c
30485--- linux-2.6.32.45/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
30486+++ linux-2.6.32.45/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
30487@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
30488 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30489 if ((unsigned long)buf & alignment
30490 || blk_rq_bytes(rq) & q->dma_pad_mask
30491- || object_is_on_stack(buf))
30492+ || object_starts_on_stack(buf))
30493 drive->dma = 0;
30494 }
30495 }
30496diff -urNp linux-2.6.32.45/drivers/ide/ide-floppy.c linux-2.6.32.45/drivers/ide/ide-floppy.c
30497--- linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
30498+++ linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
30499@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
30500 u8 pc_buf[256], header_len, desc_cnt;
30501 int i, rc = 1, blocks, length;
30502
30503+ pax_track_stack();
30504+
30505 ide_debug_log(IDE_DBG_FUNC, "enter");
30506
30507 drive->bios_cyl = 0;
30508diff -urNp linux-2.6.32.45/drivers/ide/setup-pci.c linux-2.6.32.45/drivers/ide/setup-pci.c
30509--- linux-2.6.32.45/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
30510+++ linux-2.6.32.45/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
30511@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
30512 int ret, i, n_ports = dev2 ? 4 : 2;
30513 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
30514
30515+ pax_track_stack();
30516+
30517 for (i = 0; i < n_ports / 2; i++) {
30518 ret = ide_setup_pci_controller(pdev[i], d, !i);
30519 if (ret < 0)
30520diff -urNp linux-2.6.32.45/drivers/ieee1394/dv1394.c linux-2.6.32.45/drivers/ieee1394/dv1394.c
30521--- linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
30522+++ linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
30523@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
30524 based upon DIF section and sequence
30525 */
30526
30527-static void inline
30528+static inline void
30529 frame_put_packet (struct frame *f, struct packet *p)
30530 {
30531 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
30532diff -urNp linux-2.6.32.45/drivers/ieee1394/hosts.c linux-2.6.32.45/drivers/ieee1394/hosts.c
30533--- linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
30534+++ linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
30535@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
30536 }
30537
30538 static struct hpsb_host_driver dummy_driver = {
30539+ .name = "dummy",
30540 .transmit_packet = dummy_transmit_packet,
30541 .devctl = dummy_devctl,
30542 .isoctl = dummy_isoctl
30543diff -urNp linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c
30544--- linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
30545+++ linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
30546@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
30547 for (func = 0; func < 8; func++) {
30548 u32 class = read_pci_config(num,slot,func,
30549 PCI_CLASS_REVISION);
30550- if ((class == 0xffffffff))
30551+ if (class == 0xffffffff)
30552 continue; /* No device at this func */
30553
30554 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
30555diff -urNp linux-2.6.32.45/drivers/ieee1394/ohci1394.c linux-2.6.32.45/drivers/ieee1394/ohci1394.c
30556--- linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
30557+++ linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
30558@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
30559 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
30560
30561 /* Module Parameters */
30562-static int phys_dma = 1;
30563+static int phys_dma;
30564 module_param(phys_dma, int, 0444);
30565-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
30566+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
30567
30568 static void dma_trm_tasklet(unsigned long data);
30569 static void dma_trm_reset(struct dma_trm_ctx *d);
30570diff -urNp linux-2.6.32.45/drivers/ieee1394/sbp2.c linux-2.6.32.45/drivers/ieee1394/sbp2.c
30571--- linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
30572+++ linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
30573@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
30574 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
30575 MODULE_LICENSE("GPL");
30576
30577-static int sbp2_module_init(void)
30578+static int __init sbp2_module_init(void)
30579 {
30580 int ret;
30581
30582diff -urNp linux-2.6.32.45/drivers/infiniband/core/cm.c linux-2.6.32.45/drivers/infiniband/core/cm.c
30583--- linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
30584+++ linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
30585@@ -112,7 +112,7 @@ static char const counter_group_names[CM
30586
30587 struct cm_counter_group {
30588 struct kobject obj;
30589- atomic_long_t counter[CM_ATTR_COUNT];
30590+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30591 };
30592
30593 struct cm_counter_attribute {
30594@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
30595 struct ib_mad_send_buf *msg = NULL;
30596 int ret;
30597
30598- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30599+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30600 counter[CM_REQ_COUNTER]);
30601
30602 /* Quick state check to discard duplicate REQs. */
30603@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
30604 if (!cm_id_priv)
30605 return;
30606
30607- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30608+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30609 counter[CM_REP_COUNTER]);
30610 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30611 if (ret)
30612@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
30613 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30614 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30615 spin_unlock_irq(&cm_id_priv->lock);
30616- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30617+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30618 counter[CM_RTU_COUNTER]);
30619 goto out;
30620 }
30621@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
30622 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30623 dreq_msg->local_comm_id);
30624 if (!cm_id_priv) {
30625- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30626+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30627 counter[CM_DREQ_COUNTER]);
30628 cm_issue_drep(work->port, work->mad_recv_wc);
30629 return -EINVAL;
30630@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
30631 case IB_CM_MRA_REP_RCVD:
30632 break;
30633 case IB_CM_TIMEWAIT:
30634- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30635+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30636 counter[CM_DREQ_COUNTER]);
30637 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30638 goto unlock;
30639@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
30640 cm_free_msg(msg);
30641 goto deref;
30642 case IB_CM_DREQ_RCVD:
30643- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30644+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30645 counter[CM_DREQ_COUNTER]);
30646 goto unlock;
30647 default:
30648@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
30649 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30650 cm_id_priv->msg, timeout)) {
30651 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30652- atomic_long_inc(&work->port->
30653+ atomic_long_inc_unchecked(&work->port->
30654 counter_group[CM_RECV_DUPLICATES].
30655 counter[CM_MRA_COUNTER]);
30656 goto out;
30657@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
30658 break;
30659 case IB_CM_MRA_REQ_RCVD:
30660 case IB_CM_MRA_REP_RCVD:
30661- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30662+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30663 counter[CM_MRA_COUNTER]);
30664 /* fall through */
30665 default:
30666@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
30667 case IB_CM_LAP_IDLE:
30668 break;
30669 case IB_CM_MRA_LAP_SENT:
30670- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30671+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30672 counter[CM_LAP_COUNTER]);
30673 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30674 goto unlock;
30675@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
30676 cm_free_msg(msg);
30677 goto deref;
30678 case IB_CM_LAP_RCVD:
30679- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30680+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30681 counter[CM_LAP_COUNTER]);
30682 goto unlock;
30683 default:
30684@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
30685 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30686 if (cur_cm_id_priv) {
30687 spin_unlock_irq(&cm.lock);
30688- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30689+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30690 counter[CM_SIDR_REQ_COUNTER]);
30691 goto out; /* Duplicate message. */
30692 }
30693@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
30694 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30695 msg->retries = 1;
30696
30697- atomic_long_add(1 + msg->retries,
30698+ atomic_long_add_unchecked(1 + msg->retries,
30699 &port->counter_group[CM_XMIT].counter[attr_index]);
30700 if (msg->retries)
30701- atomic_long_add(msg->retries,
30702+ atomic_long_add_unchecked(msg->retries,
30703 &port->counter_group[CM_XMIT_RETRIES].
30704 counter[attr_index]);
30705
30706@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
30707 }
30708
30709 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30710- atomic_long_inc(&port->counter_group[CM_RECV].
30711+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30712 counter[attr_id - CM_ATTR_ID_OFFSET]);
30713
30714 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30715@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
30716 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30717
30718 return sprintf(buf, "%ld\n",
30719- atomic_long_read(&group->counter[cm_attr->index]));
30720+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30721 }
30722
30723-static struct sysfs_ops cm_counter_ops = {
30724+static const struct sysfs_ops cm_counter_ops = {
30725 .show = cm_show_counter
30726 };
30727
30728diff -urNp linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c
30729--- linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
30730+++ linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
30731@@ -97,8 +97,8 @@ struct ib_fmr_pool {
30732
30733 struct task_struct *thread;
30734
30735- atomic_t req_ser;
30736- atomic_t flush_ser;
30737+ atomic_unchecked_t req_ser;
30738+ atomic_unchecked_t flush_ser;
30739
30740 wait_queue_head_t force_wait;
30741 };
30742@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
30743 struct ib_fmr_pool *pool = pool_ptr;
30744
30745 do {
30746- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30747+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30748 ib_fmr_batch_release(pool);
30749
30750- atomic_inc(&pool->flush_ser);
30751+ atomic_inc_unchecked(&pool->flush_ser);
30752 wake_up_interruptible(&pool->force_wait);
30753
30754 if (pool->flush_function)
30755@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30756 }
30757
30758 set_current_state(TASK_INTERRUPTIBLE);
30759- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30760+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30761 !kthread_should_stop())
30762 schedule();
30763 __set_current_state(TASK_RUNNING);
30764@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
30765 pool->dirty_watermark = params->dirty_watermark;
30766 pool->dirty_len = 0;
30767 spin_lock_init(&pool->pool_lock);
30768- atomic_set(&pool->req_ser, 0);
30769- atomic_set(&pool->flush_ser, 0);
30770+ atomic_set_unchecked(&pool->req_ser, 0);
30771+ atomic_set_unchecked(&pool->flush_ser, 0);
30772 init_waitqueue_head(&pool->force_wait);
30773
30774 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30775@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
30776 }
30777 spin_unlock_irq(&pool->pool_lock);
30778
30779- serial = atomic_inc_return(&pool->req_ser);
30780+ serial = atomic_inc_return_unchecked(&pool->req_ser);
30781 wake_up_process(pool->thread);
30782
30783 if (wait_event_interruptible(pool->force_wait,
30784- atomic_read(&pool->flush_ser) - serial >= 0))
30785+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30786 return -EINTR;
30787
30788 return 0;
30789@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30790 } else {
30791 list_add_tail(&fmr->list, &pool->dirty_list);
30792 if (++pool->dirty_len >= pool->dirty_watermark) {
30793- atomic_inc(&pool->req_ser);
30794+ atomic_inc_unchecked(&pool->req_ser);
30795 wake_up_process(pool->thread);
30796 }
30797 }
30798diff -urNp linux-2.6.32.45/drivers/infiniband/core/sysfs.c linux-2.6.32.45/drivers/infiniband/core/sysfs.c
30799--- linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30800+++ linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30801@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30802 return port_attr->show(p, port_attr, buf);
30803 }
30804
30805-static struct sysfs_ops port_sysfs_ops = {
30806+static const struct sysfs_ops port_sysfs_ops = {
30807 .show = port_attr_show
30808 };
30809
30810diff -urNp linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c
30811--- linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30812+++ linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30813@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
30814 dst->grh.sgid_index = src->grh.sgid_index;
30815 dst->grh.hop_limit = src->grh.hop_limit;
30816 dst->grh.traffic_class = src->grh.traffic_class;
30817+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
30818 dst->dlid = src->dlid;
30819 dst->sl = src->sl;
30820 dst->src_path_bits = src->src_path_bits;
30821 dst->static_rate = src->static_rate;
30822 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
30823 dst->port_num = src->port_num;
30824+ dst->reserved = 0;
30825 }
30826 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
30827
30828 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
30829 struct ib_qp_attr *src)
30830 {
30831+ dst->qp_state = src->qp_state;
30832 dst->cur_qp_state = src->cur_qp_state;
30833 dst->path_mtu = src->path_mtu;
30834 dst->path_mig_state = src->path_mig_state;
30835@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
30836 dst->rnr_retry = src->rnr_retry;
30837 dst->alt_port_num = src->alt_port_num;
30838 dst->alt_timeout = src->alt_timeout;
30839+ memset(dst->reserved, 0, sizeof(dst->reserved));
30840 }
30841 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
30842
30843diff -urNp linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c
30844--- linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
30845+++ linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
30846@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
30847 struct infinipath_counters counters;
30848 struct ipath_devdata *dd;
30849
30850+ pax_track_stack();
30851+
30852 dd = file->f_path.dentry->d_inode->i_private;
30853 dd->ipath_f_read_counters(dd, &counters);
30854
30855diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c
30856--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
30857+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
30858@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
30859 LIST_HEAD(nes_adapter_list);
30860 static LIST_HEAD(nes_dev_list);
30861
30862-atomic_t qps_destroyed;
30863+atomic_unchecked_t qps_destroyed;
30864
30865 static unsigned int ee_flsh_adapter;
30866 static unsigned int sysfs_nonidx_addr;
30867@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
30868 struct nes_adapter *nesadapter = nesdev->nesadapter;
30869 u32 qp_id;
30870
30871- atomic_inc(&qps_destroyed);
30872+ atomic_inc_unchecked(&qps_destroyed);
30873
30874 /* Free the control structures */
30875
30876diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c
30877--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
30878+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
30879@@ -69,11 +69,11 @@ u32 cm_packets_received;
30880 u32 cm_listens_created;
30881 u32 cm_listens_destroyed;
30882 u32 cm_backlog_drops;
30883-atomic_t cm_loopbacks;
30884-atomic_t cm_nodes_created;
30885-atomic_t cm_nodes_destroyed;
30886-atomic_t cm_accel_dropped_pkts;
30887-atomic_t cm_resets_recvd;
30888+atomic_unchecked_t cm_loopbacks;
30889+atomic_unchecked_t cm_nodes_created;
30890+atomic_unchecked_t cm_nodes_destroyed;
30891+atomic_unchecked_t cm_accel_dropped_pkts;
30892+atomic_unchecked_t cm_resets_recvd;
30893
30894 static inline int mini_cm_accelerated(struct nes_cm_core *,
30895 struct nes_cm_node *);
30896@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
30897
30898 static struct nes_cm_core *g_cm_core;
30899
30900-atomic_t cm_connects;
30901-atomic_t cm_accepts;
30902-atomic_t cm_disconnects;
30903-atomic_t cm_closes;
30904-atomic_t cm_connecteds;
30905-atomic_t cm_connect_reqs;
30906-atomic_t cm_rejects;
30907+atomic_unchecked_t cm_connects;
30908+atomic_unchecked_t cm_accepts;
30909+atomic_unchecked_t cm_disconnects;
30910+atomic_unchecked_t cm_closes;
30911+atomic_unchecked_t cm_connecteds;
30912+atomic_unchecked_t cm_connect_reqs;
30913+atomic_unchecked_t cm_rejects;
30914
30915
30916 /**
30917@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30918 cm_node->rem_mac);
30919
30920 add_hte_node(cm_core, cm_node);
30921- atomic_inc(&cm_nodes_created);
30922+ atomic_inc_unchecked(&cm_nodes_created);
30923
30924 return cm_node;
30925 }
30926@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30927 }
30928
30929 atomic_dec(&cm_core->node_cnt);
30930- atomic_inc(&cm_nodes_destroyed);
30931+ atomic_inc_unchecked(&cm_nodes_destroyed);
30932 nesqp = cm_node->nesqp;
30933 if (nesqp) {
30934 nesqp->cm_node = NULL;
30935@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30936
30937 static void drop_packet(struct sk_buff *skb)
30938 {
30939- atomic_inc(&cm_accel_dropped_pkts);
30940+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30941 dev_kfree_skb_any(skb);
30942 }
30943
30944@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30945
30946 int reset = 0; /* whether to send reset in case of err.. */
30947 int passive_state;
30948- atomic_inc(&cm_resets_recvd);
30949+ atomic_inc_unchecked(&cm_resets_recvd);
30950 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30951 " refcnt=%d\n", cm_node, cm_node->state,
30952 atomic_read(&cm_node->ref_count));
30953@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30954 rem_ref_cm_node(cm_node->cm_core, cm_node);
30955 return NULL;
30956 }
30957- atomic_inc(&cm_loopbacks);
30958+ atomic_inc_unchecked(&cm_loopbacks);
30959 loopbackremotenode->loopbackpartner = cm_node;
30960 loopbackremotenode->tcp_cntxt.rcv_wscale =
30961 NES_CM_DEFAULT_RCV_WND_SCALE;
30962@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30963 add_ref_cm_node(cm_node);
30964 } else if (cm_node->state == NES_CM_STATE_TSA) {
30965 rem_ref_cm_node(cm_core, cm_node);
30966- atomic_inc(&cm_accel_dropped_pkts);
30967+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30968 dev_kfree_skb_any(skb);
30969 break;
30970 }
30971@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30972
30973 if ((cm_id) && (cm_id->event_handler)) {
30974 if (issue_disconn) {
30975- atomic_inc(&cm_disconnects);
30976+ atomic_inc_unchecked(&cm_disconnects);
30977 cm_event.event = IW_CM_EVENT_DISCONNECT;
30978 cm_event.status = disconn_status;
30979 cm_event.local_addr = cm_id->local_addr;
30980@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30981 }
30982
30983 if (issue_close) {
30984- atomic_inc(&cm_closes);
30985+ atomic_inc_unchecked(&cm_closes);
30986 nes_disconnect(nesqp, 1);
30987
30988 cm_id->provider_data = nesqp;
30989@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30990
30991 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30992 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30993- atomic_inc(&cm_accepts);
30994+ atomic_inc_unchecked(&cm_accepts);
30995
30996 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30997 atomic_read(&nesvnic->netdev->refcnt));
30998@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
30999
31000 struct nes_cm_core *cm_core;
31001
31002- atomic_inc(&cm_rejects);
31003+ atomic_inc_unchecked(&cm_rejects);
31004 cm_node = (struct nes_cm_node *) cm_id->provider_data;
31005 loopback = cm_node->loopbackpartner;
31006 cm_core = cm_node->cm_core;
31007@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
31008 ntohl(cm_id->local_addr.sin_addr.s_addr),
31009 ntohs(cm_id->local_addr.sin_port));
31010
31011- atomic_inc(&cm_connects);
31012+ atomic_inc_unchecked(&cm_connects);
31013 nesqp->active_conn = 1;
31014
31015 /* cache the cm_id in the qp */
31016@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
31017 if (nesqp->destroyed) {
31018 return;
31019 }
31020- atomic_inc(&cm_connecteds);
31021+ atomic_inc_unchecked(&cm_connecteds);
31022 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
31023 " local port 0x%04X. jiffies = %lu.\n",
31024 nesqp->hwqp.qp_id,
31025@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
31026
31027 ret = cm_id->event_handler(cm_id, &cm_event);
31028 cm_id->add_ref(cm_id);
31029- atomic_inc(&cm_closes);
31030+ atomic_inc_unchecked(&cm_closes);
31031 cm_event.event = IW_CM_EVENT_CLOSE;
31032 cm_event.status = IW_CM_EVENT_STATUS_OK;
31033 cm_event.provider_data = cm_id->provider_data;
31034@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
31035 return;
31036 cm_id = cm_node->cm_id;
31037
31038- atomic_inc(&cm_connect_reqs);
31039+ atomic_inc_unchecked(&cm_connect_reqs);
31040 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31041 cm_node, cm_id, jiffies);
31042
31043@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
31044 return;
31045 cm_id = cm_node->cm_id;
31046
31047- atomic_inc(&cm_connect_reqs);
31048+ atomic_inc_unchecked(&cm_connect_reqs);
31049 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31050 cm_node, cm_id, jiffies);
31051
31052diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h
31053--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
31054+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
31055@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
31056 extern unsigned int wqm_quanta;
31057 extern struct list_head nes_adapter_list;
31058
31059-extern atomic_t cm_connects;
31060-extern atomic_t cm_accepts;
31061-extern atomic_t cm_disconnects;
31062-extern atomic_t cm_closes;
31063-extern atomic_t cm_connecteds;
31064-extern atomic_t cm_connect_reqs;
31065-extern atomic_t cm_rejects;
31066-extern atomic_t mod_qp_timouts;
31067-extern atomic_t qps_created;
31068-extern atomic_t qps_destroyed;
31069-extern atomic_t sw_qps_destroyed;
31070+extern atomic_unchecked_t cm_connects;
31071+extern atomic_unchecked_t cm_accepts;
31072+extern atomic_unchecked_t cm_disconnects;
31073+extern atomic_unchecked_t cm_closes;
31074+extern atomic_unchecked_t cm_connecteds;
31075+extern atomic_unchecked_t cm_connect_reqs;
31076+extern atomic_unchecked_t cm_rejects;
31077+extern atomic_unchecked_t mod_qp_timouts;
31078+extern atomic_unchecked_t qps_created;
31079+extern atomic_unchecked_t qps_destroyed;
31080+extern atomic_unchecked_t sw_qps_destroyed;
31081 extern u32 mh_detected;
31082 extern u32 mh_pauses_sent;
31083 extern u32 cm_packets_sent;
31084@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
31085 extern u32 cm_listens_created;
31086 extern u32 cm_listens_destroyed;
31087 extern u32 cm_backlog_drops;
31088-extern atomic_t cm_loopbacks;
31089-extern atomic_t cm_nodes_created;
31090-extern atomic_t cm_nodes_destroyed;
31091-extern atomic_t cm_accel_dropped_pkts;
31092-extern atomic_t cm_resets_recvd;
31093+extern atomic_unchecked_t cm_loopbacks;
31094+extern atomic_unchecked_t cm_nodes_created;
31095+extern atomic_unchecked_t cm_nodes_destroyed;
31096+extern atomic_unchecked_t cm_accel_dropped_pkts;
31097+extern atomic_unchecked_t cm_resets_recvd;
31098
31099 extern u32 int_mod_timer_init;
31100 extern u32 int_mod_cq_depth_256;
31101diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c
31102--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
31103+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
31104@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
31105 target_stat_values[++index] = mh_detected;
31106 target_stat_values[++index] = mh_pauses_sent;
31107 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
31108- target_stat_values[++index] = atomic_read(&cm_connects);
31109- target_stat_values[++index] = atomic_read(&cm_accepts);
31110- target_stat_values[++index] = atomic_read(&cm_disconnects);
31111- target_stat_values[++index] = atomic_read(&cm_connecteds);
31112- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
31113- target_stat_values[++index] = atomic_read(&cm_rejects);
31114- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
31115- target_stat_values[++index] = atomic_read(&qps_created);
31116- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
31117- target_stat_values[++index] = atomic_read(&qps_destroyed);
31118- target_stat_values[++index] = atomic_read(&cm_closes);
31119+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
31120+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
31121+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
31122+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
31123+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
31124+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
31125+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
31126+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
31127+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
31128+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
31129+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
31130 target_stat_values[++index] = cm_packets_sent;
31131 target_stat_values[++index] = cm_packets_bounced;
31132 target_stat_values[++index] = cm_packets_created;
31133@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
31134 target_stat_values[++index] = cm_listens_created;
31135 target_stat_values[++index] = cm_listens_destroyed;
31136 target_stat_values[++index] = cm_backlog_drops;
31137- target_stat_values[++index] = atomic_read(&cm_loopbacks);
31138- target_stat_values[++index] = atomic_read(&cm_nodes_created);
31139- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
31140- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
31141- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
31142+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
31143+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
31144+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
31145+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
31146+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
31147 target_stat_values[++index] = int_mod_timer_init;
31148 target_stat_values[++index] = int_mod_cq_depth_1;
31149 target_stat_values[++index] = int_mod_cq_depth_4;
31150diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c
31151--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
31152+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
31153@@ -45,9 +45,9 @@
31154
31155 #include <rdma/ib_umem.h>
31156
31157-atomic_t mod_qp_timouts;
31158-atomic_t qps_created;
31159-atomic_t sw_qps_destroyed;
31160+atomic_unchecked_t mod_qp_timouts;
31161+atomic_unchecked_t qps_created;
31162+atomic_unchecked_t sw_qps_destroyed;
31163
31164 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
31165
31166@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
31167 if (init_attr->create_flags)
31168 return ERR_PTR(-EINVAL);
31169
31170- atomic_inc(&qps_created);
31171+ atomic_inc_unchecked(&qps_created);
31172 switch (init_attr->qp_type) {
31173 case IB_QPT_RC:
31174 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
31175@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
31176 struct iw_cm_event cm_event;
31177 int ret;
31178
31179- atomic_inc(&sw_qps_destroyed);
31180+ atomic_inc_unchecked(&sw_qps_destroyed);
31181 nesqp->destroyed = 1;
31182
31183 /* Blow away the connection if it exists. */
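
The nes hunks above switch counters that are only ever incremented and reported through the driver's ethtool statistics (cm_connects, cm_accepts, qps_created, ...) from atomic_t to PaX's atomic_unchecked_t, with matching atomic_inc_unchecked()/atomic_read_unchecked() calls, so that reference-count overflow checking does not apply to values whose wrap-around is harmless. A minimal userspace sketch of that split, assuming made-up names (stat_counter_t, stat_inc, stat_read) and a plain, non-atomic increment in place of the real atomic helpers:

#include <stdio.h>

/* Purely informational counter: wrap-around is not a security event.
 * The kernel code uses atomic_inc_unchecked()/atomic_read_unchecked(). */
typedef struct { unsigned int counter; } stat_counter_t;

static void stat_inc(stat_counter_t *v)              { v->counter++; }
static unsigned int stat_read(const stat_counter_t *v) { return v->counter; }

int main(void)
{
    stat_counter_t cm_connects_demo = { 0 };
    stat_inc(&cm_connects_demo);
    printf("cm_connects_demo = %u\n", stat_read(&cm_connects_demo));
    return 0;
}
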
31184diff -urNp linux-2.6.32.45/drivers/input/gameport/gameport.c linux-2.6.32.45/drivers/input/gameport/gameport.c
31185--- linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
31186+++ linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
31187@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
31188 */
31189 static void gameport_init_port(struct gameport *gameport)
31190 {
31191- static atomic_t gameport_no = ATOMIC_INIT(0);
31192+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
31193
31194 __module_get(THIS_MODULE);
31195
31196 mutex_init(&gameport->drv_mutex);
31197 device_initialize(&gameport->dev);
31198- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
31199+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
31200 gameport->dev.bus = &gameport_bus;
31201 gameport->dev.release = gameport_release_port;
31202 if (gameport->parent)
31203diff -urNp linux-2.6.32.45/drivers/input/input.c linux-2.6.32.45/drivers/input/input.c
31204--- linux-2.6.32.45/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
31205+++ linux-2.6.32.45/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
31206@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
31207 */
31208 int input_register_device(struct input_dev *dev)
31209 {
31210- static atomic_t input_no = ATOMIC_INIT(0);
31211+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
31212 struct input_handler *handler;
31213 const char *path;
31214 int error;
31215@@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
31216 dev->setkeycode = input_default_setkeycode;
31217
31218 dev_set_name(&dev->dev, "input%ld",
31219- (unsigned long) atomic_inc_return(&input_no) - 1);
31220+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
31221
31222 error = device_add(&dev->dev);
31223 if (error)
31224diff -urNp linux-2.6.32.45/drivers/input/joystick/sidewinder.c linux-2.6.32.45/drivers/input/joystick/sidewinder.c
31225--- linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
31226+++ linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
31227@@ -30,6 +30,7 @@
31228 #include <linux/kernel.h>
31229 #include <linux/module.h>
31230 #include <linux/slab.h>
31231+#include <linux/sched.h>
31232 #include <linux/init.h>
31233 #include <linux/input.h>
31234 #include <linux/gameport.h>
31235@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
31236 unsigned char buf[SW_LENGTH];
31237 int i;
31238
31239+ pax_track_stack();
31240+
31241 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
31242
31243 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
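
The sidewinder.c hunk above (and the similar eicon, i4l and raid5 hunks later in this patch) inserts a pax_track_stack() call at the top of functions that place unusually large buffers on the stack, presumably so PaX's stack-usage instrumentation can account for the deep frame. A rough userspace sketch of the shape of the change, where track_stack(), MAX_DESC and read_table() are made-up stand-ins rather than kernel APIs:

#include <string.h>

static void track_stack(void) { /* no-op stand-in for pax_track_stack() */ }

#define MAX_DESC 64

static void read_table(void)
{
    char table[MAX_DESC * 32];   /* unusually large stack frame */

    track_stack();               /* called on entry, as in the hunks above */
    memset(table, 0, sizeof(table));
}

int main(void)
{
    read_table();
    return 0;
}
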
31244diff -urNp linux-2.6.32.45/drivers/input/joystick/xpad.c linux-2.6.32.45/drivers/input/joystick/xpad.c
31245--- linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
31246+++ linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
31247@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
31248
31249 static int xpad_led_probe(struct usb_xpad *xpad)
31250 {
31251- static atomic_t led_seq = ATOMIC_INIT(0);
31252+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
31253 long led_no;
31254 struct xpad_led *led;
31255 struct led_classdev *led_cdev;
31256@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
31257 if (!led)
31258 return -ENOMEM;
31259
31260- led_no = (long)atomic_inc_return(&led_seq) - 1;
31261+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
31262
31263 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
31264 led->xpad = xpad;
31265diff -urNp linux-2.6.32.45/drivers/input/serio/serio.c linux-2.6.32.45/drivers/input/serio/serio.c
31266--- linux-2.6.32.45/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
31267+++ linux-2.6.32.45/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
31268@@ -527,7 +527,7 @@ static void serio_release_port(struct de
31269 */
31270 static void serio_init_port(struct serio *serio)
31271 {
31272- static atomic_t serio_no = ATOMIC_INIT(0);
31273+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
31274
31275 __module_get(THIS_MODULE);
31276
31277@@ -536,7 +536,7 @@ static void serio_init_port(struct serio
31278 mutex_init(&serio->drv_mutex);
31279 device_initialize(&serio->dev);
31280 dev_set_name(&serio->dev, "serio%ld",
31281- (long)atomic_inc_return(&serio_no) - 1);
31282+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
31283 serio->dev.bus = &serio_bus;
31284 serio->dev.release = serio_release_port;
31285 if (serio->parent) {
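
The gameport, input, xpad and serio hunks above apply one recurring pattern: a static instance counter that only feeds a device name via dev_set_name() becomes an atomic_unchecked_t bumped with atomic_inc_return_unchecked(), keeping the numbering atomic while exempting it from overflow checking. A small sketch with made-up names (next_port_no, init_port_name) and an ordinary ++ standing in for the atomic helper:

#include <stddef.h>
#include <stdio.h>

static unsigned long next_port_no;   /* the kernel code increments this atomically */

static void init_port_name(char *buf, size_t len)
{
    snprintf(buf, len, "serio%lu", next_port_no++);
}

int main(void)
{
    char name[16];
    init_port_name(name, sizeof(name));
    puts(name);                       /* "serio0" on the first call */
    return 0;
}
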
31286diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/common.c linux-2.6.32.45/drivers/isdn/gigaset/common.c
31287--- linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
31288+++ linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
31289@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
31290 cs->commands_pending = 0;
31291 cs->cur_at_seq = 0;
31292 cs->gotfwver = -1;
31293- cs->open_count = 0;
31294+ local_set(&cs->open_count, 0);
31295 cs->dev = NULL;
31296 cs->tty = NULL;
31297 cs->tty_dev = NULL;
31298diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h
31299--- linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
31300+++ linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
31301@@ -34,6 +34,7 @@
31302 #include <linux/tty_driver.h>
31303 #include <linux/list.h>
31304 #include <asm/atomic.h>
31305+#include <asm/local.h>
31306
31307 #define GIG_VERSION {0,5,0,0}
31308 #define GIG_COMPAT {0,4,0,0}
31309@@ -446,7 +447,7 @@ struct cardstate {
31310 spinlock_t cmdlock;
31311 unsigned curlen, cmdbytes;
31312
31313- unsigned open_count;
31314+ local_t open_count;
31315 struct tty_struct *tty;
31316 struct tasklet_struct if_wake_tasklet;
31317 unsigned control_state;
31318diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/interface.c linux-2.6.32.45/drivers/isdn/gigaset/interface.c
31319--- linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
31320+++ linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
31321@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
31322 return -ERESTARTSYS; // FIXME -EINTR?
31323 tty->driver_data = cs;
31324
31325- ++cs->open_count;
31326-
31327- if (cs->open_count == 1) {
31328+ if (local_inc_return(&cs->open_count) == 1) {
31329 spin_lock_irqsave(&cs->lock, flags);
31330 cs->tty = tty;
31331 spin_unlock_irqrestore(&cs->lock, flags);
31332@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
31333
31334 if (!cs->connected)
31335 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31336- else if (!cs->open_count)
31337+ else if (!local_read(&cs->open_count))
31338 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31339 else {
31340- if (!--cs->open_count) {
31341+ if (!local_dec_return(&cs->open_count)) {
31342 spin_lock_irqsave(&cs->lock, flags);
31343 cs->tty = NULL;
31344 spin_unlock_irqrestore(&cs->lock, flags);
31345@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
31346 if (!cs->connected) {
31347 gig_dbg(DEBUG_IF, "not connected");
31348 retval = -ENODEV;
31349- } else if (!cs->open_count)
31350+ } else if (!local_read(&cs->open_count))
31351 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31352 else {
31353 retval = 0;
31354@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
31355 if (!cs->connected) {
31356 gig_dbg(DEBUG_IF, "not connected");
31357 retval = -ENODEV;
31358- } else if (!cs->open_count)
31359+ } else if (!local_read(&cs->open_count))
31360 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31361 else if (cs->mstate != MS_LOCKED) {
31362 dev_warn(cs->dev, "can't write to unlocked device\n");
31363@@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
31364 if (!cs->connected) {
31365 gig_dbg(DEBUG_IF, "not connected");
31366 retval = -ENODEV;
31367- } else if (!cs->open_count)
31368+ } else if (!local_read(&cs->open_count))
31369 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31370 else if (cs->mstate != MS_LOCKED) {
31371 dev_warn(cs->dev, "can't write to unlocked device\n");
31372@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
31373
31374 if (!cs->connected)
31375 gig_dbg(DEBUG_IF, "not connected");
31376- else if (!cs->open_count)
31377+ else if (!local_read(&cs->open_count))
31378 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31379 else if (cs->mstate != MS_LOCKED)
31380 dev_warn(cs->dev, "can't write to unlocked device\n");
31381@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
31382
31383 if (!cs->connected)
31384 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31385- else if (!cs->open_count)
31386+ else if (!local_read(&cs->open_count))
31387 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31388 else {
31389 //FIXME
31390@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
31391
31392 if (!cs->connected)
31393 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31394- else if (!cs->open_count)
31395+ else if (!local_read(&cs->open_count))
31396 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31397 else {
31398 //FIXME
31399@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
31400 goto out;
31401 }
31402
31403- if (!cs->open_count) {
31404+ if (!local_read(&cs->open_count)) {
31405 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31406 goto out;
31407 }
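
The gigaset hunks above replace the plain "unsigned open_count" with a local_t and rework every ++/-- and test into local_inc_return()/local_dec_return()/local_read(), so the open count is updated with atomic read-modify-write operations instead of unlocked arithmetic. A userspace sketch of the same open/close logic using C11 atomics instead of local_t; first_open(), last_close() and is_open() are made-up names:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int open_count;

static bool first_open(void) { return atomic_fetch_add(&open_count, 1) + 1 == 1; }
static bool last_close(void) { return atomic_fetch_sub(&open_count, 1) - 1 == 0; }
static bool is_open(void)    { return atomic_load(&open_count) != 0; }

A caller would perform its one-time setup only when first_open() returns true and its teardown only when last_close() returns true, mirroring the if_open()/if_close() hunks above.
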
31408diff -urNp linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c
31409--- linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
31410+++ linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
31411@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
31412 }
31413 if (left) {
31414 if (t4file->user) {
31415- if (copy_from_user(buf, dp, left))
31416+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31417 return -EFAULT;
31418 } else {
31419 memcpy(buf, dp, left);
31420@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
31421 }
31422 if (left) {
31423 if (config->user) {
31424- if (copy_from_user(buf, dp, left))
31425+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31426 return -EFAULT;
31427 } else {
31428 memcpy(buf, dp, left);
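
The b1.c hunk above (and the later icn.c one) guards copy_from_user() with an explicit length check so a user-supplied "left" can never exceed the on-stack buffer it is copied into. A userspace sketch of the check, where copy_chunk() is a made-up helper and memcpy() stands in for copy_from_user():

#include <errno.h>
#include <string.h>

static int copy_chunk(char *dst, size_t dst_size, const char *src, size_t left)
{
    if (left > dst_size)      /* reject an oversized request before copying */
        return -EFAULT;
    memcpy(dst, src, left);
    return 0;
}
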
31429diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c
31430--- linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
31431+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
31432@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
31433 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
31434 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
31435
31436+ pax_track_stack();
31437
31438 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
31439 {
31440diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c
31441--- linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
31442+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
31443@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
31444 IDI_SYNC_REQ req;
31445 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31446
31447+ pax_track_stack();
31448+
31449 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31450
31451 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31452diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c
31453--- linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
31454+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
31455@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
31456 IDI_SYNC_REQ req;
31457 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31458
31459+ pax_track_stack();
31460+
31461 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31462
31463 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31464diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c
31465--- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
31466+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
31467@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
31468 IDI_SYNC_REQ req;
31469 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31470
31471+ pax_track_stack();
31472+
31473 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31474
31475 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31476diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h
31477--- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-03-27 14:31:47.000000000 -0400
31478+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:33:55.000000000 -0400
31479@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31480 } diva_didd_add_adapter_t;
31481 typedef struct _diva_didd_remove_adapter {
31482 IDI_CALL p_request;
31483-} diva_didd_remove_adapter_t;
31484+} __no_const diva_didd_remove_adapter_t;
31485 typedef struct _diva_didd_read_adapter_array {
31486 void * buffer;
31487 dword length;
31488diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c
31489--- linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
31490+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
31491@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
31492 IDI_SYNC_REQ req;
31493 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31494
31495+ pax_track_stack();
31496+
31497 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31498
31499 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31500diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c
31501--- linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
31502+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
31503@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
31504 dword d;
31505 word w;
31506
31507+ pax_track_stack();
31508+
31509 a = plci->adapter;
31510 Id = ((word)plci->Id<<8)|a->Id;
31511 PUT_WORD(&SS_Ind[4],0x0000);
31512@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
31513 word j, n, w;
31514 dword d;
31515
31516+ pax_track_stack();
31517+
31518
31519 for(i=0;i<8;i++) bp_parms[i].length = 0;
31520 for(i=0;i<2;i++) global_config[i].length = 0;
31521@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
31522 const byte llc3[] = {4,3,2,2,6,6,0};
31523 const byte header[] = {0,2,3,3,0,0,0};
31524
31525+ pax_track_stack();
31526+
31527 for(i=0;i<8;i++) bp_parms[i].length = 0;
31528 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
31529 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
31530@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
31531 word appl_number_group_type[MAX_APPL];
31532 PLCI *auxplci;
31533
31534+ pax_track_stack();
31535+
31536 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
31537
31538 if(!a->group_optimization_enabled)
31539diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c
31540--- linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
31541+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
31542@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
31543 IDI_SYNC_REQ req;
31544 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31545
31546+ pax_track_stack();
31547+
31548 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31549
31550 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31551diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h
31552--- linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-03-27 14:31:47.000000000 -0400
31553+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:33:55.000000000 -0400
31554@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31555 typedef struct _diva_os_idi_adapter_interface {
31556 diva_init_card_proc_t cleanup_adapter_proc;
31557 diva_cmd_card_proc_t cmd_proc;
31558-} diva_os_idi_adapter_interface_t;
31559+} __no_const diva_os_idi_adapter_interface_t;
31560
31561 typedef struct _diva_os_xdi_adapter {
31562 struct list_head link;
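
The divasync.h and xdi_adapter.h hunks above (and the later dvb ones) tag structs that consist only of function pointers with __no_const, which appears to opt those particular types out of PaX's automatic constification because their instances are filled in at run time. Illustrative only, with a made-up type name:

/* A type made up solely of function pointers, the kind the hunks above
 * mark __no_const so its instances stay writable. */
struct adapter_iface_demo {
    int (*cleanup)(void *ctx);
    int (*command)(void *ctx, int cmd);
};
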
31563diff -urNp linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c
31564--- linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
31565+++ linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
31566@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
31567 } iocpar;
31568 void __user *argp = (void __user *)arg;
31569
31570+ pax_track_stack();
31571+
31572 #define name iocpar.name
31573 #define bname iocpar.bname
31574 #define iocts iocpar.iocts
31575diff -urNp linux-2.6.32.45/drivers/isdn/icn/icn.c linux-2.6.32.45/drivers/isdn/icn/icn.c
31576--- linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
31577+++ linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
31578@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
31579 if (count > len)
31580 count = len;
31581 if (user) {
31582- if (copy_from_user(msg, buf, count))
31583+ if (count > sizeof msg || copy_from_user(msg, buf, count))
31584 return -EFAULT;
31585 } else
31586 memcpy(msg, buf, count);
31587diff -urNp linux-2.6.32.45/drivers/isdn/mISDN/socket.c linux-2.6.32.45/drivers/isdn/mISDN/socket.c
31588--- linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
31589+++ linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
31590@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
31591 if (dev) {
31592 struct mISDN_devinfo di;
31593
31594+ memset(&di, 0, sizeof(di));
31595 di.id = dev->id;
31596 di.Dprotocols = dev->Dprotocols;
31597 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31598@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
31599 if (dev) {
31600 struct mISDN_devinfo di;
31601
31602+ memset(&di, 0, sizeof(di));
31603 di.id = dev->id;
31604 di.Dprotocols = dev->Dprotocols;
31605 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
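
The two socket.c hunks above zero the on-stack struct mISDN_devinfo before its fields are filled in and copied to user space, so padding bytes and any members left unset cannot leak kernel stack contents through the ioctl. A userspace sketch of the pattern; devinfo_demo and fill_devinfo() are made-up names:

#include <string.h>

struct devinfo_demo {              /* stand-in for struct mISDN_devinfo */
    unsigned int id;
    unsigned int protocols;
    char         name[16];
};

static void fill_devinfo(struct devinfo_demo *di, unsigned int id)
{
    memset(di, 0, sizeof(*di));    /* clear everything first */
    di->id = id;                   /* then fill in the known fields */
}
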
31606diff -urNp linux-2.6.32.45/drivers/isdn/sc/interrupt.c linux-2.6.32.45/drivers/isdn/sc/interrupt.c
31607--- linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
31608+++ linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
31609@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
31610 }
31611 else if(callid>=0x0000 && callid<=0x7FFF)
31612 {
31613+ int len;
31614+
31615 pr_debug("%s: Got Incoming Call\n",
31616 sc_adapter[card]->devicename);
31617- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
31618- strcpy(setup.eazmsn,
31619- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
31620+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
31621+ sizeof(setup.phone));
31622+ if (len >= sizeof(setup.phone))
31623+ continue;
31624+ len = strlcpy(setup.eazmsn,
31625+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31626+ sizeof(setup.eazmsn));
31627+ if (len >= sizeof(setup.eazmsn))
31628+ continue;
31629 setup.si1 = 7;
31630 setup.si2 = 0;
31631 setup.plan = 0;
31632@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
31633 * Handle a GetMyNumber Rsp
31634 */
31635 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
31636- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
31637+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31638+ rcvmsg.msg_data.byte_array,
31639+ sizeof(rcvmsg.msg_data.byte_array));
31640 continue;
31641 }
31642
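
The interrupt.c hunk above replaces unbounded strcpy() calls with strlcpy() bounded by the destination size and skips the message when the source would not fit, so an oversized phone or directory number can no longer overflow the setup buffers. A userspace sketch of the bounded-copy-plus-truncation-check idea, using snprintf() in place of strlcpy() and a made-up copy_number() helper:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool copy_number(char *dst, size_t dst_size, const char *src)
{
    int n = snprintf(dst, dst_size, "%s", src);
    return n >= 0 && (size_t)n < dst_size;   /* false means truncated: skip the event */
}
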
31643diff -urNp linux-2.6.32.45/drivers/lguest/core.c linux-2.6.32.45/drivers/lguest/core.c
31644--- linux-2.6.32.45/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
31645+++ linux-2.6.32.45/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
31646@@ -91,9 +91,17 @@ static __init int map_switcher(void)
31647 * it's worked so far. The end address needs +1 because __get_vm_area
31648 * allocates an extra guard page, so we need space for that.
31649 */
31650+
31651+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31652+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31653+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31654+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31655+#else
31656 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31657 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31658 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31659+#endif
31660+
31661 if (!switcher_vma) {
31662 err = -ENOMEM;
31663 printk("lguest: could not map switcher pages high\n");
31664@@ -118,7 +126,7 @@ static __init int map_switcher(void)
31665 * Now the Switcher is mapped at the right address, we can't fail!
31666 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
31667 */
31668- memcpy(switcher_vma->addr, start_switcher_text,
31669+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31670 end_switcher_text - start_switcher_text);
31671
31672 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31673diff -urNp linux-2.6.32.45/drivers/lguest/x86/core.c linux-2.6.32.45/drivers/lguest/x86/core.c
31674--- linux-2.6.32.45/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
31675+++ linux-2.6.32.45/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
31676@@ -59,7 +59,7 @@ static struct {
31677 /* Offset from where switcher.S was compiled to where we've copied it */
31678 static unsigned long switcher_offset(void)
31679 {
31680- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31681+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31682 }
31683
31684 /* This cpu's struct lguest_pages. */
31685@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
31686 * These copies are pretty cheap, so we do them unconditionally: */
31687 /* Save the current Host top-level page directory.
31688 */
31689+
31690+#ifdef CONFIG_PAX_PER_CPU_PGD
31691+ pages->state.host_cr3 = read_cr3();
31692+#else
31693 pages->state.host_cr3 = __pa(current->mm->pgd);
31694+#endif
31695+
31696 /*
31697 * Set up the Guest's page tables to see this CPU's pages (and no
31698 * other CPU's pages).
31699@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
31700 * compiled-in switcher code and the high-mapped copy we just made.
31701 */
31702 for (i = 0; i < IDT_ENTRIES; i++)
31703- default_idt_entries[i] += switcher_offset();
31704+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31705
31706 /*
31707 * Set up the Switcher's per-cpu areas.
31708@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
31709 * it will be undisturbed when we switch. To change %cs and jump we
31710 * need this structure to feed to Intel's "lcall" instruction.
31711 */
31712- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31713+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31714 lguest_entry.segment = LGUEST_CS;
31715
31716 /*
31717diff -urNp linux-2.6.32.45/drivers/lguest/x86/switcher_32.S linux-2.6.32.45/drivers/lguest/x86/switcher_32.S
31718--- linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
31719+++ linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
31720@@ -87,6 +87,7 @@
31721 #include <asm/page.h>
31722 #include <asm/segment.h>
31723 #include <asm/lguest.h>
31724+#include <asm/processor-flags.h>
31725
31726 // We mark the start of the code to copy
31727 // It's placed in .text tho it's never run here
31728@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31729 // Changes type when we load it: damn Intel!
31730 // For after we switch over our page tables
31731 // That entry will be read-only: we'd crash.
31732+
31733+#ifdef CONFIG_PAX_KERNEXEC
31734+ mov %cr0, %edx
31735+ xor $X86_CR0_WP, %edx
31736+ mov %edx, %cr0
31737+#endif
31738+
31739 movl $(GDT_ENTRY_TSS*8), %edx
31740 ltr %dx
31741
31742@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31743 // Let's clear it again for our return.
31744 // The GDT descriptor of the Host
31745 // Points to the table after two "size" bytes
31746- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31747+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31748 // Clear "used" from type field (byte 5, bit 2)
31749- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31750+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31751+
31752+#ifdef CONFIG_PAX_KERNEXEC
31753+ mov %cr0, %eax
31754+ xor $X86_CR0_WP, %eax
31755+ mov %eax, %cr0
31756+#endif
31757
31758 // Once our page table's switched, the Guest is live!
31759 // The Host fades as we run this final step.
31760@@ -295,13 +309,12 @@ deliver_to_host:
31761 // I consulted gcc, and it gave
31762 // These instructions, which I gladly credit:
31763 leal (%edx,%ebx,8), %eax
31764- movzwl (%eax),%edx
31765- movl 4(%eax), %eax
31766- xorw %ax, %ax
31767- orl %eax, %edx
31768+ movl 4(%eax), %edx
31769+ movw (%eax), %dx
31770 // Now the address of the handler's in %edx
31771 // We call it now: its "iret" drops us home.
31772- jmp *%edx
31773+ ljmp $__KERNEL_CS, $1f
31774+1: jmp *%edx
31775
31776 // Every interrupt can come to us here
31777 // But we must truly tell each apart.
31778diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c
31779--- linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
31780+++ linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
31781@@ -15,7 +15,7 @@
31782
31783 #define MAX_PMU_LEVEL 0xFF
31784
31785-static struct backlight_ops pmu_backlight_data;
31786+static const struct backlight_ops pmu_backlight_data;
31787 static DEFINE_SPINLOCK(pmu_backlight_lock);
31788 static int sleeping, uses_pmu_bl;
31789 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
31790@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
31791 return bd->props.brightness;
31792 }
31793
31794-static struct backlight_ops pmu_backlight_data = {
31795+static const struct backlight_ops pmu_backlight_data = {
31796 .get_brightness = pmu_backlight_get_brightness,
31797 .update_status = pmu_backlight_update_status,
31798
31799diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu.c linux-2.6.32.45/drivers/macintosh/via-pmu.c
31800--- linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
31801+++ linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
31802@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
31803 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
31804 }
31805
31806-static struct platform_suspend_ops pmu_pm_ops = {
31807+static const struct platform_suspend_ops pmu_pm_ops = {
31808 .enter = powerbook_sleep,
31809 .valid = pmu_sleep_valid,
31810 };
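
The via-pmu hunks above (like the dm-sysfs.c and md.c ones further down) add const to callback tables such as backlight_ops, platform_suspend_ops and sysfs_ops, so structures that are never modified after initialisation can be placed in read-only memory. A standalone sketch of the pattern with made-up names (demo_ops, demo_backlight_ops):

struct demo_ops {
    int  (*get_brightness)(void);
    void (*update_status)(int level);
};

static int  demo_get(void)         { return 0; }
static void demo_update(int level) { (void)level; }

static const struct demo_ops demo_backlight_ops = {
    .get_brightness = demo_get,
    .update_status  = demo_update,
};
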
31811diff -urNp linux-2.6.32.45/drivers/md/dm.c linux-2.6.32.45/drivers/md/dm.c
31812--- linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:35:29.000000000 -0400
31813+++ linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:33:59.000000000 -0400
31814@@ -165,9 +165,9 @@ struct mapped_device {
31815 /*
31816 * Event handling.
31817 */
31818- atomic_t event_nr;
31819+ atomic_unchecked_t event_nr;
31820 wait_queue_head_t eventq;
31821- atomic_t uevent_seq;
31822+ atomic_unchecked_t uevent_seq;
31823 struct list_head uevent_list;
31824 spinlock_t uevent_lock; /* Protect access to uevent_list */
31825
31826@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(i
31827 rwlock_init(&md->map_lock);
31828 atomic_set(&md->holders, 1);
31829 atomic_set(&md->open_count, 0);
31830- atomic_set(&md->event_nr, 0);
31831- atomic_set(&md->uevent_seq, 0);
31832+ atomic_set_unchecked(&md->event_nr, 0);
31833+ atomic_set_unchecked(&md->uevent_seq, 0);
31834 INIT_LIST_HEAD(&md->uevent_list);
31835 spin_lock_init(&md->uevent_lock);
31836
31837@@ -1927,7 +1927,7 @@ static void event_callback(void *context
31838
31839 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31840
31841- atomic_inc(&md->event_nr);
31842+ atomic_inc_unchecked(&md->event_nr);
31843 wake_up(&md->eventq);
31844 }
31845
31846@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_dev
31847
31848 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31849 {
31850- return atomic_add_return(1, &md->uevent_seq);
31851+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31852 }
31853
31854 uint32_t dm_get_event_nr(struct mapped_device *md)
31855 {
31856- return atomic_read(&md->event_nr);
31857+ return atomic_read_unchecked(&md->event_nr);
31858 }
31859
31860 int dm_wait_event(struct mapped_device *md, int event_nr)
31861 {
31862 return wait_event_interruptible(md->eventq,
31863- (event_nr != atomic_read(&md->event_nr)));
31864+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31865 }
31866
31867 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31868diff -urNp linux-2.6.32.45/drivers/md/dm-ioctl.c linux-2.6.32.45/drivers/md/dm-ioctl.c
31869--- linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
31870+++ linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
31871@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
31872 cmd == DM_LIST_VERSIONS_CMD)
31873 return 0;
31874
31875- if ((cmd == DM_DEV_CREATE_CMD)) {
31876+ if (cmd == DM_DEV_CREATE_CMD) {
31877 if (!*param->name) {
31878 DMWARN("name not supplied when creating device");
31879 return -EINVAL;
31880diff -urNp linux-2.6.32.45/drivers/md/dm-raid1.c linux-2.6.32.45/drivers/md/dm-raid1.c
31881--- linux-2.6.32.45/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
31882+++ linux-2.6.32.45/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
31883@@ -41,7 +41,7 @@ enum dm_raid1_error {
31884
31885 struct mirror {
31886 struct mirror_set *ms;
31887- atomic_t error_count;
31888+ atomic_unchecked_t error_count;
31889 unsigned long error_type;
31890 struct dm_dev *dev;
31891 sector_t offset;
31892@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
31893 * simple way to tell if a device has encountered
31894 * errors.
31895 */
31896- atomic_inc(&m->error_count);
31897+ atomic_inc_unchecked(&m->error_count);
31898
31899 if (test_and_set_bit(error_type, &m->error_type))
31900 return;
31901@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
31902 }
31903
31904 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
31905- if (!atomic_read(&new->error_count)) {
31906+ if (!atomic_read_unchecked(&new->error_count)) {
31907 set_default_mirror(new);
31908 break;
31909 }
31910@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
31911 struct mirror *m = get_default_mirror(ms);
31912
31913 do {
31914- if (likely(!atomic_read(&m->error_count)))
31915+ if (likely(!atomic_read_unchecked(&m->error_count)))
31916 return m;
31917
31918 if (m-- == ms->mirror)
31919@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
31920 {
31921 struct mirror *default_mirror = get_default_mirror(m->ms);
31922
31923- return !atomic_read(&default_mirror->error_count);
31924+ return !atomic_read_unchecked(&default_mirror->error_count);
31925 }
31926
31927 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31928@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31929 */
31930 if (likely(region_in_sync(ms, region, 1)))
31931 m = choose_mirror(ms, bio->bi_sector);
31932- else if (m && atomic_read(&m->error_count))
31933+ else if (m && atomic_read_unchecked(&m->error_count))
31934 m = NULL;
31935
31936 if (likely(m))
31937@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31938 }
31939
31940 ms->mirror[mirror].ms = ms;
31941- atomic_set(&(ms->mirror[mirror].error_count), 0);
31942+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31943 ms->mirror[mirror].error_type = 0;
31944 ms->mirror[mirror].offset = offset;
31945
31946@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31947 */
31948 static char device_status_char(struct mirror *m)
31949 {
31950- if (!atomic_read(&(m->error_count)))
31951+ if (!atomic_read_unchecked(&(m->error_count)))
31952 return 'A';
31953
31954 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31955diff -urNp linux-2.6.32.45/drivers/md/dm-stripe.c linux-2.6.32.45/drivers/md/dm-stripe.c
31956--- linux-2.6.32.45/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31957+++ linux-2.6.32.45/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31958@@ -20,7 +20,7 @@ struct stripe {
31959 struct dm_dev *dev;
31960 sector_t physical_start;
31961
31962- atomic_t error_count;
31963+ atomic_unchecked_t error_count;
31964 };
31965
31966 struct stripe_c {
31967@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31968 kfree(sc);
31969 return r;
31970 }
31971- atomic_set(&(sc->stripe[i].error_count), 0);
31972+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31973 }
31974
31975 ti->private = sc;
31976@@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31977 DMEMIT("%d ", sc->stripes);
31978 for (i = 0; i < sc->stripes; i++) {
31979 DMEMIT("%s ", sc->stripe[i].dev->name);
31980- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31981+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31982 'D' : 'A';
31983 }
31984 buffer[i] = '\0';
31985@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31986 */
31987 for (i = 0; i < sc->stripes; i++)
31988 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31989- atomic_inc(&(sc->stripe[i].error_count));
31990- if (atomic_read(&(sc->stripe[i].error_count)) <
31991+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31992+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31993 DM_IO_ERROR_THRESHOLD)
31994 queue_work(kstriped, &sc->kstriped_ws);
31995 }
31996diff -urNp linux-2.6.32.45/drivers/md/dm-sysfs.c linux-2.6.32.45/drivers/md/dm-sysfs.c
31997--- linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
31998+++ linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
31999@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
32000 NULL,
32001 };
32002
32003-static struct sysfs_ops dm_sysfs_ops = {
32004+static const struct sysfs_ops dm_sysfs_ops = {
32005 .show = dm_attr_show,
32006 };
32007
32008diff -urNp linux-2.6.32.45/drivers/md/dm-table.c linux-2.6.32.45/drivers/md/dm-table.c
32009--- linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
32010+++ linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
32011@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
32012 if (!dev_size)
32013 return 0;
32014
32015- if ((start >= dev_size) || (start + len > dev_size)) {
32016+ if ((start >= dev_size) || (len > dev_size - start)) {
32017 DMWARN("%s: %s too small for target: "
32018 "start=%llu, len=%llu, dev_size=%llu",
32019 dm_device_name(ti->table->md), bdevname(bdev, b),
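
The dm-table.c hunk above rewrites the range check so it cannot be defeated by integer wrap-around: once start < dev_size is known, comparing len against dev_size - start is safe, whereas start + len could overflow and appear small. The same check in standalone form:

#include <stdbool.h>
#include <stdint.h>

static bool device_area_is_invalid(uint64_t start, uint64_t len, uint64_t dev_size)
{
    /* "len > dev_size - start" cannot wrap once start < dev_size holds. */
    return start >= dev_size || len > dev_size - start;
}
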
32020diff -urNp linux-2.6.32.45/drivers/md/md.c linux-2.6.32.45/drivers/md/md.c
32021--- linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
32022+++ linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
32023@@ -153,10 +153,10 @@ static int start_readonly;
32024 * start build, activate spare
32025 */
32026 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
32027-static atomic_t md_event_count;
32028+static atomic_unchecked_t md_event_count;
32029 void md_new_event(mddev_t *mddev)
32030 {
32031- atomic_inc(&md_event_count);
32032+ atomic_inc_unchecked(&md_event_count);
32033 wake_up(&md_event_waiters);
32034 }
32035 EXPORT_SYMBOL_GPL(md_new_event);
32036@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
32037 */
32038 static void md_new_event_inintr(mddev_t *mddev)
32039 {
32040- atomic_inc(&md_event_count);
32041+ atomic_inc_unchecked(&md_event_count);
32042 wake_up(&md_event_waiters);
32043 }
32044
32045@@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
32046
32047 rdev->preferred_minor = 0xffff;
32048 rdev->data_offset = le64_to_cpu(sb->data_offset);
32049- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32050+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32051
32052 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
32053 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
32054@@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
32055 else
32056 sb->resync_offset = cpu_to_le64(0);
32057
32058- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
32059+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
32060
32061 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
32062 sb->size = cpu_to_le64(mddev->dev_sectors);
32063@@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
32064 static ssize_t
32065 errors_show(mdk_rdev_t *rdev, char *page)
32066 {
32067- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
32068+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
32069 }
32070
32071 static ssize_t
32072@@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
32073 char *e;
32074 unsigned long n = simple_strtoul(buf, &e, 10);
32075 if (*buf && (*e == 0 || *e == '\n')) {
32076- atomic_set(&rdev->corrected_errors, n);
32077+ atomic_set_unchecked(&rdev->corrected_errors, n);
32078 return len;
32079 }
32080 return -EINVAL;
32081@@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
32082 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
32083 kfree(rdev);
32084 }
32085-static struct sysfs_ops rdev_sysfs_ops = {
32086+static const struct sysfs_ops rdev_sysfs_ops = {
32087 .show = rdev_attr_show,
32088 .store = rdev_attr_store,
32089 };
32090@@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
32091 rdev->data_offset = 0;
32092 rdev->sb_events = 0;
32093 atomic_set(&rdev->nr_pending, 0);
32094- atomic_set(&rdev->read_errors, 0);
32095- atomic_set(&rdev->corrected_errors, 0);
32096+ atomic_set_unchecked(&rdev->read_errors, 0);
32097+ atomic_set_unchecked(&rdev->corrected_errors, 0);
32098
32099 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
32100 if (!size) {
32101@@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
32102 kfree(mddev);
32103 }
32104
32105-static struct sysfs_ops md_sysfs_ops = {
32106+static const struct sysfs_ops md_sysfs_ops = {
32107 .show = md_attr_show,
32108 .store = md_attr_store,
32109 };
32110@@ -4474,7 +4474,8 @@ out:
32111 err = 0;
32112 blk_integrity_unregister(disk);
32113 md_new_event(mddev);
32114- sysfs_notify_dirent(mddev->sysfs_state);
32115+ if (mddev->sysfs_state)
32116+ sysfs_notify_dirent(mddev->sysfs_state);
32117 return err;
32118 }
32119
32120@@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
32121
32122 spin_unlock(&pers_lock);
32123 seq_printf(seq, "\n");
32124- mi->event = atomic_read(&md_event_count);
32125+ mi->event = atomic_read_unchecked(&md_event_count);
32126 return 0;
32127 }
32128 if (v == (void*)2) {
32129@@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
32130 chunk_kb ? "KB" : "B");
32131 if (bitmap->file) {
32132 seq_printf(seq, ", file: ");
32133- seq_path(seq, &bitmap->file->f_path, " \t\n");
32134+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32135 }
32136
32137 seq_printf(seq, "\n");
32138@@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
32139 else {
32140 struct seq_file *p = file->private_data;
32141 p->private = mi;
32142- mi->event = atomic_read(&md_event_count);
32143+ mi->event = atomic_read_unchecked(&md_event_count);
32144 }
32145 return error;
32146 }
32147@@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
32148 /* always allow read */
32149 mask = POLLIN | POLLRDNORM;
32150
32151- if (mi->event != atomic_read(&md_event_count))
32152+ if (mi->event != atomic_read_unchecked(&md_event_count))
32153 mask |= POLLERR | POLLPRI;
32154 return mask;
32155 }
32156@@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
32157 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
32158 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
32159 (int)part_stat_read(&disk->part0, sectors[1]) -
32160- atomic_read(&disk->sync_io);
32161+ atomic_read_unchecked(&disk->sync_io);
32162 /* sync IO will cause sync_io to increase before the disk_stats
32163 * as sync_io is counted when a request starts, and
32164 * disk_stats is counted when it completes.
32165diff -urNp linux-2.6.32.45/drivers/md/md.h linux-2.6.32.45/drivers/md/md.h
32166--- linux-2.6.32.45/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
32167+++ linux-2.6.32.45/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
32168@@ -94,10 +94,10 @@ struct mdk_rdev_s
32169 * only maintained for arrays that
32170 * support hot removal
32171 */
32172- atomic_t read_errors; /* number of consecutive read errors that
32173+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
32174 * we have tried to ignore.
32175 */
32176- atomic_t corrected_errors; /* number of corrected read errors,
32177+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
32178 * for reporting to userspace and storing
32179 * in superblock.
32180 */
32181@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
32182
32183 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
32184 {
32185- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32186+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32187 }
32188
32189 struct mdk_personality
32190diff -urNp linux-2.6.32.45/drivers/md/raid10.c linux-2.6.32.45/drivers/md/raid10.c
32191--- linux-2.6.32.45/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
32192+++ linux-2.6.32.45/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
32193@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
32194 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
32195 set_bit(R10BIO_Uptodate, &r10_bio->state);
32196 else {
32197- atomic_add(r10_bio->sectors,
32198+ atomic_add_unchecked(r10_bio->sectors,
32199 &conf->mirrors[d].rdev->corrected_errors);
32200 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
32201 md_error(r10_bio->mddev,
32202@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
32203 test_bit(In_sync, &rdev->flags)) {
32204 atomic_inc(&rdev->nr_pending);
32205 rcu_read_unlock();
32206- atomic_add(s, &rdev->corrected_errors);
32207+ atomic_add_unchecked(s, &rdev->corrected_errors);
32208 if (sync_page_io(rdev->bdev,
32209 r10_bio->devs[sl].addr +
32210 sect + rdev->data_offset,
32211diff -urNp linux-2.6.32.45/drivers/md/raid1.c linux-2.6.32.45/drivers/md/raid1.c
32212--- linux-2.6.32.45/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
32213+++ linux-2.6.32.45/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
32214@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
32215 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
32216 continue;
32217 rdev = conf->mirrors[d].rdev;
32218- atomic_add(s, &rdev->corrected_errors);
32219+ atomic_add_unchecked(s, &rdev->corrected_errors);
32220 if (sync_page_io(rdev->bdev,
32221 sect + rdev->data_offset,
32222 s<<9,
32223@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
32224 /* Well, this device is dead */
32225 md_error(mddev, rdev);
32226 else {
32227- atomic_add(s, &rdev->corrected_errors);
32228+ atomic_add_unchecked(s, &rdev->corrected_errors);
32229 printk(KERN_INFO
32230 "raid1:%s: read error corrected "
32231 "(%d sectors at %llu on %s)\n",
32232diff -urNp linux-2.6.32.45/drivers/md/raid5.c linux-2.6.32.45/drivers/md/raid5.c
32233--- linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
32234+++ linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
32235@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
32236 bi->bi_next = NULL;
32237 if ((rw & WRITE) &&
32238 test_bit(R5_ReWrite, &sh->dev[i].flags))
32239- atomic_add(STRIPE_SECTORS,
32240+ atomic_add_unchecked(STRIPE_SECTORS,
32241 &rdev->corrected_errors);
32242 generic_make_request(bi);
32243 } else {
32244@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
32245 clear_bit(R5_ReadError, &sh->dev[i].flags);
32246 clear_bit(R5_ReWrite, &sh->dev[i].flags);
32247 }
32248- if (atomic_read(&conf->disks[i].rdev->read_errors))
32249- atomic_set(&conf->disks[i].rdev->read_errors, 0);
32250+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
32251+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
32252 } else {
32253 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
32254 int retry = 0;
32255 rdev = conf->disks[i].rdev;
32256
32257 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32258- atomic_inc(&rdev->read_errors);
32259+ atomic_inc_unchecked(&rdev->read_errors);
32260 if (conf->mddev->degraded >= conf->max_degraded)
32261 printk_rl(KERN_WARNING
32262 "raid5:%s: read error not correctable "
32263@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
32264 (unsigned long long)(sh->sector
32265 + rdev->data_offset),
32266 bdn);
32267- else if (atomic_read(&rdev->read_errors)
32268+ else if (atomic_read_unchecked(&rdev->read_errors)
32269 > conf->max_nr_stripes)
32270 printk(KERN_WARNING
32271 "raid5:%s: Too many read errors, failing device %s.\n",
32272@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
32273 sector_t r_sector;
32274 struct stripe_head sh2;
32275
32276+ pax_track_stack();
32277
32278 chunk_offset = sector_div(new_sector, sectors_per_chunk);
32279 stripe = new_sector;
32280diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_hlp.c linux-2.6.32.45/drivers/media/common/saa7146_hlp.c
32281--- linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
32282+++ linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
32283@@ -353,6 +353,8 @@ static void calculate_clipping_registers
32284
32285 int x[32], y[32], w[32], h[32];
32286
32287+ pax_track_stack();
32288+
32289 /* clear out memory */
32290 memset(&line_list[0], 0x00, sizeof(u32)*32);
32291 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
32292diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
32293--- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
32294+++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
32295@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
32296 u8 buf[HOST_LINK_BUF_SIZE];
32297 int i;
32298
32299+ pax_track_stack();
32300+
32301 dprintk("%s\n", __func__);
32302
32303 /* check if we have space for a link buf in the rx_buffer */
32304@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
32305 unsigned long timeout;
32306 int written;
32307
32308+ pax_track_stack();
32309+
32310 dprintk("%s\n", __func__);
32311
32312 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
32313diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h
32314--- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-03-27 14:31:47.000000000 -0400
32315+++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:33:55.000000000 -0400
32316@@ -71,7 +71,7 @@ struct dvb_demux_feed {
32317 union {
32318 dmx_ts_cb ts;
32319 dmx_section_cb sec;
32320- } cb;
32321+ } __no_const cb;
32322
32323 struct dvb_demux *demux;
32324 void *priv;
32325diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c
32326--- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
32327+++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-23 21:22:32.000000000 -0400
32328@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapt
32329 const struct dvb_device *template, void *priv, int type)
32330 {
32331 struct dvb_device *dvbdev;
32332- struct file_operations *dvbdevfops;
32333+ file_operations_no_const *dvbdevfops;
32334 struct device *clsdev;
32335 int minor;
32336 int id;
32337diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c
32338--- linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-03-27 14:31:47.000000000 -0400
32339+++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:33:55.000000000 -0400
32340@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_di
32341 struct dib0700_adapter_state {
32342 int (*set_param_save) (struct dvb_frontend *,
32343 struct dvb_frontend_parameters *);
32344-};
32345+} __no_const;
32346
32347 static int dib7070_set_param_override(struct dvb_frontend *fe,
32348 struct dvb_frontend_parameters *fep)
32349diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c
32350--- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
32351+++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
32352@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
32353
32354 u8 buf[260];
32355
32356+ pax_track_stack();
32357+
32358 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
32359 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
32360
32361diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c
32362--- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-05-10 22:12:01.000000000 -0400
32363+++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-08-05 20:33:55.000000000 -0400
32364@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "
32365
32366 struct dib0700_adapter_state {
32367 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
32368-};
32369+} __no_const;
32370
32371 /* Hauppauge Nova-T 500 (aka Bristol)
32372 * has a LNA on GPIO0 which is enabled by setting 1 */
32373diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h
32374--- linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-03-27 14:31:47.000000000 -0400
32375+++ linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:33:55.000000000 -0400
32376@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
32377 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
32378 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
32379 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
32380-};
32381+} __no_const;
32382
32383 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
32384 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
32385diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c
32386--- linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
32387+++ linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
32388@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
32389 u8 tudata[585];
32390 int i;
32391
32392+ pax_track_stack();
32393+
32394 dprintk("Firmware is %zd bytes\n",fw->size);
32395
32396 /* Get eprom data */
32397diff -urNp linux-2.6.32.45/drivers/media/radio/radio-cadet.c linux-2.6.32.45/drivers/media/radio/radio-cadet.c
32398--- linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
32399+++ linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
32400@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
32401 while (i < count && dev->rdsin != dev->rdsout)
32402 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
32403
32404- if (copy_to_user(data, readbuf, i))
32405+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
32406 return -EFAULT;
32407 return i;
32408 }
32409diff -urNp linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c
32410--- linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
32411+++ linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
32412@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
32413
32414 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
32415
32416-static atomic_t cx18_instance = ATOMIC_INIT(0);
32417+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
32418
32419 /* Parameter declarations */
32420 static int cardtype[CX18_MAX_CARDS];
32421@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
32422 struct i2c_client c;
32423 u8 eedata[256];
32424
32425+ pax_track_stack();
32426+
32427 memset(&c, 0, sizeof(c));
32428 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
32429 c.adapter = &cx->i2c_adap[0];
32430@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
32431 struct cx18 *cx;
32432
32433 /* FIXME - module parameter arrays constrain max instances */
32434- i = atomic_inc_return(&cx18_instance) - 1;
32435+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
32436 if (i >= CX18_MAX_CARDS) {
32437 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
32438 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
32439diff -urNp linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c
32440--- linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
32441+++ linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
32442@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
32443 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
32444
32445 /* ivtv instance counter */
32446-static atomic_t ivtv_instance = ATOMIC_INIT(0);
32447+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
32448
32449 /* Parameter declarations */
32450 static int cardtype[IVTV_MAX_CARDS];
32451diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.c linux-2.6.32.45/drivers/media/video/omap24xxcam.c
32452--- linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
32453+++ linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
32454@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
32455 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
32456
32457 do_gettimeofday(&vb->ts);
32458- vb->field_count = atomic_add_return(2, &fh->field_count);
32459+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
32460 if (csr & csr_error) {
32461 vb->state = VIDEOBUF_ERROR;
32462 if (!atomic_read(&fh->cam->in_reset)) {
32463diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.h linux-2.6.32.45/drivers/media/video/omap24xxcam.h
32464--- linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
32465+++ linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
32466@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
32467 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
32468 struct videobuf_queue vbq;
32469 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
32470- atomic_t field_count; /* field counter for videobuf_buffer */
32471+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
32472 /* accessing cam here doesn't need serialisation: it's constant */
32473 struct omap24xxcam_device *cam;
32474 };
32475diff -urNp linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32476--- linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
32477+++ linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
32478@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
32479 u8 *eeprom;
32480 struct tveeprom tvdata;
32481
32482+ pax_track_stack();
32483+
32484 memset(&tvdata,0,sizeof(tvdata));
32485
32486 eeprom = pvr2_eeprom_fetch(hdw);
32487diff -urNp linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
32488--- linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-03-27 14:31:47.000000000 -0400
32489+++ linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-08-23 21:22:38.000000000 -0400
32490@@ -195,7 +195,7 @@ struct pvr2_hdw {
32491
32492 /* I2C stuff */
32493 struct i2c_adapter i2c_adap;
32494- struct i2c_algorithm i2c_algo;
32495+ i2c_algorithm_no_const i2c_algo;
32496 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
32497 int i2c_cx25840_hack_state;
32498 int i2c_linked;
32499diff -urNp linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c
32500--- linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
32501+++ linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
32502@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
32503 unsigned char localPAT[256];
32504 unsigned char localPMT[256];
32505
32506+ pax_track_stack();
32507+
32508 /* Set video format - must be done first as it resets other settings */
32509 set_reg8(client, 0x41, h->video_format);
32510
32511diff -urNp linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c
32512--- linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
32513+++ linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
32514@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
32515 wait_queue_head_t *q = 0;
32516 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32517
32518+ pax_track_stack();
32519+
32520 /* While any outstand message on the bus exists... */
32521 do {
32522
32523@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
32524 u8 tmp[512];
32525 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32526
32527+ pax_track_stack();
32528+
32529 while (loop) {
32530
32531 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
32532diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c
32533--- linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-03-27 14:31:47.000000000 -0400
32534+++ linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-08-05 20:33:55.000000000 -0400
32535@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] =
32536 static int __init ibmcam_init(void)
32537 {
32538 struct usbvideo_cb cbTbl;
32539- memset(&cbTbl, 0, sizeof(cbTbl));
32540- cbTbl.probe = ibmcam_probe;
32541- cbTbl.setupOnOpen = ibmcam_setup_on_open;
32542- cbTbl.videoStart = ibmcam_video_start;
32543- cbTbl.videoStop = ibmcam_video_stop;
32544- cbTbl.processData = ibmcam_ProcessIsocData;
32545- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32546- cbTbl.adjustPicture = ibmcam_adjust_picture;
32547- cbTbl.getFPS = ibmcam_calculate_fps;
32548+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
32549+ *(void **)&cbTbl.probe = ibmcam_probe;
32550+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
32551+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
32552+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
32553+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
32554+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32555+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
32556+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
32557 return usbvideo_register(
32558 &cams,
32559 MAX_IBMCAM,
32560diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c
32561--- linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
32562+++ linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-08-05 20:33:55.000000000 -0400
32563@@ -225,7 +225,7 @@ static void konicawc_register_input(stru
32564 int error;
32565
32566 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32567- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32568+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32569
32570 cam->input = input_dev = input_allocate_device();
32571 if (!input_dev) {
32572@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
32573 struct usbvideo_cb cbTbl;
32574 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
32575 DRIVER_DESC "\n");
32576- memset(&cbTbl, 0, sizeof(cbTbl));
32577- cbTbl.probe = konicawc_probe;
32578- cbTbl.setupOnOpen = konicawc_setup_on_open;
32579- cbTbl.processData = konicawc_process_isoc;
32580- cbTbl.getFPS = konicawc_calculate_fps;
32581- cbTbl.setVideoMode = konicawc_set_video_mode;
32582- cbTbl.startDataPump = konicawc_start_data;
32583- cbTbl.stopDataPump = konicawc_stop_data;
32584- cbTbl.adjustPicture = konicawc_adjust_picture;
32585- cbTbl.userFree = konicawc_free_uvd;
32586+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
32587+ *(void **)&cbTbl.probe = konicawc_probe;
32588+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
32589+ *(void **)&cbTbl.processData = konicawc_process_isoc;
32590+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
32591+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
32592+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
32593+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
32594+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
32595+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
32596 return usbvideo_register(
32597 &cams,
32598 MAX_CAMERAS,
32599diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c
32600--- linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
32601+++ linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
32602@@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
32603 int error;
32604
32605 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32606- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32607+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32608
32609 cam->input = input_dev = input_allocate_device();
32610 if (!input_dev) {
32611diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c
32612--- linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-03-27 14:31:47.000000000 -0400
32613+++ linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-08-05 20:33:55.000000000 -0400
32614@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
32615 {
32616 struct usbvideo_cb cbTbl;
32617 memset(&cbTbl, 0, sizeof(cbTbl));
32618- cbTbl.probe = ultracam_probe;
32619- cbTbl.setupOnOpen = ultracam_setup_on_open;
32620- cbTbl.videoStart = ultracam_video_start;
32621- cbTbl.videoStop = ultracam_video_stop;
32622- cbTbl.processData = ultracam_ProcessIsocData;
32623- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32624- cbTbl.adjustPicture = ultracam_adjust_picture;
32625- cbTbl.getFPS = ultracam_calculate_fps;
32626+ *(void **)&cbTbl.probe = ultracam_probe;
32627+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
32628+ *(void **)&cbTbl.videoStart = ultracam_video_start;
32629+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
32630+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
32631+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32632+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
32633+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
32634 return usbvideo_register(
32635 &cams,
32636 MAX_CAMERAS,
32637diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c
32638--- linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-03-27 14:31:47.000000000 -0400
32639+++ linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-08-05 20:33:55.000000000 -0400
32640@@ -697,15 +697,15 @@ int usbvideo_register(
32641 __func__, cams, base_size, num_cams);
32642
32643 /* Copy callbacks, apply defaults for those that are not set */
32644- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
32645+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
32646 if (cams->cb.getFrame == NULL)
32647- cams->cb.getFrame = usbvideo_GetFrame;
32648+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
32649 if (cams->cb.disconnect == NULL)
32650- cams->cb.disconnect = usbvideo_Disconnect;
32651+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
32652 if (cams->cb.startDataPump == NULL)
32653- cams->cb.startDataPump = usbvideo_StartDataPump;
32654+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
32655 if (cams->cb.stopDataPump == NULL)
32656- cams->cb.stopDataPump = usbvideo_StopDataPump;
32657+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
32658
32659 cams->num_cameras = num_cams;
32660 cams->cam = (struct uvd *) &cams[1];
32661diff -urNp linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c
32662--- linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
32663+++ linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
32664@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
32665 unsigned char rv, gv, bv;
32666 static unsigned char *Y, *U, *V;
32667
32668+ pax_track_stack();
32669+
32670 frame = usbvision->curFrame;
32671 imageSize = frame->frmwidth * frame->frmheight;
32672 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
32673diff -urNp linux-2.6.32.45/drivers/media/video/v4l2-device.c linux-2.6.32.45/drivers/media/video/v4l2-device.c
32674--- linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
32675+++ linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
32676@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
32677 EXPORT_SYMBOL_GPL(v4l2_device_register);
32678
32679 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
32680- atomic_t *instance)
32681+ atomic_unchecked_t *instance)
32682 {
32683- int num = atomic_inc_return(instance) - 1;
32684+ int num = atomic_inc_return_unchecked(instance) - 1;
32685 int len = strlen(basename);
32686
32687 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
32688diff -urNp linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c
32689--- linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
32690+++ linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
32691@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
32692 {
32693 struct videobuf_queue q;
32694
32695+ pax_track_stack();
32696+
32697 /* Required to make generic handler to call __videobuf_alloc */
32698 q.int_ops = &sg_ops;
32699
32700diff -urNp linux-2.6.32.45/drivers/message/fusion/mptbase.c linux-2.6.32.45/drivers/message/fusion/mptbase.c
32701--- linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
32702+++ linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
32703@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
32704 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
32705 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
32706
32707+#ifdef CONFIG_GRKERNSEC_HIDESYM
32708+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32709+ NULL, NULL);
32710+#else
32711 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32712 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
32713+#endif
32714+
32715 /*
32716 * Rounding UP to nearest 4-kB boundary here...
32717 */
32718diff -urNp linux-2.6.32.45/drivers/message/fusion/mptsas.c linux-2.6.32.45/drivers/message/fusion/mptsas.c
32719--- linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
32720+++ linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
32721@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
32722 return 0;
32723 }
32724
32725+static inline void
32726+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32727+{
32728+ if (phy_info->port_details) {
32729+ phy_info->port_details->rphy = rphy;
32730+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32731+ ioc->name, rphy));
32732+ }
32733+
32734+ if (rphy) {
32735+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32736+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32737+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32738+ ioc->name, rphy, rphy->dev.release));
32739+ }
32740+}
32741+
32742 /* no mutex */
32743 static void
32744 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
32745@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
32746 return NULL;
32747 }
32748
32749-static inline void
32750-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32751-{
32752- if (phy_info->port_details) {
32753- phy_info->port_details->rphy = rphy;
32754- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32755- ioc->name, rphy));
32756- }
32757-
32758- if (rphy) {
32759- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32760- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32761- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32762- ioc->name, rphy, rphy->dev.release));
32763- }
32764-}
32765-
32766 static inline struct sas_port *
32767 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32768 {
32769diff -urNp linux-2.6.32.45/drivers/message/fusion/mptscsih.c linux-2.6.32.45/drivers/message/fusion/mptscsih.c
32770--- linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
32771+++ linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
32772@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32773
32774 h = shost_priv(SChost);
32775
32776- if (h) {
32777- if (h->info_kbuf == NULL)
32778- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32779- return h->info_kbuf;
32780- h->info_kbuf[0] = '\0';
32781+ if (!h)
32782+ return NULL;
32783
32784- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32785- h->info_kbuf[size-1] = '\0';
32786- }
32787+ if (h->info_kbuf == NULL)
32788+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32789+ return h->info_kbuf;
32790+ h->info_kbuf[0] = '\0';
32791+
32792+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32793+ h->info_kbuf[size-1] = '\0';
32794
32795 return h->info_kbuf;
32796 }
32797diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_config.c linux-2.6.32.45/drivers/message/i2o/i2o_config.c
32798--- linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
32799+++ linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
32800@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
32801 struct i2o_message *msg;
32802 unsigned int iop;
32803
32804+ pax_track_stack();
32805+
32806 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32807 return -EFAULT;
32808
32809diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_proc.c linux-2.6.32.45/drivers/message/i2o/i2o_proc.c
32810--- linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
32811+++ linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
32812@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
32813 "Array Controller Device"
32814 };
32815
32816-static char *chtostr(u8 * chars, int n)
32817-{
32818- char tmp[256];
32819- tmp[0] = 0;
32820- return strncat(tmp, (char *)chars, n);
32821-}
32822-
32823 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32824 char *group)
32825 {
32826@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
32827
32828 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32829 seq_printf(seq, "%-#8x", ddm_table.module_id);
32830- seq_printf(seq, "%-29s",
32831- chtostr(ddm_table.module_name_version, 28));
32832+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32833 seq_printf(seq, "%9d ", ddm_table.data_size);
32834 seq_printf(seq, "%8d", ddm_table.code_size);
32835
32836@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
32837
32838 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32839 seq_printf(seq, "%-#8x", dst->module_id);
32840- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32841- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32842+ seq_printf(seq, "%-.28s", dst->module_name_version);
32843+ seq_printf(seq, "%-.8s", dst->date);
32844 seq_printf(seq, "%8d ", dst->module_size);
32845 seq_printf(seq, "%8d ", dst->mpb_size);
32846 seq_printf(seq, "0x%04x", dst->module_flags);
32847@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
32848 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32849 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32850 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32851- seq_printf(seq, "Vendor info : %s\n",
32852- chtostr((u8 *) (work32 + 2), 16));
32853- seq_printf(seq, "Product info : %s\n",
32854- chtostr((u8 *) (work32 + 6), 16));
32855- seq_printf(seq, "Description : %s\n",
32856- chtostr((u8 *) (work32 + 10), 16));
32857- seq_printf(seq, "Product rev. : %s\n",
32858- chtostr((u8 *) (work32 + 14), 8));
32859+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32860+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32861+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32862+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32863
32864 seq_printf(seq, "Serial number : ");
32865 print_serial_number(seq, (u8 *) (work32 + 16),
32866@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
32867 }
32868
32869 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32870- seq_printf(seq, "Module name : %s\n",
32871- chtostr(result.module_name, 24));
32872- seq_printf(seq, "Module revision : %s\n",
32873- chtostr(result.module_rev, 8));
32874+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
32875+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32876
32877 seq_printf(seq, "Serial number : ");
32878 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32879@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
32880 return 0;
32881 }
32882
32883- seq_printf(seq, "Device name : %s\n",
32884- chtostr(result.device_name, 64));
32885- seq_printf(seq, "Service name : %s\n",
32886- chtostr(result.service_name, 64));
32887- seq_printf(seq, "Physical name : %s\n",
32888- chtostr(result.physical_location, 64));
32889- seq_printf(seq, "Instance number : %s\n",
32890- chtostr(result.instance_number, 4));
32891+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
32892+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
32893+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32894+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32895
32896 return 0;
32897 }
32898diff -urNp linux-2.6.32.45/drivers/message/i2o/iop.c linux-2.6.32.45/drivers/message/i2o/iop.c
32899--- linux-2.6.32.45/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
32900+++ linux-2.6.32.45/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
32901@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
32902
32903 spin_lock_irqsave(&c->context_list_lock, flags);
32904
32905- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32906- atomic_inc(&c->context_list_counter);
32907+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32908+ atomic_inc_unchecked(&c->context_list_counter);
32909
32910- entry->context = atomic_read(&c->context_list_counter);
32911+ entry->context = atomic_read_unchecked(&c->context_list_counter);
32912
32913 list_add(&entry->list, &c->context_list);
32914
32915@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
32916
32917 #if BITS_PER_LONG == 64
32918 spin_lock_init(&c->context_list_lock);
32919- atomic_set(&c->context_list_counter, 0);
32920+ atomic_set_unchecked(&c->context_list_counter, 0);
32921 INIT_LIST_HEAD(&c->context_list);
32922 #endif
32923
32924diff -urNp linux-2.6.32.45/drivers/mfd/wm8350-i2c.c linux-2.6.32.45/drivers/mfd/wm8350-i2c.c
32925--- linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
32926+++ linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
32927@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
32928 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32929 int ret;
32930
32931+ pax_track_stack();
32932+
32933 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32934 return -EINVAL;
32935
32936diff -urNp linux-2.6.32.45/drivers/misc/kgdbts.c linux-2.6.32.45/drivers/misc/kgdbts.c
32937--- linux-2.6.32.45/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
32938+++ linux-2.6.32.45/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
32939@@ -118,7 +118,7 @@
32940 } while (0)
32941 #define MAX_CONFIG_LEN 40
32942
32943-static struct kgdb_io kgdbts_io_ops;
32944+static const struct kgdb_io kgdbts_io_ops;
32945 static char get_buf[BUFMAX];
32946 static int get_buf_cnt;
32947 static char put_buf[BUFMAX];
32948@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
32949 module_put(THIS_MODULE);
32950 }
32951
32952-static struct kgdb_io kgdbts_io_ops = {
32953+static const struct kgdb_io kgdbts_io_ops = {
32954 .name = "kgdbts",
32955 .read_char = kgdbts_get_char,
32956 .write_char = kgdbts_put_char,
32957diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c
32958--- linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
32959+++ linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
32960@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
32961
32962 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32963 {
32964- atomic_long_inc(&mcs_op_statistics[op].count);
32965- atomic_long_add(clks, &mcs_op_statistics[op].total);
32966+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32967+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
32968 if (mcs_op_statistics[op].max < clks)
32969 mcs_op_statistics[op].max = clks;
32970 }
32971diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c
32972--- linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
32973+++ linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
32974@@ -32,9 +32,9 @@
32975
32976 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32977
32978-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32979+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32980 {
32981- unsigned long val = atomic_long_read(v);
32982+ unsigned long val = atomic_long_read_unchecked(v);
32983
32984 if (val)
32985 seq_printf(s, "%16lu %s\n", val, id);
32986@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
32987 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
32988
32989 for (op = 0; op < mcsop_last; op++) {
32990- count = atomic_long_read(&mcs_op_statistics[op].count);
32991- total = atomic_long_read(&mcs_op_statistics[op].total);
32992+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32993+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32994 max = mcs_op_statistics[op].max;
32995 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32996 count ? total / count : 0, max);
32997diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h
32998--- linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
32999+++ linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
33000@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
33001 * GRU statistics.
33002 */
33003 struct gru_stats_s {
33004- atomic_long_t vdata_alloc;
33005- atomic_long_t vdata_free;
33006- atomic_long_t gts_alloc;
33007- atomic_long_t gts_free;
33008- atomic_long_t vdata_double_alloc;
33009- atomic_long_t gts_double_allocate;
33010- atomic_long_t assign_context;
33011- atomic_long_t assign_context_failed;
33012- atomic_long_t free_context;
33013- atomic_long_t load_user_context;
33014- atomic_long_t load_kernel_context;
33015- atomic_long_t lock_kernel_context;
33016- atomic_long_t unlock_kernel_context;
33017- atomic_long_t steal_user_context;
33018- atomic_long_t steal_kernel_context;
33019- atomic_long_t steal_context_failed;
33020- atomic_long_t nopfn;
33021- atomic_long_t break_cow;
33022- atomic_long_t asid_new;
33023- atomic_long_t asid_next;
33024- atomic_long_t asid_wrap;
33025- atomic_long_t asid_reuse;
33026- atomic_long_t intr;
33027- atomic_long_t intr_mm_lock_failed;
33028- atomic_long_t call_os;
33029- atomic_long_t call_os_offnode_reference;
33030- atomic_long_t call_os_check_for_bug;
33031- atomic_long_t call_os_wait_queue;
33032- atomic_long_t user_flush_tlb;
33033- atomic_long_t user_unload_context;
33034- atomic_long_t user_exception;
33035- atomic_long_t set_context_option;
33036- atomic_long_t migrate_check;
33037- atomic_long_t migrated_retarget;
33038- atomic_long_t migrated_unload;
33039- atomic_long_t migrated_unload_delay;
33040- atomic_long_t migrated_nopfn_retarget;
33041- atomic_long_t migrated_nopfn_unload;
33042- atomic_long_t tlb_dropin;
33043- atomic_long_t tlb_dropin_fail_no_asid;
33044- atomic_long_t tlb_dropin_fail_upm;
33045- atomic_long_t tlb_dropin_fail_invalid;
33046- atomic_long_t tlb_dropin_fail_range_active;
33047- atomic_long_t tlb_dropin_fail_idle;
33048- atomic_long_t tlb_dropin_fail_fmm;
33049- atomic_long_t tlb_dropin_fail_no_exception;
33050- atomic_long_t tlb_dropin_fail_no_exception_war;
33051- atomic_long_t tfh_stale_on_fault;
33052- atomic_long_t mmu_invalidate_range;
33053- atomic_long_t mmu_invalidate_page;
33054- atomic_long_t mmu_clear_flush_young;
33055- atomic_long_t flush_tlb;
33056- atomic_long_t flush_tlb_gru;
33057- atomic_long_t flush_tlb_gru_tgh;
33058- atomic_long_t flush_tlb_gru_zero_asid;
33059-
33060- atomic_long_t copy_gpa;
33061-
33062- atomic_long_t mesq_receive;
33063- atomic_long_t mesq_receive_none;
33064- atomic_long_t mesq_send;
33065- atomic_long_t mesq_send_failed;
33066- atomic_long_t mesq_noop;
33067- atomic_long_t mesq_send_unexpected_error;
33068- atomic_long_t mesq_send_lb_overflow;
33069- atomic_long_t mesq_send_qlimit_reached;
33070- atomic_long_t mesq_send_amo_nacked;
33071- atomic_long_t mesq_send_put_nacked;
33072- atomic_long_t mesq_qf_not_full;
33073- atomic_long_t mesq_qf_locked;
33074- atomic_long_t mesq_qf_noop_not_full;
33075- atomic_long_t mesq_qf_switch_head_failed;
33076- atomic_long_t mesq_qf_unexpected_error;
33077- atomic_long_t mesq_noop_unexpected_error;
33078- atomic_long_t mesq_noop_lb_overflow;
33079- atomic_long_t mesq_noop_qlimit_reached;
33080- atomic_long_t mesq_noop_amo_nacked;
33081- atomic_long_t mesq_noop_put_nacked;
33082+ atomic_long_unchecked_t vdata_alloc;
33083+ atomic_long_unchecked_t vdata_free;
33084+ atomic_long_unchecked_t gts_alloc;
33085+ atomic_long_unchecked_t gts_free;
33086+ atomic_long_unchecked_t vdata_double_alloc;
33087+ atomic_long_unchecked_t gts_double_allocate;
33088+ atomic_long_unchecked_t assign_context;
33089+ atomic_long_unchecked_t assign_context_failed;
33090+ atomic_long_unchecked_t free_context;
33091+ atomic_long_unchecked_t load_user_context;
33092+ atomic_long_unchecked_t load_kernel_context;
33093+ atomic_long_unchecked_t lock_kernel_context;
33094+ atomic_long_unchecked_t unlock_kernel_context;
33095+ atomic_long_unchecked_t steal_user_context;
33096+ atomic_long_unchecked_t steal_kernel_context;
33097+ atomic_long_unchecked_t steal_context_failed;
33098+ atomic_long_unchecked_t nopfn;
33099+ atomic_long_unchecked_t break_cow;
33100+ atomic_long_unchecked_t asid_new;
33101+ atomic_long_unchecked_t asid_next;
33102+ atomic_long_unchecked_t asid_wrap;
33103+ atomic_long_unchecked_t asid_reuse;
33104+ atomic_long_unchecked_t intr;
33105+ atomic_long_unchecked_t intr_mm_lock_failed;
33106+ atomic_long_unchecked_t call_os;
33107+ atomic_long_unchecked_t call_os_offnode_reference;
33108+ atomic_long_unchecked_t call_os_check_for_bug;
33109+ atomic_long_unchecked_t call_os_wait_queue;
33110+ atomic_long_unchecked_t user_flush_tlb;
33111+ atomic_long_unchecked_t user_unload_context;
33112+ atomic_long_unchecked_t user_exception;
33113+ atomic_long_unchecked_t set_context_option;
33114+ atomic_long_unchecked_t migrate_check;
33115+ atomic_long_unchecked_t migrated_retarget;
33116+ atomic_long_unchecked_t migrated_unload;
33117+ atomic_long_unchecked_t migrated_unload_delay;
33118+ atomic_long_unchecked_t migrated_nopfn_retarget;
33119+ atomic_long_unchecked_t migrated_nopfn_unload;
33120+ atomic_long_unchecked_t tlb_dropin;
33121+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33122+ atomic_long_unchecked_t tlb_dropin_fail_upm;
33123+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
33124+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
33125+ atomic_long_unchecked_t tlb_dropin_fail_idle;
33126+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
33127+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33128+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
33129+ atomic_long_unchecked_t tfh_stale_on_fault;
33130+ atomic_long_unchecked_t mmu_invalidate_range;
33131+ atomic_long_unchecked_t mmu_invalidate_page;
33132+ atomic_long_unchecked_t mmu_clear_flush_young;
33133+ atomic_long_unchecked_t flush_tlb;
33134+ atomic_long_unchecked_t flush_tlb_gru;
33135+ atomic_long_unchecked_t flush_tlb_gru_tgh;
33136+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33137+
33138+ atomic_long_unchecked_t copy_gpa;
33139+
33140+ atomic_long_unchecked_t mesq_receive;
33141+ atomic_long_unchecked_t mesq_receive_none;
33142+ atomic_long_unchecked_t mesq_send;
33143+ atomic_long_unchecked_t mesq_send_failed;
33144+ atomic_long_unchecked_t mesq_noop;
33145+ atomic_long_unchecked_t mesq_send_unexpected_error;
33146+ atomic_long_unchecked_t mesq_send_lb_overflow;
33147+ atomic_long_unchecked_t mesq_send_qlimit_reached;
33148+ atomic_long_unchecked_t mesq_send_amo_nacked;
33149+ atomic_long_unchecked_t mesq_send_put_nacked;
33150+ atomic_long_unchecked_t mesq_qf_not_full;
33151+ atomic_long_unchecked_t mesq_qf_locked;
33152+ atomic_long_unchecked_t mesq_qf_noop_not_full;
33153+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
33154+ atomic_long_unchecked_t mesq_qf_unexpected_error;
33155+ atomic_long_unchecked_t mesq_noop_unexpected_error;
33156+ atomic_long_unchecked_t mesq_noop_lb_overflow;
33157+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
33158+ atomic_long_unchecked_t mesq_noop_amo_nacked;
33159+ atomic_long_unchecked_t mesq_noop_put_nacked;
33160
33161 };
33162
33163@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
33164 cchop_deallocate, tghop_invalidate, mcsop_last};
33165
33166 struct mcs_op_statistic {
33167- atomic_long_t count;
33168- atomic_long_t total;
33169+ atomic_long_unchecked_t count;
33170+ atomic_long_unchecked_t total;
33171 unsigned long max;
33172 };
33173
33174@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
33175
33176 #define STAT(id) do { \
33177 if (gru_options & OPT_STATS) \
33178- atomic_long_inc(&gru_stats.id); \
33179+ atomic_long_inc_unchecked(&gru_stats.id); \
33180 } while (0)
33181
33182 #ifdef CONFIG_SGI_GRU_DEBUG
33183diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h
33184--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-03-27 14:31:47.000000000 -0400
33185+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-08-05 20:33:55.000000000 -0400
33186@@ -876,7 +876,7 @@ extern struct xpc_registration xpc_regis
33187 /* found in xpc_main.c */
33188 extern struct device *xpc_part;
33189 extern struct device *xpc_chan;
33190-extern struct xpc_arch_operations xpc_arch_ops;
33191+extern const struct xpc_arch_operations xpc_arch_ops;
33192 extern int xpc_disengage_timelimit;
33193 extern int xpc_disengage_timedout;
33194 extern int xpc_activate_IRQ_rcvd;
33195diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c
33196--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-03-27 14:31:47.000000000 -0400
33197+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-08-05 20:33:55.000000000 -0400
33198@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_not
33199 .notifier_call = xpc_system_die,
33200 };
33201
33202-struct xpc_arch_operations xpc_arch_ops;
33203+const struct xpc_arch_operations xpc_arch_ops;
33204
33205 /*
33206 * Timer function to enforce the timelimit on the partition disengage.
33207diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c
33208--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-03-27 14:31:47.000000000 -0400
33209+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-08-05 20:33:55.000000000 -0400
33210@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_chan
33211 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
33212 }
33213
33214-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
33215+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
33216 .setup_partitions = xpc_setup_partitions_sn2,
33217 .teardown_partitions = xpc_teardown_partitions_sn2,
33218 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
33219@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
33220 int ret;
33221 size_t buf_size;
33222
33223- xpc_arch_ops = xpc_arch_ops_sn2;
33224+ pax_open_kernel();
33225+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
33226+ pax_close_kernel();
33227
33228 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
33229 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
33230diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c
33231--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-03-27 14:31:47.000000000 -0400
33232+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-08-05 20:33:55.000000000 -0400
33233@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_chann
33234 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
33235 }
33236
33237-static struct xpc_arch_operations xpc_arch_ops_uv = {
33238+static const struct xpc_arch_operations xpc_arch_ops_uv = {
33239 .setup_partitions = xpc_setup_partitions_uv,
33240 .teardown_partitions = xpc_teardown_partitions_uv,
33241 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
33242@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_ar
33243 int
33244 xpc_init_uv(void)
33245 {
33246- xpc_arch_ops = xpc_arch_ops_uv;
33247+ pax_open_kernel();
33248+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
33249+ pax_close_kernel();
33250
33251 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
33252 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
33253diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xp.h linux-2.6.32.45/drivers/misc/sgi-xp/xp.h
33254--- linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-03-27 14:31:47.000000000 -0400
33255+++ linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-08-05 20:33:55.000000000 -0400
33256@@ -289,7 +289,7 @@ struct xpc_interface {
33257 xpc_notify_func, void *);
33258 void (*received) (short, int, void *);
33259 enum xp_retval (*partid_to_nasids) (short, void *);
33260-};
33261+} __no_const;
33262
33263 extern struct xpc_interface xpc_interface;
33264
33265diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c
33266--- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
33267+++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
33268@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
33269 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
33270 unsigned long timeo = jiffies + HZ;
33271
33272+ pax_track_stack();
33273+
33274 /* Prevent setting state FL_SYNCING for chip in suspended state. */
33275 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
33276 goto sleep;
33277@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
33278 unsigned long initial_adr;
33279 int initial_len = len;
33280
33281+ pax_track_stack();
33282+
33283 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
33284 adr += chip->start;
33285 initial_adr = adr;
33286@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
33287 int retries = 3;
33288 int ret;
33289
33290+ pax_track_stack();
33291+
33292 adr += chip->start;
33293
33294 retry:
33295diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c
33296--- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
33297+++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
33298@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
33299 unsigned long cmd_addr;
33300 struct cfi_private *cfi = map->fldrv_priv;
33301
33302+ pax_track_stack();
33303+
33304 adr += chip->start;
33305
33306 /* Ensure cmd read/writes are aligned. */
33307@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
33308 DECLARE_WAITQUEUE(wait, current);
33309 int wbufsize, z;
33310
33311+ pax_track_stack();
33312+
33313 /* M58LW064A requires bus alignment for buffer wriets -- saw */
33314 if (adr & (map_bankwidth(map)-1))
33315 return -EINVAL;
33316@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
33317 DECLARE_WAITQUEUE(wait, current);
33318 int ret = 0;
33319
33320+ pax_track_stack();
33321+
33322 adr += chip->start;
33323
33324 /* Let's determine this according to the interleave only once */
33325@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
33326 unsigned long timeo = jiffies + HZ;
33327 DECLARE_WAITQUEUE(wait, current);
33328
33329+ pax_track_stack();
33330+
33331 adr += chip->start;
33332
33333 /* Let's determine this according to the interleave only once */
33334@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
33335 unsigned long timeo = jiffies + HZ;
33336 DECLARE_WAITQUEUE(wait, current);
33337
33338+ pax_track_stack();
33339+
33340 adr += chip->start;
33341
33342 /* Let's determine this according to the interleave only once */
33343diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2000.c linux-2.6.32.45/drivers/mtd/devices/doc2000.c
33344--- linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
33345+++ linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
33346@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
33347
33348 /* The ECC will not be calculated correctly if less than 512 is written */
33349 /* DBB-
33350- if (len != 0x200 && eccbuf)
33351+ if (len != 0x200)
33352 printk(KERN_WARNING
33353 "ECC needs a full sector write (adr: %lx size %lx)\n",
33354 (long) to, (long) len);
33355diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2001.c linux-2.6.32.45/drivers/mtd/devices/doc2001.c
33356--- linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
33357+++ linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
33358@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
33359 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33360
33361 /* Don't allow read past end of device */
33362- if (from >= this->totlen)
33363+ if (from >= this->totlen || !len)
33364 return -EINVAL;
33365
33366 /* Don't allow a single read to cross a 512-byte block boundary */
33367diff -urNp linux-2.6.32.45/drivers/mtd/ftl.c linux-2.6.32.45/drivers/mtd/ftl.c
33368--- linux-2.6.32.45/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
33369+++ linux-2.6.32.45/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
33370@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
33371 loff_t offset;
33372 uint16_t srcunitswap = cpu_to_le16(srcunit);
33373
33374+ pax_track_stack();
33375+
33376 eun = &part->EUNInfo[srcunit];
33377 xfer = &part->XferInfo[xferunit];
33378 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
33379diff -urNp linux-2.6.32.45/drivers/mtd/inftlcore.c linux-2.6.32.45/drivers/mtd/inftlcore.c
33380--- linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
33381+++ linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
33382@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
33383 struct inftl_oob oob;
33384 size_t retlen;
33385
33386+ pax_track_stack();
33387+
33388 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
33389 "pending=%d)\n", inftl, thisVUC, pendingblock);
33390
33391diff -urNp linux-2.6.32.45/drivers/mtd/inftlmount.c linux-2.6.32.45/drivers/mtd/inftlmount.c
33392--- linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
33393+++ linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
33394@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
33395 struct INFTLPartition *ip;
33396 size_t retlen;
33397
33398+ pax_track_stack();
33399+
33400 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
33401
33402 /*
33403diff -urNp linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c
33404--- linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
33405+++ linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
33406@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
33407 {
33408 map_word pfow_val[4];
33409
33410+ pax_track_stack();
33411+
33412 /* Check identification string */
33413 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
33414 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
33415diff -urNp linux-2.6.32.45/drivers/mtd/mtdchar.c linux-2.6.32.45/drivers/mtd/mtdchar.c
33416--- linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
33417+++ linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
33418@@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
33419 u_long size;
33420 struct mtd_info_user info;
33421
33422+ pax_track_stack();
33423+
33424 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
33425
33426 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
33427diff -urNp linux-2.6.32.45/drivers/mtd/nftlcore.c linux-2.6.32.45/drivers/mtd/nftlcore.c
33428--- linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
33429+++ linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
33430@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
33431 int inplace = 1;
33432 size_t retlen;
33433
33434+ pax_track_stack();
33435+
33436 memset(BlockMap, 0xff, sizeof(BlockMap));
33437 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
33438
33439diff -urNp linux-2.6.32.45/drivers/mtd/nftlmount.c linux-2.6.32.45/drivers/mtd/nftlmount.c
33440--- linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
33441+++ linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
33442@@ -23,6 +23,7 @@
33443 #include <asm/errno.h>
33444 #include <linux/delay.h>
33445 #include <linux/slab.h>
33446+#include <linux/sched.h>
33447 #include <linux/mtd/mtd.h>
33448 #include <linux/mtd/nand.h>
33449 #include <linux/mtd/nftl.h>
33450@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
33451 struct mtd_info *mtd = nftl->mbd.mtd;
33452 unsigned int i;
33453
33454+ pax_track_stack();
33455+
33456 /* Assume logical EraseSize == physical erasesize for starting the scan.
33457 We'll sort it out later if we find a MediaHeader which says otherwise */
33458 /* Actually, we won't. The new DiskOnChip driver has already scanned
33459diff -urNp linux-2.6.32.45/drivers/mtd/ubi/build.c linux-2.6.32.45/drivers/mtd/ubi/build.c
33460--- linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
33461+++ linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
33462@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
33463 static int __init bytes_str_to_int(const char *str)
33464 {
33465 char *endp;
33466- unsigned long result;
33467+ unsigned long result, scale = 1;
33468
33469 result = simple_strtoul(str, &endp, 0);
33470 if (str == endp || result >= INT_MAX) {
33471@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
33472
33473 switch (*endp) {
33474 case 'G':
33475- result *= 1024;
33476+ scale *= 1024;
33477 case 'M':
33478- result *= 1024;
33479+ scale *= 1024;
33480 case 'K':
33481- result *= 1024;
33482+ scale *= 1024;
33483 if (endp[1] == 'i' && endp[2] == 'B')
33484 endp += 2;
33485 case '\0':
33486@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
33487 return -EINVAL;
33488 }
33489
33490- return result;
33491+ if ((intoverflow_t)result*scale >= INT_MAX) {
33492+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33493+ str);
33494+ return -EINVAL;
33495+ }
33496+
33497+ return result*scale;
33498 }
33499
33500 /**
33501diff -urNp linux-2.6.32.45/drivers/net/bnx2.c linux-2.6.32.45/drivers/net/bnx2.c
33502--- linux-2.6.32.45/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
33503+++ linux-2.6.32.45/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
33504@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33505 int rc = 0;
33506 u32 magic, csum;
33507
33508+ pax_track_stack();
33509+
33510 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33511 goto test_nvram_done;
33512
33513diff -urNp linux-2.6.32.45/drivers/net/cxgb3/l2t.h linux-2.6.32.45/drivers/net/cxgb3/l2t.h
33514--- linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-03-27 14:31:47.000000000 -0400
33515+++ linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-08-05 20:33:55.000000000 -0400
33516@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
33517 */
33518 struct l2t_skb_cb {
33519 arp_failure_handler_func arp_failure_handler;
33520-};
33521+} __no_const;
33522
33523 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33524
33525diff -urNp linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c
33526--- linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
33527+++ linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
33528@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
33529 int i, addr, ret;
33530 struct t3_vpd vpd;
33531
33532+ pax_track_stack();
33533+
33534 /*
33535 * Card information is normally at VPD_BASE but some early cards had
33536 * it at 0.
33537diff -urNp linux-2.6.32.45/drivers/net/e1000e/82571.c linux-2.6.32.45/drivers/net/e1000e/82571.c
33538--- linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
33539+++ linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-08-23 21:22:32.000000000 -0400
33540@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(s
33541 {
33542 struct e1000_hw *hw = &adapter->hw;
33543 struct e1000_mac_info *mac = &hw->mac;
33544- struct e1000_mac_operations *func = &mac->ops;
33545+ e1000_mac_operations_no_const *func = &mac->ops;
33546 u32 swsm = 0;
33547 u32 swsm2 = 0;
33548 bool force_clear_smbi = false;
33549@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
33550 temp = er32(ICRXDMTC);
33551 }
33552
33553-static struct e1000_mac_operations e82571_mac_ops = {
33554+static const struct e1000_mac_operations e82571_mac_ops = {
33555 /* .check_mng_mode: mac type dependent */
33556 /* .check_for_link: media type dependent */
33557 .id_led_init = e1000e_id_led_init,
33558@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
33559 .setup_led = e1000e_setup_led_generic,
33560 };
33561
33562-static struct e1000_phy_operations e82_phy_ops_igp = {
33563+static const struct e1000_phy_operations e82_phy_ops_igp = {
33564 .acquire_phy = e1000_get_hw_semaphore_82571,
33565 .check_reset_block = e1000e_check_reset_block_generic,
33566 .commit_phy = NULL,
33567@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
33568 .cfg_on_link_up = NULL,
33569 };
33570
33571-static struct e1000_phy_operations e82_phy_ops_m88 = {
33572+static const struct e1000_phy_operations e82_phy_ops_m88 = {
33573 .acquire_phy = e1000_get_hw_semaphore_82571,
33574 .check_reset_block = e1000e_check_reset_block_generic,
33575 .commit_phy = e1000e_phy_sw_reset,
33576@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
33577 .cfg_on_link_up = NULL,
33578 };
33579
33580-static struct e1000_phy_operations e82_phy_ops_bm = {
33581+static const struct e1000_phy_operations e82_phy_ops_bm = {
33582 .acquire_phy = e1000_get_hw_semaphore_82571,
33583 .check_reset_block = e1000e_check_reset_block_generic,
33584 .commit_phy = e1000e_phy_sw_reset,
33585@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
33586 .cfg_on_link_up = NULL,
33587 };
33588
33589-static struct e1000_nvm_operations e82571_nvm_ops = {
33590+static const struct e1000_nvm_operations e82571_nvm_ops = {
33591 .acquire_nvm = e1000_acquire_nvm_82571,
33592 .read_nvm = e1000e_read_nvm_eerd,
33593 .release_nvm = e1000_release_nvm_82571,
33594diff -urNp linux-2.6.32.45/drivers/net/e1000e/e1000.h linux-2.6.32.45/drivers/net/e1000e/e1000.h
33595--- linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
33596+++ linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
33597@@ -375,9 +375,9 @@ struct e1000_info {
33598 u32 pba;
33599 u32 max_hw_frame_size;
33600 s32 (*get_variants)(struct e1000_adapter *);
33601- struct e1000_mac_operations *mac_ops;
33602- struct e1000_phy_operations *phy_ops;
33603- struct e1000_nvm_operations *nvm_ops;
33604+ const struct e1000_mac_operations *mac_ops;
33605+ const struct e1000_phy_operations *phy_ops;
33606+ const struct e1000_nvm_operations *nvm_ops;
33607 };
33608
33609 /* hardware capability, feature, and workaround flags */
33610diff -urNp linux-2.6.32.45/drivers/net/e1000e/es2lan.c linux-2.6.32.45/drivers/net/e1000e/es2lan.c
33611--- linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
33612+++ linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-08-23 21:22:32.000000000 -0400
33613@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es
33614 {
33615 struct e1000_hw *hw = &adapter->hw;
33616 struct e1000_mac_info *mac = &hw->mac;
33617- struct e1000_mac_operations *func = &mac->ops;
33618+ e1000_mac_operations_no_const *func = &mac->ops;
33619
33620 /* Set media type */
33621 switch (adapter->pdev->device) {
33622@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
33623 temp = er32(ICRXDMTC);
33624 }
33625
33626-static struct e1000_mac_operations es2_mac_ops = {
33627+static const struct e1000_mac_operations es2_mac_ops = {
33628 .id_led_init = e1000e_id_led_init,
33629 .check_mng_mode = e1000e_check_mng_mode_generic,
33630 /* check_for_link dependent on media type */
33631@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
33632 .setup_led = e1000e_setup_led_generic,
33633 };
33634
33635-static struct e1000_phy_operations es2_phy_ops = {
33636+static const struct e1000_phy_operations es2_phy_ops = {
33637 .acquire_phy = e1000_acquire_phy_80003es2lan,
33638 .check_reset_block = e1000e_check_reset_block_generic,
33639 .commit_phy = e1000e_phy_sw_reset,
33640@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
33641 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
33642 };
33643
33644-static struct e1000_nvm_operations es2_nvm_ops = {
33645+static const struct e1000_nvm_operations es2_nvm_ops = {
33646 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
33647 .read_nvm = e1000e_read_nvm_eerd,
33648 .release_nvm = e1000_release_nvm_80003es2lan,
33649diff -urNp linux-2.6.32.45/drivers/net/e1000e/hw.h linux-2.6.32.45/drivers/net/e1000e/hw.h
33650--- linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
33651+++ linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-08-23 21:27:38.000000000 -0400
33652@@ -753,6 +753,7 @@ struct e1000_mac_operations {
33653 s32 (*setup_physical_interface)(struct e1000_hw *);
33654 s32 (*setup_led)(struct e1000_hw *);
33655 };
33656+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33657
33658 /* Function pointers for the PHY. */
33659 struct e1000_phy_operations {
33660@@ -774,6 +775,7 @@ struct e1000_phy_operations {
33661 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33662 s32 (*cfg_on_link_up)(struct e1000_hw *);
33663 };
33664+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33665
33666 /* Function pointers for the NVM. */
33667 struct e1000_nvm_operations {
33668@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
33669 s32 (*validate_nvm)(struct e1000_hw *);
33670 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33671 };
33672+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33673
33674 struct e1000_mac_info {
33675- struct e1000_mac_operations ops;
33676+ e1000_mac_operations_no_const ops;
33677
33678 u8 addr[6];
33679 u8 perm_addr[6];
33680@@ -823,7 +826,7 @@ struct e1000_mac_info {
33681 };
33682
33683 struct e1000_phy_info {
33684- struct e1000_phy_operations ops;
33685+ e1000_phy_operations_no_const ops;
33686
33687 enum e1000_phy_type type;
33688
33689@@ -857,7 +860,7 @@ struct e1000_phy_info {
33690 };
33691
33692 struct e1000_nvm_info {
33693- struct e1000_nvm_operations ops;
33694+ e1000_nvm_operations_no_const ops;
33695
33696 enum e1000_nvm_type type;
33697 enum e1000_nvm_override override;
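
The hw.h hunk above sets up the pattern used by the e1000e, igb, igbvf and ixgbe changes in this section: the file-scope *_ops templates become const so they can sit in read-only data, while the copies embedded in e1000_mac_info, e1000_phy_info and e1000_nvm_info are declared through a __no_const typedef because code such as e1000_init_mac_params_82571() still patches individual callbacks on its per-device copy at run time. A plain-C sketch of that split, using made-up mac_ops/mac_info names rather than the real e1000e types:

#include <stdio.h>
#include <string.h>

struct mac_ops {                        /* function-pointer table */
        void (*reset)(void);
        void (*check_link)(void);
};

static void generic_reset(void)      { puts("generic reset"); }
static void generic_check_link(void) { puts("generic link check"); }
static void quirky_check_link(void)  { puts("board-specific link check"); }

/* Shared template: one per driver, read-only, never written after boot. */
static const struct mac_ops template_ops = {
        .reset      = generic_reset,
        .check_link = generic_check_link,
};

struct mac_info {
        struct mac_ops ops;             /* per-device copy, must stay writable */
};

/* Mirrors the init_mac_params style: copy the template, patch one callback. */
static void init_mac(struct mac_info *mac, int has_quirk)
{
        memcpy(&mac->ops, &template_ops, sizeof(mac->ops));
        if (has_quirk)
                mac->ops.check_link = quirky_check_link;
}

int main(void)
{
        struct mac_info mac;

        init_mac(&mac, 1);
        mac.ops.reset();
        mac.ops.check_link();
        return 0;
}
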
33698diff -urNp linux-2.6.32.45/drivers/net/e1000e/ich8lan.c linux-2.6.32.45/drivers/net/e1000e/ich8lan.c
33699--- linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
33700+++ linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-08-23 21:22:32.000000000 -0400
33701@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
33702 }
33703 }
33704
33705-static struct e1000_mac_operations ich8_mac_ops = {
33706+static const struct e1000_mac_operations ich8_mac_ops = {
33707 .id_led_init = e1000e_id_led_init,
33708 .check_mng_mode = e1000_check_mng_mode_ich8lan,
33709 .check_for_link = e1000_check_for_copper_link_ich8lan,
33710@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
33711 /* id_led_init dependent on mac type */
33712 };
33713
33714-static struct e1000_phy_operations ich8_phy_ops = {
33715+static const struct e1000_phy_operations ich8_phy_ops = {
33716 .acquire_phy = e1000_acquire_swflag_ich8lan,
33717 .check_reset_block = e1000_check_reset_block_ich8lan,
33718 .commit_phy = NULL,
33719@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
33720 .write_phy_reg = e1000e_write_phy_reg_igp,
33721 };
33722
33723-static struct e1000_nvm_operations ich8_nvm_ops = {
33724+static const struct e1000_nvm_operations ich8_nvm_ops = {
33725 .acquire_nvm = e1000_acquire_nvm_ich8lan,
33726 .read_nvm = e1000_read_nvm_ich8lan,
33727 .release_nvm = e1000_release_nvm_ich8lan,
33728diff -urNp linux-2.6.32.45/drivers/net/hamradio/6pack.c linux-2.6.32.45/drivers/net/hamradio/6pack.c
33729--- linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
33730+++ linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
33731@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
33732 unsigned char buf[512];
33733 int count1;
33734
33735+ pax_track_stack();
33736+
33737 if (!count)
33738 return;
33739
33740diff -urNp linux-2.6.32.45/drivers/net/ibmveth.c linux-2.6.32.45/drivers/net/ibmveth.c
33741--- linux-2.6.32.45/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
33742+++ linux-2.6.32.45/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
33743@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
33744 NULL,
33745 };
33746
33747-static struct sysfs_ops veth_pool_ops = {
33748+static const struct sysfs_ops veth_pool_ops = {
33749 .show = veth_pool_show,
33750 .store = veth_pool_store,
33751 };
33752diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_82575.c linux-2.6.32.45/drivers/net/igb/e1000_82575.c
33753--- linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
33754+++ linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-08-23 21:22:32.000000000 -0400
33755@@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
33756 wr32(E1000_VT_CTL, vt_ctl);
33757 }
33758
33759-static struct e1000_mac_operations e1000_mac_ops_82575 = {
33760+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
33761 .reset_hw = igb_reset_hw_82575,
33762 .init_hw = igb_init_hw_82575,
33763 .check_for_link = igb_check_for_link_82575,
33764@@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
33765 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
33766 };
33767
33768-static struct e1000_phy_operations e1000_phy_ops_82575 = {
33769+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
33770 .acquire = igb_acquire_phy_82575,
33771 .get_cfg_done = igb_get_cfg_done_82575,
33772 .release = igb_release_phy_82575,
33773 };
33774
33775-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33776+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33777 .acquire = igb_acquire_nvm_82575,
33778 .read = igb_read_nvm_eerd,
33779 .release = igb_release_nvm_82575,
33780diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_hw.h linux-2.6.32.45/drivers/net/igb/e1000_hw.h
33781--- linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
33782+++ linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-08-23 21:28:01.000000000 -0400
33783@@ -288,6 +288,7 @@ struct e1000_mac_operations {
33784 s32 (*read_mac_addr)(struct e1000_hw *);
33785 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
33786 };
33787+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33788
33789 struct e1000_phy_operations {
33790 s32 (*acquire)(struct e1000_hw *);
33791@@ -303,6 +304,7 @@ struct e1000_phy_operations {
33792 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33793 s32 (*write_reg)(struct e1000_hw *, u32, u16);
33794 };
33795+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33796
33797 struct e1000_nvm_operations {
33798 s32 (*acquire)(struct e1000_hw *);
33799@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
33800 void (*release)(struct e1000_hw *);
33801 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
33802 };
33803+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33804
33805 struct e1000_info {
33806 s32 (*get_invariants)(struct e1000_hw *);
33807@@ -321,7 +324,7 @@ struct e1000_info {
33808 extern const struct e1000_info e1000_82575_info;
33809
33810 struct e1000_mac_info {
33811- struct e1000_mac_operations ops;
33812+ e1000_mac_operations_no_const ops;
33813
33814 u8 addr[6];
33815 u8 perm_addr[6];
33816@@ -365,7 +368,7 @@ struct e1000_mac_info {
33817 };
33818
33819 struct e1000_phy_info {
33820- struct e1000_phy_operations ops;
33821+ e1000_phy_operations_no_const ops;
33822
33823 enum e1000_phy_type type;
33824
33825@@ -400,7 +403,7 @@ struct e1000_phy_info {
33826 };
33827
33828 struct e1000_nvm_info {
33829- struct e1000_nvm_operations ops;
33830+ e1000_nvm_operations_no_const ops;
33831
33832 enum e1000_nvm_type type;
33833 enum e1000_nvm_override override;
33834@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
33835 s32 (*check_for_ack)(struct e1000_hw *, u16);
33836 s32 (*check_for_rst)(struct e1000_hw *, u16);
33837 };
33838+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33839
33840 struct e1000_mbx_stats {
33841 u32 msgs_tx;
33842@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
33843 };
33844
33845 struct e1000_mbx_info {
33846- struct e1000_mbx_operations ops;
33847+ e1000_mbx_operations_no_const ops;
33848 struct e1000_mbx_stats stats;
33849 u32 timeout;
33850 u32 usec_delay;
33851diff -urNp linux-2.6.32.45/drivers/net/igbvf/vf.h linux-2.6.32.45/drivers/net/igbvf/vf.h
33852--- linux-2.6.32.45/drivers/net/igbvf/vf.h 2011-03-27 14:31:47.000000000 -0400
33853+++ linux-2.6.32.45/drivers/net/igbvf/vf.h 2011-08-23 21:22:38.000000000 -0400
33854@@ -187,9 +187,10 @@ struct e1000_mac_operations {
33855 s32 (*read_mac_addr)(struct e1000_hw *);
33856 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
33857 };
33858+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33859
33860 struct e1000_mac_info {
33861- struct e1000_mac_operations ops;
33862+ e1000_mac_operations_no_const ops;
33863 u8 addr[6];
33864 u8 perm_addr[6];
33865
33866@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
33867 s32 (*check_for_ack)(struct e1000_hw *);
33868 s32 (*check_for_rst)(struct e1000_hw *);
33869 };
33870+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33871
33872 struct e1000_mbx_stats {
33873 u32 msgs_tx;
33874@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
33875 };
33876
33877 struct e1000_mbx_info {
33878- struct e1000_mbx_operations ops;
33879+ e1000_mbx_operations_no_const ops;
33880 struct e1000_mbx_stats stats;
33881 u32 timeout;
33882 u32 usec_delay;
33883diff -urNp linux-2.6.32.45/drivers/net/iseries_veth.c linux-2.6.32.45/drivers/net/iseries_veth.c
33884--- linux-2.6.32.45/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
33885+++ linux-2.6.32.45/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
33886@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
33887 NULL
33888 };
33889
33890-static struct sysfs_ops veth_cnx_sysfs_ops = {
33891+static const struct sysfs_ops veth_cnx_sysfs_ops = {
33892 .show = veth_cnx_attribute_show
33893 };
33894
33895@@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
33896 NULL
33897 };
33898
33899-static struct sysfs_ops veth_port_sysfs_ops = {
33900+static const struct sysfs_ops veth_port_sysfs_ops = {
33901 .show = veth_port_attribute_show
33902 };
33903
33904diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c
33905--- linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
33906+++ linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
33907@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
33908 u32 rctl;
33909 int i;
33910
33911+ pax_track_stack();
33912+
33913 /* Check for Promiscuous and All Multicast modes */
33914
33915 rctl = IXGB_READ_REG(hw, RCTL);
33916diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c
33917--- linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
33918+++ linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
33919@@ -260,6 +260,9 @@ void __devinit
33920 ixgb_check_options(struct ixgb_adapter *adapter)
33921 {
33922 int bd = adapter->bd_number;
33923+
33924+ pax_track_stack();
33925+
33926 if (bd >= IXGB_MAX_NIC) {
33927 printk(KERN_NOTICE
33928 "Warning: no configuration for board #%i\n", bd);
33929diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h
33930--- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h 2011-03-27 14:31:47.000000000 -0400
33931+++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:22:38.000000000 -0400
33932@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
33933 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
33934 s32 (*update_checksum)(struct ixgbe_hw *);
33935 };
33936+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
33937
33938 struct ixgbe_mac_operations {
33939 s32 (*init_hw)(struct ixgbe_hw *);
33940@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
33941 /* Flow Control */
33942 s32 (*fc_enable)(struct ixgbe_hw *, s32);
33943 };
33944+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33945
33946 struct ixgbe_phy_operations {
33947 s32 (*identify)(struct ixgbe_hw *);
33948@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
33949 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
33950 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
33951 };
33952+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
33953
33954 struct ixgbe_eeprom_info {
33955- struct ixgbe_eeprom_operations ops;
33956+ ixgbe_eeprom_operations_no_const ops;
33957 enum ixgbe_eeprom_type type;
33958 u32 semaphore_delay;
33959 u16 word_size;
33960@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
33961 };
33962
33963 struct ixgbe_mac_info {
33964- struct ixgbe_mac_operations ops;
33965+ ixgbe_mac_operations_no_const ops;
33966 enum ixgbe_mac_type type;
33967 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33968 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33969@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
33970 };
33971
33972 struct ixgbe_phy_info {
33973- struct ixgbe_phy_operations ops;
33974+ ixgbe_phy_operations_no_const ops;
33975 struct mdio_if_info mdio;
33976 enum ixgbe_phy_type type;
33977 u32 id;
33978diff -urNp linux-2.6.32.45/drivers/net/mlx4/main.c linux-2.6.32.45/drivers/net/mlx4/main.c
33979--- linux-2.6.32.45/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
33980+++ linux-2.6.32.45/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
33981@@ -38,6 +38,7 @@
33982 #include <linux/errno.h>
33983 #include <linux/pci.h>
33984 #include <linux/dma-mapping.h>
33985+#include <linux/sched.h>
33986
33987 #include <linux/mlx4/device.h>
33988 #include <linux/mlx4/doorbell.h>
33989@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
33990 u64 icm_size;
33991 int err;
33992
33993+ pax_track_stack();
33994+
33995 err = mlx4_QUERY_FW(dev);
33996 if (err) {
33997 if (err == -EACCES)
33998diff -urNp linux-2.6.32.45/drivers/net/niu.c linux-2.6.32.45/drivers/net/niu.c
33999--- linux-2.6.32.45/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
34000+++ linux-2.6.32.45/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
34001@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
34002 int i, num_irqs, err;
34003 u8 first_ldg;
34004
34005+ pax_track_stack();
34006+
34007 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
34008 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
34009 ldg_num_map[i] = first_ldg + i;
34010diff -urNp linux-2.6.32.45/drivers/net/pcnet32.c linux-2.6.32.45/drivers/net/pcnet32.c
34011--- linux-2.6.32.45/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
34012+++ linux-2.6.32.45/drivers/net/pcnet32.c 2011-08-05 20:33:55.000000000 -0400
34013@@ -79,7 +79,7 @@ static int cards_found;
34014 /*
34015 * VLB I/O addresses
34016 */
34017-static unsigned int pcnet32_portlist[] __initdata =
34018+static unsigned int pcnet32_portlist[] __devinitdata =
34019 { 0x300, 0x320, 0x340, 0x360, 0 };
34020
34021 static int pcnet32_debug = 0;
34022@@ -267,7 +267,7 @@ struct pcnet32_private {
34023 struct sk_buff **rx_skbuff;
34024 dma_addr_t *tx_dma_addr;
34025 dma_addr_t *rx_dma_addr;
34026- struct pcnet32_access a;
34027+ struct pcnet32_access *a;
34028 spinlock_t lock; /* Guard lock */
34029 unsigned int cur_rx, cur_tx; /* The next free ring entry */
34030 unsigned int rx_ring_size; /* current rx ring size */
34031@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct n
34032 u16 val;
34033
34034 netif_wake_queue(dev);
34035- val = lp->a.read_csr(ioaddr, CSR3);
34036+ val = lp->a->read_csr(ioaddr, CSR3);
34037 val &= 0x00ff;
34038- lp->a.write_csr(ioaddr, CSR3, val);
34039+ lp->a->write_csr(ioaddr, CSR3, val);
34040 napi_enable(&lp->napi);
34041 }
34042
34043@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_d
34044 r = mii_link_ok(&lp->mii_if);
34045 } else if (lp->chip_version >= PCNET32_79C970A) {
34046 ulong ioaddr = dev->base_addr; /* card base I/O address */
34047- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34048+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34049 } else { /* can not detect link on really old chips */
34050 r = 1;
34051 }
34052@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct
34053 pcnet32_netif_stop(dev);
34054
34055 spin_lock_irqsave(&lp->lock, flags);
34056- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34057+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34058
34059 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
34060
34061@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct
34062 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
34063 {
34064 struct pcnet32_private *lp = netdev_priv(dev);
34065- struct pcnet32_access *a = &lp->a; /* access to registers */
34066+ struct pcnet32_access *a = lp->a; /* access to registers */
34067 ulong ioaddr = dev->base_addr; /* card base I/O address */
34068 struct sk_buff *skb; /* sk buff */
34069 int x, i; /* counters */
34070@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct
34071 pcnet32_netif_stop(dev);
34072
34073 spin_lock_irqsave(&lp->lock, flags);
34074- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34075+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34076
34077 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
34078
34079 /* Reset the PCNET32 */
34080- lp->a.reset(ioaddr);
34081- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34082+ lp->a->reset(ioaddr);
34083+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34084
34085 /* switch pcnet32 to 32bit mode */
34086- lp->a.write_bcr(ioaddr, 20, 2);
34087+ lp->a->write_bcr(ioaddr, 20, 2);
34088
34089 /* purge & init rings but don't actually restart */
34090 pcnet32_restart(dev, 0x0000);
34091
34092- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34093+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34094
34095 /* Initialize Transmit buffers. */
34096 size = data_len + 15;
34097@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct
34098
34099 /* set int loopback in CSR15 */
34100 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
34101- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
34102+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
34103
34104 teststatus = cpu_to_le16(0x8000);
34105- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34106+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34107
34108 /* Check status of descriptors */
34109 for (x = 0; x < numbuffs; x++) {
34110@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct
34111 }
34112 }
34113
34114- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34115+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34116 wmb();
34117 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
34118 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
34119@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct
34120 pcnet32_restart(dev, CSR0_NORMAL);
34121 } else {
34122 pcnet32_purge_rx_ring(dev);
34123- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34124+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34125 }
34126 spin_unlock_irqrestore(&lp->lock, flags);
34127
34128@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct
34129 static void pcnet32_led_blink_callback(struct net_device *dev)
34130 {
34131 struct pcnet32_private *lp = netdev_priv(dev);
34132- struct pcnet32_access *a = &lp->a;
34133+ struct pcnet32_access *a = lp->a;
34134 ulong ioaddr = dev->base_addr;
34135 unsigned long flags;
34136 int i;
34137@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(s
34138 static int pcnet32_phys_id(struct net_device *dev, u32 data)
34139 {
34140 struct pcnet32_private *lp = netdev_priv(dev);
34141- struct pcnet32_access *a = &lp->a;
34142+ struct pcnet32_access *a = lp->a;
34143 ulong ioaddr = dev->base_addr;
34144 unsigned long flags;
34145 int i, regs[4];
34146@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_de
34147 {
34148 int csr5;
34149 struct pcnet32_private *lp = netdev_priv(dev);
34150- struct pcnet32_access *a = &lp->a;
34151+ struct pcnet32_access *a = lp->a;
34152 ulong ioaddr = dev->base_addr;
34153 int ticks;
34154
34155@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_stru
34156 spin_lock_irqsave(&lp->lock, flags);
34157 if (pcnet32_tx(dev)) {
34158 /* reset the chip to clear the error condition, then restart */
34159- lp->a.reset(ioaddr);
34160- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34161+ lp->a->reset(ioaddr);
34162+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34163 pcnet32_restart(dev, CSR0_START);
34164 netif_wake_queue(dev);
34165 }
34166@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_stru
34167 __napi_complete(napi);
34168
34169 /* clear interrupt masks */
34170- val = lp->a.read_csr(ioaddr, CSR3);
34171+ val = lp->a->read_csr(ioaddr, CSR3);
34172 val &= 0x00ff;
34173- lp->a.write_csr(ioaddr, CSR3, val);
34174+ lp->a->write_csr(ioaddr, CSR3, val);
34175
34176 /* Set interrupt enable. */
34177- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
34178+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
34179
34180 spin_unlock_irqrestore(&lp->lock, flags);
34181 }
34182@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_
34183 int i, csr0;
34184 u16 *buff = ptr;
34185 struct pcnet32_private *lp = netdev_priv(dev);
34186- struct pcnet32_access *a = &lp->a;
34187+ struct pcnet32_access *a = lp->a;
34188 ulong ioaddr = dev->base_addr;
34189 unsigned long flags;
34190
34191@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_
34192 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
34193 if (lp->phymask & (1 << j)) {
34194 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
34195- lp->a.write_bcr(ioaddr, 33,
34196+ lp->a->write_bcr(ioaddr, 33,
34197 (j << 5) | i);
34198- *buff++ = lp->a.read_bcr(ioaddr, 34);
34199+ *buff++ = lp->a->read_bcr(ioaddr, 34);
34200 }
34201 }
34202 }
34203@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34204 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
34205 lp->options |= PCNET32_PORT_FD;
34206
34207- lp->a = *a;
34208+ lp->a = a;
34209
34210 /* prior to register_netdev, dev->name is not yet correct */
34211 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
34212@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34213 if (lp->mii) {
34214 /* lp->phycount and lp->phymask are set to 0 by memset above */
34215
34216- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34217+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34218 /* scan for PHYs */
34219 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34220 unsigned short id1, id2;
34221@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34222 "Found PHY %04x:%04x at address %d.\n",
34223 id1, id2, i);
34224 }
34225- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34226+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34227 if (lp->phycount > 1) {
34228 lp->options |= PCNET32_PORT_MII;
34229 }
34230@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_devic
34231 }
34232
34233 /* Reset the PCNET32 */
34234- lp->a.reset(ioaddr);
34235+ lp->a->reset(ioaddr);
34236
34237 /* switch pcnet32 to 32bit mode */
34238- lp->a.write_bcr(ioaddr, 20, 2);
34239+ lp->a->write_bcr(ioaddr, 20, 2);
34240
34241 if (netif_msg_ifup(lp))
34242 printk(KERN_DEBUG
34243@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_devic
34244 (u32) (lp->init_dma_addr));
34245
34246 /* set/reset autoselect bit */
34247- val = lp->a.read_bcr(ioaddr, 2) & ~2;
34248+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
34249 if (lp->options & PCNET32_PORT_ASEL)
34250 val |= 2;
34251- lp->a.write_bcr(ioaddr, 2, val);
34252+ lp->a->write_bcr(ioaddr, 2, val);
34253
34254 /* handle full duplex setting */
34255 if (lp->mii_if.full_duplex) {
34256- val = lp->a.read_bcr(ioaddr, 9) & ~3;
34257+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
34258 if (lp->options & PCNET32_PORT_FD) {
34259 val |= 1;
34260 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
34261@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_devic
34262 if (lp->chip_version == 0x2627)
34263 val |= 3;
34264 }
34265- lp->a.write_bcr(ioaddr, 9, val);
34266+ lp->a->write_bcr(ioaddr, 9, val);
34267 }
34268
34269 /* set/reset GPSI bit in test register */
34270- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
34271+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
34272 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
34273 val |= 0x10;
34274- lp->a.write_csr(ioaddr, 124, val);
34275+ lp->a->write_csr(ioaddr, 124, val);
34276
34277 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
34278 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
34279@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_devic
34280 * duplex, and/or enable auto negotiation, and clear DANAS
34281 */
34282 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
34283- lp->a.write_bcr(ioaddr, 32,
34284- lp->a.read_bcr(ioaddr, 32) | 0x0080);
34285+ lp->a->write_bcr(ioaddr, 32,
34286+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
34287 /* disable Auto Negotiation, set 10Mpbs, HD */
34288- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
34289+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
34290 if (lp->options & PCNET32_PORT_FD)
34291 val |= 0x10;
34292 if (lp->options & PCNET32_PORT_100)
34293 val |= 0x08;
34294- lp->a.write_bcr(ioaddr, 32, val);
34295+ lp->a->write_bcr(ioaddr, 32, val);
34296 } else {
34297 if (lp->options & PCNET32_PORT_ASEL) {
34298- lp->a.write_bcr(ioaddr, 32,
34299- lp->a.read_bcr(ioaddr,
34300+ lp->a->write_bcr(ioaddr, 32,
34301+ lp->a->read_bcr(ioaddr,
34302 32) | 0x0080);
34303 /* enable auto negotiate, setup, disable fd */
34304- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
34305+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
34306 val |= 0x20;
34307- lp->a.write_bcr(ioaddr, 32, val);
34308+ lp->a->write_bcr(ioaddr, 32, val);
34309 }
34310 }
34311 } else {
34312@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_devic
34313 * There is really no good other way to handle multiple PHYs
34314 * other than turning off all automatics
34315 */
34316- val = lp->a.read_bcr(ioaddr, 2);
34317- lp->a.write_bcr(ioaddr, 2, val & ~2);
34318- val = lp->a.read_bcr(ioaddr, 32);
34319- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34320+ val = lp->a->read_bcr(ioaddr, 2);
34321+ lp->a->write_bcr(ioaddr, 2, val & ~2);
34322+ val = lp->a->read_bcr(ioaddr, 32);
34323+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34324
34325 if (!(lp->options & PCNET32_PORT_ASEL)) {
34326 /* setup ecmd */
34327@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_devic
34328 ecmd.speed =
34329 lp->
34330 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
34331- bcr9 = lp->a.read_bcr(ioaddr, 9);
34332+ bcr9 = lp->a->read_bcr(ioaddr, 9);
34333
34334 if (lp->options & PCNET32_PORT_FD) {
34335 ecmd.duplex = DUPLEX_FULL;
34336@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_devic
34337 ecmd.duplex = DUPLEX_HALF;
34338 bcr9 |= ~(1 << 0);
34339 }
34340- lp->a.write_bcr(ioaddr, 9, bcr9);
34341+ lp->a->write_bcr(ioaddr, 9, bcr9);
34342 }
34343
34344 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34345@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_devic
34346
34347 #ifdef DO_DXSUFLO
34348 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
34349- val = lp->a.read_csr(ioaddr, CSR3);
34350+ val = lp->a->read_csr(ioaddr, CSR3);
34351 val |= 0x40;
34352- lp->a.write_csr(ioaddr, CSR3, val);
34353+ lp->a->write_csr(ioaddr, CSR3, val);
34354 }
34355 #endif
34356
34357@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_devic
34358 napi_enable(&lp->napi);
34359
34360 /* Re-initialize the PCNET32, and start it when done. */
34361- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34362- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34363+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34364+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34365
34366- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34367- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34368+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34369+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34370
34371 netif_start_queue(dev);
34372
34373@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_devic
34374
34375 i = 0;
34376 while (i++ < 100)
34377- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34378+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34379 break;
34380 /*
34381 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
34382 * reports that doing so triggers a bug in the '974.
34383 */
34384- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
34385+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
34386
34387 if (netif_msg_ifup(lp))
34388 printk(KERN_DEBUG
34389 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
34390 dev->name, i,
34391 (u32) (lp->init_dma_addr),
34392- lp->a.read_csr(ioaddr, CSR0));
34393+ lp->a->read_csr(ioaddr, CSR0));
34394
34395 spin_unlock_irqrestore(&lp->lock, flags);
34396
34397@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_devic
34398 * Switch back to 16bit mode to avoid problems with dumb
34399 * DOS packet driver after a warm reboot
34400 */
34401- lp->a.write_bcr(ioaddr, 20, 4);
34402+ lp->a->write_bcr(ioaddr, 20, 4);
34403
34404 err_free_irq:
34405 spin_unlock_irqrestore(&lp->lock, flags);
34406@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_d
34407
34408 /* wait for stop */
34409 for (i = 0; i < 100; i++)
34410- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
34411+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
34412 break;
34413
34414 if (i >= 100 && netif_msg_drv(lp))
34415@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_d
34416 return;
34417
34418 /* ReInit Ring */
34419- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34420+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34421 i = 0;
34422 while (i++ < 1000)
34423- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34424+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34425 break;
34426
34427- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
34428+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
34429 }
34430
34431 static void pcnet32_tx_timeout(struct net_device *dev)
34432@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct ne
34433 if (pcnet32_debug & NETIF_MSG_DRV)
34434 printk(KERN_ERR
34435 "%s: transmit timed out, status %4.4x, resetting.\n",
34436- dev->name, lp->a.read_csr(ioaddr, CSR0));
34437- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34438+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34439+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34440 dev->stats.tx_errors++;
34441 if (netif_msg_tx_err(lp)) {
34442 int i;
34443@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34444 if (netif_msg_tx_queued(lp)) {
34445 printk(KERN_DEBUG
34446 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
34447- dev->name, lp->a.read_csr(ioaddr, CSR0));
34448+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34449 }
34450
34451 /* Default status -- will not enable Successful-TxDone
34452@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34453 dev->stats.tx_bytes += skb->len;
34454
34455 /* Trigger an immediate send poll. */
34456- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34457+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34458
34459 dev->trans_start = jiffies;
34460
34461@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
34462
34463 spin_lock(&lp->lock);
34464
34465- csr0 = lp->a.read_csr(ioaddr, CSR0);
34466+ csr0 = lp->a->read_csr(ioaddr, CSR0);
34467 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
34468 if (csr0 == 0xffff) {
34469 break; /* PCMCIA remove happened */
34470 }
34471 /* Acknowledge all of the current interrupt sources ASAP. */
34472- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34473+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34474
34475 if (netif_msg_intr(lp))
34476 printk(KERN_DEBUG
34477 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
34478- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
34479+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
34480
34481 /* Log misc errors. */
34482 if (csr0 & 0x4000)
34483@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
34484 if (napi_schedule_prep(&lp->napi)) {
34485 u16 val;
34486 /* set interrupt masks */
34487- val = lp->a.read_csr(ioaddr, CSR3);
34488+ val = lp->a->read_csr(ioaddr, CSR3);
34489 val |= 0x5f00;
34490- lp->a.write_csr(ioaddr, CSR3, val);
34491+ lp->a->write_csr(ioaddr, CSR3, val);
34492
34493 __napi_schedule(&lp->napi);
34494 break;
34495 }
34496- csr0 = lp->a.read_csr(ioaddr, CSR0);
34497+ csr0 = lp->a->read_csr(ioaddr, CSR0);
34498 }
34499
34500 if (netif_msg_intr(lp))
34501 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
34502- dev->name, lp->a.read_csr(ioaddr, CSR0));
34503+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34504
34505 spin_unlock(&lp->lock);
34506
34507@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_devi
34508
34509 spin_lock_irqsave(&lp->lock, flags);
34510
34511- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34512+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34513
34514 if (netif_msg_ifdown(lp))
34515 printk(KERN_DEBUG
34516 "%s: Shutting down ethercard, status was %2.2x.\n",
34517- dev->name, lp->a.read_csr(ioaddr, CSR0));
34518+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34519
34520 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
34521- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34522+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34523
34524 /*
34525 * Switch back to 16bit mode to avoid problems with dumb
34526 * DOS packet driver after a warm reboot
34527 */
34528- lp->a.write_bcr(ioaddr, 20, 4);
34529+ lp->a->write_bcr(ioaddr, 20, 4);
34530
34531 spin_unlock_irqrestore(&lp->lock, flags);
34532
34533@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_
34534 unsigned long flags;
34535
34536 spin_lock_irqsave(&lp->lock, flags);
34537- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34538+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34539 spin_unlock_irqrestore(&lp->lock, flags);
34540
34541 return &dev->stats;
34542@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struc
34543 if (dev->flags & IFF_ALLMULTI) {
34544 ib->filter[0] = cpu_to_le32(~0U);
34545 ib->filter[1] = cpu_to_le32(~0U);
34546- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34547- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34548- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34549- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34550+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34551+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34552+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34553+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34554 return;
34555 }
34556 /* clear the multicast filter */
34557@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struc
34558 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
34559 }
34560 for (i = 0; i < 4; i++)
34561- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
34562+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
34563 le16_to_cpu(mcast_table[i]));
34564 return;
34565 }
34566@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(s
34567
34568 spin_lock_irqsave(&lp->lock, flags);
34569 suspended = pcnet32_suspend(dev, &flags, 0);
34570- csr15 = lp->a.read_csr(ioaddr, CSR15);
34571+ csr15 = lp->a->read_csr(ioaddr, CSR15);
34572 if (dev->flags & IFF_PROMISC) {
34573 /* Log any net taps. */
34574 if (netif_msg_hw(lp))
34575@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(s
34576 lp->init_block->mode =
34577 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
34578 7);
34579- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
34580+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
34581 } else {
34582 lp->init_block->mode =
34583 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
34584- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34585+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34586 pcnet32_load_multicast(dev);
34587 }
34588
34589 if (suspended) {
34590 int csr5;
34591 /* clear SUSPEND (SPND) - CSR5 bit 0 */
34592- csr5 = lp->a.read_csr(ioaddr, CSR5);
34593- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34594+ csr5 = lp->a->read_csr(ioaddr, CSR5);
34595+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34596 } else {
34597- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34598+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34599 pcnet32_restart(dev, CSR0_NORMAL);
34600 netif_wake_queue(dev);
34601 }
34602@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *
34603 if (!lp->mii)
34604 return 0;
34605
34606- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34607- val_out = lp->a.read_bcr(ioaddr, 34);
34608+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34609+ val_out = lp->a->read_bcr(ioaddr, 34);
34610
34611 return val_out;
34612 }
34613@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device
34614 if (!lp->mii)
34615 return;
34616
34617- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34618- lp->a.write_bcr(ioaddr, 34, val);
34619+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34620+ lp->a->write_bcr(ioaddr, 34, val);
34621 }
34622
34623 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34624@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct n
34625 curr_link = mii_link_ok(&lp->mii_if);
34626 } else {
34627 ulong ioaddr = dev->base_addr; /* card base I/O address */
34628- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34629+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34630 }
34631 if (!curr_link) {
34632 if (prev_link || verbose) {
34633@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct n
34634 (ecmd.duplex ==
34635 DUPLEX_FULL) ? "full" : "half");
34636 }
34637- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
34638+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
34639 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
34640 if (lp->mii_if.full_duplex)
34641 bcr9 |= (1 << 0);
34642 else
34643 bcr9 &= ~(1 << 0);
34644- lp->a.write_bcr(dev->base_addr, 9, bcr9);
34645+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
34646 }
34647 } else {
34648 if (netif_msg_link(lp))
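
All of the pcnet32.c hunks above follow from one structural change: struct pcnet32_private no longer embeds its own copy of the register accessor table (lp->a = *a) but keeps a pointer to the shared table (lp->a = a), so the table itself need never be written after initialisation and every lp->a.read_csr(...) call site becomes lp->a->read_csr(...). A small stand-alone sketch of that refactor, with invented pcnet_access/pcnet_private stand-ins for the driver's types:

#include <stdio.h>

struct pcnet_access {                   /* register accessor table */
        unsigned int (*read_csr)(unsigned long ioaddr, int reg);
        void (*write_csr)(unsigned long ioaddr, int reg, unsigned int val);
};

static unsigned int demo_read_csr(unsigned long ioaddr, int reg)
{
        (void)ioaddr; (void)reg;
        return 0x8f00;                  /* stub value */
}

static void demo_write_csr(unsigned long ioaddr, int reg, unsigned int val)
{
        printf("CSR%d <- 0x%04x at io %#lx\n", reg, val, ioaddr);
}

/* One shared, read-only table instead of a copy inside every private struct. */
static const struct pcnet_access pci_access = {
        .read_csr  = demo_read_csr,
        .write_csr = demo_write_csr,
};

struct pcnet_private {
        const struct pcnet_access *a;   /* was: struct pcnet_access a; */
        unsigned long ioaddr;
};

int main(void)
{
        struct pcnet_private lp = { .a = &pci_access, .ioaddr = 0x300 };
        unsigned int csr0;

        csr0 = lp.a->read_csr(lp.ioaddr, 0);    /* was lp.a.read_csr(...) */
        lp.a->write_csr(lp.ioaddr, 0, csr0 | 0x0004);
        return 0;
}
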
34649diff -urNp linux-2.6.32.45/drivers/net/tg3.h linux-2.6.32.45/drivers/net/tg3.h
34650--- linux-2.6.32.45/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
34651+++ linux-2.6.32.45/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
34652@@ -95,6 +95,7 @@
34653 #define CHIPREV_ID_5750_A0 0x4000
34654 #define CHIPREV_ID_5750_A1 0x4001
34655 #define CHIPREV_ID_5750_A3 0x4003
34656+#define CHIPREV_ID_5750_C1 0x4201
34657 #define CHIPREV_ID_5750_C2 0x4202
34658 #define CHIPREV_ID_5752_A0_HW 0x5000
34659 #define CHIPREV_ID_5752_A0 0x6000
34660diff -urNp linux-2.6.32.45/drivers/net/tokenring/abyss.c linux-2.6.32.45/drivers/net/tokenring/abyss.c
34661--- linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-03-27 14:31:47.000000000 -0400
34662+++ linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-08-05 20:33:55.000000000 -0400
34663@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
34664
34665 static int __init abyss_init (void)
34666 {
34667- abyss_netdev_ops = tms380tr_netdev_ops;
34668+ pax_open_kernel();
34669+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34670
34671- abyss_netdev_ops.ndo_open = abyss_open;
34672- abyss_netdev_ops.ndo_stop = abyss_close;
34673+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34674+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34675+ pax_close_kernel();
34676
34677 return pci_register_driver(&abyss_driver);
34678 }
34679diff -urNp linux-2.6.32.45/drivers/net/tokenring/madgemc.c linux-2.6.32.45/drivers/net/tokenring/madgemc.c
34680--- linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-03-27 14:31:47.000000000 -0400
34681+++ linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-08-05 20:33:55.000000000 -0400
34682@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver
34683
34684 static int __init madgemc_init (void)
34685 {
34686- madgemc_netdev_ops = tms380tr_netdev_ops;
34687- madgemc_netdev_ops.ndo_open = madgemc_open;
34688- madgemc_netdev_ops.ndo_stop = madgemc_close;
34689+ pax_open_kernel();
34690+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34691+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34692+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34693+ pax_close_kernel();
34694
34695 return mca_register_driver (&madgemc_driver);
34696 }
34697diff -urNp linux-2.6.32.45/drivers/net/tokenring/proteon.c linux-2.6.32.45/drivers/net/tokenring/proteon.c
34698--- linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-03-27 14:31:47.000000000 -0400
34699+++ linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-08-05 20:33:55.000000000 -0400
34700@@ -353,9 +353,11 @@ static int __init proteon_init(void)
34701 struct platform_device *pdev;
34702 int i, num = 0, err = 0;
34703
34704- proteon_netdev_ops = tms380tr_netdev_ops;
34705- proteon_netdev_ops.ndo_open = proteon_open;
34706- proteon_netdev_ops.ndo_stop = tms380tr_close;
34707+ pax_open_kernel();
34708+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34709+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34710+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34711+ pax_close_kernel();
34712
34713 err = platform_driver_register(&proteon_driver);
34714 if (err)
34715diff -urNp linux-2.6.32.45/drivers/net/tokenring/skisa.c linux-2.6.32.45/drivers/net/tokenring/skisa.c
34716--- linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-03-27 14:31:47.000000000 -0400
34717+++ linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-08-05 20:33:55.000000000 -0400
34718@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34719 struct platform_device *pdev;
34720 int i, num = 0, err = 0;
34721
34722- sk_isa_netdev_ops = tms380tr_netdev_ops;
34723- sk_isa_netdev_ops.ndo_open = sk_isa_open;
34724- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34725+ pax_open_kernel();
34726+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34727+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34728+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34729+ pax_close_kernel();
34730
34731 err = platform_driver_register(&sk_isa_driver);
34732 if (err)
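
The four tokenring hunks above (abyss, madgemc, proteon, skisa) are the same transformation: each driver's netdev_ops copy is now expected to live in read-only data, so the __init code brackets the memcpy() from tms380tr_netdev_ops and the two callback overrides with pax_open_kernel()/pax_close_kernel() and writes the pointers through *(void **) casts. Only as a rough user-space analogy (the real PaX primitives are kernel-internal and work differently), the same "open a short write window, patch the table, close it again" shape can be sketched with mprotect():

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct net_ops {
        int (*ndo_open)(void);
        int (*ndo_stop)(void);
};

static int generic_open(void) { puts("generic open"); return 0; }
static int generic_stop(void) { puts("generic stop"); return 0; }
static int board_open(void)   { puts("board-specific open"); return 0; }

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        struct net_ops template_ops = { generic_open, generic_stop };

        /* The ops table lives in a page we normally keep read-only. */
        struct net_ops *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
                return 1;

        mprotect(ops, pagesz, PROT_READ | PROT_WRITE);  /* "open" the window */
        memcpy(ops, &template_ops, sizeof(*ops));
        ops->ndo_open = board_open;                     /* patch one callback */
        mprotect(ops, pagesz, PROT_READ);               /* "close" it again */

        ops->ndo_open();
        ops->ndo_stop();
        return 0;
}
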
34733diff -urNp linux-2.6.32.45/drivers/net/tulip/de2104x.c linux-2.6.32.45/drivers/net/tulip/de2104x.c
34734--- linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
34735+++ linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
34736@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
34737 struct de_srom_info_leaf *il;
34738 void *bufp;
34739
34740+ pax_track_stack();
34741+
34742 /* download entire eeprom */
34743 for (i = 0; i < DE_EEPROM_WORDS; i++)
34744 ((__le16 *)ee_data)[i] =
34745diff -urNp linux-2.6.32.45/drivers/net/tulip/de4x5.c linux-2.6.32.45/drivers/net/tulip/de4x5.c
34746--- linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
34747+++ linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
34748@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
34749 for (i=0; i<ETH_ALEN; i++) {
34750 tmp.addr[i] = dev->dev_addr[i];
34751 }
34752- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34753+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34754 break;
34755
34756 case DE4X5_SET_HWADDR: /* Set the hardware address */
34757@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
34758 spin_lock_irqsave(&lp->lock, flags);
34759 memcpy(&statbuf, &lp->pktStats, ioc->len);
34760 spin_unlock_irqrestore(&lp->lock, flags);
34761- if (copy_to_user(ioc->data, &statbuf, ioc->len))
34762+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34763 return -EFAULT;
34764 break;
34765 }
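
The two de4x5_ioctl() hunks above bound the caller-controlled ioc->len before copy_to_user() so a request can no longer copy out more bytes than the fixed-size tmp.addr or statbuf objects actually hold. A stand-alone sketch of that check, with a hypothetical get_hwaddr() helper and plain memcpy() standing in for copy_to_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct ioctl_req {                      /* stand-in for the ioctl block */
        void *data;
        size_t len;
};

/* Copy out at most sizeof(addr) bytes; reject oversized caller lengths
 * instead of reading past the end of the source buffer. */
static int get_hwaddr(const unsigned char addr[ETH_ALEN], struct ioctl_req *ioc)
{
        if (ioc->len > ETH_ALEN)
                return -EFAULT;
        memcpy(ioc->data, addr, ioc->len);
        return 0;
}

int main(void)
{
        unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned char out[ETH_ALEN];
        struct ioctl_req ok  = { out, sizeof(out) };
        struct ioctl_req bad = { out, 64 };

        printf("ok:  %d\n", get_hwaddr(mac, &ok));      /* 0 */
        printf("bad: %d\n", get_hwaddr(mac, &bad));     /* -EFAULT */
        return 0;
}
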
34766diff -urNp linux-2.6.32.45/drivers/net/usb/hso.c linux-2.6.32.45/drivers/net/usb/hso.c
34767--- linux-2.6.32.45/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
34768+++ linux-2.6.32.45/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
34769@@ -71,7 +71,7 @@
34770 #include <asm/byteorder.h>
34771 #include <linux/serial_core.h>
34772 #include <linux/serial.h>
34773-
34774+#include <asm/local.h>
34775
34776 #define DRIVER_VERSION "1.2"
34777 #define MOD_AUTHOR "Option Wireless"
34778@@ -258,7 +258,7 @@ struct hso_serial {
34779
34780 /* from usb_serial_port */
34781 struct tty_struct *tty;
34782- int open_count;
34783+ local_t open_count;
34784 spinlock_t serial_lock;
34785
34786 int (*write_data) (struct hso_serial *serial);
34787@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
34788 struct urb *urb;
34789
34790 urb = serial->rx_urb[0];
34791- if (serial->open_count > 0) {
34792+ if (local_read(&serial->open_count) > 0) {
34793 count = put_rxbuf_data(urb, serial);
34794 if (count == -1)
34795 return;
34796@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
34797 DUMP1(urb->transfer_buffer, urb->actual_length);
34798
34799 /* Anyone listening? */
34800- if (serial->open_count == 0)
34801+ if (local_read(&serial->open_count) == 0)
34802 return;
34803
34804 if (status == 0) {
34805@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
34806 spin_unlock_irq(&serial->serial_lock);
34807
34808 /* check for port already opened, if not set the termios */
34809- serial->open_count++;
34810- if (serial->open_count == 1) {
34811+ if (local_inc_return(&serial->open_count) == 1) {
34812 tty->low_latency = 1;
34813 serial->rx_state = RX_IDLE;
34814 /* Force default termio settings */
34815@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
34816 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34817 if (result) {
34818 hso_stop_serial_device(serial->parent);
34819- serial->open_count--;
34820+ local_dec(&serial->open_count);
34821 kref_put(&serial->parent->ref, hso_serial_ref_free);
34822 }
34823 } else {
34824@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
34825
34826 /* reset the rts and dtr */
34827 /* do the actual close */
34828- serial->open_count--;
34829+ local_dec(&serial->open_count);
34830
34831- if (serial->open_count <= 0) {
34832- serial->open_count = 0;
34833+ if (local_read(&serial->open_count) <= 0) {
34834+ local_set(&serial->open_count, 0);
34835 spin_lock_irq(&serial->serial_lock);
34836 if (serial->tty == tty) {
34837 serial->tty->driver_data = NULL;
34838@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
34839
34840 /* the actual setup */
34841 spin_lock_irqsave(&serial->serial_lock, flags);
34842- if (serial->open_count)
34843+ if (local_read(&serial->open_count))
34844 _hso_serial_set_termios(tty, old);
34845 else
34846 tty->termios = old;
34847@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
34848 /* Start all serial ports */
34849 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34850 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34851- if (dev2ser(serial_table[i])->open_count) {
34852+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
34853 result =
34854 hso_start_serial_device(serial_table[i], GFP_NOIO);
34855 hso_kick_transmit(dev2ser(serial_table[i]));
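
In the hso.c hunks above the per-port open_count goes from a plain int to a local_t, and every read-modify-write site becomes local_inc_return()/local_dec()/local_read(), so racing open and close paths cannot lose an update to the counter. A user-space sketch of the same first-open/last-close logic, using C11 atomics as a stand-in for the kernel's local_t helpers (invented serial_open()/serial_close() names):

#include <stdatomic.h>
#include <stdio.h>

/* User-space stand-in for the driver's open_count (a local_t in the patch). */
static atomic_int open_count;

static void serial_open(void)
{
        /* local_inc_return(&serial->open_count) == 1  ->  first opener */
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)
                puts("first open: start the device");
        else
                puts("already open: just attach");
}

static void serial_close(void)
{
        /* local_dec(); local_read() <= 0  ->  last closer tears down */
        if (atomic_fetch_sub(&open_count, 1) - 1 <= 0) {
                atomic_store(&open_count, 0);
                puts("last close: stop the device");
        }
}

int main(void)
{
        serial_open();
        serial_open();
        serial_close();
        serial_close();
        return 0;
}
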
34856diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-config.h linux-2.6.32.45/drivers/net/vxge/vxge-config.h
34857--- linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-03-27 14:31:47.000000000 -0400
34858+++ linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-08-05 20:33:55.000000000 -0400
34859@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
34860 void (*link_down)(struct __vxge_hw_device *devh);
34861 void (*crit_err)(struct __vxge_hw_device *devh,
34862 enum vxge_hw_event type, u64 ext_data);
34863-};
34864+} __no_const;
34865
34866 /*
34867 * struct __vxge_hw_blockpool_entry - Block private data structure
34868diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-main.c linux-2.6.32.45/drivers/net/vxge/vxge-main.c
34869--- linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
34870+++ linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
34871@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
34872 struct sk_buff *completed[NR_SKB_COMPLETED];
34873 int more;
34874
34875+ pax_track_stack();
34876+
34877 do {
34878 more = 0;
34879 skb_ptr = completed;
34880@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
34881 u8 mtable[256] = {0}; /* CPU to vpath mapping */
34882 int index;
34883
34884+ pax_track_stack();
34885+
34886 /*
34887 * Filling
34888 * - itable with bucket numbers
34889diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h
34890--- linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-03-27 14:31:47.000000000 -0400
34891+++ linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:33:55.000000000 -0400
34892@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
34893 struct vxge_hw_mempool_dma *dma_object,
34894 u32 index,
34895 u32 is_last);
34896-};
34897+} __no_const;
34898
34899 void
34900 __vxge_hw_mempool_destroy(
34901diff -urNp linux-2.6.32.45/drivers/net/wan/cycx_x25.c linux-2.6.32.45/drivers/net/wan/cycx_x25.c
34902--- linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
34903+++ linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
34904@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
34905 unsigned char hex[1024],
34906 * phex = hex;
34907
34908+ pax_track_stack();
34909+
34910 if (len >= (sizeof(hex) / 2))
34911 len = (sizeof(hex) / 2) - 1;
34912
34913diff -urNp linux-2.6.32.45/drivers/net/wan/hdlc_x25.c linux-2.6.32.45/drivers/net/wan/hdlc_x25.c
34914--- linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-03-27 14:31:47.000000000 -0400
34915+++ linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-08-05 20:33:55.000000000 -0400
34916@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
34917
34918 static int x25_open(struct net_device *dev)
34919 {
34920- struct lapb_register_struct cb;
34921+ static struct lapb_register_struct cb = {
34922+ .connect_confirmation = x25_connected,
34923+ .connect_indication = x25_connected,
34924+ .disconnect_confirmation = x25_disconnected,
34925+ .disconnect_indication = x25_disconnected,
34926+ .data_indication = x25_data_indication,
34927+ .data_transmit = x25_data_transmit
34928+ };
34929 int result;
34930
34931- cb.connect_confirmation = x25_connected;
34932- cb.connect_indication = x25_connected;
34933- cb.disconnect_confirmation = x25_disconnected;
34934- cb.disconnect_indication = x25_disconnected;
34935- cb.data_indication = x25_data_indication;
34936- cb.data_transmit = x25_data_transmit;
34937-
34938 result = lapb_register(dev, &cb);
34939 if (result != LAPB_OK)
34940 return result;
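
The hdlc_x25.c hunk above replaces the lapb_register_struct that x25_open() used to fill in field by field on the stack with a function-local static struct built from designated initializers, so the callback table exists exactly once and is never written at run time. A compact sketch of the same shape, with invented lapb_callbacks/lapb_register() stand-ins (the real lapb_register() also takes the net_device):

#include <stdio.h>

struct lapb_callbacks {
        void (*connected)(int reason);
        void (*disconnected)(int reason);
        void (*data_indication)(const char *buf, int len);
};

static void x25_connected(int reason)    { printf("connected (%d)\n", reason); }
static void x25_disconnected(int reason) { printf("disconnected (%d)\n", reason); }
static void x25_data_ind(const char *buf, int len)
{
        (void)buf;
        printf("rx %d bytes\n", len);
}

/* Toy registration: just exercise the callbacks it was handed. */
static int lapb_register(const struct lapb_callbacks *cb)
{
        cb->connected(0);
        cb->data_indication("frame", 5);
        cb->disconnected(0);
        return 0;
}

static int x25_open(void)
{
        /* Built at compile time instead of filled in on the stack at every
         * open; the function pointers are never modified afterwards. */
        static const struct lapb_callbacks cb = {
                .connected       = x25_connected,
                .disconnected    = x25_disconnected,
                .data_indication = x25_data_ind,
        };

        return lapb_register(&cb);
}

int main(void)
{
        return x25_open();
}
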
34941diff -urNp linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c
34942--- linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
34943+++ linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
34944@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
34945 int do_autopm = 1;
34946 DECLARE_COMPLETION_ONSTACK(notif_completion);
34947
34948+ pax_track_stack();
34949+
34950 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
34951 i2400m, ack, ack_size);
34952 BUG_ON(_ack == i2400m->bm_ack_buf);
34953diff -urNp linux-2.6.32.45/drivers/net/wireless/airo.c linux-2.6.32.45/drivers/net/wireless/airo.c
34954--- linux-2.6.32.45/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
34955+++ linux-2.6.32.45/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
34956@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
34957 BSSListElement * loop_net;
34958 BSSListElement * tmp_net;
34959
34960+ pax_track_stack();
34961+
34962 /* Blow away current list of scan results */
34963 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
34964 list_move_tail (&loop_net->list, &ai->network_free_list);
34965@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
34966 WepKeyRid wkr;
34967 int rc;
34968
34969+ pax_track_stack();
34970+
34971 memset( &mySsid, 0, sizeof( mySsid ) );
34972 kfree (ai->flash);
34973 ai->flash = NULL;
34974@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
34975 __le32 *vals = stats.vals;
34976 int len;
34977
34978+ pax_track_stack();
34979+
34980 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34981 return -ENOMEM;
34982 data = (struct proc_data *)file->private_data;
34983@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
34984 /* If doLoseSync is not 1, we won't do a Lose Sync */
34985 int doLoseSync = -1;
34986
34987+ pax_track_stack();
34988+
34989 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34990 return -ENOMEM;
34991 data = (struct proc_data *)file->private_data;
34992@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
34993 int i;
34994 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
34995
34996+ pax_track_stack();
34997+
34998 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
34999 if (!qual)
35000 return -ENOMEM;
35001@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
35002 CapabilityRid cap_rid;
35003 __le32 *vals = stats_rid.vals;
35004
35005+ pax_track_stack();
35006+
35007 /* Get stats out of the card */
35008 clear_bit(JOB_WSTATS, &local->jobs);
35009 if (local->power.event) {
35010diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c
35011--- linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
35012+++ linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
35013@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
35014 unsigned int v;
35015 u64 tsf;
35016
35017+ pax_track_stack();
35018+
35019 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
35020 len += snprintf(buf+len, sizeof(buf)-len,
35021 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
35022@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
35023 unsigned int len = 0;
35024 unsigned int i;
35025
35026+ pax_track_stack();
35027+
35028 len += snprintf(buf+len, sizeof(buf)-len,
35029 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
35030
35031diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c
35032--- linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
35033+++ linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
35034@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
35035 char buf[512];
35036 unsigned int len = 0;
35037
35038+ pax_track_stack();
35039+
35040 len += snprintf(buf + len, sizeof(buf) - len,
35041 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
35042 len += snprintf(buf + len, sizeof(buf) - len,
35043@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
35044 int i;
35045 u8 addr[ETH_ALEN];
35046
35047+ pax_track_stack();
35048+
35049 len += snprintf(buf + len, sizeof(buf) - len,
35050 "primary: %s (%s chan=%d ht=%d)\n",
35051 wiphy_name(sc->pri_wiphy->hw->wiphy),
35052diff -urNp linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c
35053--- linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35054+++ linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35055@@ -43,7 +43,7 @@ static struct dentry *rootdir;
35056 struct b43_debugfs_fops {
35057 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
35058 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
35059- struct file_operations fops;
35060+ const struct file_operations fops;
35061 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
35062 size_t file_struct_offset;
35063 };
35064diff -urNp linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c
35065--- linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35066+++ linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35067@@ -44,7 +44,7 @@ static struct dentry *rootdir;
35068 struct b43legacy_debugfs_fops {
35069 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
35070 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
35071- struct file_operations fops;
35072+ const struct file_operations fops;
35073 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
35074 size_t file_struct_offset;
35075 /* Take wl->irq_lock before calling read/write? */
35076diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c
35077--- linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
35078+++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
35079@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
35080 int err;
35081 DECLARE_SSID_BUF(ssid);
35082
35083+ pax_track_stack();
35084+
35085 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
35086
35087 if (ssid_len)
35088@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
35089 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
35090 int err;
35091
35092+ pax_track_stack();
35093+
35094 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
35095 idx, keylen, len);
35096
35097diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c
35098--- linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
35099+++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
35100@@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
35101 unsigned long flags;
35102 DECLARE_SSID_BUF(ssid);
35103
35104+ pax_track_stack();
35105+
35106 LIBIPW_DEBUG_SCAN("'%s' (%pM"
35107 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
35108 print_ssid(ssid, info_element->data, info_element->len),
35109diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c
35110--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
35111+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
35112@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
35113 },
35114 };
35115
35116-static struct iwl_ops iwl1000_ops = {
35117+static const struct iwl_ops iwl1000_ops = {
35118 .ucode = &iwl5000_ucode,
35119 .lib = &iwl1000_lib,
35120 .hcmd = &iwl5000_hcmd,
35121diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c
35122--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-03-27 14:31:47.000000000 -0400
35123+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-08-05 20:33:55.000000000 -0400
35124@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_
35125 */
35126 if (iwl3945_mod_params.disable_hw_scan) {
35127 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
35128- iwl3945_hw_ops.hw_scan = NULL;
35129+ pax_open_kernel();
35130+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
35131+ pax_close_kernel();
35132 }
35133
35134
35135diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c
35136--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
35137+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
35138@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
35139 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
35140 };
35141
35142-static struct iwl_ops iwl3945_ops = {
35143+static const struct iwl_ops iwl3945_ops = {
35144 .ucode = &iwl3945_ucode,
35145 .lib = &iwl3945_lib,
35146 .hcmd = &iwl3945_hcmd,
35147diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c
35148--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
35149+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
35150@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
35151 },
35152 };
35153
35154-static struct iwl_ops iwl4965_ops = {
35155+static const struct iwl_ops iwl4965_ops = {
35156 .ucode = &iwl4965_ucode,
35157 .lib = &iwl4965_lib,
35158 .hcmd = &iwl4965_hcmd,
35159diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c
35160--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
35161+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
35162@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
35163 },
35164 };
35165
35166-struct iwl_ops iwl5000_ops = {
35167+const struct iwl_ops iwl5000_ops = {
35168 .ucode = &iwl5000_ucode,
35169 .lib = &iwl5000_lib,
35170 .hcmd = &iwl5000_hcmd,
35171 .utils = &iwl5000_hcmd_utils,
35172 };
35173
35174-static struct iwl_ops iwl5150_ops = {
35175+static const struct iwl_ops iwl5150_ops = {
35176 .ucode = &iwl5000_ucode,
35177 .lib = &iwl5150_lib,
35178 .hcmd = &iwl5000_hcmd,
35179diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c
35180--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
35181+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
35182@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
35183 .calc_rssi = iwl5000_calc_rssi,
35184 };
35185
35186-static struct iwl_ops iwl6000_ops = {
35187+static const struct iwl_ops iwl6000_ops = {
35188 .ucode = &iwl5000_ucode,
35189 .lib = &iwl6000_lib,
35190 .hcmd = &iwl5000_hcmd,
35191diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c
35192--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-03-27 14:31:47.000000000 -0400
35193+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:33:55.000000000 -0400
35194@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev
35195 if (iwl_debug_level & IWL_DL_INFO)
35196 dev_printk(KERN_DEBUG, &(pdev->dev),
35197 "Disabling hw_scan\n");
35198- iwl_hw_ops.hw_scan = NULL;
35199+ pax_open_kernel();
35200+ *(void **)&iwl_hw_ops.hw_scan = NULL;
35201+ pax_close_kernel();
35202 }
35203
35204 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
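
Both iwlwifi probe hunks clear hw_scan inside an ops table that KERNEXEC keeps read-only, so the store is bracketed by pax_open_kernel()/pax_close_kernel() and performed through a void ** cast. The sketch below mimics the open–write–close sequence with user-space mmap/mprotect purely as an analogy; the page-protection mechanics shown are an assumption for illustration, not how the kernel helpers are implemented:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct hw_ops { int (*hw_scan)(void); };

static int real_scan(void) { return 42; }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	struct hw_ops *ops = mmap(NULL, (size_t)pagesz, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;
	ops->hw_scan = real_scan;
	mprotect(ops, (size_t)pagesz, PROT_READ);                /* normally read-only */

	mprotect(ops, (size_t)pagesz, PROT_READ | PROT_WRITE);   /* "pax_open_kernel()" */
	*(void **)&ops->hw_scan = NULL;                          /* disable the hook */
	mprotect(ops, (size_t)pagesz, PROT_READ);                /* "pax_close_kernel()" */

	printf("hw_scan is %s\n", ops->hw_scan ? "set" : "NULL");
	return 0;
}
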
35205diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35206--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
35207+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
35208@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
35209 u8 active_index = 0;
35210 s32 tpt = 0;
35211
35212+ pax_track_stack();
35213+
35214 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
35215
35216 if (!ieee80211_is_data(hdr->frame_control) ||
35217@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
35218 u8 valid_tx_ant = 0;
35219 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
35220
35221+ pax_track_stack();
35222+
35223 /* Override starting rate (index 0) if needed for debug purposes */
35224 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
35225
35226diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35227--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
35228+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
35229@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
35230 int pos = 0;
35231 const size_t bufsz = sizeof(buf);
35232
35233+ pax_track_stack();
35234+
35235 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
35236 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
35237 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
35238@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
35239 const size_t bufsz = sizeof(buf);
35240 ssize_t ret;
35241
35242+ pax_track_stack();
35243+
35244 for (i = 0; i < AC_NUM; i++) {
35245 pos += scnprintf(buf + pos, bufsz - pos,
35246 "\tcw_min\tcw_max\taifsn\ttxop\n");
35247diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h
35248--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
35249+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
35250@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
35251 #endif
35252
35253 #else
35254-#define IWL_DEBUG(__priv, level, fmt, args...)
35255-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
35256+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
35257+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
35258 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
35259 void *p, u32 len)
35260 {}
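
The iwl-debug.h hunk swaps the empty no-debug macro bodies for do {} while (0), the usual way to keep a disabled function-like macro behaving as a single statement (and to avoid empty-body warnings) wherever it is used. A small illustration with macro names of its own:

#include <stdio.h>

#define DBG_EMPTY(fmt, ...)
#define DBG_SAFE(fmt, ...)  do {} while (0)

int main(void)
{
	int err = 0;

	if (err)
		DBG_EMPTY("err=%d\n", err);	/* expands to "if (err) ;" - empty-body warning territory */

	if (err)
		DBG_SAFE("err=%d\n", err);	/* still one well-formed statement */
	else
		printf("no error\n");

	return 0;
}
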
35261diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h
35262--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
35263+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
35264@@ -68,7 +68,7 @@ struct iwl_tx_queue;
35265
35266 /* shared structures from iwl-5000.c */
35267 extern struct iwl_mod_params iwl50_mod_params;
35268-extern struct iwl_ops iwl5000_ops;
35269+extern const struct iwl_ops iwl5000_ops;
35270 extern struct iwl_ucode_ops iwl5000_ucode;
35271 extern struct iwl_lib_ops iwl5000_lib;
35272 extern struct iwl_hcmd_ops iwl5000_hcmd;
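
A large share of the hunks in this section simply add const to operations tables — the iwl_ops definitions above and their extern declaration here, and later the backlight_ops, sysfs_ops and file_operations instances — so the function-pointer tables land in read-only data and cannot be retargeted at run time. Definition and declaration have to agree on the qualifier, which is why both sides are patched. A stand-alone sketch with invented driver names:

#include <stdio.h>

struct backlight_ops {
	int (*get_brightness)(void);
	int (*update_status)(int level);
};

static int read_brightness(void)    { return 7; }
static int update_status(int level) { printf("level=%d\n", level); return 0; }

/* definition and any extern declaration must agree on the const */
static const struct backlight_ops acer_bl_ops = {
	.get_brightness = read_brightness,
	.update_status  = update_status,
};

int main(void)
{
	/* acer_bl_ops.update_status = NULL;   <- now a compile-time error */
	return acer_bl_ops.update_status(acer_bl_ops.get_brightness());
}
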
35273diff -urNp linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c
35274--- linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35275+++ linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
35276@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
35277 int buf_len = 512;
35278 size_t len = 0;
35279
35280+ pax_track_stack();
35281+
35282 if (*ppos != 0)
35283 return 0;
35284 if (count < sizeof(buf))
35285diff -urNp linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c
35286--- linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35287+++ linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35288@@ -708,7 +708,7 @@ out_unlock:
35289 struct lbs_debugfs_files {
35290 const char *name;
35291 int perm;
35292- struct file_operations fops;
35293+ const struct file_operations fops;
35294 };
35295
35296 static const struct lbs_debugfs_files debugfs_files[] = {
35297diff -urNp linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c
35298--- linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
35299+++ linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
35300@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
35301
35302 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
35303
35304- if (rts_threshold < 0 || rts_threshold > 2347)
35305+ if (rts_threshold > 2347)
35306 rts_threshold = 2347;
35307
35308 tmp = cpu_to_le32(rts_threshold);
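
The rndis_wlan change (and the near-identical pnp/resource.c one further down) removes a `< 0` test on an unsigned quantity: the comparison can never be true, so only the upper clamp is kept. A tiny demonstration; the parameter type is assumed unsigned, as the patch implies:

#include <stdio.h>

static unsigned int clamp_rts(unsigned int rts_threshold)
{
	if (rts_threshold > 2347)	/* "rts_threshold < 0" could never fire */
		rts_threshold = 2347;
	return rts_threshold;
}

int main(void)
{
	printf("%u\n", clamp_rts(100));     /* 100  */
	printf("%u\n", clamp_rts(90000));   /* 2347 */
	printf("%u\n", clamp_rts(-1));      /* -1 wraps to UINT_MAX, clamped to 2347 */
	return 0;
}
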
35309diff -urNp linux-2.6.32.45/drivers/oprofile/buffer_sync.c linux-2.6.32.45/drivers/oprofile/buffer_sync.c
35310--- linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
35311+++ linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
35312@@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
35313 if (cookie == NO_COOKIE)
35314 offset = pc;
35315 if (cookie == INVALID_COOKIE) {
35316- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35317+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35318 offset = pc;
35319 }
35320 if (cookie != last_cookie) {
35321@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
35322 /* add userspace sample */
35323
35324 if (!mm) {
35325- atomic_inc(&oprofile_stats.sample_lost_no_mm);
35326+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35327 return 0;
35328 }
35329
35330 cookie = lookup_dcookie(mm, s->eip, &offset);
35331
35332 if (cookie == INVALID_COOKIE) {
35333- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35334+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35335 return 0;
35336 }
35337
35338@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
35339 /* ignore backtraces if failed to add a sample */
35340 if (state == sb_bt_start) {
35341 state = sb_bt_ignore;
35342- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35343+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35344 }
35345 }
35346 release_mm(mm);
35347diff -urNp linux-2.6.32.45/drivers/oprofile/event_buffer.c linux-2.6.32.45/drivers/oprofile/event_buffer.c
35348--- linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
35349+++ linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
35350@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
35351 }
35352
35353 if (buffer_pos == buffer_size) {
35354- atomic_inc(&oprofile_stats.event_lost_overflow);
35355+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35356 return;
35357 }
35358
35359diff -urNp linux-2.6.32.45/drivers/oprofile/oprof.c linux-2.6.32.45/drivers/oprofile/oprof.c
35360--- linux-2.6.32.45/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
35361+++ linux-2.6.32.45/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
35362@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
35363 if (oprofile_ops.switch_events())
35364 return;
35365
35366- atomic_inc(&oprofile_stats.multiplex_counter);
35367+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35368 start_switch_worker();
35369 }
35370
35371diff -urNp linux-2.6.32.45/drivers/oprofile/oprofilefs.c linux-2.6.32.45/drivers/oprofile/oprofilefs.c
35372--- linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
35373+++ linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
35374@@ -187,7 +187,7 @@ static const struct file_operations atom
35375
35376
35377 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35378- char const *name, atomic_t *val)
35379+ char const *name, atomic_unchecked_t *val)
35380 {
35381 struct dentry *d = __oprofilefs_create_file(sb, root, name,
35382 &atomic_ro_fops, 0444);
35383diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.c linux-2.6.32.45/drivers/oprofile/oprofile_stats.c
35384--- linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
35385+++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
35386@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35387 cpu_buf->sample_invalid_eip = 0;
35388 }
35389
35390- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35391- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35392- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35393- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35394- atomic_set(&oprofile_stats.multiplex_counter, 0);
35395+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35396+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35397+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35398+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35399+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35400 }
35401
35402
35403diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.h linux-2.6.32.45/drivers/oprofile/oprofile_stats.h
35404--- linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
35405+++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
35406@@ -13,11 +13,11 @@
35407 #include <asm/atomic.h>
35408
35409 struct oprofile_stat_struct {
35410- atomic_t sample_lost_no_mm;
35411- atomic_t sample_lost_no_mapping;
35412- atomic_t bt_lost_no_mapping;
35413- atomic_t event_lost_overflow;
35414- atomic_t multiplex_counter;
35415+ atomic_unchecked_t sample_lost_no_mm;
35416+ atomic_unchecked_t sample_lost_no_mapping;
35417+ atomic_unchecked_t bt_lost_no_mapping;
35418+ atomic_unchecked_t event_lost_overflow;
35419+ atomic_unchecked_t multiplex_counter;
35420 };
35421
35422 extern struct oprofile_stat_struct oprofile_stats;
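
The oprofile hunks convert pure statistics counters from atomic_t to atomic_unchecked_t so the PaX reference-counter overflow check ignores values that are allowed to wrap. The user-space stub below only mirrors the shape of that API — the type layout and helper names are assumptions taken from the patch, implemented here with GCC's __sync builtins:

#include <stdio.h>

typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_set_unchecked(atomic_unchecked_t *v, int i) { v->counter = i; }
static void atomic_inc_unchecked(atomic_unchecked_t *v) { __sync_fetch_and_add(&v->counter, 1); }
static int  atomic_read_unchecked(const atomic_unchecked_t *v) { return v->counter; }

struct oprofile_stats {
	atomic_unchecked_t sample_lost_no_mm;	/* statistic only: wrapping is harmless */
};

int main(void)
{
	struct oprofile_stats st;

	atomic_set_unchecked(&st.sample_lost_no_mm, 0);
	atomic_inc_unchecked(&st.sample_lost_no_mm);
	printf("lost: %d\n", atomic_read_unchecked(&st.sample_lost_no_mm));
	return 0;
}
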
35423diff -urNp linux-2.6.32.45/drivers/parisc/pdc_stable.c linux-2.6.32.45/drivers/parisc/pdc_stable.c
35424--- linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
35425+++ linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
35426@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
35427 return ret;
35428 }
35429
35430-static struct sysfs_ops pdcspath_attr_ops = {
35431+static const struct sysfs_ops pdcspath_attr_ops = {
35432 .show = pdcspath_attr_show,
35433 .store = pdcspath_attr_store,
35434 };
35435diff -urNp linux-2.6.32.45/drivers/parport/procfs.c linux-2.6.32.45/drivers/parport/procfs.c
35436--- linux-2.6.32.45/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
35437+++ linux-2.6.32.45/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
35438@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
35439
35440 *ppos += len;
35441
35442- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35443+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35444 }
35445
35446 #ifdef CONFIG_PARPORT_1284
35447@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
35448
35449 *ppos += len;
35450
35451- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35452+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35453 }
35454 #endif /* IEEE1284.3 support. */
35455
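
Both parport procfs hunks refuse to pass a length larger than the local buffer to copy_to_user(), turning a potential over-read of stack memory into -EFAULT. A user-space sketch of the added bound, with memcpy standing in for copy_to_user (an assumption made only to keep the example runnable):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int emit(char *result, size_t result_sz, size_t requested_len)
{
	char buffer[20];
	size_t len = requested_len;

	snprintf(buffer, sizeof(buffer), "dev: %d.%d\n", 0, 4);

	if (len > sizeof(buffer))	/* the check the patch adds */
		return -EFAULT;
	if (len > result_sz)
		return -EFAULT;
	memcpy(result, buffer, len);	/* copy_to_user() analogue */
	return 0;
}

int main(void)
{
	char out[64];

	printf("ok:   %d\n", emit(out, sizeof(out), 9));
	printf("huge: %d\n", emit(out, sizeof(out), 4096));  /* rejected */
	return 0;
}
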
35456diff -urNp linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c
35457--- linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
35458+++ linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
35459@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
35460 }
35461
35462
35463-static struct acpi_dock_ops acpiphp_dock_ops = {
35464+static const struct acpi_dock_ops acpiphp_dock_ops = {
35465 .handler = handle_hotplug_event_func,
35466 };
35467
35468diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h
35469--- linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-03-27 14:31:47.000000000 -0400
35470+++ linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:33:55.000000000 -0400
35471@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35472 int (*hardware_test) (struct slot* slot, u32 value);
35473 u8 (*get_power) (struct slot* slot);
35474 int (*set_power) (struct slot* slot, int value);
35475-};
35476+} __no_const;
35477
35478 struct cpci_hp_controller {
35479 unsigned int irq;
35480diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c
35481--- linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
35482+++ linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
35483@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
35484
35485 void compaq_nvram_init (void __iomem *rom_start)
35486 {
35487+
35488+#ifndef CONFIG_PAX_KERNEXEC
35489 if (rom_start) {
35490 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35491 }
35492+#endif
35493+
35494 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35495
35496 /* initialize our int15 lock */
35497diff -urNp linux-2.6.32.45/drivers/pci/hotplug/fakephp.c linux-2.6.32.45/drivers/pci/hotplug/fakephp.c
35498--- linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
35499+++ linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
35500@@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
35501 }
35502
35503 static struct kobj_type legacy_ktype = {
35504- .sysfs_ops = &(struct sysfs_ops){
35505+ .sysfs_ops = &(const struct sysfs_ops){
35506 .store = legacy_store, .show = legacy_show
35507 },
35508 .release = &legacy_release,
35509diff -urNp linux-2.6.32.45/drivers/pci/intel-iommu.c linux-2.6.32.45/drivers/pci/intel-iommu.c
35510--- linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
35511+++ linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
35512@@ -2643,7 +2643,7 @@ error:
35513 return 0;
35514 }
35515
35516-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
35517+dma_addr_t intel_map_page(struct device *dev, struct page *page,
35518 unsigned long offset, size_t size,
35519 enum dma_data_direction dir,
35520 struct dma_attrs *attrs)
35521@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
35522 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
35523 }
35524
35525-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35526+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35527 size_t size, enum dma_data_direction dir,
35528 struct dma_attrs *attrs)
35529 {
35530@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
35531 }
35532 }
35533
35534-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
35535+void *intel_alloc_coherent(struct device *hwdev, size_t size,
35536 dma_addr_t *dma_handle, gfp_t flags)
35537 {
35538 void *vaddr;
35539@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
35540 return NULL;
35541 }
35542
35543-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35544+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35545 dma_addr_t dma_handle)
35546 {
35547 int order;
35548@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
35549 free_pages((unsigned long)vaddr, order);
35550 }
35551
35552-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35553+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35554 int nelems, enum dma_data_direction dir,
35555 struct dma_attrs *attrs)
35556 {
35557@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
35558 return nelems;
35559 }
35560
35561-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35562+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35563 enum dma_data_direction dir, struct dma_attrs *attrs)
35564 {
35565 int i;
35566@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
35567 return nelems;
35568 }
35569
35570-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35571+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35572 {
35573 return !dma_addr;
35574 }
35575
35576-struct dma_map_ops intel_dma_ops = {
35577+const struct dma_map_ops intel_dma_ops = {
35578 .alloc_coherent = intel_alloc_coherent,
35579 .free_coherent = intel_free_coherent,
35580 .map_sg = intel_map_sg,
35581diff -urNp linux-2.6.32.45/drivers/pci/pcie/aspm.c linux-2.6.32.45/drivers/pci/pcie/aspm.c
35582--- linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
35583+++ linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
35584@@ -27,9 +27,9 @@
35585 #define MODULE_PARAM_PREFIX "pcie_aspm."
35586
35587 /* Note: those are not register definitions */
35588-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35589-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35590-#define ASPM_STATE_L1 (4) /* L1 state */
35591+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35592+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35593+#define ASPM_STATE_L1 (4U) /* L1 state */
35594 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35595 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35596
35597diff -urNp linux-2.6.32.45/drivers/pci/probe.c linux-2.6.32.45/drivers/pci/probe.c
35598--- linux-2.6.32.45/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
35599+++ linux-2.6.32.45/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
35600@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
35601 return ret;
35602 }
35603
35604-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
35605+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
35606 struct device_attribute *attr,
35607 char *buf)
35608 {
35609 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
35610 }
35611
35612-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
35613+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
35614 struct device_attribute *attr,
35615 char *buf)
35616 {
35617diff -urNp linux-2.6.32.45/drivers/pci/proc.c linux-2.6.32.45/drivers/pci/proc.c
35618--- linux-2.6.32.45/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
35619+++ linux-2.6.32.45/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
35620@@ -480,7 +480,16 @@ static const struct file_operations proc
35621 static int __init pci_proc_init(void)
35622 {
35623 struct pci_dev *dev = NULL;
35624+
35625+#ifdef CONFIG_GRKERNSEC_PROC_ADD
35626+#ifdef CONFIG_GRKERNSEC_PROC_USER
35627+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35628+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35629+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35630+#endif
35631+#else
35632 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35633+#endif
35634 proc_create("devices", 0, proc_bus_pci_dir,
35635 &proc_bus_pci_dev_operations);
35636 proc_initialized = 1;
35637diff -urNp linux-2.6.32.45/drivers/pci/slot.c linux-2.6.32.45/drivers/pci/slot.c
35638--- linux-2.6.32.45/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
35639+++ linux-2.6.32.45/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
35640@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
35641 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
35642 }
35643
35644-static struct sysfs_ops pci_slot_sysfs_ops = {
35645+static const struct sysfs_ops pci_slot_sysfs_ops = {
35646 .show = pci_slot_attr_show,
35647 .store = pci_slot_attr_store,
35648 };
35649diff -urNp linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c
35650--- linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
35651+++ linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
35652@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
35653 return -EFAULT;
35654 }
35655 }
35656- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35657+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35658 if (!buf)
35659 return -ENOMEM;
35660
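
The pcmcia ioctl hunk switches the argument buffer from kmalloc to kzalloc, so any field the handler never writes goes back to user space as zeroes rather than stale heap contents. A user-space analogue using calloc; the structure layout is invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct ds_ioctl_arg {
	unsigned int handle;
	char         pad[32];	/* only partially written by some commands */
};

int main(void)
{
	struct ds_ioctl_arg *buf = calloc(1, sizeof(*buf));	/* kzalloc analogue */

	if (!buf)
		return 1;
	buf->handle = 5;	/* pad[] stays all zero instead of old heap data */
	printf("handle=%u pad[0]=%d\n", buf->handle, buf->pad[0]);
	free(buf);
	return 0;
}
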
35661diff -urNp linux-2.6.32.45/drivers/platform/x86/acer-wmi.c linux-2.6.32.45/drivers/platform/x86/acer-wmi.c
35662--- linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
35663+++ linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
35664@@ -918,7 +918,7 @@ static int update_bl_status(struct backl
35665 return 0;
35666 }
35667
35668-static struct backlight_ops acer_bl_ops = {
35669+static const struct backlight_ops acer_bl_ops = {
35670 .get_brightness = read_brightness,
35671 .update_status = update_bl_status,
35672 };
35673diff -urNp linux-2.6.32.45/drivers/platform/x86/asus_acpi.c linux-2.6.32.45/drivers/platform/x86/asus_acpi.c
35674--- linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
35675+++ linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
35676@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
35677 return 0;
35678 }
35679
35680-static struct backlight_ops asus_backlight_data = {
35681+static const struct backlight_ops asus_backlight_data = {
35682 .get_brightness = read_brightness,
35683 .update_status = set_brightness_status,
35684 };
35685diff -urNp linux-2.6.32.45/drivers/platform/x86/asus-laptop.c linux-2.6.32.45/drivers/platform/x86/asus-laptop.c
35686--- linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
35687+++ linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
35688@@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
35689 */
35690 static int read_brightness(struct backlight_device *bd);
35691 static int update_bl_status(struct backlight_device *bd);
35692-static struct backlight_ops asusbl_ops = {
35693+static const struct backlight_ops asusbl_ops = {
35694 .get_brightness = read_brightness,
35695 .update_status = update_bl_status,
35696 };
35697diff -urNp linux-2.6.32.45/drivers/platform/x86/compal-laptop.c linux-2.6.32.45/drivers/platform/x86/compal-laptop.c
35698--- linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
35699+++ linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
35700@@ -163,7 +163,7 @@ static int bl_update_status(struct backl
35701 return set_lcd_level(b->props.brightness);
35702 }
35703
35704-static struct backlight_ops compalbl_ops = {
35705+static const struct backlight_ops compalbl_ops = {
35706 .get_brightness = bl_get_brightness,
35707 .update_status = bl_update_status,
35708 };
35709diff -urNp linux-2.6.32.45/drivers/platform/x86/dell-laptop.c linux-2.6.32.45/drivers/platform/x86/dell-laptop.c
35710--- linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
35711+++ linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
35712@@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
35713 return buffer.output[1];
35714 }
35715
35716-static struct backlight_ops dell_ops = {
35717+static const struct backlight_ops dell_ops = {
35718 .get_brightness = dell_get_intensity,
35719 .update_status = dell_send_intensity,
35720 };
35721diff -urNp linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c
35722--- linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
35723+++ linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
35724@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
35725 */
35726 static int read_brightness(struct backlight_device *bd);
35727 static int update_bl_status(struct backlight_device *bd);
35728-static struct backlight_ops eeepcbl_ops = {
35729+static const struct backlight_ops eeepcbl_ops = {
35730 .get_brightness = read_brightness,
35731 .update_status = update_bl_status,
35732 };
35733diff -urNp linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c
35734--- linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
35735+++ linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
35736@@ -436,7 +436,7 @@ static int bl_update_status(struct backl
35737 return ret;
35738 }
35739
35740-static struct backlight_ops fujitsubl_ops = {
35741+static const struct backlight_ops fujitsubl_ops = {
35742 .get_brightness = bl_get_brightness,
35743 .update_status = bl_update_status,
35744 };
35745diff -urNp linux-2.6.32.45/drivers/platform/x86/msi-laptop.c linux-2.6.32.45/drivers/platform/x86/msi-laptop.c
35746--- linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
35747+++ linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
35748@@ -161,7 +161,7 @@ static int bl_update_status(struct backl
35749 return set_lcd_level(b->props.brightness);
35750 }
35751
35752-static struct backlight_ops msibl_ops = {
35753+static const struct backlight_ops msibl_ops = {
35754 .get_brightness = bl_get_brightness,
35755 .update_status = bl_update_status,
35756 };
35757diff -urNp linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c
35758--- linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
35759+++ linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
35760@@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
35761 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
35762 }
35763
35764-static struct backlight_ops pcc_backlight_ops = {
35765+static const struct backlight_ops pcc_backlight_ops = {
35766 .get_brightness = bl_get,
35767 .update_status = bl_set_status,
35768 };
35769diff -urNp linux-2.6.32.45/drivers/platform/x86/sony-laptop.c linux-2.6.32.45/drivers/platform/x86/sony-laptop.c
35770--- linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
35771+++ linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
35772@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
35773 }
35774
35775 static struct backlight_device *sony_backlight_device;
35776-static struct backlight_ops sony_backlight_ops = {
35777+static const struct backlight_ops sony_backlight_ops = {
35778 .update_status = sony_backlight_update_status,
35779 .get_brightness = sony_backlight_get_brightness,
35780 };
35781diff -urNp linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c
35782--- linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
35783+++ linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:33:55.000000000 -0400
35784@@ -2137,7 +2137,7 @@ static int hotkey_mask_get(void)
35785 return 0;
35786 }
35787
35788-void static hotkey_mask_warn_incomplete_mask(void)
35789+static void hotkey_mask_warn_incomplete_mask(void)
35790 {
35791 /* log only what the user can fix... */
35792 const u32 wantedmask = hotkey_driver_mask &
35793@@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
35794 BACKLIGHT_UPDATE_HOTKEY);
35795 }
35796
35797-static struct backlight_ops ibm_backlight_data = {
35798+static const struct backlight_ops ibm_backlight_data = {
35799 .get_brightness = brightness_get,
35800 .update_status = brightness_update_status,
35801 };
35802diff -urNp linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c
35803--- linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
35804+++ linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
35805@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
35806 return AE_OK;
35807 }
35808
35809-static struct backlight_ops toshiba_backlight_data = {
35810+static const struct backlight_ops toshiba_backlight_data = {
35811 .get_brightness = get_lcd,
35812 .update_status = set_lcd_status,
35813 };
35814diff -urNp linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c
35815--- linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
35816+++ linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
35817@@ -60,7 +60,7 @@ do { \
35818 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35819 } while(0)
35820
35821-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35822+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35823 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35824
35825 /*
35826@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
35827
35828 cpu = get_cpu();
35829 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35830+
35831+ pax_open_kernel();
35832 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35833+ pax_close_kernel();
35834
35835 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35836 spin_lock_irqsave(&pnp_bios_lock, flags);
35837@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
35838 :"memory");
35839 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35840
35841+ pax_open_kernel();
35842 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35843+ pax_close_kernel();
35844+
35845 put_cpu();
35846
35847 /* If we get here and this is set then the PnP BIOS faulted on us. */
35848@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
35849 return status;
35850 }
35851
35852-void pnpbios_calls_init(union pnp_bios_install_struct *header)
35853+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35854 {
35855 int i;
35856
35857@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
35858 pnp_bios_callpoint.offset = header->fields.pm16offset;
35859 pnp_bios_callpoint.segment = PNP_CS16;
35860
35861+ pax_open_kernel();
35862+
35863 for_each_possible_cpu(i) {
35864 struct desc_struct *gdt = get_cpu_gdt_table(i);
35865 if (!gdt)
35866@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
35867 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35868 (unsigned long)__va(header->fields.pm16dseg));
35869 }
35870+
35871+ pax_close_kernel();
35872 }
35873diff -urNp linux-2.6.32.45/drivers/pnp/resource.c linux-2.6.32.45/drivers/pnp/resource.c
35874--- linux-2.6.32.45/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
35875+++ linux-2.6.32.45/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
35876@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
35877 return 1;
35878
35879 /* check if the resource is valid */
35880- if (*irq < 0 || *irq > 15)
35881+ if (*irq > 15)
35882 return 0;
35883
35884 /* check if the resource is reserved */
35885@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
35886 return 1;
35887
35888 /* check if the resource is valid */
35889- if (*dma < 0 || *dma == 4 || *dma > 7)
35890+ if (*dma == 4 || *dma > 7)
35891 return 0;
35892
35893 /* check if the resource is reserved */
35894diff -urNp linux-2.6.32.45/drivers/power/bq27x00_battery.c linux-2.6.32.45/drivers/power/bq27x00_battery.c
35895--- linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-03-27 14:31:47.000000000 -0400
35896+++ linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-08-05 20:33:55.000000000 -0400
35897@@ -44,7 +44,7 @@ struct bq27x00_device_info;
35898 struct bq27x00_access_methods {
35899 int (*read)(u8 reg, int *rt_value, int b_single,
35900 struct bq27x00_device_info *di);
35901-};
35902+} __no_const;
35903
35904 struct bq27x00_device_info {
35905 struct device *dev;
35906diff -urNp linux-2.6.32.45/drivers/rtc/rtc-dev.c linux-2.6.32.45/drivers/rtc/rtc-dev.c
35907--- linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
35908+++ linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
35909@@ -14,6 +14,7 @@
35910 #include <linux/module.h>
35911 #include <linux/rtc.h>
35912 #include <linux/sched.h>
35913+#include <linux/grsecurity.h>
35914 #include "rtc-core.h"
35915
35916 static dev_t rtc_devt;
35917@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
35918 if (copy_from_user(&tm, uarg, sizeof(tm)))
35919 return -EFAULT;
35920
35921+ gr_log_timechange();
35922+
35923 return rtc_set_time(rtc, &tm);
35924
35925 case RTC_PIE_ON:
35926diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.c linux-2.6.32.45/drivers/s390/cio/qdio_perf.c
35927--- linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
35928+++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
35929@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
35930 static int qdio_perf_proc_show(struct seq_file *m, void *v)
35931 {
35932 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
35933- (long)atomic_long_read(&perf_stats.qdio_int));
35934+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
35935 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
35936- (long)atomic_long_read(&perf_stats.pci_int));
35937+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
35938 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
35939- (long)atomic_long_read(&perf_stats.thin_int));
35940+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
35941 seq_printf(m, "\n");
35942 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
35943- (long)atomic_long_read(&perf_stats.tasklet_inbound));
35944+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
35945 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
35946- (long)atomic_long_read(&perf_stats.tasklet_outbound));
35947+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
35948 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
35949- (long)atomic_long_read(&perf_stats.tasklet_thinint),
35950- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
35951+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
35952+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
35953 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
35954- (long)atomic_long_read(&perf_stats.thinint_inbound),
35955- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
35956+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
35957+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
35958 seq_printf(m, "\n");
35959 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
35960- (long)atomic_long_read(&perf_stats.siga_in));
35961+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
35962 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
35963- (long)atomic_long_read(&perf_stats.siga_out));
35964+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
35965 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
35966- (long)atomic_long_read(&perf_stats.siga_sync));
35967+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
35968 seq_printf(m, "\n");
35969 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
35970- (long)atomic_long_read(&perf_stats.inbound_handler));
35971+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
35972 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
35973- (long)atomic_long_read(&perf_stats.outbound_handler));
35974+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
35975 seq_printf(m, "\n");
35976 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
35977- (long)atomic_long_read(&perf_stats.fast_requeue));
35978+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
35979 seq_printf(m, "Number of outbound target full condition\t: %li\n",
35980- (long)atomic_long_read(&perf_stats.outbound_target_full));
35981+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
35982 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
35983- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
35984+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
35985 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
35986- (long)atomic_long_read(&perf_stats.debug_stop_polling));
35987+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
35988 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
35989- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
35990+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
35991 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
35992- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
35993- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
35994+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
35995+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
35996 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
35997- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
35998- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
35999+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
36000+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
36001 seq_printf(m, "\n");
36002 return 0;
36003 }
36004diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.h linux-2.6.32.45/drivers/s390/cio/qdio_perf.h
36005--- linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
36006+++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
36007@@ -13,46 +13,46 @@
36008
36009 struct qdio_perf_stats {
36010 /* interrupt handler calls */
36011- atomic_long_t qdio_int;
36012- atomic_long_t pci_int;
36013- atomic_long_t thin_int;
36014+ atomic_long_unchecked_t qdio_int;
36015+ atomic_long_unchecked_t pci_int;
36016+ atomic_long_unchecked_t thin_int;
36017
36018 /* tasklet runs */
36019- atomic_long_t tasklet_inbound;
36020- atomic_long_t tasklet_outbound;
36021- atomic_long_t tasklet_thinint;
36022- atomic_long_t tasklet_thinint_loop;
36023- atomic_long_t thinint_inbound;
36024- atomic_long_t thinint_inbound_loop;
36025- atomic_long_t thinint_inbound_loop2;
36026+ atomic_long_unchecked_t tasklet_inbound;
36027+ atomic_long_unchecked_t tasklet_outbound;
36028+ atomic_long_unchecked_t tasklet_thinint;
36029+ atomic_long_unchecked_t tasklet_thinint_loop;
36030+ atomic_long_unchecked_t thinint_inbound;
36031+ atomic_long_unchecked_t thinint_inbound_loop;
36032+ atomic_long_unchecked_t thinint_inbound_loop2;
36033
36034 /* signal adapter calls */
36035- atomic_long_t siga_out;
36036- atomic_long_t siga_in;
36037- atomic_long_t siga_sync;
36038+ atomic_long_unchecked_t siga_out;
36039+ atomic_long_unchecked_t siga_in;
36040+ atomic_long_unchecked_t siga_sync;
36041
36042 /* misc */
36043- atomic_long_t inbound_handler;
36044- atomic_long_t outbound_handler;
36045- atomic_long_t fast_requeue;
36046- atomic_long_t outbound_target_full;
36047+ atomic_long_unchecked_t inbound_handler;
36048+ atomic_long_unchecked_t outbound_handler;
36049+ atomic_long_unchecked_t fast_requeue;
36050+ atomic_long_unchecked_t outbound_target_full;
36051
36052 /* for debugging */
36053- atomic_long_t debug_tl_out_timer;
36054- atomic_long_t debug_stop_polling;
36055- atomic_long_t debug_eqbs_all;
36056- atomic_long_t debug_eqbs_incomplete;
36057- atomic_long_t debug_sqbs_all;
36058- atomic_long_t debug_sqbs_incomplete;
36059+ atomic_long_unchecked_t debug_tl_out_timer;
36060+ atomic_long_unchecked_t debug_stop_polling;
36061+ atomic_long_unchecked_t debug_eqbs_all;
36062+ atomic_long_unchecked_t debug_eqbs_incomplete;
36063+ atomic_long_unchecked_t debug_sqbs_all;
36064+ atomic_long_unchecked_t debug_sqbs_incomplete;
36065 };
36066
36067 extern struct qdio_perf_stats perf_stats;
36068 extern int qdio_performance_stats;
36069
36070-static inline void qdio_perf_stat_inc(atomic_long_t *count)
36071+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
36072 {
36073 if (qdio_performance_stats)
36074- atomic_long_inc(count);
36075+ atomic_long_inc_unchecked(count);
36076 }
36077
36078 int qdio_setup_perf_stats(void);
36079diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h
36080--- linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-03-27 14:31:47.000000000 -0400
36081+++ linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:33:55.000000000 -0400
36082@@ -471,7 +471,7 @@ struct adapter_ops
36083 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36084 /* Administrative operations */
36085 int (*adapter_comm)(struct aac_dev * dev, int comm);
36086-};
36087+} __no_const;
36088
36089 /*
36090 * Define which interrupt handler needs to be installed
36091diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c
36092--- linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
36093+++ linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
36094@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
36095 u32 actual_fibsize64, actual_fibsize = 0;
36096 int i;
36097
36098+ pax_track_stack();
36099
36100 if (dev->in_reset) {
36101 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
36102diff -urNp linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c
36103--- linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
36104+++ linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
36105@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
36106 flash_error_table[i].reason);
36107 }
36108
36109-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
36110+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
36111 asd_show_update_bios, asd_store_update_bios);
36112
36113 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
36114diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h
36115--- linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-03-27 14:31:47.000000000 -0400
36116+++ linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-08-05 20:33:55.000000000 -0400
36117@@ -61,7 +61,7 @@ struct bfa_hwif_s {
36118 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
36119 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
36120 u32 *nvecs, u32 *maxvec);
36121-};
36122+} __no_const;
36123 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36124
36125 struct bfa_iocfc_s {
36126diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h
36127--- linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-03-27 14:31:47.000000000 -0400
36128+++ linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:33:55.000000000 -0400
36129@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
36130 bfa_ioc_disable_cbfn_t disable_cbfn;
36131 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36132 bfa_ioc_reset_cbfn_t reset_cbfn;
36133-};
36134+} __no_const;
36135
36136 /**
36137 * Heartbeat failure notification queue element.
36138diff -urNp linux-2.6.32.45/drivers/scsi/BusLogic.c linux-2.6.32.45/drivers/scsi/BusLogic.c
36139--- linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
36140+++ linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
36141@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
36142 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
36143 *PrototypeHostAdapter)
36144 {
36145+ pax_track_stack();
36146+
36147 /*
36148 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
36149 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
36150diff -urNp linux-2.6.32.45/drivers/scsi/dpt_i2o.c linux-2.6.32.45/drivers/scsi/dpt_i2o.c
36151--- linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
36152+++ linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
36153@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
36154 dma_addr_t addr;
36155 ulong flags = 0;
36156
36157+ pax_track_stack();
36158+
36159 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
36160 // get user msg size in u32s
36161 if(get_user(size, &user_msg[0])){
36162@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
36163 s32 rcode;
36164 dma_addr_t addr;
36165
36166+ pax_track_stack();
36167+
36168 memset(msg, 0 , sizeof(msg));
36169 len = scsi_bufflen(cmd);
36170 direction = 0x00000000;
36171diff -urNp linux-2.6.32.45/drivers/scsi/eata.c linux-2.6.32.45/drivers/scsi/eata.c
36172--- linux-2.6.32.45/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
36173+++ linux-2.6.32.45/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
36174@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
36175 struct hostdata *ha;
36176 char name[16];
36177
36178+ pax_track_stack();
36179+
36180 sprintf(name, "%s%d", driver_name, j);
36181
36182 if (!request_region(port_base, REGION_SIZE, driver_name)) {
36183diff -urNp linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c
36184--- linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
36185+++ linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
36186@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
36187 size_t rlen;
36188 size_t dlen;
36189
36190+ pax_track_stack();
36191+
36192 fiph = (struct fip_header *)skb->data;
36193 sub = fiph->fip_subcode;
36194 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
36195diff -urNp linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c
36196--- linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-03-27 14:31:47.000000000 -0400
36197+++ linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-08-05 20:33:55.000000000 -0400
36198@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct p
36199 /* Start local port initiatialization */
36200
36201 lp->link_up = 0;
36202- lp->tt = fnic_transport_template;
36203+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
36204
36205 lp->max_retry_count = fnic->config.flogi_retries;
36206 lp->max_rport_retry_count = fnic->config.plogi_retries;
36207diff -urNp linux-2.6.32.45/drivers/scsi/gdth.c linux-2.6.32.45/drivers/scsi/gdth.c
36208--- linux-2.6.32.45/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
36209+++ linux-2.6.32.45/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
36210@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
36211 ulong flags;
36212 gdth_ha_str *ha;
36213
36214+ pax_track_stack();
36215+
36216 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
36217 return -EFAULT;
36218 ha = gdth_find_ha(ldrv.ionode);
36219@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
36220 gdth_ha_str *ha;
36221 int rval;
36222
36223+ pax_track_stack();
36224+
36225 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
36226 res.number >= MAX_HDRIVES)
36227 return -EFAULT;
36228@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
36229 gdth_ha_str *ha;
36230 int rval;
36231
36232+ pax_track_stack();
36233+
36234 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
36235 return -EFAULT;
36236 ha = gdth_find_ha(gen.ionode);
36237@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
36238 int i;
36239 gdth_cmd_str gdtcmd;
36240 char cmnd[MAX_COMMAND_SIZE];
36241+
36242+ pax_track_stack();
36243+
36244 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
36245
36246 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
36247diff -urNp linux-2.6.32.45/drivers/scsi/gdth_proc.c linux-2.6.32.45/drivers/scsi/gdth_proc.c
36248--- linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
36249+++ linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
36250@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
36251 ulong64 paddr;
36252
36253 char cmnd[MAX_COMMAND_SIZE];
36254+
36255+ pax_track_stack();
36256+
36257 memset(cmnd, 0xff, 12);
36258 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
36259
36260@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
36261 gdth_hget_str *phg;
36262 char cmnd[MAX_COMMAND_SIZE];
36263
36264+ pax_track_stack();
36265+
36266 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
36267 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
36268 if (!gdtcmd || !estr)
36269diff -urNp linux-2.6.32.45/drivers/scsi/hosts.c linux-2.6.32.45/drivers/scsi/hosts.c
36270--- linux-2.6.32.45/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
36271+++ linux-2.6.32.45/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
36272@@ -40,7 +40,7 @@
36273 #include "scsi_logging.h"
36274
36275
36276-static atomic_t scsi_host_next_hn; /* host_no for next new host */
36277+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36278
36279
36280 static void scsi_host_cls_release(struct device *dev)
36281@@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
36282 * subtract one because we increment first then return, but we need to
36283 * know what the next host number was before increment
36284 */
36285- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36286+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36287 shost->dma_channel = 0xff;
36288
36289 /* These three are default values which can be overridden */
36290diff -urNp linux-2.6.32.45/drivers/scsi/ipr.c linux-2.6.32.45/drivers/scsi/ipr.c
36291--- linux-2.6.32.45/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
36292+++ linux-2.6.32.45/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
36293@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
36294 return true;
36295 }
36296
36297-static struct ata_port_operations ipr_sata_ops = {
36298+static const struct ata_port_operations ipr_sata_ops = {
36299 .phy_reset = ipr_ata_phy_reset,
36300 .hardreset = ipr_sata_reset,
36301 .post_internal_cmd = ipr_ata_post_internal,
36302diff -urNp linux-2.6.32.45/drivers/scsi/ips.h linux-2.6.32.45/drivers/scsi/ips.h
36303--- linux-2.6.32.45/drivers/scsi/ips.h 2011-03-27 14:31:47.000000000 -0400
36304+++ linux-2.6.32.45/drivers/scsi/ips.h 2011-08-05 20:33:55.000000000 -0400
36305@@ -1027,7 +1027,7 @@ typedef struct {
36306 int (*intr)(struct ips_ha *);
36307 void (*enableint)(struct ips_ha *);
36308 uint32_t (*statupd)(struct ips_ha *);
36309-} ips_hw_func_t;
36310+} __no_const ips_hw_func_t;
36311
36312 typedef struct ips_ha {
36313 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36314diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c
36315--- linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
36316+++ linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:22:32.000000000 -0400
36317@@ -86,12 +86,12 @@ struct fc_exch_mgr {
36318 * all together if not used XXX
36319 */
36320 struct {
36321- atomic_t no_free_exch;
36322- atomic_t no_free_exch_xid;
36323- atomic_t xid_not_found;
36324- atomic_t xid_busy;
36325- atomic_t seq_not_found;
36326- atomic_t non_bls_resp;
36327+ atomic_unchecked_t no_free_exch;
36328+ atomic_unchecked_t no_free_exch_xid;
36329+ atomic_unchecked_t xid_not_found;
36330+ atomic_unchecked_t xid_busy;
36331+ atomic_unchecked_t seq_not_found;
36332+ atomic_unchecked_t non_bls_resp;
36333 } stats;
36334 };
36335 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
36336@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
36337 /* allocate memory for exchange */
36338 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36339 if (!ep) {
36340- atomic_inc(&mp->stats.no_free_exch);
36341+ atomic_inc_unchecked(&mp->stats.no_free_exch);
36342 goto out;
36343 }
36344 memset(ep, 0, sizeof(*ep));
36345@@ -557,7 +557,7 @@ out:
36346 return ep;
36347 err:
36348 spin_unlock_bh(&pool->lock);
36349- atomic_inc(&mp->stats.no_free_exch_xid);
36350+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36351 mempool_free(ep, mp->ep_pool);
36352 return NULL;
36353 }
36354@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36355 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36356 ep = fc_exch_find(mp, xid);
36357 if (!ep) {
36358- atomic_inc(&mp->stats.xid_not_found);
36359+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36360 reject = FC_RJT_OX_ID;
36361 goto out;
36362 }
36363@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36364 ep = fc_exch_find(mp, xid);
36365 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36366 if (ep) {
36367- atomic_inc(&mp->stats.xid_busy);
36368+ atomic_inc_unchecked(&mp->stats.xid_busy);
36369 reject = FC_RJT_RX_ID;
36370 goto rel;
36371 }
36372@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36373 }
36374 xid = ep->xid; /* get our XID */
36375 } else if (!ep) {
36376- atomic_inc(&mp->stats.xid_not_found);
36377+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36378 reject = FC_RJT_RX_ID; /* XID not found */
36379 goto out;
36380 }
36381@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36382 } else {
36383 sp = &ep->seq;
36384 if (sp->id != fh->fh_seq_id) {
36385- atomic_inc(&mp->stats.seq_not_found);
36386+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36387 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
36388 goto rel;
36389 }
36390@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
36391
36392 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36393 if (!ep) {
36394- atomic_inc(&mp->stats.xid_not_found);
36395+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36396 goto out;
36397 }
36398 if (ep->esb_stat & ESB_ST_COMPLETE) {
36399- atomic_inc(&mp->stats.xid_not_found);
36400+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36401 goto out;
36402 }
36403 if (ep->rxid == FC_XID_UNKNOWN)
36404 ep->rxid = ntohs(fh->fh_rx_id);
36405 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36406- atomic_inc(&mp->stats.xid_not_found);
36407+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36408 goto rel;
36409 }
36410 if (ep->did != ntoh24(fh->fh_s_id) &&
36411 ep->did != FC_FID_FLOGI) {
36412- atomic_inc(&mp->stats.xid_not_found);
36413+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36414 goto rel;
36415 }
36416 sof = fr_sof(fp);
36417@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
36418 } else {
36419 sp = &ep->seq;
36420 if (sp->id != fh->fh_seq_id) {
36421- atomic_inc(&mp->stats.seq_not_found);
36422+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36423 goto rel;
36424 }
36425 }
36426@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
36427 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36428
36429 if (!sp)
36430- atomic_inc(&mp->stats.xid_not_found);
36431+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36432 else
36433- atomic_inc(&mp->stats.non_bls_resp);
36434+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
36435
36436 fc_frame_free(fp);
36437 }
36438diff -urNp linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c
36439--- linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
36440+++ linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
36441@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
36442 }
36443 }
36444
36445-static struct ata_port_operations sas_sata_ops = {
36446+static const struct ata_port_operations sas_sata_ops = {
36447 .phy_reset = sas_ata_phy_reset,
36448 .post_internal_cmd = sas_ata_post_internal,
36449 .qc_defer = ata_std_qc_defer,
36450diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c
36451--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
36452+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
36453@@ -124,7 +124,7 @@ struct lpfc_debug {
36454 int len;
36455 };
36456
36457-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36458+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36459 static unsigned long lpfc_debugfs_start_time = 0L;
36460
36461 /**
36462@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
36463 lpfc_debugfs_enable = 0;
36464
36465 len = 0;
36466- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36467+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36468 (lpfc_debugfs_max_disc_trc - 1);
36469 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36470 dtp = vport->disc_trc + i;
36471@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
36472 lpfc_debugfs_enable = 0;
36473
36474 len = 0;
36475- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36476+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36477 (lpfc_debugfs_max_slow_ring_trc - 1);
36478 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36479 dtp = phba->slow_ring_trc + i;
36480@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
36481 uint32_t *ptr;
36482 char buffer[1024];
36483
36484+ pax_track_stack();
36485+
36486 off = 0;
36487 spin_lock_irq(&phba->hbalock);
36488
36489@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
36490 !vport || !vport->disc_trc)
36491 return;
36492
36493- index = atomic_inc_return(&vport->disc_trc_cnt) &
36494+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36495 (lpfc_debugfs_max_disc_trc - 1);
36496 dtp = vport->disc_trc + index;
36497 dtp->fmt = fmt;
36498 dtp->data1 = data1;
36499 dtp->data2 = data2;
36500 dtp->data3 = data3;
36501- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36502+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36503 dtp->jif = jiffies;
36504 #endif
36505 return;
36506@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
36507 !phba || !phba->slow_ring_trc)
36508 return;
36509
36510- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36511+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36512 (lpfc_debugfs_max_slow_ring_trc - 1);
36513 dtp = phba->slow_ring_trc + index;
36514 dtp->fmt = fmt;
36515 dtp->data1 = data1;
36516 dtp->data2 = data2;
36517 dtp->data3 = data3;
36518- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36519+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36520 dtp->jif = jiffies;
36521 #endif
36522 return;
36523@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36524 "slow_ring buffer\n");
36525 goto debug_failed;
36526 }
36527- atomic_set(&phba->slow_ring_trc_cnt, 0);
36528+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36529 memset(phba->slow_ring_trc, 0,
36530 (sizeof(struct lpfc_debugfs_trc) *
36531 lpfc_debugfs_max_slow_ring_trc));
36532@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36533 "buffer\n");
36534 goto debug_failed;
36535 }
36536- atomic_set(&vport->disc_trc_cnt, 0);
36537+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36538
36539 snprintf(name, sizeof(name), "discovery_trace");
36540 vport->debug_disc_trc =
36541diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h
36542--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
36543+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
36544@@ -400,7 +400,7 @@ struct lpfc_vport {
36545 struct dentry *debug_nodelist;
36546 struct dentry *vport_debugfs_root;
36547 struct lpfc_debugfs_trc *disc_trc;
36548- atomic_t disc_trc_cnt;
36549+ atomic_unchecked_t disc_trc_cnt;
36550 #endif
36551 uint8_t stat_data_enabled;
36552 uint8_t stat_data_blocked;
36553@@ -725,8 +725,8 @@ struct lpfc_hba {
36554 struct timer_list fabric_block_timer;
36555 unsigned long bit_flags;
36556 #define FABRIC_COMANDS_BLOCKED 0
36557- atomic_t num_rsrc_err;
36558- atomic_t num_cmd_success;
36559+ atomic_unchecked_t num_rsrc_err;
36560+ atomic_unchecked_t num_cmd_success;
36561 unsigned long last_rsrc_error_time;
36562 unsigned long last_ramp_down_time;
36563 unsigned long last_ramp_up_time;
36564@@ -740,7 +740,7 @@ struct lpfc_hba {
36565 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
36566 struct dentry *debug_slow_ring_trc;
36567 struct lpfc_debugfs_trc *slow_ring_trc;
36568- atomic_t slow_ring_trc_cnt;
36569+ atomic_unchecked_t slow_ring_trc_cnt;
36570 #endif
36571
36572 /* Used for deferred freeing of ELS data buffers */
36573diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c
36574--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-03-27 14:31:47.000000000 -0400
36575+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:33:55.000000000 -0400
36576@@ -8021,8 +8021,10 @@ lpfc_init(void)
36577 printk(LPFC_COPYRIGHT "\n");
36578
36579 if (lpfc_enable_npiv) {
36580- lpfc_transport_functions.vport_create = lpfc_vport_create;
36581- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36582+ pax_open_kernel();
36583+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36584+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36585+ pax_close_kernel();
36586 }
36587 lpfc_transport_template =
36588 fc_attach_transport(&lpfc_transport_functions);
36589diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c
36590--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
36591+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
36592@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
36593 uint32_t evt_posted;
36594
36595 spin_lock_irqsave(&phba->hbalock, flags);
36596- atomic_inc(&phba->num_rsrc_err);
36597+ atomic_inc_unchecked(&phba->num_rsrc_err);
36598 phba->last_rsrc_error_time = jiffies;
36599
36600 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36601@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
36602 unsigned long flags;
36603 struct lpfc_hba *phba = vport->phba;
36604 uint32_t evt_posted;
36605- atomic_inc(&phba->num_cmd_success);
36606+ atomic_inc_unchecked(&phba->num_cmd_success);
36607
36608 if (vport->cfg_lun_queue_depth <= queue_depth)
36609 return;
36610@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36611 int i;
36612 struct lpfc_rport_data *rdata;
36613
36614- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36615- num_cmd_success = atomic_read(&phba->num_cmd_success);
36616+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36617+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36618
36619 vports = lpfc_create_vport_work_array(phba);
36620 if (vports != NULL)
36621@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36622 }
36623 }
36624 lpfc_destroy_vport_work_array(phba, vports);
36625- atomic_set(&phba->num_rsrc_err, 0);
36626- atomic_set(&phba->num_cmd_success, 0);
36627+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36628+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36629 }
36630
36631 /**
36632@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
36633 }
36634 }
36635 lpfc_destroy_vport_work_array(phba, vports);
36636- atomic_set(&phba->num_rsrc_err, 0);
36637- atomic_set(&phba->num_cmd_success, 0);
36638+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36639+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36640 }
36641
36642 /**
36643diff -urNp linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c
36644--- linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
36645+++ linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
36646@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
36647 int rval;
36648 int i;
36649
36650+ pax_track_stack();
36651+
36652 // Allocate memory for the base list of scb for management module.
36653 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
36654
36655diff -urNp linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c
36656--- linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
36657+++ linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
36658@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
36659 int nelem = ARRAY_SIZE(get_attrs), a = 0;
36660 int ret;
36661
36662+ pax_track_stack();
36663+
36664 or = osd_start_request(od, GFP_KERNEL);
36665 if (!or)
36666 return -ENOMEM;
36667diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.c linux-2.6.32.45/drivers/scsi/pmcraid.c
36668--- linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:35:29.000000000 -0400
36669+++ linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:33:59.000000000 -0400
36670@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
36671 res->scsi_dev = scsi_dev;
36672 scsi_dev->hostdata = res;
36673 res->change_detected = 0;
36674- atomic_set(&res->read_failures, 0);
36675- atomic_set(&res->write_failures, 0);
36676+ atomic_set_unchecked(&res->read_failures, 0);
36677+ atomic_set_unchecked(&res->write_failures, 0);
36678 rc = 0;
36679 }
36680 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36681@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
36682
36683 /* If this was a SCSI read/write command keep count of errors */
36684 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36685- atomic_inc(&res->read_failures);
36686+ atomic_inc_unchecked(&res->read_failures);
36687 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36688- atomic_inc(&res->write_failures);
36689+ atomic_inc_unchecked(&res->write_failures);
36690
36691 if (!RES_IS_GSCSI(res->cfg_entry) &&
36692 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36693@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(stru
36694
36695 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36696 /* add resources only after host is added into system */
36697- if (!atomic_read(&pinstance->expose_resources))
36698+ if (!atomic_read_unchecked(&pinstance->expose_resources))
36699 return;
36700
36701 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
36702@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instan
36703 init_waitqueue_head(&pinstance->reset_wait_q);
36704
36705 atomic_set(&pinstance->outstanding_cmds, 0);
36706- atomic_set(&pinstance->expose_resources, 0);
36707+ atomic_set_unchecked(&pinstance->expose_resources, 0);
36708
36709 INIT_LIST_HEAD(&pinstance->free_res_q);
36710 INIT_LIST_HEAD(&pinstance->used_res_q);
36711@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
36712 /* Schedule worker thread to handle CCN and take care of adding and
36713 * removing devices to OS
36714 */
36715- atomic_set(&pinstance->expose_resources, 1);
36716+ atomic_set_unchecked(&pinstance->expose_resources, 1);
36717 schedule_work(&pinstance->worker_q);
36718 return rc;
36719
36720diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.h linux-2.6.32.45/drivers/scsi/pmcraid.h
36721--- linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
36722+++ linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
36723@@ -690,7 +690,7 @@ struct pmcraid_instance {
36724 atomic_t outstanding_cmds;
36725
36726 /* should add/delete resources to mid-layer now ?*/
36727- atomic_t expose_resources;
36728+ atomic_unchecked_t expose_resources;
36729
36730 /* Tasklet to handle deferred processing */
36731 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
36732@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
36733 struct list_head queue; /* link to "to be exposed" resources */
36734 struct pmcraid_config_table_entry cfg_entry;
36735 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36736- atomic_t read_failures; /* count of failed READ commands */
36737- atomic_t write_failures; /* count of failed WRITE commands */
36738+ atomic_unchecked_t read_failures; /* count of failed READ commands */
36739+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36740
36741 /* To indicate add/delete/modify during CCN */
36742 u8 change_detected;
36743diff -urNp linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h
36744--- linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-03-27 14:31:47.000000000 -0400
36745+++ linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:33:55.000000000 -0400
36746@@ -2089,7 +2089,7 @@ struct isp_operations {
36747
36748 int (*get_flash_version) (struct scsi_qla_host *, void *);
36749 int (*start_scsi) (srb_t *);
36750-};
36751+} __no_const;
36752
36753 /* MSI-X Support *************************************************************/
36754
36755diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h
36756--- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
36757+++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
36758@@ -240,7 +240,7 @@ struct ddb_entry {
36759 atomic_t retry_relogin_timer; /* Min Time between relogins
36760 * (4000 only) */
36761 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
36762- atomic_t relogin_retry_count; /* Num of times relogin has been
36763+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36764 * retried */
36765
36766 uint16_t port;
36767diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c
36768--- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
36769+++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
36770@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
36771 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
36772 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36773 atomic_set(&ddb_entry->relogin_timer, 0);
36774- atomic_set(&ddb_entry->relogin_retry_count, 0);
36775+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36776 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36777 list_add_tail(&ddb_entry->list, &ha->ddb_list);
36778 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
36779@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
36780 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36781 atomic_set(&ddb_entry->port_down_timer,
36782 ha->port_down_retry_count);
36783- atomic_set(&ddb_entry->relogin_retry_count, 0);
36784+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36785 atomic_set(&ddb_entry->relogin_timer, 0);
36786 clear_bit(DF_RELOGIN, &ddb_entry->flags);
36787 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
36788diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c
36789--- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
36790+++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
36791@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
36792 ddb_entry->fw_ddb_device_state ==
36793 DDB_DS_SESSION_FAILED) {
36794 /* Reset retry relogin timer */
36795- atomic_inc(&ddb_entry->relogin_retry_count);
36796+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36797 DEBUG2(printk("scsi%ld: index[%d] relogin"
36798 " timed out-retrying"
36799 " relogin (%d)\n",
36800 ha->host_no,
36801 ddb_entry->fw_ddb_index,
36802- atomic_read(&ddb_entry->
36803+ atomic_read_unchecked(&ddb_entry->
36804 relogin_retry_count))
36805 );
36806 start_dpc++;
36807diff -urNp linux-2.6.32.45/drivers/scsi/scsi.c linux-2.6.32.45/drivers/scsi/scsi.c
36808--- linux-2.6.32.45/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
36809+++ linux-2.6.32.45/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
36810@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
36811 unsigned long timeout;
36812 int rtn = 0;
36813
36814- atomic_inc(&cmd->device->iorequest_cnt);
36815+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36816
36817 /* check if the device is still usable */
36818 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36819diff -urNp linux-2.6.32.45/drivers/scsi/scsi_debug.c linux-2.6.32.45/drivers/scsi/scsi_debug.c
36820--- linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
36821+++ linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
36822@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
36823 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
36824 unsigned char *cmd = (unsigned char *)scp->cmnd;
36825
36826+ pax_track_stack();
36827+
36828 if ((errsts = check_readiness(scp, 1, devip)))
36829 return errsts;
36830 memset(arr, 0, sizeof(arr));
36831@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
36832 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
36833 unsigned char *cmd = (unsigned char *)scp->cmnd;
36834
36835+ pax_track_stack();
36836+
36837 if ((errsts = check_readiness(scp, 1, devip)))
36838 return errsts;
36839 memset(arr, 0, sizeof(arr));
36840diff -urNp linux-2.6.32.45/drivers/scsi/scsi_lib.c linux-2.6.32.45/drivers/scsi/scsi_lib.c
36841--- linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
36842+++ linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
36843@@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
36844
36845 scsi_init_cmd_errh(cmd);
36846 cmd->result = DID_NO_CONNECT << 16;
36847- atomic_inc(&cmd->device->iorequest_cnt);
36848+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36849
36850 /*
36851 * SCSI request completion path will do scsi_device_unbusy(),
36852@@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
36853 */
36854 cmd->serial_number = 0;
36855
36856- atomic_inc(&cmd->device->iodone_cnt);
36857+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
36858 if (cmd->result)
36859- atomic_inc(&cmd->device->ioerr_cnt);
36860+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36861
36862 disposition = scsi_decide_disposition(cmd);
36863 if (disposition != SUCCESS &&
36864diff -urNp linux-2.6.32.45/drivers/scsi/scsi_sysfs.c linux-2.6.32.45/drivers/scsi/scsi_sysfs.c
36865--- linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
36866+++ linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
36867@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
36868 char *buf) \
36869 { \
36870 struct scsi_device *sdev = to_scsi_device(dev); \
36871- unsigned long long count = atomic_read(&sdev->field); \
36872+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
36873 return snprintf(buf, 20, "0x%llx\n", count); \
36874 } \
36875 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36876diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c
36877--- linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
36878+++ linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
36879@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
36880 * Netlink Infrastructure
36881 */
36882
36883-static atomic_t fc_event_seq;
36884+static atomic_unchecked_t fc_event_seq;
36885
36886 /**
36887 * fc_get_event_number - Obtain the next sequential FC event number
36888@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
36889 u32
36890 fc_get_event_number(void)
36891 {
36892- return atomic_add_return(1, &fc_event_seq);
36893+ return atomic_add_return_unchecked(1, &fc_event_seq);
36894 }
36895 EXPORT_SYMBOL(fc_get_event_number);
36896
36897@@ -641,7 +641,7 @@ static __init int fc_transport_init(void
36898 {
36899 int error;
36900
36901- atomic_set(&fc_event_seq, 0);
36902+ atomic_set_unchecked(&fc_event_seq, 0);
36903
36904 error = transport_class_register(&fc_host_class);
36905 if (error)
36906diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c
36907--- linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
36908+++ linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
36909@@ -81,7 +81,7 @@ struct iscsi_internal {
36910 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
36911 };
36912
36913-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36914+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36915 static struct workqueue_struct *iscsi_eh_timer_workq;
36916
36917 /*
36918@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
36919 int err;
36920
36921 ihost = shost->shost_data;
36922- session->sid = atomic_add_return(1, &iscsi_session_nr);
36923+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36924
36925 if (id == ISCSI_MAX_TARGET) {
36926 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
36927@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
36928 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36929 ISCSI_TRANSPORT_VERSION);
36930
36931- atomic_set(&iscsi_session_nr, 0);
36932+ atomic_set_unchecked(&iscsi_session_nr, 0);
36933
36934 err = class_register(&iscsi_transport_class);
36935 if (err)
36936diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c
36937--- linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
36938+++ linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
36939@@ -33,7 +33,7 @@
36940 #include "scsi_transport_srp_internal.h"
36941
36942 struct srp_host_attrs {
36943- atomic_t next_port_id;
36944+ atomic_unchecked_t next_port_id;
36945 };
36946 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36947
36948@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
36949 struct Scsi_Host *shost = dev_to_shost(dev);
36950 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36951
36952- atomic_set(&srp_host->next_port_id, 0);
36953+ atomic_set_unchecked(&srp_host->next_port_id, 0);
36954 return 0;
36955 }
36956
36957@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
36958 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36959 rport->roles = ids->roles;
36960
36961- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36962+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36963 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36964
36965 transport_setup_device(&rport->dev);
36966diff -urNp linux-2.6.32.45/drivers/scsi/sg.c linux-2.6.32.45/drivers/scsi/sg.c
36967--- linux-2.6.32.45/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
36968+++ linux-2.6.32.45/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
36969@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
36970 const struct file_operations * fops;
36971 };
36972
36973-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36974+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36975 {"allow_dio", &adio_fops},
36976 {"debug", &debug_fops},
36977 {"def_reserved_size", &dressz_fops},
36978@@ -2307,7 +2307,7 @@ sg_proc_init(void)
36979 {
36980 int k, mask;
36981 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
36982- struct sg_proc_leaf * leaf;
36983+ const struct sg_proc_leaf * leaf;
36984
36985 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
36986 if (!sg_proc_sgp)
36987diff -urNp linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c
36988--- linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
36989+++ linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
36990@@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
36991 int do_iounmap = 0;
36992 int do_disable_device = 1;
36993
36994+ pax_track_stack();
36995+
36996 memset(&sym_dev, 0, sizeof(sym_dev));
36997 memset(&nvram, 0, sizeof(nvram));
36998 sym_dev.pdev = pdev;
36999diff -urNp linux-2.6.32.45/drivers/serial/kgdboc.c linux-2.6.32.45/drivers/serial/kgdboc.c
37000--- linux-2.6.32.45/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
37001+++ linux-2.6.32.45/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
37002@@ -18,7 +18,7 @@
37003
37004 #define MAX_CONFIG_LEN 40
37005
37006-static struct kgdb_io kgdboc_io_ops;
37007+static const struct kgdb_io kgdboc_io_ops;
37008
37009 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37010 static int configured = -1;
37011@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
37012 module_put(THIS_MODULE);
37013 }
37014
37015-static struct kgdb_io kgdboc_io_ops = {
37016+static const struct kgdb_io kgdboc_io_ops = {
37017 .name = "kgdboc",
37018 .read_char = kgdboc_get_char,
37019 .write_char = kgdboc_put_char,
37020diff -urNp linux-2.6.32.45/drivers/spi/spi.c linux-2.6.32.45/drivers/spi/spi.c
37021--- linux-2.6.32.45/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
37022+++ linux-2.6.32.45/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
37023@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
37024 EXPORT_SYMBOL_GPL(spi_sync);
37025
37026 /* portable code must never pass more than 32 bytes */
37027-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37028+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
37029
37030 static u8 *buf;
37031
37032diff -urNp linux-2.6.32.45/drivers/staging/android/binder.c linux-2.6.32.45/drivers/staging/android/binder.c
37033--- linux-2.6.32.45/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
37034+++ linux-2.6.32.45/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
37035@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
37036 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
37037 }
37038
37039-static struct vm_operations_struct binder_vm_ops = {
37040+static const struct vm_operations_struct binder_vm_ops = {
37041 .open = binder_vma_open,
37042 .close = binder_vma_close,
37043 };
37044diff -urNp linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c
37045--- linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
37046+++ linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
37047@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
37048 return VM_FAULT_NOPAGE;
37049 }
37050
37051-static struct vm_operations_struct b3dfg_vm_ops = {
37052+static const struct vm_operations_struct b3dfg_vm_ops = {
37053 .fault = b3dfg_vma_fault,
37054 };
37055
37056@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
37057 return r;
37058 }
37059
37060-static struct file_operations b3dfg_fops = {
37061+static const struct file_operations b3dfg_fops = {
37062 .owner = THIS_MODULE,
37063 .open = b3dfg_open,
37064 .release = b3dfg_release,
37065diff -urNp linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c
37066--- linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:35:29.000000000 -0400
37067+++ linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:34:00.000000000 -0400
37068@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
37069 mutex_unlock(&dev->mutex);
37070 }
37071
37072-static struct vm_operations_struct comedi_vm_ops = {
37073+static const struct vm_operations_struct comedi_vm_ops = {
37074 .close = comedi_unmap,
37075 };
37076
37077diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c
37078--- linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
37079+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
37080@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
37081 static dev_t adsp_devno;
37082 static struct class *adsp_class;
37083
37084-static struct file_operations adsp_fops = {
37085+static const struct file_operations adsp_fops = {
37086 .owner = THIS_MODULE,
37087 .open = adsp_open,
37088 .unlocked_ioctl = adsp_ioctl,
37089diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c
37090--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
37091+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
37092@@ -1022,7 +1022,7 @@ done:
37093 return rc;
37094 }
37095
37096-static struct file_operations audio_aac_fops = {
37097+static const struct file_operations audio_aac_fops = {
37098 .owner = THIS_MODULE,
37099 .open = audio_open,
37100 .release = audio_release,
37101diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c
37102--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
37103+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
37104@@ -833,7 +833,7 @@ done:
37105 return rc;
37106 }
37107
37108-static struct file_operations audio_amrnb_fops = {
37109+static const struct file_operations audio_amrnb_fops = {
37110 .owner = THIS_MODULE,
37111 .open = audamrnb_open,
37112 .release = audamrnb_release,
37113diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c
37114--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
37115+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
37116@@ -805,7 +805,7 @@ dma_fail:
37117 return rc;
37118 }
37119
37120-static struct file_operations audio_evrc_fops = {
37121+static const struct file_operations audio_evrc_fops = {
37122 .owner = THIS_MODULE,
37123 .open = audevrc_open,
37124 .release = audevrc_release,
37125diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c
37126--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
37127+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
37128@@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
37129 return 0;
37130 }
37131
37132-static struct file_operations audio_fops = {
37133+static const struct file_operations audio_fops = {
37134 .owner = THIS_MODULE,
37135 .open = audio_in_open,
37136 .release = audio_in_release,
37137@@ -922,7 +922,7 @@ static struct file_operations audio_fops
37138 .unlocked_ioctl = audio_in_ioctl,
37139 };
37140
37141-static struct file_operations audpre_fops = {
37142+static const struct file_operations audpre_fops = {
37143 .owner = THIS_MODULE,
37144 .open = audpre_open,
37145 .unlocked_ioctl = audpre_ioctl,
37146diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c
37147--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
37148+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
37149@@ -941,7 +941,7 @@ done:
37150 return rc;
37151 }
37152
37153-static struct file_operations audio_mp3_fops = {
37154+static const struct file_operations audio_mp3_fops = {
37155 .owner = THIS_MODULE,
37156 .open = audio_open,
37157 .release = audio_release,
37158diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c
37159--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
37160+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
37161@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
37162 return 0;
37163 }
37164
37165-static struct file_operations audio_fops = {
37166+static const struct file_operations audio_fops = {
37167 .owner = THIS_MODULE,
37168 .open = audio_open,
37169 .release = audio_release,
37170@@ -819,7 +819,7 @@ static struct file_operations audio_fops
37171 .unlocked_ioctl = audio_ioctl,
37172 };
37173
37174-static struct file_operations audpp_fops = {
37175+static const struct file_operations audpp_fops = {
37176 .owner = THIS_MODULE,
37177 .open = audpp_open,
37178 .unlocked_ioctl = audpp_ioctl,
37179diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c
37180--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
37181+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
37182@@ -816,7 +816,7 @@ err:
37183 return rc;
37184 }
37185
37186-static struct file_operations audio_qcelp_fops = {
37187+static const struct file_operations audio_qcelp_fops = {
37188 .owner = THIS_MODULE,
37189 .open = audqcelp_open,
37190 .release = audqcelp_release,
37191diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c
37192--- linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
37193+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
37194@@ -242,7 +242,7 @@ err:
37195 return rc;
37196 }
37197
37198-static struct file_operations snd_fops = {
37199+static const struct file_operations snd_fops = {
37200 .owner = THIS_MODULE,
37201 .open = snd_open,
37202 .release = snd_release,
37203diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c
37204--- linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
37205+++ linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
37206@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
37207 return 0;
37208 }
37209
37210-static struct file_operations qmi_fops = {
37211+static const struct file_operations qmi_fops = {
37212 .owner = THIS_MODULE,
37213 .read = qmi_read,
37214 .write = qmi_write,
37215diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c
37216--- linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
37217+++ linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
37218@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
37219 return rc;
37220 }
37221
37222-static struct file_operations rpcrouter_server_fops = {
37223+static const struct file_operations rpcrouter_server_fops = {
37224 .owner = THIS_MODULE,
37225 .open = rpcrouter_open,
37226 .release = rpcrouter_release,
37227@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
37228 .unlocked_ioctl = rpcrouter_ioctl,
37229 };
37230
37231-static struct file_operations rpcrouter_router_fops = {
37232+static const struct file_operations rpcrouter_router_fops = {
37233 .owner = THIS_MODULE,
37234 .open = rpcrouter_open,
37235 .release = rpcrouter_release,
37236diff -urNp linux-2.6.32.45/drivers/staging/dst/dcore.c linux-2.6.32.45/drivers/staging/dst/dcore.c
37237--- linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
37238+++ linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
37239@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
37240 return 0;
37241 }
37242
37243-static struct block_device_operations dst_blk_ops = {
37244+static const struct block_device_operations dst_blk_ops = {
37245 .open = dst_bdev_open,
37246 .release = dst_bdev_release,
37247 .owner = THIS_MODULE,
37248@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
37249 n->size = ctl->size;
37250
37251 atomic_set(&n->refcnt, 1);
37252- atomic_long_set(&n->gen, 0);
37253+ atomic_long_set_unchecked(&n->gen, 0);
37254 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
37255
37256 err = dst_node_sysfs_init(n);
37257diff -urNp linux-2.6.32.45/drivers/staging/dst/trans.c linux-2.6.32.45/drivers/staging/dst/trans.c
37258--- linux-2.6.32.45/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
37259+++ linux-2.6.32.45/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
37260@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
37261 t->error = 0;
37262 t->retries = 0;
37263 atomic_set(&t->refcnt, 1);
37264- t->gen = atomic_long_inc_return(&n->gen);
37265+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
37266
37267 t->enc = bio_data_dir(bio);
37268 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
37269diff -urNp linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c
37270--- linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
37271+++ linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
37272@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
37273 struct net_device_stats *stats = &etdev->net_stats;
37274
37275 if (pMpTcb->Flags & fMP_DEST_BROAD)
37276- atomic_inc(&etdev->Stats.brdcstxmt);
37277+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
37278 else if (pMpTcb->Flags & fMP_DEST_MULTI)
37279- atomic_inc(&etdev->Stats.multixmt);
37280+ atomic_inc_unchecked(&etdev->Stats.multixmt);
37281 else
37282- atomic_inc(&etdev->Stats.unixmt);
37283+ atomic_inc_unchecked(&etdev->Stats.unixmt);
37284
37285 if (pMpTcb->Packet) {
37286 stats->tx_bytes += pMpTcb->Packet->len;
37287diff -urNp linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h
37288--- linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
37289+++ linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
37290@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
37291 * operations
37292 */
37293 u32 unircv; /* # multicast packets received */
37294- atomic_t unixmt; /* # multicast packets for Tx */
37295+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
37296 u32 multircv; /* # multicast packets received */
37297- atomic_t multixmt; /* # multicast packets for Tx */
37298+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
37299 u32 brdcstrcv; /* # broadcast packets received */
37300- atomic_t brdcstxmt; /* # broadcast packets for Tx */
37301+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
37302 u32 norcvbuf; /* # Rx packets discarded */
37303 u32 noxmtbuf; /* # Tx packets discarded */
37304
37305diff -urNp linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c
37306--- linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
37307+++ linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
37308@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
37309 return 0;
37310 }
37311
37312-static struct vm_operations_struct go7007_vm_ops = {
37313+static const struct vm_operations_struct go7007_vm_ops = {
37314 .open = go7007_vm_open,
37315 .close = go7007_vm_close,
37316 .fault = go7007_vm_fault,
37317diff -urNp linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c
37318--- linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
37319+++ linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
37320@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
37321 /* The one and only one */
37322 static struct blkvsc_driver_context g_blkvsc_drv;
37323
37324-static struct block_device_operations block_ops = {
37325+static const struct block_device_operations block_ops = {
37326 .owner = THIS_MODULE,
37327 .open = blkvsc_open,
37328 .release = blkvsc_release,
37329diff -urNp linux-2.6.32.45/drivers/staging/hv/Channel.c linux-2.6.32.45/drivers/staging/hv/Channel.c
37330--- linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
37331+++ linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
37332@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
37333
37334 DPRINT_ENTER(VMBUS);
37335
37336- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
37337- atomic_inc(&gVmbusConnection.NextGpadlHandle);
37338+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
37339+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
37340
37341 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
37342 ASSERT(msgInfo != NULL);
37343diff -urNp linux-2.6.32.45/drivers/staging/hv/Hv.c linux-2.6.32.45/drivers/staging/hv/Hv.c
37344--- linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
37345+++ linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
37346@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
37347 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
37348 u32 outputAddressHi = outputAddress >> 32;
37349 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
37350- volatile void *hypercallPage = gHvContext.HypercallPage;
37351+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
37352
37353 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
37354 Control, Input, Output);
37355diff -urNp linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c
37356--- linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
37357+++ linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
37358@@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
37359 to_device_context(root_device_obj);
37360 struct device_context *child_device_ctx =
37361 to_device_context(child_device_obj);
37362- static atomic_t device_num = ATOMIC_INIT(0);
37363+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
37364
37365 DPRINT_ENTER(VMBUS_DRV);
37366
37367@@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
37368
37369 /* Set the device name. Otherwise, device_register() will fail. */
37370 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
37371- atomic_inc_return(&device_num));
37372+ atomic_inc_return_unchecked(&device_num));
37373
37374 /* The new device belongs to this bus */
37375 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
37376diff -urNp linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h
37377--- linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
37378+++ linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
37379@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
37380 struct VMBUS_CONNECTION {
37381 enum VMBUS_CONNECT_STATE ConnectState;
37382
37383- atomic_t NextGpadlHandle;
37384+ atomic_unchecked_t NextGpadlHandle;
37385
37386 /*
37387 * Represents channel interrupts. Each bit position represents a
37388diff -urNp linux-2.6.32.45/drivers/staging/iio/ring_generic.h linux-2.6.32.45/drivers/staging/iio/ring_generic.h
37389--- linux-2.6.32.45/drivers/staging/iio/ring_generic.h 2011-03-27 14:31:47.000000000 -0400
37390+++ linux-2.6.32.45/drivers/staging/iio/ring_generic.h 2011-08-23 20:24:26.000000000 -0400
37391@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
37392
37393 int (*is_enabled)(struct iio_ring_buffer *ring);
37394 int (*enable)(struct iio_ring_buffer *ring);
37395-};
37396+} __no_const;
37397
37398 /**
37399 * struct iio_ring_buffer - general ring buffer structure
37400diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet.c linux-2.6.32.45/drivers/staging/octeon/ethernet.c
37401--- linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
37402+++ linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
37403@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
37404 * since the RX tasklet also increments it.
37405 */
37406 #ifdef CONFIG_64BIT
37407- atomic64_add(rx_status.dropped_packets,
37408- (atomic64_t *)&priv->stats.rx_dropped);
37409+ atomic64_add_unchecked(rx_status.dropped_packets,
37410+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37411 #else
37412- atomic_add(rx_status.dropped_packets,
37413- (atomic_t *)&priv->stats.rx_dropped);
37414+ atomic_add_unchecked(rx_status.dropped_packets,
37415+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
37416 #endif
37417 }
37418
37419diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c
37420--- linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
37421+++ linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
37422@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
37423 /* Increment RX stats for virtual ports */
37424 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37425 #ifdef CONFIG_64BIT
37426- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37427- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37428+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37429+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37430 #else
37431- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37432- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37433+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37434+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37435 #endif
37436 }
37437 netif_receive_skb(skb);
37438@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
37439 dev->name);
37440 */
37441 #ifdef CONFIG_64BIT
37442- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37443+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
37444 #else
37445- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37446+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
37447 #endif
37448 dev_kfree_skb_irq(skb);
37449 }
37450diff -urNp linux-2.6.32.45/drivers/staging/panel/panel.c linux-2.6.32.45/drivers/staging/panel/panel.c
37451--- linux-2.6.32.45/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
37452+++ linux-2.6.32.45/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
37453@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
37454 return 0;
37455 }
37456
37457-static struct file_operations lcd_fops = {
37458+static const struct file_operations lcd_fops = {
37459 .write = lcd_write,
37460 .open = lcd_open,
37461 .release = lcd_release,
37462@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
37463 return 0;
37464 }
37465
37466-static struct file_operations keypad_fops = {
37467+static const struct file_operations keypad_fops = {
37468 .read = keypad_read, /* read */
37469 .open = keypad_open, /* open */
37470 .release = keypad_release, /* close */
37471diff -urNp linux-2.6.32.45/drivers/staging/phison/phison.c linux-2.6.32.45/drivers/staging/phison/phison.c
37472--- linux-2.6.32.45/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
37473+++ linux-2.6.32.45/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
37474@@ -43,7 +43,7 @@ static struct scsi_host_template phison_
37475 ATA_BMDMA_SHT(DRV_NAME),
37476 };
37477
37478-static struct ata_port_operations phison_ops = {
37479+static const struct ata_port_operations phison_ops = {
37480 .inherits = &ata_bmdma_port_ops,
37481 .prereset = phison_pre_reset,
37482 };
37483diff -urNp linux-2.6.32.45/drivers/staging/poch/poch.c linux-2.6.32.45/drivers/staging/poch/poch.c
37484--- linux-2.6.32.45/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
37485+++ linux-2.6.32.45/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
37486@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
37487 return 0;
37488 }
37489
37490-static struct file_operations poch_fops = {
37491+static const struct file_operations poch_fops = {
37492 .owner = THIS_MODULE,
37493 .open = poch_open,
37494 .release = poch_release,
37495diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/inode.c linux-2.6.32.45/drivers/staging/pohmelfs/inode.c
37496--- linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37497+++ linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
37498@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
37499 mutex_init(&psb->mcache_lock);
37500 psb->mcache_root = RB_ROOT;
37501 psb->mcache_timeout = msecs_to_jiffies(5000);
37502- atomic_long_set(&psb->mcache_gen, 0);
37503+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
37504
37505 psb->trans_max_pages = 100;
37506
37507@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
37508 INIT_LIST_HEAD(&psb->crypto_ready_list);
37509 INIT_LIST_HEAD(&psb->crypto_active_list);
37510
37511- atomic_set(&psb->trans_gen, 1);
37512+ atomic_set_unchecked(&psb->trans_gen, 1);
37513 atomic_long_set(&psb->total_inodes, 0);
37514
37515 mutex_init(&psb->state_lock);
37516diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c
37517--- linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
37518+++ linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
37519@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
37520 m->data = data;
37521 m->start = start;
37522 m->size = size;
37523- m->gen = atomic_long_inc_return(&psb->mcache_gen);
37524+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
37525
37526 mutex_lock(&psb->mcache_lock);
37527 err = pohmelfs_mcache_insert(psb, m);
37528diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h
37529--- linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
37530+++ linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
37531@@ -570,14 +570,14 @@ struct pohmelfs_config;
37532 struct pohmelfs_sb {
37533 struct rb_root mcache_root;
37534 struct mutex mcache_lock;
37535- atomic_long_t mcache_gen;
37536+ atomic_long_unchecked_t mcache_gen;
37537 unsigned long mcache_timeout;
37538
37539 unsigned int idx;
37540
37541 unsigned int trans_retries;
37542
37543- atomic_t trans_gen;
37544+ atomic_unchecked_t trans_gen;
37545
37546 unsigned int crypto_attached_size;
37547 unsigned int crypto_align_size;
37548diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/trans.c linux-2.6.32.45/drivers/staging/pohmelfs/trans.c
37549--- linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
37550+++ linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
37551@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
37552 int err;
37553 struct netfs_cmd *cmd = t->iovec.iov_base;
37554
37555- t->gen = atomic_inc_return(&psb->trans_gen);
37556+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
37557
37558 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
37559 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
37560diff -urNp linux-2.6.32.45/drivers/staging/sep/sep_driver.c linux-2.6.32.45/drivers/staging/sep/sep_driver.c
37561--- linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
37562+++ linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
37563@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
37564 static dev_t sep_devno;
37565
37566 /* the files operations structure of the driver */
37567-static struct file_operations sep_file_operations = {
37568+static const struct file_operations sep_file_operations = {
37569 .owner = THIS_MODULE,
37570 .ioctl = sep_ioctl,
37571 .poll = sep_poll,
37572diff -urNp linux-2.6.32.45/drivers/staging/usbip/usbip_common.h linux-2.6.32.45/drivers/staging/usbip/usbip_common.h
37573--- linux-2.6.32.45/drivers/staging/usbip/usbip_common.h 2011-04-17 17:00:52.000000000 -0400
37574+++ linux-2.6.32.45/drivers/staging/usbip/usbip_common.h 2011-08-23 20:24:26.000000000 -0400
37575@@ -374,7 +374,7 @@ struct usbip_device {
37576 void (*shutdown)(struct usbip_device *);
37577 void (*reset)(struct usbip_device *);
37578 void (*unusable)(struct usbip_device *);
37579- } eh_ops;
37580+ } __no_const eh_ops;
37581 };
37582
37583
37584diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci.h linux-2.6.32.45/drivers/staging/usbip/vhci.h
37585--- linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
37586+++ linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
37587@@ -92,7 +92,7 @@ struct vhci_hcd {
37588 unsigned resuming:1;
37589 unsigned long re_timeout;
37590
37591- atomic_t seqnum;
37592+ atomic_unchecked_t seqnum;
37593
37594 /*
37595 * NOTE:
37596diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c
37597--- linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
37598+++ linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
37599@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
37600 return;
37601 }
37602
37603- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37604+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37605 if (priv->seqnum == 0xffff)
37606 usbip_uinfo("seqnum max\n");
37607
37608@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
37609 return -ENOMEM;
37610 }
37611
37612- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37613+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37614 if (unlink->seqnum == 0xffff)
37615 usbip_uinfo("seqnum max\n");
37616
37617@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
37618 vdev->rhport = rhport;
37619 }
37620
37621- atomic_set(&vhci->seqnum, 0);
37622+ atomic_set_unchecked(&vhci->seqnum, 0);
37623 spin_lock_init(&vhci->lock);
37624
37625
37626diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c
37627--- linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
37628+++ linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
37629@@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
37630 usbip_uerr("cannot find a urb of seqnum %u\n",
37631 pdu->base.seqnum);
37632 usbip_uinfo("max seqnum %d\n",
37633- atomic_read(&the_controller->seqnum));
37634+ atomic_read_unchecked(&the_controller->seqnum));
37635 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37636 return;
37637 }
37638diff -urNp linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c
37639--- linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
37640+++ linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
37641@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
37642 static int __init vme_user_probe(struct device *, int, int);
37643 static int __exit vme_user_remove(struct device *, int, int);
37644
37645-static struct file_operations vme_user_fops = {
37646+static const struct file_operations vme_user_fops = {
37647 .open = vme_user_open,
37648 .release = vme_user_release,
37649 .read = vme_user_read,
37650diff -urNp linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c
37651--- linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c 2011-03-27 14:31:47.000000000 -0400
37652+++ linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 20:24:26.000000000 -0400
37653@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hf
37654
37655 struct usbctlx_completor {
37656 int (*complete) (struct usbctlx_completor *);
37657-};
37658+} __no_const;
37659 typedef struct usbctlx_completor usbctlx_completor_t;
37660
37661 static int
37662diff -urNp linux-2.6.32.45/drivers/telephony/ixj.c linux-2.6.32.45/drivers/telephony/ixj.c
37663--- linux-2.6.32.45/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
37664+++ linux-2.6.32.45/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
37665@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
37666 bool mContinue;
37667 char *pIn, *pOut;
37668
37669+ pax_track_stack();
37670+
37671 if (!SCI_Prepare(j))
37672 return 0;
37673
37674diff -urNp linux-2.6.32.45/drivers/uio/uio.c linux-2.6.32.45/drivers/uio/uio.c
37675--- linux-2.6.32.45/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
37676+++ linux-2.6.32.45/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
37677@@ -23,6 +23,7 @@
37678 #include <linux/string.h>
37679 #include <linux/kobject.h>
37680 #include <linux/uio_driver.h>
37681+#include <asm/local.h>
37682
37683 #define UIO_MAX_DEVICES 255
37684
37685@@ -30,10 +31,10 @@ struct uio_device {
37686 struct module *owner;
37687 struct device *dev;
37688 int minor;
37689- atomic_t event;
37690+ atomic_unchecked_t event;
37691 struct fasync_struct *async_queue;
37692 wait_queue_head_t wait;
37693- int vma_count;
37694+ local_t vma_count;
37695 struct uio_info *info;
37696 struct kobject *map_dir;
37697 struct kobject *portio_dir;
37698@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
37699 return entry->show(mem, buf);
37700 }
37701
37702-static struct sysfs_ops map_sysfs_ops = {
37703+static const struct sysfs_ops map_sysfs_ops = {
37704 .show = map_type_show,
37705 };
37706
37707@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
37708 return entry->show(port, buf);
37709 }
37710
37711-static struct sysfs_ops portio_sysfs_ops = {
37712+static const struct sysfs_ops portio_sysfs_ops = {
37713 .show = portio_type_show,
37714 };
37715
37716@@ -255,7 +256,7 @@ static ssize_t show_event(struct device
37717 struct uio_device *idev = dev_get_drvdata(dev);
37718 if (idev)
37719 return sprintf(buf, "%u\n",
37720- (unsigned int)atomic_read(&idev->event));
37721+ (unsigned int)atomic_read_unchecked(&idev->event));
37722 else
37723 return -ENODEV;
37724 }
37725@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
37726 {
37727 struct uio_device *idev = info->uio_dev;
37728
37729- atomic_inc(&idev->event);
37730+ atomic_inc_unchecked(&idev->event);
37731 wake_up_interruptible(&idev->wait);
37732 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37733 }
37734@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
37735 }
37736
37737 listener->dev = idev;
37738- listener->event_count = atomic_read(&idev->event);
37739+ listener->event_count = atomic_read_unchecked(&idev->event);
37740 filep->private_data = listener;
37741
37742 if (idev->info->open) {
37743@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
37744 return -EIO;
37745
37746 poll_wait(filep, &idev->wait, wait);
37747- if (listener->event_count != atomic_read(&idev->event))
37748+ if (listener->event_count != atomic_read_unchecked(&idev->event))
37749 return POLLIN | POLLRDNORM;
37750 return 0;
37751 }
37752@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
37753 do {
37754 set_current_state(TASK_INTERRUPTIBLE);
37755
37756- event_count = atomic_read(&idev->event);
37757+ event_count = atomic_read_unchecked(&idev->event);
37758 if (event_count != listener->event_count) {
37759 if (copy_to_user(buf, &event_count, count))
37760 retval = -EFAULT;
37761@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
37762 static void uio_vma_open(struct vm_area_struct *vma)
37763 {
37764 struct uio_device *idev = vma->vm_private_data;
37765- idev->vma_count++;
37766+ local_inc(&idev->vma_count);
37767 }
37768
37769 static void uio_vma_close(struct vm_area_struct *vma)
37770 {
37771 struct uio_device *idev = vma->vm_private_data;
37772- idev->vma_count--;
37773+ local_dec(&idev->vma_count);
37774 }
37775
37776 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37777@@ -840,7 +841,7 @@ int __uio_register_device(struct module
37778 idev->owner = owner;
37779 idev->info = info;
37780 init_waitqueue_head(&idev->wait);
37781- atomic_set(&idev->event, 0);
37782+ atomic_set_unchecked(&idev->event, 0);
37783
37784 ret = uio_get_minor(idev);
37785 if (ret)
37786diff -urNp linux-2.6.32.45/drivers/usb/atm/usbatm.c linux-2.6.32.45/drivers/usb/atm/usbatm.c
37787--- linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
37788+++ linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
37789@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
37790 if (printk_ratelimit())
37791 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37792 __func__, vpi, vci);
37793- atomic_inc(&vcc->stats->rx_err);
37794+ atomic_inc_unchecked(&vcc->stats->rx_err);
37795 return;
37796 }
37797
37798@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
37799 if (length > ATM_MAX_AAL5_PDU) {
37800 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37801 __func__, length, vcc);
37802- atomic_inc(&vcc->stats->rx_err);
37803+ atomic_inc_unchecked(&vcc->stats->rx_err);
37804 goto out;
37805 }
37806
37807@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
37808 if (sarb->len < pdu_length) {
37809 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37810 __func__, pdu_length, sarb->len, vcc);
37811- atomic_inc(&vcc->stats->rx_err);
37812+ atomic_inc_unchecked(&vcc->stats->rx_err);
37813 goto out;
37814 }
37815
37816 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37817 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37818 __func__, vcc);
37819- atomic_inc(&vcc->stats->rx_err);
37820+ atomic_inc_unchecked(&vcc->stats->rx_err);
37821 goto out;
37822 }
37823
37824@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
37825 if (printk_ratelimit())
37826 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37827 __func__, length);
37828- atomic_inc(&vcc->stats->rx_drop);
37829+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37830 goto out;
37831 }
37832
37833@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
37834
37835 vcc->push(vcc, skb);
37836
37837- atomic_inc(&vcc->stats->rx);
37838+ atomic_inc_unchecked(&vcc->stats->rx);
37839 out:
37840 skb_trim(sarb, 0);
37841 }
37842@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
37843 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37844
37845 usbatm_pop(vcc, skb);
37846- atomic_inc(&vcc->stats->tx);
37847+ atomic_inc_unchecked(&vcc->stats->tx);
37848
37849 skb = skb_dequeue(&instance->sndqueue);
37850 }
37851@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
37852 if (!left--)
37853 return sprintf(page,
37854 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37855- atomic_read(&atm_dev->stats.aal5.tx),
37856- atomic_read(&atm_dev->stats.aal5.tx_err),
37857- atomic_read(&atm_dev->stats.aal5.rx),
37858- atomic_read(&atm_dev->stats.aal5.rx_err),
37859- atomic_read(&atm_dev->stats.aal5.rx_drop));
37860+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37861+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37862+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37863+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37864+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37865
37866 if (!left--) {
37867 if (instance->disconnected)
37868diff -urNp linux-2.6.32.45/drivers/usb/class/cdc-wdm.c linux-2.6.32.45/drivers/usb/class/cdc-wdm.c
37869--- linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
37870+++ linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
37871@@ -314,7 +314,7 @@ static ssize_t wdm_write
37872 if (r < 0)
37873 goto outnp;
37874
37875- if (!file->f_flags && O_NONBLOCK)
37876+ if (!(file->f_flags & O_NONBLOCK))
37877 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
37878 &desc->flags));
37879 else
37880diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.c linux-2.6.32.45/drivers/usb/core/hcd.c
37881--- linux-2.6.32.45/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
37882+++ linux-2.6.32.45/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
37883@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
37884
37885 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
37886
37887-struct usb_mon_operations *mon_ops;
37888+const struct usb_mon_operations *mon_ops;
37889
37890 /*
37891 * The registration is unlocked.
37892@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
37893 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
37894 */
37895
37896-int usb_mon_register (struct usb_mon_operations *ops)
37897+int usb_mon_register (const struct usb_mon_operations *ops)
37898 {
37899
37900 if (mon_ops)
37901diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.h linux-2.6.32.45/drivers/usb/core/hcd.h
37902--- linux-2.6.32.45/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
37903+++ linux-2.6.32.45/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
37904@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
37905 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
37906
37907 struct usb_mon_operations {
37908- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
37909- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
37910- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
37911+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
37912+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
37913+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
37914 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
37915 };
37916
37917-extern struct usb_mon_operations *mon_ops;
37918+extern const struct usb_mon_operations *mon_ops;
37919
37920 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
37921 {
37922@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
37923 (*mon_ops->urb_complete)(bus, urb, status);
37924 }
37925
37926-int usb_mon_register(struct usb_mon_operations *ops);
37927+int usb_mon_register(const struct usb_mon_operations *ops);
37928 void usb_mon_deregister(void);
37929
37930 #else
37931diff -urNp linux-2.6.32.45/drivers/usb/core/message.c linux-2.6.32.45/drivers/usb/core/message.c
37932--- linux-2.6.32.45/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
37933+++ linux-2.6.32.45/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
37934@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
37935 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
37936 if (buf) {
37937 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
37938- if (len > 0) {
37939- smallbuf = kmalloc(++len, GFP_NOIO);
37940+ if (len++ > 0) {
37941+ smallbuf = kmalloc(len, GFP_NOIO);
37942 if (!smallbuf)
37943 return buf;
37944 memcpy(smallbuf, buf, len);
37945diff -urNp linux-2.6.32.45/drivers/usb/misc/appledisplay.c linux-2.6.32.45/drivers/usb/misc/appledisplay.c
37946--- linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
37947+++ linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
37948@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
37949 return pdata->msgdata[1];
37950 }
37951
37952-static struct backlight_ops appledisplay_bl_data = {
37953+static const struct backlight_ops appledisplay_bl_data = {
37954 .get_brightness = appledisplay_bl_get_brightness,
37955 .update_status = appledisplay_bl_update_status,
37956 };
37957diff -urNp linux-2.6.32.45/drivers/usb/mon/mon_main.c linux-2.6.32.45/drivers/usb/mon/mon_main.c
37958--- linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
37959+++ linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
37960@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
37961 /*
37962 * Ops
37963 */
37964-static struct usb_mon_operations mon_ops_0 = {
37965+static const struct usb_mon_operations mon_ops_0 = {
37966 .urb_submit = mon_submit,
37967 .urb_submit_error = mon_submit_error,
37968 .urb_complete = mon_complete,
37969diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h
37970--- linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
37971+++ linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
37972@@ -192,7 +192,7 @@ struct wahc {
37973 struct list_head xfer_delayed_list;
37974 spinlock_t xfer_list_lock;
37975 struct work_struct xfer_work;
37976- atomic_t xfer_id_count;
37977+ atomic_unchecked_t xfer_id_count;
37978 };
37979
37980
37981@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
37982 INIT_LIST_HEAD(&wa->xfer_delayed_list);
37983 spin_lock_init(&wa->xfer_list_lock);
37984 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
37985- atomic_set(&wa->xfer_id_count, 1);
37986+ atomic_set_unchecked(&wa->xfer_id_count, 1);
37987 }
37988
37989 /**
37990diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c
37991--- linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
37992+++ linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
37993@@ -293,7 +293,7 @@ out:
37994 */
37995 static void wa_xfer_id_init(struct wa_xfer *xfer)
37996 {
37997- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
37998+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
37999 }
38000
38001 /*
38002diff -urNp linux-2.6.32.45/drivers/uwb/wlp/messages.c linux-2.6.32.45/drivers/uwb/wlp/messages.c
38003--- linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
38004+++ linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
38005@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
38006 size_t len = skb->len;
38007 size_t used;
38008 ssize_t result;
38009- struct wlp_nonce enonce, rnonce;
38010+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
38011 enum wlp_assc_error assc_err;
38012 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
38013 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
38014diff -urNp linux-2.6.32.45/drivers/uwb/wlp/sysfs.c linux-2.6.32.45/drivers/uwb/wlp/sysfs.c
38015--- linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
38016+++ linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
38017@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
38018 return ret;
38019 }
38020
38021-static
38022-struct sysfs_ops wss_sysfs_ops = {
38023+static const struct sysfs_ops wss_sysfs_ops = {
38024 .show = wlp_wss_attr_show,
38025 .store = wlp_wss_attr_store,
38026 };
38027diff -urNp linux-2.6.32.45/drivers/video/atmel_lcdfb.c linux-2.6.32.45/drivers/video/atmel_lcdfb.c
38028--- linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
38029+++ linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
38030@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
38031 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
38032 }
38033
38034-static struct backlight_ops atmel_lcdc_bl_ops = {
38035+static const struct backlight_ops atmel_lcdc_bl_ops = {
38036 .update_status = atmel_bl_update_status,
38037 .get_brightness = atmel_bl_get_brightness,
38038 };
38039diff -urNp linux-2.6.32.45/drivers/video/aty/aty128fb.c linux-2.6.32.45/drivers/video/aty/aty128fb.c
38040--- linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
38041+++ linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
38042@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
38043 return bd->props.brightness;
38044 }
38045
38046-static struct backlight_ops aty128_bl_data = {
38047+static const struct backlight_ops aty128_bl_data = {
38048 .get_brightness = aty128_bl_get_brightness,
38049 .update_status = aty128_bl_update_status,
38050 };
38051diff -urNp linux-2.6.32.45/drivers/video/aty/atyfb_base.c linux-2.6.32.45/drivers/video/aty/atyfb_base.c
38052--- linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
38053+++ linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
38054@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
38055 return bd->props.brightness;
38056 }
38057
38058-static struct backlight_ops aty_bl_data = {
38059+static const struct backlight_ops aty_bl_data = {
38060 .get_brightness = aty_bl_get_brightness,
38061 .update_status = aty_bl_update_status,
38062 };
38063diff -urNp linux-2.6.32.45/drivers/video/aty/radeon_backlight.c linux-2.6.32.45/drivers/video/aty/radeon_backlight.c
38064--- linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
38065+++ linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
38066@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
38067 return bd->props.brightness;
38068 }
38069
38070-static struct backlight_ops radeon_bl_data = {
38071+static const struct backlight_ops radeon_bl_data = {
38072 .get_brightness = radeon_bl_get_brightness,
38073 .update_status = radeon_bl_update_status,
38074 };
38075diff -urNp linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c
38076--- linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
38077+++ linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
38078@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
38079 return error ? data->current_brightness : reg_val;
38080 }
38081
38082-static struct backlight_ops adp5520_bl_ops = {
38083+static const struct backlight_ops adp5520_bl_ops = {
38084 .update_status = adp5520_bl_update_status,
38085 .get_brightness = adp5520_bl_get_brightness,
38086 };
38087diff -urNp linux-2.6.32.45/drivers/video/backlight/adx_bl.c linux-2.6.32.45/drivers/video/backlight/adx_bl.c
38088--- linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
38089+++ linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
38090@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
38091 return 1;
38092 }
38093
38094-static struct backlight_ops adx_backlight_ops = {
38095+static const struct backlight_ops adx_backlight_ops = {
38096 .options = 0,
38097 .update_status = adx_backlight_update_status,
38098 .get_brightness = adx_backlight_get_brightness,
38099diff -urNp linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c
38100--- linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
38101+++ linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
38102@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
38103 return pwm_channel_enable(&pwmbl->pwmc);
38104 }
38105
38106-static struct backlight_ops atmel_pwm_bl_ops = {
38107+static const struct backlight_ops atmel_pwm_bl_ops = {
38108 .get_brightness = atmel_pwm_bl_get_intensity,
38109 .update_status = atmel_pwm_bl_set_intensity,
38110 };
38111diff -urNp linux-2.6.32.45/drivers/video/backlight/backlight.c linux-2.6.32.45/drivers/video/backlight/backlight.c
38112--- linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
38113+++ linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
38114@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
38115 * ERR_PTR() or a pointer to the newly allocated device.
38116 */
38117 struct backlight_device *backlight_device_register(const char *name,
38118- struct device *parent, void *devdata, struct backlight_ops *ops)
38119+ struct device *parent, void *devdata, const struct backlight_ops *ops)
38120 {
38121 struct backlight_device *new_bd;
38122 int rc;
38123diff -urNp linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c
38124--- linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
38125+++ linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
38126@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
38127 }
38128 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
38129
38130-static struct backlight_ops corgi_bl_ops = {
38131+static const struct backlight_ops corgi_bl_ops = {
38132 .get_brightness = corgi_bl_get_intensity,
38133 .update_status = corgi_bl_update_status,
38134 };
38135diff -urNp linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c
38136--- linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
38137+++ linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
38138@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
38139 return intensity;
38140 }
38141
38142-static struct backlight_ops cr_backlight_ops = {
38143+static const struct backlight_ops cr_backlight_ops = {
38144 .get_brightness = cr_backlight_get_intensity,
38145 .update_status = cr_backlight_set_intensity,
38146 };
38147diff -urNp linux-2.6.32.45/drivers/video/backlight/da903x_bl.c linux-2.6.32.45/drivers/video/backlight/da903x_bl.c
38148--- linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
38149+++ linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
38150@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
38151 return data->current_brightness;
38152 }
38153
38154-static struct backlight_ops da903x_backlight_ops = {
38155+static const struct backlight_ops da903x_backlight_ops = {
38156 .update_status = da903x_backlight_update_status,
38157 .get_brightness = da903x_backlight_get_brightness,
38158 };
38159diff -urNp linux-2.6.32.45/drivers/video/backlight/generic_bl.c linux-2.6.32.45/drivers/video/backlight/generic_bl.c
38160--- linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
38161+++ linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
38162@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
38163 }
38164 EXPORT_SYMBOL(corgibl_limit_intensity);
38165
38166-static struct backlight_ops genericbl_ops = {
38167+static const struct backlight_ops genericbl_ops = {
38168 .options = BL_CORE_SUSPENDRESUME,
38169 .get_brightness = genericbl_get_intensity,
38170 .update_status = genericbl_send_intensity,
38171diff -urNp linux-2.6.32.45/drivers/video/backlight/hp680_bl.c linux-2.6.32.45/drivers/video/backlight/hp680_bl.c
38172--- linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
38173+++ linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
38174@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
38175 return current_intensity;
38176 }
38177
38178-static struct backlight_ops hp680bl_ops = {
38179+static const struct backlight_ops hp680bl_ops = {
38180 .get_brightness = hp680bl_get_intensity,
38181 .update_status = hp680bl_set_intensity,
38182 };
38183diff -urNp linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c
38184--- linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
38185+++ linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
38186@@ -93,7 +93,7 @@ out:
38187 return ret;
38188 }
38189
38190-static struct backlight_ops jornada_bl_ops = {
38191+static const struct backlight_ops jornada_bl_ops = {
38192 .get_brightness = jornada_bl_get_brightness,
38193 .update_status = jornada_bl_update_status,
38194 .options = BL_CORE_SUSPENDRESUME,
38195diff -urNp linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c
38196--- linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
38197+++ linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
38198@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
38199 return kb3886bl_intensity;
38200 }
38201
38202-static struct backlight_ops kb3886bl_ops = {
38203+static const struct backlight_ops kb3886bl_ops = {
38204 .get_brightness = kb3886bl_get_intensity,
38205 .update_status = kb3886bl_send_intensity,
38206 };
38207diff -urNp linux-2.6.32.45/drivers/video/backlight/locomolcd.c linux-2.6.32.45/drivers/video/backlight/locomolcd.c
38208--- linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
38209+++ linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
38210@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
38211 return current_intensity;
38212 }
38213
38214-static struct backlight_ops locomobl_data = {
38215+static const struct backlight_ops locomobl_data = {
38216 .get_brightness = locomolcd_get_intensity,
38217 .update_status = locomolcd_set_intensity,
38218 };
38219diff -urNp linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c
38220--- linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
38221+++ linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
38222@@ -33,7 +33,7 @@ struct dmi_match_data {
38223 unsigned long iostart;
38224 unsigned long iolen;
38225 /* Backlight operations structure. */
38226- struct backlight_ops backlight_ops;
38227+ const struct backlight_ops backlight_ops;
38228 };
38229
38230 /* Module parameters. */
38231diff -urNp linux-2.6.32.45/drivers/video/backlight/omap1_bl.c linux-2.6.32.45/drivers/video/backlight/omap1_bl.c
38232--- linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
38233+++ linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
38234@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
38235 return bl->current_intensity;
38236 }
38237
38238-static struct backlight_ops omapbl_ops = {
38239+static const struct backlight_ops omapbl_ops = {
38240 .get_brightness = omapbl_get_intensity,
38241 .update_status = omapbl_update_status,
38242 };
38243diff -urNp linux-2.6.32.45/drivers/video/backlight/progear_bl.c linux-2.6.32.45/drivers/video/backlight/progear_bl.c
38244--- linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
38245+++ linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
38246@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
38247 return intensity - HW_LEVEL_MIN;
38248 }
38249
38250-static struct backlight_ops progearbl_ops = {
38251+static const struct backlight_ops progearbl_ops = {
38252 .get_brightness = progearbl_get_intensity,
38253 .update_status = progearbl_set_intensity,
38254 };
38255diff -urNp linux-2.6.32.45/drivers/video/backlight/pwm_bl.c linux-2.6.32.45/drivers/video/backlight/pwm_bl.c
38256--- linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
38257+++ linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
38258@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
38259 return bl->props.brightness;
38260 }
38261
38262-static struct backlight_ops pwm_backlight_ops = {
38263+static const struct backlight_ops pwm_backlight_ops = {
38264 .update_status = pwm_backlight_update_status,
38265 .get_brightness = pwm_backlight_get_brightness,
38266 };
38267diff -urNp linux-2.6.32.45/drivers/video/backlight/tosa_bl.c linux-2.6.32.45/drivers/video/backlight/tosa_bl.c
38268--- linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
38269+++ linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
38270@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
38271 return props->brightness;
38272 }
38273
38274-static struct backlight_ops bl_ops = {
38275+static const struct backlight_ops bl_ops = {
38276 .get_brightness = tosa_bl_get_brightness,
38277 .update_status = tosa_bl_update_status,
38278 };
38279diff -urNp linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c
38280--- linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
38281+++ linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
38282@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
38283 return data->current_brightness;
38284 }
38285
38286-static struct backlight_ops wm831x_backlight_ops = {
38287+static const struct backlight_ops wm831x_backlight_ops = {
38288 .options = BL_CORE_SUSPENDRESUME,
38289 .update_status = wm831x_backlight_update_status,
38290 .get_brightness = wm831x_backlight_get_brightness,
38291diff -urNp linux-2.6.32.45/drivers/video/bf54x-lq043fb.c linux-2.6.32.45/drivers/video/bf54x-lq043fb.c
38292--- linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
38293+++ linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
38294@@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
38295 return 0;
38296 }
38297
38298-static struct backlight_ops bfin_lq043fb_bl_ops = {
38299+static const struct backlight_ops bfin_lq043fb_bl_ops = {
38300 .get_brightness = bl_get_brightness,
38301 };
38302
38303diff -urNp linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c
38304--- linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
38305+++ linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
38306@@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
38307 return 0;
38308 }
38309
38310-static struct backlight_ops bfin_lq043fb_bl_ops = {
38311+static const struct backlight_ops bfin_lq043fb_bl_ops = {
38312 .get_brightness = bl_get_brightness,
38313 };
38314
38315diff -urNp linux-2.6.32.45/drivers/video/fbcmap.c linux-2.6.32.45/drivers/video/fbcmap.c
38316--- linux-2.6.32.45/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
38317+++ linux-2.6.32.45/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
38318@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
38319 rc = -ENODEV;
38320 goto out;
38321 }
38322- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38323- !info->fbops->fb_setcmap)) {
38324+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38325 rc = -EINVAL;
38326 goto out1;
38327 }
38328diff -urNp linux-2.6.32.45/drivers/video/fbmem.c linux-2.6.32.45/drivers/video/fbmem.c
38329--- linux-2.6.32.45/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
38330+++ linux-2.6.32.45/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
38331@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
38332 image->dx += image->width + 8;
38333 }
38334 } else if (rotate == FB_ROTATE_UD) {
38335- for (x = 0; x < num && image->dx >= 0; x++) {
38336+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38337 info->fbops->fb_imageblit(info, image);
38338 image->dx -= image->width + 8;
38339 }
38340@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
38341 image->dy += image->height + 8;
38342 }
38343 } else if (rotate == FB_ROTATE_CCW) {
38344- for (x = 0; x < num && image->dy >= 0; x++) {
38345+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38346 info->fbops->fb_imageblit(info, image);
38347 image->dy -= image->height + 8;
38348 }
38349@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
38350 int flags = info->flags;
38351 int ret = 0;
38352
38353+ pax_track_stack();
38354+
38355 if (var->activate & FB_ACTIVATE_INV_MODE) {
38356 struct fb_videomode mode1, mode2;
38357
38358@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
38359 void __user *argp = (void __user *)arg;
38360 long ret = 0;
38361
38362+ pax_track_stack();
38363+
38364 switch (cmd) {
38365 case FBIOGET_VSCREENINFO:
38366 if (!lock_fb_info(info))
38367@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
38368 return -EFAULT;
38369 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38370 return -EINVAL;
38371- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38372+ if (con2fb.framebuffer >= FB_MAX)
38373 return -EINVAL;
38374 if (!registered_fb[con2fb.framebuffer])
38375 request_module("fb%d", con2fb.framebuffer);
38376diff -urNp linux-2.6.32.45/drivers/video/i810/i810_accel.c linux-2.6.32.45/drivers/video/i810/i810_accel.c
38377--- linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
38378+++ linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
38379@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
38380 }
38381 }
38382 printk("ringbuffer lockup!!!\n");
38383+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38384 i810_report_error(mmio);
38385 par->dev_flags |= LOCKUP;
38386 info->pixmap.scan_align = 1;
38387diff -urNp linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c
38388--- linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
38389+++ linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
38390@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
38391 return bd->props.brightness;
38392 }
38393
38394-static struct backlight_ops nvidia_bl_ops = {
38395+static const struct backlight_ops nvidia_bl_ops = {
38396 .get_brightness = nvidia_bl_get_brightness,
38397 .update_status = nvidia_bl_update_status,
38398 };
38399diff -urNp linux-2.6.32.45/drivers/video/riva/fbdev.c linux-2.6.32.45/drivers/video/riva/fbdev.c
38400--- linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
38401+++ linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
38402@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
38403 return bd->props.brightness;
38404 }
38405
38406-static struct backlight_ops riva_bl_ops = {
38407+static const struct backlight_ops riva_bl_ops = {
38408 .get_brightness = riva_bl_get_brightness,
38409 .update_status = riva_bl_update_status,
38410 };
38411diff -urNp linux-2.6.32.45/drivers/video/uvesafb.c linux-2.6.32.45/drivers/video/uvesafb.c
38412--- linux-2.6.32.45/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
38413+++ linux-2.6.32.45/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
38414@@ -18,6 +18,7 @@
38415 #include <linux/fb.h>
38416 #include <linux/io.h>
38417 #include <linux/mutex.h>
38418+#include <linux/moduleloader.h>
38419 #include <video/edid.h>
38420 #include <video/uvesafb.h>
38421 #ifdef CONFIG_X86
38422@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
38423 NULL,
38424 };
38425
38426- return call_usermodehelper(v86d_path, argv, envp, 1);
38427+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38428 }
38429
38430 /*
38431@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
38432 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38433 par->pmi_setpal = par->ypan = 0;
38434 } else {
38435+
38436+#ifdef CONFIG_PAX_KERNEXEC
38437+#ifdef CONFIG_MODULES
38438+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38439+#endif
38440+ if (!par->pmi_code) {
38441+ par->pmi_setpal = par->ypan = 0;
38442+ return 0;
38443+ }
38444+#endif
38445+
38446 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38447 + task->t.regs.edi);
38448+
38449+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38450+ pax_open_kernel();
38451+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
38452+ pax_close_kernel();
38453+
38454+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
38455+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
38456+#else
38457 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
38458 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
38459+#endif
38460+
38461 printk(KERN_INFO "uvesafb: protected mode interface info at "
38462 "%04x:%04x\n",
38463 (u16)task->t.regs.es, (u16)task->t.regs.edi);
38464@@ -1799,6 +1822,11 @@ out:
38465 if (par->vbe_modes)
38466 kfree(par->vbe_modes);
38467
38468+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38469+ if (par->pmi_code)
38470+ module_free_exec(NULL, par->pmi_code);
38471+#endif
38472+
38473 framebuffer_release(info);
38474 return err;
38475 }
38476@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
38477 kfree(par->vbe_state_orig);
38478 if (par->vbe_state_saved)
38479 kfree(par->vbe_state_saved);
38480+
38481+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38482+ if (par->pmi_code)
38483+ module_free_exec(NULL, par->pmi_code);
38484+#endif
38485+
38486 }
38487
38488 framebuffer_release(info);
38489diff -urNp linux-2.6.32.45/drivers/video/vesafb.c linux-2.6.32.45/drivers/video/vesafb.c
38490--- linux-2.6.32.45/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
38491+++ linux-2.6.32.45/drivers/video/vesafb.c 2011-08-05 20:33:55.000000000 -0400
38492@@ -9,6 +9,7 @@
38493 */
38494
38495 #include <linux/module.h>
38496+#include <linux/moduleloader.h>
38497 #include <linux/kernel.h>
38498 #include <linux/errno.h>
38499 #include <linux/string.h>
38500@@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
38501 static int vram_total __initdata; /* Set total amount of memory */
38502 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
38503 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
38504-static void (*pmi_start)(void) __read_mostly;
38505-static void (*pmi_pal) (void) __read_mostly;
38506+static void (*pmi_start)(void) __read_only;
38507+static void (*pmi_pal) (void) __read_only;
38508 static int depth __read_mostly;
38509 static int vga_compat __read_mostly;
38510 /* --------------------------------------------------------------------- */
38511@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
38512 unsigned int size_vmode;
38513 unsigned int size_remap;
38514 unsigned int size_total;
38515+ void *pmi_code = NULL;
38516
38517 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
38518 return -ENODEV;
38519@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
38520 size_remap = size_total;
38521 vesafb_fix.smem_len = size_remap;
38522
38523-#ifndef __i386__
38524- screen_info.vesapm_seg = 0;
38525-#endif
38526-
38527 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
38528 printk(KERN_WARNING
38529 "vesafb: cannot reserve video memory at 0x%lx\n",
38530@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
38531 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
38532 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
38533
38534+#ifdef __i386__
38535+
38536+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38537+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
38538+ if (!pmi_code)
38539+#elif !defined(CONFIG_PAX_KERNEXEC)
38540+ if (0)
38541+#endif
38542+
38543+#endif
38544+ screen_info.vesapm_seg = 0;
38545+
38546 if (screen_info.vesapm_seg) {
38547- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
38548- screen_info.vesapm_seg,screen_info.vesapm_off);
38549+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
38550+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
38551 }
38552
38553 if (screen_info.vesapm_seg < 0xc000)
38554@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
38555
38556 if (ypan || pmi_setpal) {
38557 unsigned short *pmi_base;
38558+
38559 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
38560- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
38561- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
38562+
38563+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38564+ pax_open_kernel();
38565+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
38566+#else
38567+ pmi_code = pmi_base;
38568+#endif
38569+
38570+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
38571+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
38572+
38573+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38574+ pmi_start = ktva_ktla(pmi_start);
38575+ pmi_pal = ktva_ktla(pmi_pal);
38576+ pax_close_kernel();
38577+#endif
38578+
38579 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
38580 if (pmi_base[3]) {
38581 printk(KERN_INFO "vesafb: pmi: ports = ");
38582@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
38583 info->node, info->fix.id);
38584 return 0;
38585 err:
38586+
38587+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38588+ module_free_exec(NULL, pmi_code);
38589+#endif
38590+
38591 if (info->screen_base)
38592 iounmap(info->screen_base);
38593 framebuffer_release(info);
38594diff -urNp linux-2.6.32.45/drivers/xen/sys-hypervisor.c linux-2.6.32.45/drivers/xen/sys-hypervisor.c
38595--- linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
38596+++ linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
38597@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
38598 return 0;
38599 }
38600
38601-static struct sysfs_ops hyp_sysfs_ops = {
38602+static const struct sysfs_ops hyp_sysfs_ops = {
38603 .show = hyp_sysfs_show,
38604 .store = hyp_sysfs_store,
38605 };
38606diff -urNp linux-2.6.32.45/fs/9p/vfs_inode.c linux-2.6.32.45/fs/9p/vfs_inode.c
38607--- linux-2.6.32.45/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
38608+++ linux-2.6.32.45/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
38609@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
38610 static void
38611 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38612 {
38613- char *s = nd_get_link(nd);
38614+ const char *s = nd_get_link(nd);
38615
38616 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
38617 IS_ERR(s) ? "<error>" : s);
38618diff -urNp linux-2.6.32.45/fs/aio.c linux-2.6.32.45/fs/aio.c
38619--- linux-2.6.32.45/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
38620+++ linux-2.6.32.45/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
38621@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
38622 size += sizeof(struct io_event) * nr_events;
38623 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
38624
38625- if (nr_pages < 0)
38626+ if (nr_pages <= 0)
38627 return -EINVAL;
38628
38629 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
38630@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
38631 struct aio_timeout to;
38632 int retry = 0;
38633
38634+ pax_track_stack();
38635+
38636 /* needed to zero any padding within an entry (there shouldn't be
38637 * any, but C is fun!
38638 */
38639@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
38640 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
38641 {
38642 ssize_t ret;
38643+ struct iovec iovstack;
38644
38645 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
38646 kiocb->ki_nbytes, 1,
38647- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
38648+ &iovstack, &kiocb->ki_iovec);
38649 if (ret < 0)
38650 goto out;
38651
38652+ if (kiocb->ki_iovec == &iovstack) {
38653+ kiocb->ki_inline_vec = iovstack;
38654+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
38655+ }
38656 kiocb->ki_nr_segs = kiocb->ki_nbytes;
38657 kiocb->ki_cur_seg = 0;
38658 /* ki_nbytes/left now reflect bytes instead of segs */
38659diff -urNp linux-2.6.32.45/fs/attr.c linux-2.6.32.45/fs/attr.c
38660--- linux-2.6.32.45/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
38661+++ linux-2.6.32.45/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
38662@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
38663 unsigned long limit;
38664
38665 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
38666+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
38667 if (limit != RLIM_INFINITY && offset > limit)
38668 goto out_sig;
38669 if (offset > inode->i_sb->s_maxbytes)
38670diff -urNp linux-2.6.32.45/fs/autofs/root.c linux-2.6.32.45/fs/autofs/root.c
38671--- linux-2.6.32.45/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
38672+++ linux-2.6.32.45/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
38673@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
38674 set_bit(n,sbi->symlink_bitmap);
38675 sl = &sbi->symlink[n];
38676 sl->len = strlen(symname);
38677- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
38678+ slsize = sl->len+1;
38679+ sl->data = kmalloc(slsize, GFP_KERNEL);
38680 if (!sl->data) {
38681 clear_bit(n,sbi->symlink_bitmap);
38682 unlock_kernel();
38683diff -urNp linux-2.6.32.45/fs/autofs4/symlink.c linux-2.6.32.45/fs/autofs4/symlink.c
38684--- linux-2.6.32.45/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
38685+++ linux-2.6.32.45/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
38686@@ -15,7 +15,7 @@
38687 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
38688 {
38689 struct autofs_info *ino = autofs4_dentry_ino(dentry);
38690- nd_set_link(nd, (char *)ino->u.symlink);
38691+ nd_set_link(nd, ino->u.symlink);
38692 return NULL;
38693 }
38694
38695diff -urNp linux-2.6.32.45/fs/befs/linuxvfs.c linux-2.6.32.45/fs/befs/linuxvfs.c
38696--- linux-2.6.32.45/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
38697+++ linux-2.6.32.45/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
38698@@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
38699 {
38700 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
38701 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
38702- char *link = nd_get_link(nd);
38703+ const char *link = nd_get_link(nd);
38704 if (!IS_ERR(link))
38705 kfree(link);
38706 }
38707diff -urNp linux-2.6.32.45/fs/binfmt_aout.c linux-2.6.32.45/fs/binfmt_aout.c
38708--- linux-2.6.32.45/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
38709+++ linux-2.6.32.45/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
38710@@ -16,6 +16,7 @@
38711 #include <linux/string.h>
38712 #include <linux/fs.h>
38713 #include <linux/file.h>
38714+#include <linux/security.h>
38715 #include <linux/stat.h>
38716 #include <linux/fcntl.h>
38717 #include <linux/ptrace.h>
38718@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
38719 #endif
38720 # define START_STACK(u) (u.start_stack)
38721
38722+ memset(&dump, 0, sizeof(dump));
38723+
38724 fs = get_fs();
38725 set_fs(KERNEL_DS);
38726 has_dumped = 1;
38727@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
38728
38729 /* If the size of the dump file exceeds the rlimit, then see what would happen
38730 if we wrote the stack, but not the data area. */
38731+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
38732 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
38733 dump.u_dsize = 0;
38734
38735 /* Make sure we have enough room to write the stack and data areas. */
38736+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
38737 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
38738 dump.u_ssize = 0;
38739
38740@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
38741 dump_size = dump.u_ssize << PAGE_SHIFT;
38742 DUMP_WRITE(dump_start,dump_size);
38743 }
38744-/* Finally dump the task struct. Not be used by gdb, but could be useful */
38745- set_fs(KERNEL_DS);
38746- DUMP_WRITE(current,sizeof(*current));
38747+/* Finally, let's not dump the task struct. It's not used by gdb, but could be useful to an attacker */
38748 end_coredump:
38749 set_fs(fs);
38750 return has_dumped;
38751@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
38752 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
38753 if (rlim >= RLIM_INFINITY)
38754 rlim = ~0;
38755+
38756+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
38757 if (ex.a_data + ex.a_bss > rlim)
38758 return -ENOMEM;
38759
38760@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
38761 install_exec_creds(bprm);
38762 current->flags &= ~PF_FORKNOEXEC;
38763
38764+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
38765+ current->mm->pax_flags = 0UL;
38766+#endif
38767+
38768+#ifdef CONFIG_PAX_PAGEEXEC
38769+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
38770+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
38771+
38772+#ifdef CONFIG_PAX_EMUTRAMP
38773+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
38774+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
38775+#endif
38776+
38777+#ifdef CONFIG_PAX_MPROTECT
38778+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
38779+ current->mm->pax_flags |= MF_PAX_MPROTECT;
38780+#endif
38781+
38782+ }
38783+#endif
38784+
38785 if (N_MAGIC(ex) == OMAGIC) {
38786 unsigned long text_addr, map_size;
38787 loff_t pos;
38788@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
38789
38790 down_write(&current->mm->mmap_sem);
38791 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
38792- PROT_READ | PROT_WRITE | PROT_EXEC,
38793+ PROT_READ | PROT_WRITE,
38794 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
38795 fd_offset + ex.a_text);
38796 up_write(&current->mm->mmap_sem);
38797diff -urNp linux-2.6.32.45/fs/binfmt_elf.c linux-2.6.32.45/fs/binfmt_elf.c
38798--- linux-2.6.32.45/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
38799+++ linux-2.6.32.45/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
38800@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
38801 #define elf_core_dump NULL
38802 #endif
38803
38804+#ifdef CONFIG_PAX_MPROTECT
38805+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
38806+#endif
38807+
38808 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
38809 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
38810 #else
38811@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
38812 .load_binary = load_elf_binary,
38813 .load_shlib = load_elf_library,
38814 .core_dump = elf_core_dump,
38815+
38816+#ifdef CONFIG_PAX_MPROTECT
38817+ .handle_mprotect= elf_handle_mprotect,
38818+#endif
38819+
38820 .min_coredump = ELF_EXEC_PAGESIZE,
38821 .hasvdso = 1
38822 };
38823@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
38824
38825 static int set_brk(unsigned long start, unsigned long end)
38826 {
38827+ unsigned long e = end;
38828+
38829 start = ELF_PAGEALIGN(start);
38830 end = ELF_PAGEALIGN(end);
38831 if (end > start) {
38832@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
38833 if (BAD_ADDR(addr))
38834 return addr;
38835 }
38836- current->mm->start_brk = current->mm->brk = end;
38837+ current->mm->start_brk = current->mm->brk = e;
38838 return 0;
38839 }
38840
38841@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
38842 elf_addr_t __user *u_rand_bytes;
38843 const char *k_platform = ELF_PLATFORM;
38844 const char *k_base_platform = ELF_BASE_PLATFORM;
38845- unsigned char k_rand_bytes[16];
38846+ u32 k_rand_bytes[4];
38847 int items;
38848 elf_addr_t *elf_info;
38849 int ei_index = 0;
38850 const struct cred *cred = current_cred();
38851 struct vm_area_struct *vma;
38852+ unsigned long saved_auxv[AT_VECTOR_SIZE];
38853+
38854+ pax_track_stack();
38855
38856 /*
38857 * In some cases (e.g. Hyper-Threading), we want to avoid L1
38858@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
38859 * Generate 16 random bytes for userspace PRNG seeding.
38860 */
38861 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
38862- u_rand_bytes = (elf_addr_t __user *)
38863- STACK_ALLOC(p, sizeof(k_rand_bytes));
38864+ srandom32(k_rand_bytes[0] ^ random32());
38865+ srandom32(k_rand_bytes[1] ^ random32());
38866+ srandom32(k_rand_bytes[2] ^ random32());
38867+ srandom32(k_rand_bytes[3] ^ random32());
38868+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
38869+ u_rand_bytes = (elf_addr_t __user *) p;
38870 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
38871 return -EFAULT;
38872
38873@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
38874 return -EFAULT;
38875 current->mm->env_end = p;
38876
38877+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
38878+
38879 /* Put the elf_info on the stack in the right place. */
38880 sp = (elf_addr_t __user *)envp + 1;
38881- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
38882+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
38883 return -EFAULT;
38884 return 0;
38885 }
38886@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
38887 {
38888 struct elf_phdr *elf_phdata;
38889 struct elf_phdr *eppnt;
38890- unsigned long load_addr = 0;
38891+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
38892 int load_addr_set = 0;
38893 unsigned long last_bss = 0, elf_bss = 0;
38894- unsigned long error = ~0UL;
38895+ unsigned long error = -EINVAL;
38896 unsigned long total_size;
38897 int retval, i, size;
38898
38899@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
38900 goto out_close;
38901 }
38902
38903+#ifdef CONFIG_PAX_SEGMEXEC
38904+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
38905+ pax_task_size = SEGMEXEC_TASK_SIZE;
38906+#endif
38907+
38908 eppnt = elf_phdata;
38909 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
38910 if (eppnt->p_type == PT_LOAD) {
38911@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
38912 k = load_addr + eppnt->p_vaddr;
38913 if (BAD_ADDR(k) ||
38914 eppnt->p_filesz > eppnt->p_memsz ||
38915- eppnt->p_memsz > TASK_SIZE ||
38916- TASK_SIZE - eppnt->p_memsz < k) {
38917+ eppnt->p_memsz > pax_task_size ||
38918+ pax_task_size - eppnt->p_memsz < k) {
38919 error = -ENOMEM;
38920 goto out_close;
38921 }
38922@@ -532,6 +557,194 @@ out:
38923 return error;
38924 }
38925
38926+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
38927+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
38928+{
38929+ unsigned long pax_flags = 0UL;
38930+
38931+#ifdef CONFIG_PAX_PAGEEXEC
38932+ if (elf_phdata->p_flags & PF_PAGEEXEC)
38933+ pax_flags |= MF_PAX_PAGEEXEC;
38934+#endif
38935+
38936+#ifdef CONFIG_PAX_SEGMEXEC
38937+ if (elf_phdata->p_flags & PF_SEGMEXEC)
38938+ pax_flags |= MF_PAX_SEGMEXEC;
38939+#endif
38940+
38941+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38942+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38943+ if (nx_enabled)
38944+ pax_flags &= ~MF_PAX_SEGMEXEC;
38945+ else
38946+ pax_flags &= ~MF_PAX_PAGEEXEC;
38947+ }
38948+#endif
38949+
38950+#ifdef CONFIG_PAX_EMUTRAMP
38951+ if (elf_phdata->p_flags & PF_EMUTRAMP)
38952+ pax_flags |= MF_PAX_EMUTRAMP;
38953+#endif
38954+
38955+#ifdef CONFIG_PAX_MPROTECT
38956+ if (elf_phdata->p_flags & PF_MPROTECT)
38957+ pax_flags |= MF_PAX_MPROTECT;
38958+#endif
38959+
38960+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
38961+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
38962+ pax_flags |= MF_PAX_RANDMMAP;
38963+#endif
38964+
38965+ return pax_flags;
38966+}
38967+#endif
38968+
38969+#ifdef CONFIG_PAX_PT_PAX_FLAGS
38970+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
38971+{
38972+ unsigned long pax_flags = 0UL;
38973+
38974+#ifdef CONFIG_PAX_PAGEEXEC
38975+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
38976+ pax_flags |= MF_PAX_PAGEEXEC;
38977+#endif
38978+
38979+#ifdef CONFIG_PAX_SEGMEXEC
38980+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
38981+ pax_flags |= MF_PAX_SEGMEXEC;
38982+#endif
38983+
38984+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38985+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38986+ if (nx_enabled)
38987+ pax_flags &= ~MF_PAX_SEGMEXEC;
38988+ else
38989+ pax_flags &= ~MF_PAX_PAGEEXEC;
38990+ }
38991+#endif
38992+
38993+#ifdef CONFIG_PAX_EMUTRAMP
38994+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
38995+ pax_flags |= MF_PAX_EMUTRAMP;
38996+#endif
38997+
38998+#ifdef CONFIG_PAX_MPROTECT
38999+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39000+ pax_flags |= MF_PAX_MPROTECT;
39001+#endif
39002+
39003+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39004+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39005+ pax_flags |= MF_PAX_RANDMMAP;
39006+#endif
39007+
39008+ return pax_flags;
39009+}
39010+#endif
39011+
39012+#ifdef CONFIG_PAX_EI_PAX
39013+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39014+{
39015+ unsigned long pax_flags = 0UL;
39016+
39017+#ifdef CONFIG_PAX_PAGEEXEC
39018+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39019+ pax_flags |= MF_PAX_PAGEEXEC;
39020+#endif
39021+
39022+#ifdef CONFIG_PAX_SEGMEXEC
39023+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39024+ pax_flags |= MF_PAX_SEGMEXEC;
39025+#endif
39026+
39027+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39028+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39029+ if (nx_enabled)
39030+ pax_flags &= ~MF_PAX_SEGMEXEC;
39031+ else
39032+ pax_flags &= ~MF_PAX_PAGEEXEC;
39033+ }
39034+#endif
39035+
39036+#ifdef CONFIG_PAX_EMUTRAMP
39037+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39038+ pax_flags |= MF_PAX_EMUTRAMP;
39039+#endif
39040+
39041+#ifdef CONFIG_PAX_MPROTECT
39042+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39043+ pax_flags |= MF_PAX_MPROTECT;
39044+#endif
39045+
39046+#ifdef CONFIG_PAX_ASLR
39047+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39048+ pax_flags |= MF_PAX_RANDMMAP;
39049+#endif
39050+
39051+ return pax_flags;
39052+}
39053+#endif
39054+
39055+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39056+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39057+{
39058+ unsigned long pax_flags = 0UL;
39059+
39060+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39061+ unsigned long i;
39062+ int found_flags = 0;
39063+#endif
39064+
39065+#ifdef CONFIG_PAX_EI_PAX
39066+ pax_flags = pax_parse_ei_pax(elf_ex);
39067+#endif
39068+
39069+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39070+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39071+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39072+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39073+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39074+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39075+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39076+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39077+ return -EINVAL;
39078+
39079+#ifdef CONFIG_PAX_SOFTMODE
39080+ if (pax_softmode)
39081+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
39082+ else
39083+#endif
39084+
39085+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39086+ found_flags = 1;
39087+ break;
39088+ }
39089+#endif
39090+
39091+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39092+ if (found_flags == 0) {
39093+ struct elf_phdr phdr;
39094+ memset(&phdr, 0, sizeof(phdr));
39095+ phdr.p_flags = PF_NOEMUTRAMP;
39096+#ifdef CONFIG_PAX_SOFTMODE
39097+ if (pax_softmode)
39098+ pax_flags = pax_parse_softmode(&phdr);
39099+ else
39100+#endif
39101+ pax_flags = pax_parse_hardmode(&phdr);
39102+ }
39103+#endif
39104+
39105+
39106+ if (0 > pax_check_flags(&pax_flags))
39107+ return -EINVAL;
39108+
39109+ current->mm->pax_flags = pax_flags;
39110+ return 0;
39111+}
39112+#endif
39113+
39114 /*
39115 * These are the functions used to load ELF style executables and shared
39116 * libraries. There is no binary dependent code anywhere else.
39117@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
39118 {
39119 unsigned int random_variable = 0;
39120
39121+#ifdef CONFIG_PAX_RANDUSTACK
39122+ if (randomize_va_space)
39123+ return stack_top - current->mm->delta_stack;
39124+#endif
39125+
39126 if ((current->flags & PF_RANDOMIZE) &&
39127 !(current->personality & ADDR_NO_RANDOMIZE)) {
39128 random_variable = get_random_int() & STACK_RND_MASK;
39129@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
39130 unsigned long load_addr = 0, load_bias = 0;
39131 int load_addr_set = 0;
39132 char * elf_interpreter = NULL;
39133- unsigned long error;
39134+ unsigned long error = 0;
39135 struct elf_phdr *elf_ppnt, *elf_phdata;
39136 unsigned long elf_bss, elf_brk;
39137 int retval, i;
39138@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
39139 unsigned long start_code, end_code, start_data, end_data;
39140 unsigned long reloc_func_desc = 0;
39141 int executable_stack = EXSTACK_DEFAULT;
39142- unsigned long def_flags = 0;
39143 struct {
39144 struct elfhdr elf_ex;
39145 struct elfhdr interp_elf_ex;
39146 } *loc;
39147+ unsigned long pax_task_size = TASK_SIZE;
39148
39149 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39150 if (!loc) {
39151@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
39152
39153 /* OK, This is the point of no return */
39154 current->flags &= ~PF_FORKNOEXEC;
39155- current->mm->def_flags = def_flags;
39156+
39157+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39158+ current->mm->pax_flags = 0UL;
39159+#endif
39160+
39161+#ifdef CONFIG_PAX_DLRESOLVE
39162+ current->mm->call_dl_resolve = 0UL;
39163+#endif
39164+
39165+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39166+ current->mm->call_syscall = 0UL;
39167+#endif
39168+
39169+#ifdef CONFIG_PAX_ASLR
39170+ current->mm->delta_mmap = 0UL;
39171+ current->mm->delta_stack = 0UL;
39172+#endif
39173+
39174+ current->mm->def_flags = 0;
39175+
39176+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39177+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39178+ send_sig(SIGKILL, current, 0);
39179+ goto out_free_dentry;
39180+ }
39181+#endif
39182+
39183+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39184+ pax_set_initial_flags(bprm);
39185+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39186+ if (pax_set_initial_flags_func)
39187+ (pax_set_initial_flags_func)(bprm);
39188+#endif
39189+
39190+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39191+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
39192+ current->mm->context.user_cs_limit = PAGE_SIZE;
39193+ current->mm->def_flags |= VM_PAGEEXEC;
39194+ }
39195+#endif
39196+
39197+#ifdef CONFIG_PAX_SEGMEXEC
39198+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39199+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39200+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39201+ pax_task_size = SEGMEXEC_TASK_SIZE;
39202+ }
39203+#endif
39204+
39205+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39206+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39207+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39208+ put_cpu();
39209+ }
39210+#endif
39211
39212 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39213 may depend on the personality. */
39214 SET_PERSONALITY(loc->elf_ex);
39215+
39216+#ifdef CONFIG_PAX_ASLR
39217+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39218+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39219+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39220+ }
39221+#endif
39222+
39223+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39224+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39225+ executable_stack = EXSTACK_DISABLE_X;
39226+ current->personality &= ~READ_IMPLIES_EXEC;
39227+ } else
39228+#endif
39229+
39230 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39231 current->personality |= READ_IMPLIES_EXEC;
39232
39233@@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
39234 #else
39235 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39236 #endif
39237+
39238+#ifdef CONFIG_PAX_RANDMMAP
39239+ /* PaX: randomize base address at the default exe base if requested */
39240+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39241+#ifdef CONFIG_SPARC64
39242+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39243+#else
39244+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39245+#endif
39246+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39247+ elf_flags |= MAP_FIXED;
39248+ }
39249+#endif
39250+
39251 }
39252
39253 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39254@@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
39255 * allowed task size. Note that p_filesz must always be
39256 * <= p_memsz so it is only necessary to check p_memsz.
39257 */
39258- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39259- elf_ppnt->p_memsz > TASK_SIZE ||
39260- TASK_SIZE - elf_ppnt->p_memsz < k) {
39261+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39262+ elf_ppnt->p_memsz > pax_task_size ||
39263+ pax_task_size - elf_ppnt->p_memsz < k) {
39264 /* set_brk can never work. Avoid overflows. */
39265 send_sig(SIGKILL, current, 0);
39266 retval = -EINVAL;
39267@@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
39268 start_data += load_bias;
39269 end_data += load_bias;
39270
39271+#ifdef CONFIG_PAX_RANDMMAP
39272+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39273+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39274+#endif
39275+
39276 /* Calling set_brk effectively mmaps the pages that we need
39277 * for the bss and break sections. We must do this before
39278 * mapping in the interpreter, to make sure it doesn't wind
39279@@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
39280 goto out_free_dentry;
39281 }
39282 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39283- send_sig(SIGSEGV, current, 0);
39284- retval = -EFAULT; /* Nobody gets to see this, but.. */
39285- goto out_free_dentry;
39286+ /*
39287+ * This bss-zeroing can fail if the ELF
39288+ * file specifies odd protections. So
39289+ * we don't check the return value
39290+ */
39291 }
39292
39293 if (elf_interpreter) {
39294@@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
39295 unsigned long n = off;
39296 if (n > PAGE_SIZE)
39297 n = PAGE_SIZE;
39298- if (!dump_write(file, buf, n))
39299+ if (!dump_write(file, buf, n)) {
39300+ free_page((unsigned long)buf);
39301 return 0;
39302+ }
39303 off -= n;
39304 }
39305 free_page((unsigned long)buf);
39306@@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
39307 * Decide what to dump of a segment, part, all or none.
39308 */
39309 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39310- unsigned long mm_flags)
39311+ unsigned long mm_flags, long signr)
39312 {
39313 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39314
39315@@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
39316 if (vma->vm_file == NULL)
39317 return 0;
39318
39319- if (FILTER(MAPPED_PRIVATE))
39320+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39321 goto whole;
39322
39323 /*
39324@@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
39325 #undef DUMP_WRITE
39326
39327 #define DUMP_WRITE(addr, nr) \
39328+ do { \
39329+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
39330 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
39331- goto end_coredump;
39332+ goto end_coredump; \
39333+ } while (0);
39334
39335 static void fill_elf_header(struct elfhdr *elf, int segs,
39336 u16 machine, u32 flags, u8 osabi)
39337@@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
39338 {
39339 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39340 int i = 0;
39341- do
39342+ do {
39343 i += 2;
39344- while (auxv[i - 2] != AT_NULL);
39345+ } while (auxv[i - 2] != AT_NULL);
39346 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39347 }
39348
39349@@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
39350 phdr.p_offset = offset;
39351 phdr.p_vaddr = vma->vm_start;
39352 phdr.p_paddr = 0;
39353- phdr.p_filesz = vma_dump_size(vma, mm_flags);
39354+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
39355 phdr.p_memsz = vma->vm_end - vma->vm_start;
39356 offset += phdr.p_filesz;
39357 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39358@@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
39359 unsigned long addr;
39360 unsigned long end;
39361
39362- end = vma->vm_start + vma_dump_size(vma, mm_flags);
39363+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
39364
39365 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39366 struct page *page;
39367@@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
39368 page = get_dump_page(addr);
39369 if (page) {
39370 void *kaddr = kmap(page);
39371+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39372 stop = ((size += PAGE_SIZE) > limit) ||
39373 !dump_write(file, kaddr, PAGE_SIZE);
39374 kunmap(page);
39375@@ -2042,6 +2356,97 @@ out:
39376
39377 #endif /* USE_ELF_CORE_DUMP */
39378
39379+#ifdef CONFIG_PAX_MPROTECT
39380+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
39381+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
39382+ * we'll remove VM_MAYWRITE for good on RELRO segments.
39383+ *
39384+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39385+ * basis because we want to allow the common case and not the special ones.
39386+ */
39387+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39388+{
39389+ struct elfhdr elf_h;
39390+ struct elf_phdr elf_p;
39391+ unsigned long i;
39392+ unsigned long oldflags;
39393+ bool is_textrel_rw, is_textrel_rx, is_relro;
39394+
39395+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39396+ return;
39397+
39398+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39399+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39400+
39401+#ifdef CONFIG_PAX_ELFRELOCS
39402+ /* possible TEXTREL */
39403+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39404+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39405+#else
39406+ is_textrel_rw = false;
39407+ is_textrel_rx = false;
39408+#endif
39409+
39410+ /* possible RELRO */
39411+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
39412+
39413+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
39414+ return;
39415+
39416+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
39417+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
39418+
39419+#ifdef CONFIG_PAX_ETEXECRELOCS
39420+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39421+#else
39422+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
39423+#endif
39424+
39425+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39426+ !elf_check_arch(&elf_h) ||
39427+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
39428+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
39429+ return;
39430+
39431+ for (i = 0UL; i < elf_h.e_phnum; i++) {
39432+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
39433+ return;
39434+ switch (elf_p.p_type) {
39435+ case PT_DYNAMIC:
39436+ if (!is_textrel_rw && !is_textrel_rx)
39437+ continue;
39438+ i = 0UL;
39439+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
39440+ elf_dyn dyn;
39441+
39442+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
39443+ return;
39444+ if (dyn.d_tag == DT_NULL)
39445+ return;
39446+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
39447+ gr_log_textrel(vma);
39448+ if (is_textrel_rw)
39449+ vma->vm_flags |= VM_MAYWRITE;
39450+ else
39451+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
39452+ vma->vm_flags &= ~VM_MAYWRITE;
39453+ return;
39454+ }
39455+ i++;
39456+ }
39457+ return;
39458+
39459+ case PT_GNU_RELRO:
39460+ if (!is_relro)
39461+ continue;
39462+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
39463+ vma->vm_flags &= ~VM_MAYWRITE;
39464+ return;
39465+ }
39466+ }
39467+}
39468+#endif
39469+
39470 static int __init init_elf_binfmt(void)
39471 {
39472 return register_binfmt(&elf_format);
39473diff -urNp linux-2.6.32.45/fs/binfmt_flat.c linux-2.6.32.45/fs/binfmt_flat.c
39474--- linux-2.6.32.45/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
39475+++ linux-2.6.32.45/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
39476@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
39477 realdatastart = (unsigned long) -ENOMEM;
39478 printk("Unable to allocate RAM for process data, errno %d\n",
39479 (int)-realdatastart);
39480+ down_write(&current->mm->mmap_sem);
39481 do_munmap(current->mm, textpos, text_len);
39482+ up_write(&current->mm->mmap_sem);
39483 ret = realdatastart;
39484 goto err;
39485 }
39486@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
39487 }
39488 if (IS_ERR_VALUE(result)) {
39489 printk("Unable to read data+bss, errno %d\n", (int)-result);
39490+ down_write(&current->mm->mmap_sem);
39491 do_munmap(current->mm, textpos, text_len);
39492 do_munmap(current->mm, realdatastart, data_len + extra);
39493+ up_write(&current->mm->mmap_sem);
39494 ret = result;
39495 goto err;
39496 }
39497@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
39498 }
39499 if (IS_ERR_VALUE(result)) {
39500 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
39501+ down_write(&current->mm->mmap_sem);
39502 do_munmap(current->mm, textpos, text_len + data_len + extra +
39503 MAX_SHARED_LIBS * sizeof(unsigned long));
39504+ up_write(&current->mm->mmap_sem);
39505 ret = result;
39506 goto err;
39507 }
39508diff -urNp linux-2.6.32.45/fs/bio.c linux-2.6.32.45/fs/bio.c
39509--- linux-2.6.32.45/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
39510+++ linux-2.6.32.45/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
39511@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
39512
39513 i = 0;
39514 while (i < bio_slab_nr) {
39515- struct bio_slab *bslab = &bio_slabs[i];
39516+ bslab = &bio_slabs[i];
39517
39518 if (!bslab->slab && entry == -1)
39519 entry = i;
39520@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
39521 const int read = bio_data_dir(bio) == READ;
39522 struct bio_map_data *bmd = bio->bi_private;
39523 int i;
39524- char *p = bmd->sgvecs[0].iov_base;
39525+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
39526
39527 __bio_for_each_segment(bvec, bio, i, 0) {
39528 char *addr = page_address(bvec->bv_page);
39529diff -urNp linux-2.6.32.45/fs/block_dev.c linux-2.6.32.45/fs/block_dev.c
39530--- linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:35:29.000000000 -0400
39531+++ linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:34:00.000000000 -0400
39532@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
39533 else if (bdev->bd_contains == bdev)
39534 res = 0; /* is a whole device which isn't held */
39535
39536- else if (bdev->bd_contains->bd_holder == bd_claim)
39537+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
39538 res = 0; /* is a partition of a device that is being partitioned */
39539 else if (bdev->bd_contains->bd_holder != NULL)
39540 res = -EBUSY; /* is a partition of a held device */
39541diff -urNp linux-2.6.32.45/fs/btrfs/ctree.c linux-2.6.32.45/fs/btrfs/ctree.c
39542--- linux-2.6.32.45/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
39543+++ linux-2.6.32.45/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
39544@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
39545 free_extent_buffer(buf);
39546 add_root_to_dirty_list(root);
39547 } else {
39548- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
39549- parent_start = parent->start;
39550- else
39551+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
39552+ if (parent)
39553+ parent_start = parent->start;
39554+ else
39555+ parent_start = 0;
39556+ } else
39557 parent_start = 0;
39558
39559 WARN_ON(trans->transid != btrfs_header_generation(parent));
39560@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
39561
39562 ret = 0;
39563 if (slot == 0) {
39564- struct btrfs_disk_key disk_key;
39565 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
39566 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
39567 }
39568diff -urNp linux-2.6.32.45/fs/btrfs/disk-io.c linux-2.6.32.45/fs/btrfs/disk-io.c
39569--- linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
39570+++ linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
39571@@ -39,7 +39,7 @@
39572 #include "tree-log.h"
39573 #include "free-space-cache.h"
39574
39575-static struct extent_io_ops btree_extent_io_ops;
39576+static const struct extent_io_ops btree_extent_io_ops;
39577 static void end_workqueue_fn(struct btrfs_work *work);
39578 static void free_fs_root(struct btrfs_root *root);
39579
39580@@ -2607,7 +2607,7 @@ out:
39581 return 0;
39582 }
39583
39584-static struct extent_io_ops btree_extent_io_ops = {
39585+static const struct extent_io_ops btree_extent_io_ops = {
39586 .write_cache_pages_lock_hook = btree_lock_page_hook,
39587 .readpage_end_io_hook = btree_readpage_end_io_hook,
39588 .submit_bio_hook = btree_submit_bio_hook,
39589diff -urNp linux-2.6.32.45/fs/btrfs/extent_io.h linux-2.6.32.45/fs/btrfs/extent_io.h
39590--- linux-2.6.32.45/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
39591+++ linux-2.6.32.45/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
39592@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
39593 struct bio *bio, int mirror_num,
39594 unsigned long bio_flags);
39595 struct extent_io_ops {
39596- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
39597+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
39598 u64 start, u64 end, int *page_started,
39599 unsigned long *nr_written);
39600- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
39601- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
39602+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
39603+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
39604 extent_submit_bio_hook_t *submit_bio_hook;
39605- int (*merge_bio_hook)(struct page *page, unsigned long offset,
39606+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
39607 size_t size, struct bio *bio,
39608 unsigned long bio_flags);
39609- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
39610- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
39611+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
39612+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
39613 u64 start, u64 end,
39614 struct extent_state *state);
39615- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
39616+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
39617 u64 start, u64 end,
39618 struct extent_state *state);
39619- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39620+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39621 struct extent_state *state);
39622- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39623+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39624 struct extent_state *state, int uptodate);
39625- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
39626+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
39627 unsigned long old, unsigned long bits);
39628- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
39629+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
39630 unsigned long bits);
39631- int (*merge_extent_hook)(struct inode *inode,
39632+ int (* const merge_extent_hook)(struct inode *inode,
39633 struct extent_state *new,
39634 struct extent_state *other);
39635- int (*split_extent_hook)(struct inode *inode,
39636+ int (* const split_extent_hook)(struct inode *inode,
39637 struct extent_state *orig, u64 split);
39638- int (*write_cache_pages_lock_hook)(struct page *page);
39639+ int (* const write_cache_pages_lock_hook)(struct page *page);
39640 };
39641
39642 struct extent_io_tree {
39643@@ -88,7 +88,7 @@ struct extent_io_tree {
39644 u64 dirty_bytes;
39645 spinlock_t lock;
39646 spinlock_t buffer_lock;
39647- struct extent_io_ops *ops;
39648+ const struct extent_io_ops *ops;
39649 };
39650
39651 struct extent_state {
39652diff -urNp linux-2.6.32.45/fs/btrfs/extent-tree.c linux-2.6.32.45/fs/btrfs/extent-tree.c
39653--- linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
39654+++ linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
39655@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
39656 u64 group_start = group->key.objectid;
39657 new_extents = kmalloc(sizeof(*new_extents),
39658 GFP_NOFS);
39659+ if (!new_extents) {
39660+ ret = -ENOMEM;
39661+ goto out;
39662+ }
39663 nr_extents = 1;
39664 ret = get_new_locations(reloc_inode,
39665 extent_key,
39666diff -urNp linux-2.6.32.45/fs/btrfs/free-space-cache.c linux-2.6.32.45/fs/btrfs/free-space-cache.c
39667--- linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
39668+++ linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
39669@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
39670
39671 while(1) {
39672 if (entry->bytes < bytes || entry->offset < min_start) {
39673- struct rb_node *node;
39674-
39675 node = rb_next(&entry->offset_index);
39676 if (!node)
39677 break;
39678@@ -1226,7 +1224,7 @@ again:
39679 */
39680 while (entry->bitmap || found_bitmap ||
39681 (!entry->bitmap && entry->bytes < min_bytes)) {
39682- struct rb_node *node = rb_next(&entry->offset_index);
39683+ node = rb_next(&entry->offset_index);
39684
39685 if (entry->bitmap && entry->bytes > bytes + empty_size) {
39686 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
39687diff -urNp linux-2.6.32.45/fs/btrfs/inode.c linux-2.6.32.45/fs/btrfs/inode.c
39688--- linux-2.6.32.45/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
39689+++ linux-2.6.32.45/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
39690@@ -63,7 +63,7 @@ static const struct inode_operations btr
39691 static const struct address_space_operations btrfs_aops;
39692 static const struct address_space_operations btrfs_symlink_aops;
39693 static const struct file_operations btrfs_dir_file_operations;
39694-static struct extent_io_ops btrfs_extent_io_ops;
39695+static const struct extent_io_ops btrfs_extent_io_ops;
39696
39697 static struct kmem_cache *btrfs_inode_cachep;
39698 struct kmem_cache *btrfs_trans_handle_cachep;
39699@@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
39700 1, 0, NULL, GFP_NOFS);
39701 while (start < end) {
39702 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
39703+ BUG_ON(!async_cow);
39704 async_cow->inode = inode;
39705 async_cow->root = root;
39706 async_cow->locked_page = locked_page;
39707@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
39708 inline_size = btrfs_file_extent_inline_item_len(leaf,
39709 btrfs_item_nr(leaf, path->slots[0]));
39710 tmp = kmalloc(inline_size, GFP_NOFS);
39711+ if (!tmp)
39712+ return -ENOMEM;
39713 ptr = btrfs_file_extent_inline_start(item);
39714
39715 read_extent_buffer(leaf, tmp, ptr, inline_size);
39716@@ -5410,7 +5413,7 @@ fail:
39717 return -ENOMEM;
39718 }
39719
39720-static int btrfs_getattr(struct vfsmount *mnt,
39721+int btrfs_getattr(struct vfsmount *mnt,
39722 struct dentry *dentry, struct kstat *stat)
39723 {
39724 struct inode *inode = dentry->d_inode;
39725@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
39726 return 0;
39727 }
39728
39729+EXPORT_SYMBOL(btrfs_getattr);
39730+
39731+dev_t get_btrfs_dev_from_inode(struct inode *inode)
39732+{
39733+ return BTRFS_I(inode)->root->anon_super.s_dev;
39734+}
39735+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
39736+
39737 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
39738 struct inode *new_dir, struct dentry *new_dentry)
39739 {
39740@@ -5972,7 +5983,7 @@ static const struct file_operations btrf
39741 .fsync = btrfs_sync_file,
39742 };
39743
39744-static struct extent_io_ops btrfs_extent_io_ops = {
39745+static const struct extent_io_ops btrfs_extent_io_ops = {
39746 .fill_delalloc = run_delalloc_range,
39747 .submit_bio_hook = btrfs_submit_bio_hook,
39748 .merge_bio_hook = btrfs_merge_bio_hook,
39749diff -urNp linux-2.6.32.45/fs/btrfs/relocation.c linux-2.6.32.45/fs/btrfs/relocation.c
39750--- linux-2.6.32.45/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
39751+++ linux-2.6.32.45/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
39752@@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
39753 }
39754 spin_unlock(&rc->reloc_root_tree.lock);
39755
39756- BUG_ON((struct btrfs_root *)node->data != root);
39757+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
39758
39759 if (!del) {
39760 spin_lock(&rc->reloc_root_tree.lock);
39761diff -urNp linux-2.6.32.45/fs/btrfs/sysfs.c linux-2.6.32.45/fs/btrfs/sysfs.c
39762--- linux-2.6.32.45/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
39763+++ linux-2.6.32.45/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
39764@@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
39765 complete(&root->kobj_unregister);
39766 }
39767
39768-static struct sysfs_ops btrfs_super_attr_ops = {
39769+static const struct sysfs_ops btrfs_super_attr_ops = {
39770 .show = btrfs_super_attr_show,
39771 .store = btrfs_super_attr_store,
39772 };
39773
39774-static struct sysfs_ops btrfs_root_attr_ops = {
39775+static const struct sysfs_ops btrfs_root_attr_ops = {
39776 .show = btrfs_root_attr_show,
39777 .store = btrfs_root_attr_store,
39778 };
39779diff -urNp linux-2.6.32.45/fs/buffer.c linux-2.6.32.45/fs/buffer.c
39780--- linux-2.6.32.45/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
39781+++ linux-2.6.32.45/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
39782@@ -25,6 +25,7 @@
39783 #include <linux/percpu.h>
39784 #include <linux/slab.h>
39785 #include <linux/capability.h>
39786+#include <linux/security.h>
39787 #include <linux/blkdev.h>
39788 #include <linux/file.h>
39789 #include <linux/quotaops.h>
39790diff -urNp linux-2.6.32.45/fs/cachefiles/bind.c linux-2.6.32.45/fs/cachefiles/bind.c
39791--- linux-2.6.32.45/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
39792+++ linux-2.6.32.45/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
39793@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
39794 args);
39795
39796 /* start by checking things over */
39797- ASSERT(cache->fstop_percent >= 0 &&
39798- cache->fstop_percent < cache->fcull_percent &&
39799+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
39800 cache->fcull_percent < cache->frun_percent &&
39801 cache->frun_percent < 100);
39802
39803- ASSERT(cache->bstop_percent >= 0 &&
39804- cache->bstop_percent < cache->bcull_percent &&
39805+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
39806 cache->bcull_percent < cache->brun_percent &&
39807 cache->brun_percent < 100);
39808
39809diff -urNp linux-2.6.32.45/fs/cachefiles/daemon.c linux-2.6.32.45/fs/cachefiles/daemon.c
39810--- linux-2.6.32.45/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
39811+++ linux-2.6.32.45/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
39812@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
39813 if (test_bit(CACHEFILES_DEAD, &cache->flags))
39814 return -EIO;
39815
39816- if (datalen < 0 || datalen > PAGE_SIZE - 1)
39817+ if (datalen > PAGE_SIZE - 1)
39818 return -EOPNOTSUPP;
39819
39820 /* drag the command string into the kernel so we can parse it */
39821@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
39822 if (args[0] != '%' || args[1] != '\0')
39823 return -EINVAL;
39824
39825- if (fstop < 0 || fstop >= cache->fcull_percent)
39826+ if (fstop >= cache->fcull_percent)
39827 return cachefiles_daemon_range_error(cache, args);
39828
39829 cache->fstop_percent = fstop;
39830@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
39831 if (args[0] != '%' || args[1] != '\0')
39832 return -EINVAL;
39833
39834- if (bstop < 0 || bstop >= cache->bcull_percent)
39835+ if (bstop >= cache->bcull_percent)
39836 return cachefiles_daemon_range_error(cache, args);
39837
39838 cache->bstop_percent = bstop;
39839diff -urNp linux-2.6.32.45/fs/cachefiles/internal.h linux-2.6.32.45/fs/cachefiles/internal.h
39840--- linux-2.6.32.45/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
39841+++ linux-2.6.32.45/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
39842@@ -56,7 +56,7 @@ struct cachefiles_cache {
39843 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
39844 struct rb_root active_nodes; /* active nodes (can't be culled) */
39845 rwlock_t active_lock; /* lock for active_nodes */
39846- atomic_t gravecounter; /* graveyard uniquifier */
39847+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
39848 unsigned frun_percent; /* when to stop culling (% files) */
39849 unsigned fcull_percent; /* when to start culling (% files) */
39850 unsigned fstop_percent; /* when to stop allocating (% files) */
39851@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
39852 * proc.c
39853 */
39854 #ifdef CONFIG_CACHEFILES_HISTOGRAM
39855-extern atomic_t cachefiles_lookup_histogram[HZ];
39856-extern atomic_t cachefiles_mkdir_histogram[HZ];
39857-extern atomic_t cachefiles_create_histogram[HZ];
39858+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
39859+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
39860+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
39861
39862 extern int __init cachefiles_proc_init(void);
39863 extern void cachefiles_proc_cleanup(void);
39864 static inline
39865-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
39866+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
39867 {
39868 unsigned long jif = jiffies - start_jif;
39869 if (jif >= HZ)
39870 jif = HZ - 1;
39871- atomic_inc(&histogram[jif]);
39872+ atomic_inc_unchecked(&histogram[jif]);
39873 }
39874
39875 #else
39876diff -urNp linux-2.6.32.45/fs/cachefiles/namei.c linux-2.6.32.45/fs/cachefiles/namei.c
39877--- linux-2.6.32.45/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
39878+++ linux-2.6.32.45/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
39879@@ -250,7 +250,7 @@ try_again:
39880 /* first step is to make up a grave dentry in the graveyard */
39881 sprintf(nbuffer, "%08x%08x",
39882 (uint32_t) get_seconds(),
39883- (uint32_t) atomic_inc_return(&cache->gravecounter));
39884+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
39885
39886 /* do the multiway lock magic */
39887 trap = lock_rename(cache->graveyard, dir);
39888diff -urNp linux-2.6.32.45/fs/cachefiles/proc.c linux-2.6.32.45/fs/cachefiles/proc.c
39889--- linux-2.6.32.45/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
39890+++ linux-2.6.32.45/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
39891@@ -14,9 +14,9 @@
39892 #include <linux/seq_file.h>
39893 #include "internal.h"
39894
39895-atomic_t cachefiles_lookup_histogram[HZ];
39896-atomic_t cachefiles_mkdir_histogram[HZ];
39897-atomic_t cachefiles_create_histogram[HZ];
39898+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
39899+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
39900+atomic_unchecked_t cachefiles_create_histogram[HZ];
39901
39902 /*
39903 * display the latency histogram
39904@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
39905 return 0;
39906 default:
39907 index = (unsigned long) v - 3;
39908- x = atomic_read(&cachefiles_lookup_histogram[index]);
39909- y = atomic_read(&cachefiles_mkdir_histogram[index]);
39910- z = atomic_read(&cachefiles_create_histogram[index]);
39911+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
39912+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
39913+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
39914 if (x == 0 && y == 0 && z == 0)
39915 return 0;
39916
39917diff -urNp linux-2.6.32.45/fs/cachefiles/rdwr.c linux-2.6.32.45/fs/cachefiles/rdwr.c
39918--- linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
39919+++ linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
39920@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
39921 old_fs = get_fs();
39922 set_fs(KERNEL_DS);
39923 ret = file->f_op->write(
39924- file, (const void __user *) data, len, &pos);
39925+ file, (__force const void __user *) data, len, &pos);
39926 set_fs(old_fs);
39927 kunmap(page);
39928 if (ret != len)
39929diff -urNp linux-2.6.32.45/fs/cifs/cifs_debug.c linux-2.6.32.45/fs/cifs/cifs_debug.c
39930--- linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
39931+++ linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
39932@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
39933 tcon = list_entry(tmp3,
39934 struct cifsTconInfo,
39935 tcon_list);
39936- atomic_set(&tcon->num_smbs_sent, 0);
39937- atomic_set(&tcon->num_writes, 0);
39938- atomic_set(&tcon->num_reads, 0);
39939- atomic_set(&tcon->num_oplock_brks, 0);
39940- atomic_set(&tcon->num_opens, 0);
39941- atomic_set(&tcon->num_posixopens, 0);
39942- atomic_set(&tcon->num_posixmkdirs, 0);
39943- atomic_set(&tcon->num_closes, 0);
39944- atomic_set(&tcon->num_deletes, 0);
39945- atomic_set(&tcon->num_mkdirs, 0);
39946- atomic_set(&tcon->num_rmdirs, 0);
39947- atomic_set(&tcon->num_renames, 0);
39948- atomic_set(&tcon->num_t2renames, 0);
39949- atomic_set(&tcon->num_ffirst, 0);
39950- atomic_set(&tcon->num_fnext, 0);
39951- atomic_set(&tcon->num_fclose, 0);
39952- atomic_set(&tcon->num_hardlinks, 0);
39953- atomic_set(&tcon->num_symlinks, 0);
39954- atomic_set(&tcon->num_locks, 0);
39955+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
39956+ atomic_set_unchecked(&tcon->num_writes, 0);
39957+ atomic_set_unchecked(&tcon->num_reads, 0);
39958+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
39959+ atomic_set_unchecked(&tcon->num_opens, 0);
39960+ atomic_set_unchecked(&tcon->num_posixopens, 0);
39961+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
39962+ atomic_set_unchecked(&tcon->num_closes, 0);
39963+ atomic_set_unchecked(&tcon->num_deletes, 0);
39964+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
39965+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
39966+ atomic_set_unchecked(&tcon->num_renames, 0);
39967+ atomic_set_unchecked(&tcon->num_t2renames, 0);
39968+ atomic_set_unchecked(&tcon->num_ffirst, 0);
39969+ atomic_set_unchecked(&tcon->num_fnext, 0);
39970+ atomic_set_unchecked(&tcon->num_fclose, 0);
39971+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
39972+ atomic_set_unchecked(&tcon->num_symlinks, 0);
39973+ atomic_set_unchecked(&tcon->num_locks, 0);
39974 }
39975 }
39976 }
39977@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
39978 if (tcon->need_reconnect)
39979 seq_puts(m, "\tDISCONNECTED ");
39980 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
39981- atomic_read(&tcon->num_smbs_sent),
39982- atomic_read(&tcon->num_oplock_brks));
39983+ atomic_read_unchecked(&tcon->num_smbs_sent),
39984+ atomic_read_unchecked(&tcon->num_oplock_brks));
39985 seq_printf(m, "\nReads: %d Bytes: %lld",
39986- atomic_read(&tcon->num_reads),
39987+ atomic_read_unchecked(&tcon->num_reads),
39988 (long long)(tcon->bytes_read));
39989 seq_printf(m, "\nWrites: %d Bytes: %lld",
39990- atomic_read(&tcon->num_writes),
39991+ atomic_read_unchecked(&tcon->num_writes),
39992 (long long)(tcon->bytes_written));
39993 seq_printf(m, "\nFlushes: %d",
39994- atomic_read(&tcon->num_flushes));
39995+ atomic_read_unchecked(&tcon->num_flushes));
39996 seq_printf(m, "\nLocks: %d HardLinks: %d "
39997 "Symlinks: %d",
39998- atomic_read(&tcon->num_locks),
39999- atomic_read(&tcon->num_hardlinks),
40000- atomic_read(&tcon->num_symlinks));
40001+ atomic_read_unchecked(&tcon->num_locks),
40002+ atomic_read_unchecked(&tcon->num_hardlinks),
40003+ atomic_read_unchecked(&tcon->num_symlinks));
40004 seq_printf(m, "\nOpens: %d Closes: %d "
40005 "Deletes: %d",
40006- atomic_read(&tcon->num_opens),
40007- atomic_read(&tcon->num_closes),
40008- atomic_read(&tcon->num_deletes));
40009+ atomic_read_unchecked(&tcon->num_opens),
40010+ atomic_read_unchecked(&tcon->num_closes),
40011+ atomic_read_unchecked(&tcon->num_deletes));
40012 seq_printf(m, "\nPosix Opens: %d "
40013 "Posix Mkdirs: %d",
40014- atomic_read(&tcon->num_posixopens),
40015- atomic_read(&tcon->num_posixmkdirs));
40016+ atomic_read_unchecked(&tcon->num_posixopens),
40017+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40018 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40019- atomic_read(&tcon->num_mkdirs),
40020- atomic_read(&tcon->num_rmdirs));
40021+ atomic_read_unchecked(&tcon->num_mkdirs),
40022+ atomic_read_unchecked(&tcon->num_rmdirs));
40023 seq_printf(m, "\nRenames: %d T2 Renames %d",
40024- atomic_read(&tcon->num_renames),
40025- atomic_read(&tcon->num_t2renames));
40026+ atomic_read_unchecked(&tcon->num_renames),
40027+ atomic_read_unchecked(&tcon->num_t2renames));
40028 seq_printf(m, "\nFindFirst: %d FNext %d "
40029 "FClose %d",
40030- atomic_read(&tcon->num_ffirst),
40031- atomic_read(&tcon->num_fnext),
40032- atomic_read(&tcon->num_fclose));
40033+ atomic_read_unchecked(&tcon->num_ffirst),
40034+ atomic_read_unchecked(&tcon->num_fnext),
40035+ atomic_read_unchecked(&tcon->num_fclose));
40036 }
40037 }
40038 }
40039diff -urNp linux-2.6.32.45/fs/cifs/cifsglob.h linux-2.6.32.45/fs/cifs/cifsglob.h
40040--- linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:35:29.000000000 -0400
40041+++ linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:34:00.000000000 -0400
40042@@ -252,28 +252,28 @@ struct cifsTconInfo {
40043 __u16 Flags; /* optional support bits */
40044 enum statusEnum tidStatus;
40045 #ifdef CONFIG_CIFS_STATS
40046- atomic_t num_smbs_sent;
40047- atomic_t num_writes;
40048- atomic_t num_reads;
40049- atomic_t num_flushes;
40050- atomic_t num_oplock_brks;
40051- atomic_t num_opens;
40052- atomic_t num_closes;
40053- atomic_t num_deletes;
40054- atomic_t num_mkdirs;
40055- atomic_t num_posixopens;
40056- atomic_t num_posixmkdirs;
40057- atomic_t num_rmdirs;
40058- atomic_t num_renames;
40059- atomic_t num_t2renames;
40060- atomic_t num_ffirst;
40061- atomic_t num_fnext;
40062- atomic_t num_fclose;
40063- atomic_t num_hardlinks;
40064- atomic_t num_symlinks;
40065- atomic_t num_locks;
40066- atomic_t num_acl_get;
40067- atomic_t num_acl_set;
40068+ atomic_unchecked_t num_smbs_sent;
40069+ atomic_unchecked_t num_writes;
40070+ atomic_unchecked_t num_reads;
40071+ atomic_unchecked_t num_flushes;
40072+ atomic_unchecked_t num_oplock_brks;
40073+ atomic_unchecked_t num_opens;
40074+ atomic_unchecked_t num_closes;
40075+ atomic_unchecked_t num_deletes;
40076+ atomic_unchecked_t num_mkdirs;
40077+ atomic_unchecked_t num_posixopens;
40078+ atomic_unchecked_t num_posixmkdirs;
40079+ atomic_unchecked_t num_rmdirs;
40080+ atomic_unchecked_t num_renames;
40081+ atomic_unchecked_t num_t2renames;
40082+ atomic_unchecked_t num_ffirst;
40083+ atomic_unchecked_t num_fnext;
40084+ atomic_unchecked_t num_fclose;
40085+ atomic_unchecked_t num_hardlinks;
40086+ atomic_unchecked_t num_symlinks;
40087+ atomic_unchecked_t num_locks;
40088+ atomic_unchecked_t num_acl_get;
40089+ atomic_unchecked_t num_acl_set;
40090 #ifdef CONFIG_CIFS_STATS2
40091 unsigned long long time_writes;
40092 unsigned long long time_reads;
40093@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
40094 }
40095
40096 #ifdef CONFIG_CIFS_STATS
40097-#define cifs_stats_inc atomic_inc
40098+#define cifs_stats_inc atomic_inc_unchecked
40099
40100 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
40101 unsigned int bytes)
40102diff -urNp linux-2.6.32.45/fs/cifs/link.c linux-2.6.32.45/fs/cifs/link.c
40103--- linux-2.6.32.45/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
40104+++ linux-2.6.32.45/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
40105@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
40106
40107 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40108 {
40109- char *p = nd_get_link(nd);
40110+ const char *p = nd_get_link(nd);
40111 if (!IS_ERR(p))
40112 kfree(p);
40113 }
40114diff -urNp linux-2.6.32.45/fs/coda/cache.c linux-2.6.32.45/fs/coda/cache.c
40115--- linux-2.6.32.45/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
40116+++ linux-2.6.32.45/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
40117@@ -24,14 +24,14 @@
40118 #include <linux/coda_fs_i.h>
40119 #include <linux/coda_cache.h>
40120
40121-static atomic_t permission_epoch = ATOMIC_INIT(0);
40122+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40123
40124 /* replace or extend an acl cache hit */
40125 void coda_cache_enter(struct inode *inode, int mask)
40126 {
40127 struct coda_inode_info *cii = ITOC(inode);
40128
40129- cii->c_cached_epoch = atomic_read(&permission_epoch);
40130+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40131 if (cii->c_uid != current_fsuid()) {
40132 cii->c_uid = current_fsuid();
40133 cii->c_cached_perm = mask;
40134@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
40135 void coda_cache_clear_inode(struct inode *inode)
40136 {
40137 struct coda_inode_info *cii = ITOC(inode);
40138- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40139+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40140 }
40141
40142 /* remove all acl caches */
40143 void coda_cache_clear_all(struct super_block *sb)
40144 {
40145- atomic_inc(&permission_epoch);
40146+ atomic_inc_unchecked(&permission_epoch);
40147 }
40148
40149
40150@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
40151
40152 hit = (mask & cii->c_cached_perm) == mask &&
40153 cii->c_uid == current_fsuid() &&
40154- cii->c_cached_epoch == atomic_read(&permission_epoch);
40155+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40156
40157 return hit;
40158 }
40159diff -urNp linux-2.6.32.45/fs/compat_binfmt_elf.c linux-2.6.32.45/fs/compat_binfmt_elf.c
40160--- linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
40161+++ linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
40162@@ -29,10 +29,12 @@
40163 #undef elfhdr
40164 #undef elf_phdr
40165 #undef elf_note
40166+#undef elf_dyn
40167 #undef elf_addr_t
40168 #define elfhdr elf32_hdr
40169 #define elf_phdr elf32_phdr
40170 #define elf_note elf32_note
40171+#define elf_dyn Elf32_Dyn
40172 #define elf_addr_t Elf32_Addr
40173
40174 /*
40175diff -urNp linux-2.6.32.45/fs/compat.c linux-2.6.32.45/fs/compat.c
40176--- linux-2.6.32.45/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
40177+++ linux-2.6.32.45/fs/compat.c 2011-08-11 19:56:56.000000000 -0400
40178@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
40179
40180 struct compat_readdir_callback {
40181 struct compat_old_linux_dirent __user *dirent;
40182+ struct file * file;
40183 int result;
40184 };
40185
40186@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
40187 buf->result = -EOVERFLOW;
40188 return -EOVERFLOW;
40189 }
40190+
40191+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40192+ return 0;
40193+
40194 buf->result++;
40195 dirent = buf->dirent;
40196 if (!access_ok(VERIFY_WRITE, dirent,
40197@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
40198
40199 buf.result = 0;
40200 buf.dirent = dirent;
40201+ buf.file = file;
40202
40203 error = vfs_readdir(file, compat_fillonedir, &buf);
40204 if (buf.result)
40205@@ -899,6 +905,7 @@ struct compat_linux_dirent {
40206 struct compat_getdents_callback {
40207 struct compat_linux_dirent __user *current_dir;
40208 struct compat_linux_dirent __user *previous;
40209+ struct file * file;
40210 int count;
40211 int error;
40212 };
40213@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
40214 buf->error = -EOVERFLOW;
40215 return -EOVERFLOW;
40216 }
40217+
40218+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40219+ return 0;
40220+
40221 dirent = buf->previous;
40222 if (dirent) {
40223 if (__put_user(offset, &dirent->d_off))
40224@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
40225 buf.previous = NULL;
40226 buf.count = count;
40227 buf.error = 0;
40228+ buf.file = file;
40229
40230 error = vfs_readdir(file, compat_filldir, &buf);
40231 if (error >= 0)
40232@@ -987,6 +999,7 @@ out:
40233 struct compat_getdents_callback64 {
40234 struct linux_dirent64 __user *current_dir;
40235 struct linux_dirent64 __user *previous;
40236+ struct file * file;
40237 int count;
40238 int error;
40239 };
40240@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
40241 buf->error = -EINVAL; /* only used if we fail.. */
40242 if (reclen > buf->count)
40243 return -EINVAL;
40244+
40245+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40246+ return 0;
40247+
40248 dirent = buf->previous;
40249
40250 if (dirent) {
40251@@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
40252 buf.previous = NULL;
40253 buf.count = count;
40254 buf.error = 0;
40255+ buf.file = file;
40256
40257 error = vfs_readdir(file, compat_filldir64, &buf);
40258 if (error >= 0)
40259@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
40260 * verify all the pointers
40261 */
40262 ret = -EINVAL;
40263- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
40264+ if (nr_segs > UIO_MAXIOV)
40265 goto out;
40266 if (!file->f_op)
40267 goto out;
40268@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
40269 compat_uptr_t __user *envp,
40270 struct pt_regs * regs)
40271 {
40272+#ifdef CONFIG_GRKERNSEC
40273+ struct file *old_exec_file;
40274+ struct acl_subject_label *old_acl;
40275+ struct rlimit old_rlim[RLIM_NLIMITS];
40276+#endif
40277 struct linux_binprm *bprm;
40278 struct file *file;
40279 struct files_struct *displaced;
40280 bool clear_in_exec;
40281 int retval;
40282+ const struct cred *cred = current_cred();
40283+
40284+ /*
40285+ * We move the actual failure in case of RLIMIT_NPROC excess from
40286+ * set*uid() to execve() because too many poorly written programs
40287+ * don't check setuid() return code. Here we additionally recheck
40288+ * whether NPROC limit is still exceeded.
40289+ */
40290+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40291+
40292+ if ((current->flags & PF_NPROC_EXCEEDED) &&
40293+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40294+ retval = -EAGAIN;
40295+ goto out_ret;
40296+ }
40297+
40298+ /* We're below the limit (still or again), so we don't want to make
40299+ * further execve() calls fail. */
40300+ current->flags &= ~PF_NPROC_EXCEEDED;
40301
40302 retval = unshare_files(&displaced);
40303 if (retval)
40304@@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
40305 bprm->filename = filename;
40306 bprm->interp = filename;
40307
40308+ if (gr_process_user_ban()) {
40309+ retval = -EPERM;
40310+ goto out_file;
40311+ }
40312+
40313+ retval = -EACCES;
40314+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
40315+ goto out_file;
40316+
40317 retval = bprm_mm_init(bprm);
40318 if (retval)
40319 goto out_file;
40320@@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
40321 if (retval < 0)
40322 goto out;
40323
40324+ if (!gr_tpe_allow(file)) {
40325+ retval = -EACCES;
40326+ goto out;
40327+ }
40328+
40329+ if (gr_check_crash_exec(file)) {
40330+ retval = -EACCES;
40331+ goto out;
40332+ }
40333+
40334+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40335+
40336+ gr_handle_exec_args_compat(bprm, argv);
40337+
40338+#ifdef CONFIG_GRKERNSEC
40339+ old_acl = current->acl;
40340+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40341+ old_exec_file = current->exec_file;
40342+ get_file(file);
40343+ current->exec_file = file;
40344+#endif
40345+
40346+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40347+ bprm->unsafe & LSM_UNSAFE_SHARE);
40348+ if (retval < 0)
40349+ goto out_fail;
40350+
40351 retval = search_binary_handler(bprm, regs);
40352 if (retval < 0)
40353- goto out;
40354+ goto out_fail;
40355+#ifdef CONFIG_GRKERNSEC
40356+ if (old_exec_file)
40357+ fput(old_exec_file);
40358+#endif
40359
40360 /* execve succeeded */
40361 current->fs->in_exec = 0;
40362@@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
40363 put_files_struct(displaced);
40364 return retval;
40365
40366+out_fail:
40367+#ifdef CONFIG_GRKERNSEC
40368+ current->acl = old_acl;
40369+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40370+ fput(current->exec_file);
40371+ current->exec_file = old_exec_file;
40372+#endif
40373+
40374 out:
40375 if (bprm->mm) {
40376 acct_arg_size(bprm, 0);
40377@@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat
40378 struct fdtable *fdt;
40379 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40380
40381+ pax_track_stack();
40382+
40383 if (n < 0)
40384 goto out_nofds;
40385
40386diff -urNp linux-2.6.32.45/fs/compat_ioctl.c linux-2.6.32.45/fs/compat_ioctl.c
40387--- linux-2.6.32.45/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
40388+++ linux-2.6.32.45/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
40389@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
40390 up = (struct compat_video_spu_palette __user *) arg;
40391 err = get_user(palp, &up->palette);
40392 err |= get_user(length, &up->length);
40393+ if (err)
40394+ return -EFAULT;
40395
40396 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40397 err = put_user(compat_ptr(palp), &up_native->palette);
40398diff -urNp linux-2.6.32.45/fs/configfs/dir.c linux-2.6.32.45/fs/configfs/dir.c
40399--- linux-2.6.32.45/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40400+++ linux-2.6.32.45/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
40401@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
40402 }
40403 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40404 struct configfs_dirent *next;
40405- const char * name;
40406+ const unsigned char * name;
40407+ char d_name[sizeof(next->s_dentry->d_iname)];
40408 int len;
40409
40410 next = list_entry(p, struct configfs_dirent,
40411@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
40412 continue;
40413
40414 name = configfs_get_name(next);
40415- len = strlen(name);
40416+ if (next->s_dentry && name == next->s_dentry->d_iname) {
40417+ len = next->s_dentry->d_name.len;
40418+ memcpy(d_name, name, len);
40419+ name = d_name;
40420+ } else
40421+ len = strlen(name);
40422 if (next->s_dentry)
40423 ino = next->s_dentry->d_inode->i_ino;
40424 else
40425diff -urNp linux-2.6.32.45/fs/dcache.c linux-2.6.32.45/fs/dcache.c
40426--- linux-2.6.32.45/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
40427+++ linux-2.6.32.45/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
40428@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
40429
40430 static struct kmem_cache *dentry_cache __read_mostly;
40431
40432-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
40433-
40434 /*
40435 * This is the single most critical data structure when it comes
40436 * to the dcache: the hashtable for lookups. Somebody should try
40437@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
40438 mempages -= reserve;
40439
40440 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40441- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40442+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40443
40444 dcache_init();
40445 inode_init();
40446diff -urNp linux-2.6.32.45/fs/dlm/lockspace.c linux-2.6.32.45/fs/dlm/lockspace.c
40447--- linux-2.6.32.45/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
40448+++ linux-2.6.32.45/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
40449@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
40450 kfree(ls);
40451 }
40452
40453-static struct sysfs_ops dlm_attr_ops = {
40454+static const struct sysfs_ops dlm_attr_ops = {
40455 .show = dlm_attr_show,
40456 .store = dlm_attr_store,
40457 };
40458diff -urNp linux-2.6.32.45/fs/ecryptfs/inode.c linux-2.6.32.45/fs/ecryptfs/inode.c
40459--- linux-2.6.32.45/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40460+++ linux-2.6.32.45/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40461@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
40462 old_fs = get_fs();
40463 set_fs(get_ds());
40464 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40465- (char __user *)lower_buf,
40466+ (__force char __user *)lower_buf,
40467 lower_bufsiz);
40468 set_fs(old_fs);
40469 if (rc < 0)
40470@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
40471 }
40472 old_fs = get_fs();
40473 set_fs(get_ds());
40474- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40475+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
40476 set_fs(old_fs);
40477 if (rc < 0)
40478 goto out_free;
40479diff -urNp linux-2.6.32.45/fs/exec.c linux-2.6.32.45/fs/exec.c
40480--- linux-2.6.32.45/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
40481+++ linux-2.6.32.45/fs/exec.c 2011-08-11 19:56:19.000000000 -0400
40482@@ -56,12 +56,24 @@
40483 #include <linux/fsnotify.h>
40484 #include <linux/fs_struct.h>
40485 #include <linux/pipe_fs_i.h>
40486+#include <linux/random.h>
40487+#include <linux/seq_file.h>
40488+
40489+#ifdef CONFIG_PAX_REFCOUNT
40490+#include <linux/kallsyms.h>
40491+#include <linux/kdebug.h>
40492+#endif
40493
40494 #include <asm/uaccess.h>
40495 #include <asm/mmu_context.h>
40496 #include <asm/tlb.h>
40497 #include "internal.h"
40498
40499+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
40500+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
40501+EXPORT_SYMBOL(pax_set_initial_flags_func);
40502+#endif
40503+
40504 int core_uses_pid;
40505 char core_pattern[CORENAME_MAX_SIZE] = "core";
40506 unsigned int core_pipe_limit;
40507@@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
40508 goto out;
40509
40510 file = do_filp_open(AT_FDCWD, tmp,
40511- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40512+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40513 MAY_READ | MAY_EXEC | MAY_OPEN);
40514 putname(tmp);
40515 error = PTR_ERR(file);
40516@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
40517 int write)
40518 {
40519 struct page *page;
40520- int ret;
40521
40522-#ifdef CONFIG_STACK_GROWSUP
40523- if (write) {
40524- ret = expand_stack_downwards(bprm->vma, pos);
40525- if (ret < 0)
40526- return NULL;
40527- }
40528-#endif
40529- ret = get_user_pages(current, bprm->mm, pos,
40530- 1, write, 1, &page, NULL);
40531- if (ret <= 0)
40532+ if (0 > expand_stack_downwards(bprm->vma, pos))
40533+ return NULL;
40534+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
40535 return NULL;
40536
40537 if (write) {
40538@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
40539 vma->vm_end = STACK_TOP_MAX;
40540 vma->vm_start = vma->vm_end - PAGE_SIZE;
40541 vma->vm_flags = VM_STACK_FLAGS;
40542+
40543+#ifdef CONFIG_PAX_SEGMEXEC
40544+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
40545+#endif
40546+
40547 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
40548
40549 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
40550@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
40551 mm->stack_vm = mm->total_vm = 1;
40552 up_write(&mm->mmap_sem);
40553 bprm->p = vma->vm_end - sizeof(void *);
40554+
40555+#ifdef CONFIG_PAX_RANDUSTACK
40556+ if (randomize_va_space)
40557+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
40558+#endif
40559+
40560 return 0;
40561 err:
40562 up_write(&mm->mmap_sem);
40563@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
40564 int r;
40565 mm_segment_t oldfs = get_fs();
40566 set_fs(KERNEL_DS);
40567- r = copy_strings(argc, (char __user * __user *)argv, bprm);
40568+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
40569 set_fs(oldfs);
40570 return r;
40571 }
40572@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
40573 unsigned long new_end = old_end - shift;
40574 struct mmu_gather *tlb;
40575
40576- BUG_ON(new_start > new_end);
40577+ if (new_start >= new_end || new_start < mmap_min_addr)
40578+ return -ENOMEM;
40579
40580 /*
40581 * ensure there are no vmas between where we want to go
40582@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
40583 if (vma != find_vma(mm, new_start))
40584 return -EFAULT;
40585
40586+#ifdef CONFIG_PAX_SEGMEXEC
40587+ BUG_ON(pax_find_mirror_vma(vma));
40588+#endif
40589+
40590 /*
40591 * cover the whole range: [new_start, old_end)
40592 */
40593@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
40594 stack_top = arch_align_stack(stack_top);
40595 stack_top = PAGE_ALIGN(stack_top);
40596
40597- if (unlikely(stack_top < mmap_min_addr) ||
40598- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
40599- return -ENOMEM;
40600-
40601 stack_shift = vma->vm_end - stack_top;
40602
40603 bprm->p -= stack_shift;
40604@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
40605 bprm->exec -= stack_shift;
40606
40607 down_write(&mm->mmap_sem);
40608+
40609+ /* Move stack pages down in memory. */
40610+ if (stack_shift) {
40611+ ret = shift_arg_pages(vma, stack_shift);
40612+ if (ret)
40613+ goto out_unlock;
40614+ }
40615+
40616 vm_flags = VM_STACK_FLAGS;
40617
40618 /*
40619@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
40620 vm_flags &= ~VM_EXEC;
40621 vm_flags |= mm->def_flags;
40622
40623+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40624+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40625+ vm_flags &= ~VM_EXEC;
40626+
40627+#ifdef CONFIG_PAX_MPROTECT
40628+ if (mm->pax_flags & MF_PAX_MPROTECT)
40629+ vm_flags &= ~VM_MAYEXEC;
40630+#endif
40631+
40632+ }
40633+#endif
40634+
40635 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
40636 vm_flags);
40637 if (ret)
40638 goto out_unlock;
40639 BUG_ON(prev != vma);
40640
40641- /* Move stack pages down in memory. */
40642- if (stack_shift) {
40643- ret = shift_arg_pages(vma, stack_shift);
40644- if (ret)
40645- goto out_unlock;
40646- }
40647-
40648 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
40649 stack_size = vma->vm_end - vma->vm_start;
40650 /*
40651@@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
40652 int err;
40653
40654 file = do_filp_open(AT_FDCWD, name,
40655- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40656+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40657 MAY_EXEC | MAY_OPEN);
40658 if (IS_ERR(file))
40659 goto out;
40660@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
40661 old_fs = get_fs();
40662 set_fs(get_ds());
40663 /* The cast to a user pointer is valid due to the set_fs() */
40664- result = vfs_read(file, (void __user *)addr, count, &pos);
40665+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
40666 set_fs(old_fs);
40667 return result;
40668 }
40669@@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
40670 }
40671 rcu_read_unlock();
40672
40673- if (p->fs->users > n_fs) {
40674+ if (atomic_read(&p->fs->users) > n_fs) {
40675 bprm->unsafe |= LSM_UNSAFE_SHARE;
40676 } else {
40677 res = -EAGAIN;
40678@@ -1347,11 +1376,35 @@ int do_execve(char * filename,
40679 char __user *__user *envp,
40680 struct pt_regs * regs)
40681 {
40682+#ifdef CONFIG_GRKERNSEC
40683+ struct file *old_exec_file;
40684+ struct acl_subject_label *old_acl;
40685+ struct rlimit old_rlim[RLIM_NLIMITS];
40686+#endif
40687 struct linux_binprm *bprm;
40688 struct file *file;
40689 struct files_struct *displaced;
40690 bool clear_in_exec;
40691 int retval;
40692+ const struct cred *cred = current_cred();
40693+
40694+ /*
40695+ * We move the actual failure in case of RLIMIT_NPROC excess from
40696+ * set*uid() to execve() because too many poorly written programs
40697+ * don't check setuid() return code. Here we additionally recheck
40698+ * whether NPROC limit is still exceeded.
40699+ */
40700+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40701+
40702+ if ((current->flags & PF_NPROC_EXCEEDED) &&
40703+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40704+ retval = -EAGAIN;
40705+ goto out_ret;
40706+ }
40707+
40708+ /* We're below the limit (still or again), so we don't want to make
40709+ * further execve() calls fail. */
40710+ current->flags &= ~PF_NPROC_EXCEEDED;
40711
40712 retval = unshare_files(&displaced);
40713 if (retval)
40714@@ -1383,6 +1436,16 @@ int do_execve(char * filename,
40715 bprm->filename = filename;
40716 bprm->interp = filename;
40717
40718+ if (gr_process_user_ban()) {
40719+ retval = -EPERM;
40720+ goto out_file;
40721+ }
40722+
40723+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
40724+ retval = -EACCES;
40725+ goto out_file;
40726+ }
40727+
40728 retval = bprm_mm_init(bprm);
40729 if (retval)
40730 goto out_file;
40731@@ -1412,10 +1475,41 @@ int do_execve(char * filename,
40732 if (retval < 0)
40733 goto out;
40734
40735+ if (!gr_tpe_allow(file)) {
40736+ retval = -EACCES;
40737+ goto out;
40738+ }
40739+
40740+ if (gr_check_crash_exec(file)) {
40741+ retval = -EACCES;
40742+ goto out;
40743+ }
40744+
40745+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40746+
40747+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
40748+
40749+#ifdef CONFIG_GRKERNSEC
40750+ old_acl = current->acl;
40751+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40752+ old_exec_file = current->exec_file;
40753+ get_file(file);
40754+ current->exec_file = file;
40755+#endif
40756+
40757+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40758+ bprm->unsafe & LSM_UNSAFE_SHARE);
40759+ if (retval < 0)
40760+ goto out_fail;
40761+
40762 current->flags &= ~PF_KTHREAD;
40763 retval = search_binary_handler(bprm,regs);
40764 if (retval < 0)
40765- goto out;
40766+ goto out_fail;
40767+#ifdef CONFIG_GRKERNSEC
40768+ if (old_exec_file)
40769+ fput(old_exec_file);
40770+#endif
40771
40772 /* execve succeeded */
40773 current->fs->in_exec = 0;
40774@@ -1426,6 +1520,14 @@ int do_execve(char * filename,
40775 put_files_struct(displaced);
40776 return retval;
40777
40778+out_fail:
40779+#ifdef CONFIG_GRKERNSEC
40780+ current->acl = old_acl;
40781+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40782+ fput(current->exec_file);
40783+ current->exec_file = old_exec_file;
40784+#endif
40785+
40786 out:
40787 if (bprm->mm) {
40788 acct_arg_size(bprm, 0);
40789@@ -1591,6 +1693,220 @@ out:
40790 return ispipe;
40791 }
40792
40793+int pax_check_flags(unsigned long *flags)
40794+{
40795+ int retval = 0;
40796+
40797+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
40798+ if (*flags & MF_PAX_SEGMEXEC)
40799+ {
40800+ *flags &= ~MF_PAX_SEGMEXEC;
40801+ retval = -EINVAL;
40802+ }
40803+#endif
40804+
40805+ if ((*flags & MF_PAX_PAGEEXEC)
40806+
40807+#ifdef CONFIG_PAX_PAGEEXEC
40808+ && (*flags & MF_PAX_SEGMEXEC)
40809+#endif
40810+
40811+ )
40812+ {
40813+ *flags &= ~MF_PAX_PAGEEXEC;
40814+ retval = -EINVAL;
40815+ }
40816+
40817+ if ((*flags & MF_PAX_MPROTECT)
40818+
40819+#ifdef CONFIG_PAX_MPROTECT
40820+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
40821+#endif
40822+
40823+ )
40824+ {
40825+ *flags &= ~MF_PAX_MPROTECT;
40826+ retval = -EINVAL;
40827+ }
40828+
40829+ if ((*flags & MF_PAX_EMUTRAMP)
40830+
40831+#ifdef CONFIG_PAX_EMUTRAMP
40832+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
40833+#endif
40834+
40835+ )
40836+ {
40837+ *flags &= ~MF_PAX_EMUTRAMP;
40838+ retval = -EINVAL;
40839+ }
40840+
40841+ return retval;
40842+}
40843+
40844+EXPORT_SYMBOL(pax_check_flags);
40845+
40846+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40847+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
40848+{
40849+ struct task_struct *tsk = current;
40850+ struct mm_struct *mm = current->mm;
40851+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
40852+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
40853+ char *path_exec = NULL;
40854+ char *path_fault = NULL;
40855+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
40856+
40857+ if (buffer_exec && buffer_fault) {
40858+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
40859+
40860+ down_read(&mm->mmap_sem);
40861+ vma = mm->mmap;
40862+ while (vma && (!vma_exec || !vma_fault)) {
40863+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
40864+ vma_exec = vma;
40865+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
40866+ vma_fault = vma;
40867+ vma = vma->vm_next;
40868+ }
40869+ if (vma_exec) {
40870+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
40871+ if (IS_ERR(path_exec))
40872+ path_exec = "<path too long>";
40873+ else {
40874+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
40875+ if (path_exec) {
40876+ *path_exec = 0;
40877+ path_exec = buffer_exec;
40878+ } else
40879+ path_exec = "<path too long>";
40880+ }
40881+ }
40882+ if (vma_fault) {
40883+ start = vma_fault->vm_start;
40884+ end = vma_fault->vm_end;
40885+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
40886+ if (vma_fault->vm_file) {
40887+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
40888+ if (IS_ERR(path_fault))
40889+ path_fault = "<path too long>";
40890+ else {
40891+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
40892+ if (path_fault) {
40893+ *path_fault = 0;
40894+ path_fault = buffer_fault;
40895+ } else
40896+ path_fault = "<path too long>";
40897+ }
40898+ } else
40899+ path_fault = "<anonymous mapping>";
40900+ }
40901+ up_read(&mm->mmap_sem);
40902+ }
40903+ if (tsk->signal->curr_ip)
40904+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
40905+ else
40906+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
40907+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
40908+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
40909+ task_uid(tsk), task_euid(tsk), pc, sp);
40910+ free_page((unsigned long)buffer_exec);
40911+ free_page((unsigned long)buffer_fault);
40912+ pax_report_insns(pc, sp);
40913+ do_coredump(SIGKILL, SIGKILL, regs);
40914+}
40915+#endif
40916+
40917+#ifdef CONFIG_PAX_REFCOUNT
40918+void pax_report_refcount_overflow(struct pt_regs *regs)
40919+{
40920+ if (current->signal->curr_ip)
40921+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
40922+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
40923+ else
40924+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
40925+ current->comm, task_pid_nr(current), current_uid(), current_euid());
40926+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
40927+ show_regs(regs);
40928+ force_sig_specific(SIGKILL, current);
40929+}
40930+#endif
40931+
40932+#ifdef CONFIG_PAX_USERCOPY
40933+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
40934+int object_is_on_stack(const void *obj, unsigned long len)
40935+{
40936+ const void * const stack = task_stack_page(current);
40937+ const void * const stackend = stack + THREAD_SIZE;
40938+
40939+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
40940+ const void *frame = NULL;
40941+ const void *oldframe;
40942+#endif
40943+
40944+ if (obj + len < obj)
40945+ return -1;
40946+
40947+ if (obj + len <= stack || stackend <= obj)
40948+ return 0;
40949+
40950+ if (obj < stack || stackend < obj + len)
40951+ return -1;
40952+
40953+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
40954+ oldframe = __builtin_frame_address(1);
40955+ if (oldframe)
40956+ frame = __builtin_frame_address(2);
40957+ /*
40958+ low ----------------------------------------------> high
40959+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
40960+ ^----------------^
40961+ allow copies only within here
40962+ */
40963+ while (stack <= frame && frame < stackend) {
40964+ /* if obj + len extends past the last frame, this
40965+ check won't pass and the next frame will be 0,
40966+ causing us to bail out and correctly report
40967+ the copy as invalid
40968+ */
40969+ if (obj + len <= frame)
40970+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
40971+ oldframe = frame;
40972+ frame = *(const void * const *)frame;
40973+ }
40974+ return -1;
40975+#else
40976+ return 1;
40977+#endif
40978+}
40979+
40980+
40981+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
40982+{
40983+ if (current->signal->curr_ip)
40984+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
40985+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
40986+ else
40987+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
40988+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
40989+
40990+ dump_stack();
40991+ gr_handle_kernel_exploit();
40992+ do_group_exit(SIGKILL);
40993+}
40994+#endif
40995+
40996+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
40997+void pax_track_stack(void)
40998+{
40999+ unsigned long sp = (unsigned long)&sp;
41000+ if (sp < current_thread_info()->lowest_stack &&
41001+ sp > (unsigned long)task_stack_page(current))
41002+ current_thread_info()->lowest_stack = sp;
41003+}
41004+EXPORT_SYMBOL(pax_track_stack);
41005+#endif
41006+
41007 static int zap_process(struct task_struct *start)
41008 {
41009 struct task_struct *t;
41010@@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct
41011 pipe = file->f_path.dentry->d_inode->i_pipe;
41012
41013 pipe_lock(pipe);
41014- pipe->readers++;
41015- pipe->writers--;
41016+ atomic_inc(&pipe->readers);
41017+ atomic_dec(&pipe->writers);
41018
41019- while ((pipe->readers > 1) && (!signal_pending(current))) {
41020+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41021 wake_up_interruptible_sync(&pipe->wait);
41022 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41023 pipe_wait(pipe);
41024 }
41025
41026- pipe->readers--;
41027- pipe->writers++;
41028+ atomic_dec(&pipe->readers);
41029+ atomic_inc(&pipe->writers);
41030 pipe_unlock(pipe);
41031
41032 }
41033@@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_co
41034 char **helper_argv = NULL;
41035 int helper_argc = 0;
41036 int dump_count = 0;
41037- static atomic_t core_dump_count = ATOMIC_INIT(0);
41038+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41039
41040 audit_core_dumps(signr);
41041
41042+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41043+ gr_handle_brute_attach(current, mm->flags);
41044+
41045 binfmt = mm->binfmt;
41046 if (!binfmt || !binfmt->core_dump)
41047 goto fail;
41048@@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_co
41049 */
41050 clear_thread_flag(TIF_SIGPENDING);
41051
41052+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41053+
41054 /*
41055 * lock_kernel() because format_corename() is controlled by sysctl, which
41056 * uses lock_kernel()
41057@@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_co
41058 goto fail_unlock;
41059 }
41060
41061- dump_count = atomic_inc_return(&core_dump_count);
41062+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
41063 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41064 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41065 task_tgid_vnr(current), current->comm);
41066@@ -1972,7 +2293,7 @@ close_fail:
41067 filp_close(file, NULL);
41068 fail_dropcount:
41069 if (dump_count)
41070- atomic_dec(&core_dump_count);
41071+ atomic_dec_unchecked(&core_dump_count);
41072 fail_unlock:
41073 if (helper_argv)
41074 argv_free(helper_argv);
41075diff -urNp linux-2.6.32.45/fs/ext2/balloc.c linux-2.6.32.45/fs/ext2/balloc.c
41076--- linux-2.6.32.45/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
41077+++ linux-2.6.32.45/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
41078@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41079
41080 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41081 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41082- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41083+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41084 sbi->s_resuid != current_fsuid() &&
41085 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41086 return 0;
41087diff -urNp linux-2.6.32.45/fs/ext3/balloc.c linux-2.6.32.45/fs/ext3/balloc.c
41088--- linux-2.6.32.45/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
41089+++ linux-2.6.32.45/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
41090@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
41091
41092 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41093 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41094- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41095+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41096 sbi->s_resuid != current_fsuid() &&
41097 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41098 return 0;
41099diff -urNp linux-2.6.32.45/fs/ext4/balloc.c linux-2.6.32.45/fs/ext4/balloc.c
41100--- linux-2.6.32.45/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
41101+++ linux-2.6.32.45/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
41102@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
41103 /* Hm, nope. Are (enough) root reserved blocks available? */
41104 if (sbi->s_resuid == current_fsuid() ||
41105 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41106- capable(CAP_SYS_RESOURCE)) {
41107+ capable_nolog(CAP_SYS_RESOURCE)) {
41108 if (free_blocks >= (nblocks + dirty_blocks))
41109 return 1;
41110 }
41111diff -urNp linux-2.6.32.45/fs/ext4/ext4.h linux-2.6.32.45/fs/ext4/ext4.h
41112--- linux-2.6.32.45/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
41113+++ linux-2.6.32.45/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
41114@@ -1078,19 +1078,19 @@ struct ext4_sb_info {
41115
41116 /* stats for buddy allocator */
41117 spinlock_t s_mb_pa_lock;
41118- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41119- atomic_t s_bal_success; /* we found long enough chunks */
41120- atomic_t s_bal_allocated; /* in blocks */
41121- atomic_t s_bal_ex_scanned; /* total extents scanned */
41122- atomic_t s_bal_goals; /* goal hits */
41123- atomic_t s_bal_breaks; /* too long searches */
41124- atomic_t s_bal_2orders; /* 2^order hits */
41125+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41126+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41127+ atomic_unchecked_t s_bal_allocated; /* in blocks */
41128+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41129+ atomic_unchecked_t s_bal_goals; /* goal hits */
41130+ atomic_unchecked_t s_bal_breaks; /* too long searches */
41131+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41132 spinlock_t s_bal_lock;
41133 unsigned long s_mb_buddies_generated;
41134 unsigned long long s_mb_generation_time;
41135- atomic_t s_mb_lost_chunks;
41136- atomic_t s_mb_preallocated;
41137- atomic_t s_mb_discarded;
41138+ atomic_unchecked_t s_mb_lost_chunks;
41139+ atomic_unchecked_t s_mb_preallocated;
41140+ atomic_unchecked_t s_mb_discarded;
41141 atomic_t s_lock_busy;
41142
41143 /* locality groups */
41144diff -urNp linux-2.6.32.45/fs/ext4/mballoc.c linux-2.6.32.45/fs/ext4/mballoc.c
41145--- linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
41146+++ linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
41147@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
41148 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41149
41150 if (EXT4_SB(sb)->s_mb_stats)
41151- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41152+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41153
41154 break;
41155 }
41156@@ -2131,7 +2131,7 @@ repeat:
41157 ac->ac_status = AC_STATUS_CONTINUE;
41158 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41159 cr = 3;
41160- atomic_inc(&sbi->s_mb_lost_chunks);
41161+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41162 goto repeat;
41163 }
41164 }
41165@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
41166 ext4_grpblk_t counters[16];
41167 } sg;
41168
41169+ pax_track_stack();
41170+
41171 group--;
41172 if (group == 0)
41173 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41174@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
41175 if (sbi->s_mb_stats) {
41176 printk(KERN_INFO
41177 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41178- atomic_read(&sbi->s_bal_allocated),
41179- atomic_read(&sbi->s_bal_reqs),
41180- atomic_read(&sbi->s_bal_success));
41181+ atomic_read_unchecked(&sbi->s_bal_allocated),
41182+ atomic_read_unchecked(&sbi->s_bal_reqs),
41183+ atomic_read_unchecked(&sbi->s_bal_success));
41184 printk(KERN_INFO
41185 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41186 "%u 2^N hits, %u breaks, %u lost\n",
41187- atomic_read(&sbi->s_bal_ex_scanned),
41188- atomic_read(&sbi->s_bal_goals),
41189- atomic_read(&sbi->s_bal_2orders),
41190- atomic_read(&sbi->s_bal_breaks),
41191- atomic_read(&sbi->s_mb_lost_chunks));
41192+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41193+ atomic_read_unchecked(&sbi->s_bal_goals),
41194+ atomic_read_unchecked(&sbi->s_bal_2orders),
41195+ atomic_read_unchecked(&sbi->s_bal_breaks),
41196+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41197 printk(KERN_INFO
41198 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41199 sbi->s_mb_buddies_generated++,
41200 sbi->s_mb_generation_time);
41201 printk(KERN_INFO
41202 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41203- atomic_read(&sbi->s_mb_preallocated),
41204- atomic_read(&sbi->s_mb_discarded));
41205+ atomic_read_unchecked(&sbi->s_mb_preallocated),
41206+ atomic_read_unchecked(&sbi->s_mb_discarded));
41207 }
41208
41209 free_percpu(sbi->s_locality_groups);
41210@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
41211 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41212
41213 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41214- atomic_inc(&sbi->s_bal_reqs);
41215- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41216+ atomic_inc_unchecked(&sbi->s_bal_reqs);
41217+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41218 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
41219- atomic_inc(&sbi->s_bal_success);
41220- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41221+ atomic_inc_unchecked(&sbi->s_bal_success);
41222+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41223 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41224 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41225- atomic_inc(&sbi->s_bal_goals);
41226+ atomic_inc_unchecked(&sbi->s_bal_goals);
41227 if (ac->ac_found > sbi->s_mb_max_to_scan)
41228- atomic_inc(&sbi->s_bal_breaks);
41229+ atomic_inc_unchecked(&sbi->s_bal_breaks);
41230 }
41231
41232 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41233@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41234 trace_ext4_mb_new_inode_pa(ac, pa);
41235
41236 ext4_mb_use_inode_pa(ac, pa);
41237- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41238+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41239
41240 ei = EXT4_I(ac->ac_inode);
41241 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41242@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41243 trace_ext4_mb_new_group_pa(ac, pa);
41244
41245 ext4_mb_use_group_pa(ac, pa);
41246- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41247+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41248
41249 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41250 lg = ac->ac_lg;
41251@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41252 * from the bitmap and continue.
41253 */
41254 }
41255- atomic_add(free, &sbi->s_mb_discarded);
41256+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
41257
41258 return err;
41259 }
41260@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41261 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41262 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41263 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41264- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41265+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41266
41267 if (ac) {
41268 ac->ac_sb = sb;
41269diff -urNp linux-2.6.32.45/fs/ext4/super.c linux-2.6.32.45/fs/ext4/super.c
41270--- linux-2.6.32.45/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
41271+++ linux-2.6.32.45/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
41272@@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
41273 }
41274
41275
41276-static struct sysfs_ops ext4_attr_ops = {
41277+static const struct sysfs_ops ext4_attr_ops = {
41278 .show = ext4_attr_show,
41279 .store = ext4_attr_store,
41280 };
41281diff -urNp linux-2.6.32.45/fs/fcntl.c linux-2.6.32.45/fs/fcntl.c
41282--- linux-2.6.32.45/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
41283+++ linux-2.6.32.45/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
41284@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
41285 if (err)
41286 return err;
41287
41288+ if (gr_handle_chroot_fowner(pid, type))
41289+ return -ENOENT;
41290+ if (gr_check_protected_task_fowner(pid, type))
41291+ return -EACCES;
41292+
41293 f_modown(filp, pid, type, force);
41294 return 0;
41295 }
41296@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
41297 switch (cmd) {
41298 case F_DUPFD:
41299 case F_DUPFD_CLOEXEC:
41300+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41301 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41302 break;
41303 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41304diff -urNp linux-2.6.32.45/fs/fifo.c linux-2.6.32.45/fs/fifo.c
41305--- linux-2.6.32.45/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
41306+++ linux-2.6.32.45/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
41307@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
41308 */
41309 filp->f_op = &read_pipefifo_fops;
41310 pipe->r_counter++;
41311- if (pipe->readers++ == 0)
41312+ if (atomic_inc_return(&pipe->readers) == 1)
41313 wake_up_partner(inode);
41314
41315- if (!pipe->writers) {
41316+ if (!atomic_read(&pipe->writers)) {
41317 if ((filp->f_flags & O_NONBLOCK)) {
41318 /* suppress POLLHUP until we have
41319 * seen a writer */
41320@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
41321 * errno=ENXIO when there is no process reading the FIFO.
41322 */
41323 ret = -ENXIO;
41324- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
41325+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
41326 goto err;
41327
41328 filp->f_op = &write_pipefifo_fops;
41329 pipe->w_counter++;
41330- if (!pipe->writers++)
41331+ if (atomic_inc_return(&pipe->writers) == 1)
41332 wake_up_partner(inode);
41333
41334- if (!pipe->readers) {
41335+ if (!atomic_read(&pipe->readers)) {
41336 wait_for_partner(inode, &pipe->r_counter);
41337 if (signal_pending(current))
41338 goto err_wr;
41339@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
41340 */
41341 filp->f_op = &rdwr_pipefifo_fops;
41342
41343- pipe->readers++;
41344- pipe->writers++;
41345+ atomic_inc(&pipe->readers);
41346+ atomic_inc(&pipe->writers);
41347 pipe->r_counter++;
41348 pipe->w_counter++;
41349- if (pipe->readers == 1 || pipe->writers == 1)
41350+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
41351 wake_up_partner(inode);
41352 break;
41353
41354@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
41355 return 0;
41356
41357 err_rd:
41358- if (!--pipe->readers)
41359+ if (atomic_dec_and_test(&pipe->readers))
41360 wake_up_interruptible(&pipe->wait);
41361 ret = -ERESTARTSYS;
41362 goto err;
41363
41364 err_wr:
41365- if (!--pipe->writers)
41366+ if (atomic_dec_and_test(&pipe->writers))
41367 wake_up_interruptible(&pipe->wait);
41368 ret = -ERESTARTSYS;
41369 goto err;
41370
41371 err:
41372- if (!pipe->readers && !pipe->writers)
41373+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
41374 free_pipe_info(inode);
41375
41376 err_nocleanup:
41377diff -urNp linux-2.6.32.45/fs/file.c linux-2.6.32.45/fs/file.c
41378--- linux-2.6.32.45/fs/file.c 2011-03-27 14:31:47.000000000 -0400
41379+++ linux-2.6.32.45/fs/file.c 2011-04-17 15:56:46.000000000 -0400
41380@@ -14,6 +14,7 @@
41381 #include <linux/slab.h>
41382 #include <linux/vmalloc.h>
41383 #include <linux/file.h>
41384+#include <linux/security.h>
41385 #include <linux/fdtable.h>
41386 #include <linux/bitops.h>
41387 #include <linux/interrupt.h>
41388@@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
41389 * N.B. For clone tasks sharing a files structure, this test
41390 * will limit the total number of files that can be opened.
41391 */
41392+
41393+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
41394 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41395 return -EMFILE;
41396
41397diff -urNp linux-2.6.32.45/fs/filesystems.c linux-2.6.32.45/fs/filesystems.c
41398--- linux-2.6.32.45/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
41399+++ linux-2.6.32.45/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
41400@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
41401 int len = dot ? dot - name : strlen(name);
41402
41403 fs = __get_fs_type(name, len);
41404+
41405+#ifdef CONFIG_GRKERNSEC_MODHARDEN
41406+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
41407+#else
41408 if (!fs && (request_module("%.*s", len, name) == 0))
41409+#endif
41410 fs = __get_fs_type(name, len);
41411
41412 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
41413diff -urNp linux-2.6.32.45/fs/fscache/cookie.c linux-2.6.32.45/fs/fscache/cookie.c
41414--- linux-2.6.32.45/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
41415+++ linux-2.6.32.45/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
41416@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
41417 parent ? (char *) parent->def->name : "<no-parent>",
41418 def->name, netfs_data);
41419
41420- fscache_stat(&fscache_n_acquires);
41421+ fscache_stat_unchecked(&fscache_n_acquires);
41422
41423 /* if there's no parent cookie, then we don't create one here either */
41424 if (!parent) {
41425- fscache_stat(&fscache_n_acquires_null);
41426+ fscache_stat_unchecked(&fscache_n_acquires_null);
41427 _leave(" [no parent]");
41428 return NULL;
41429 }
41430@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
41431 /* allocate and initialise a cookie */
41432 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
41433 if (!cookie) {
41434- fscache_stat(&fscache_n_acquires_oom);
41435+ fscache_stat_unchecked(&fscache_n_acquires_oom);
41436 _leave(" [ENOMEM]");
41437 return NULL;
41438 }
41439@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
41440
41441 switch (cookie->def->type) {
41442 case FSCACHE_COOKIE_TYPE_INDEX:
41443- fscache_stat(&fscache_n_cookie_index);
41444+ fscache_stat_unchecked(&fscache_n_cookie_index);
41445 break;
41446 case FSCACHE_COOKIE_TYPE_DATAFILE:
41447- fscache_stat(&fscache_n_cookie_data);
41448+ fscache_stat_unchecked(&fscache_n_cookie_data);
41449 break;
41450 default:
41451- fscache_stat(&fscache_n_cookie_special);
41452+ fscache_stat_unchecked(&fscache_n_cookie_special);
41453 break;
41454 }
41455
41456@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
41457 if (fscache_acquire_non_index_cookie(cookie) < 0) {
41458 atomic_dec(&parent->n_children);
41459 __fscache_cookie_put(cookie);
41460- fscache_stat(&fscache_n_acquires_nobufs);
41461+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
41462 _leave(" = NULL");
41463 return NULL;
41464 }
41465 }
41466
41467- fscache_stat(&fscache_n_acquires_ok);
41468+ fscache_stat_unchecked(&fscache_n_acquires_ok);
41469 _leave(" = %p", cookie);
41470 return cookie;
41471 }
41472@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
41473 cache = fscache_select_cache_for_object(cookie->parent);
41474 if (!cache) {
41475 up_read(&fscache_addremove_sem);
41476- fscache_stat(&fscache_n_acquires_no_cache);
41477+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
41478 _leave(" = -ENOMEDIUM [no cache]");
41479 return -ENOMEDIUM;
41480 }
41481@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
41482 object = cache->ops->alloc_object(cache, cookie);
41483 fscache_stat_d(&fscache_n_cop_alloc_object);
41484 if (IS_ERR(object)) {
41485- fscache_stat(&fscache_n_object_no_alloc);
41486+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
41487 ret = PTR_ERR(object);
41488 goto error;
41489 }
41490
41491- fscache_stat(&fscache_n_object_alloc);
41492+ fscache_stat_unchecked(&fscache_n_object_alloc);
41493
41494 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
41495
41496@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
41497 struct fscache_object *object;
41498 struct hlist_node *_p;
41499
41500- fscache_stat(&fscache_n_updates);
41501+ fscache_stat_unchecked(&fscache_n_updates);
41502
41503 if (!cookie) {
41504- fscache_stat(&fscache_n_updates_null);
41505+ fscache_stat_unchecked(&fscache_n_updates_null);
41506 _leave(" [no cookie]");
41507 return;
41508 }
41509@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
41510 struct fscache_object *object;
41511 unsigned long event;
41512
41513- fscache_stat(&fscache_n_relinquishes);
41514+ fscache_stat_unchecked(&fscache_n_relinquishes);
41515 if (retire)
41516- fscache_stat(&fscache_n_relinquishes_retire);
41517+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
41518
41519 if (!cookie) {
41520- fscache_stat(&fscache_n_relinquishes_null);
41521+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
41522 _leave(" [no cookie]");
41523 return;
41524 }
41525@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
41526
41527 /* wait for the cookie to finish being instantiated (or to fail) */
41528 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
41529- fscache_stat(&fscache_n_relinquishes_waitcrt);
41530+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
41531 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
41532 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
41533 }
41534diff -urNp linux-2.6.32.45/fs/fscache/internal.h linux-2.6.32.45/fs/fscache/internal.h
41535--- linux-2.6.32.45/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
41536+++ linux-2.6.32.45/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
41537@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
41538 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
41539 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
41540
41541-extern atomic_t fscache_n_op_pend;
41542-extern atomic_t fscache_n_op_run;
41543-extern atomic_t fscache_n_op_enqueue;
41544-extern atomic_t fscache_n_op_deferred_release;
41545-extern atomic_t fscache_n_op_release;
41546-extern atomic_t fscache_n_op_gc;
41547-extern atomic_t fscache_n_op_cancelled;
41548-extern atomic_t fscache_n_op_rejected;
41549-
41550-extern atomic_t fscache_n_attr_changed;
41551-extern atomic_t fscache_n_attr_changed_ok;
41552-extern atomic_t fscache_n_attr_changed_nobufs;
41553-extern atomic_t fscache_n_attr_changed_nomem;
41554-extern atomic_t fscache_n_attr_changed_calls;
41555-
41556-extern atomic_t fscache_n_allocs;
41557-extern atomic_t fscache_n_allocs_ok;
41558-extern atomic_t fscache_n_allocs_wait;
41559-extern atomic_t fscache_n_allocs_nobufs;
41560-extern atomic_t fscache_n_allocs_intr;
41561-extern atomic_t fscache_n_allocs_object_dead;
41562-extern atomic_t fscache_n_alloc_ops;
41563-extern atomic_t fscache_n_alloc_op_waits;
41564-
41565-extern atomic_t fscache_n_retrievals;
41566-extern atomic_t fscache_n_retrievals_ok;
41567-extern atomic_t fscache_n_retrievals_wait;
41568-extern atomic_t fscache_n_retrievals_nodata;
41569-extern atomic_t fscache_n_retrievals_nobufs;
41570-extern atomic_t fscache_n_retrievals_intr;
41571-extern atomic_t fscache_n_retrievals_nomem;
41572-extern atomic_t fscache_n_retrievals_object_dead;
41573-extern atomic_t fscache_n_retrieval_ops;
41574-extern atomic_t fscache_n_retrieval_op_waits;
41575-
41576-extern atomic_t fscache_n_stores;
41577-extern atomic_t fscache_n_stores_ok;
41578-extern atomic_t fscache_n_stores_again;
41579-extern atomic_t fscache_n_stores_nobufs;
41580-extern atomic_t fscache_n_stores_oom;
41581-extern atomic_t fscache_n_store_ops;
41582-extern atomic_t fscache_n_store_calls;
41583-extern atomic_t fscache_n_store_pages;
41584-extern atomic_t fscache_n_store_radix_deletes;
41585-extern atomic_t fscache_n_store_pages_over_limit;
41586-
41587-extern atomic_t fscache_n_store_vmscan_not_storing;
41588-extern atomic_t fscache_n_store_vmscan_gone;
41589-extern atomic_t fscache_n_store_vmscan_busy;
41590-extern atomic_t fscache_n_store_vmscan_cancelled;
41591-
41592-extern atomic_t fscache_n_marks;
41593-extern atomic_t fscache_n_uncaches;
41594-
41595-extern atomic_t fscache_n_acquires;
41596-extern atomic_t fscache_n_acquires_null;
41597-extern atomic_t fscache_n_acquires_no_cache;
41598-extern atomic_t fscache_n_acquires_ok;
41599-extern atomic_t fscache_n_acquires_nobufs;
41600-extern atomic_t fscache_n_acquires_oom;
41601-
41602-extern atomic_t fscache_n_updates;
41603-extern atomic_t fscache_n_updates_null;
41604-extern atomic_t fscache_n_updates_run;
41605-
41606-extern atomic_t fscache_n_relinquishes;
41607-extern atomic_t fscache_n_relinquishes_null;
41608-extern atomic_t fscache_n_relinquishes_waitcrt;
41609-extern atomic_t fscache_n_relinquishes_retire;
41610-
41611-extern atomic_t fscache_n_cookie_index;
41612-extern atomic_t fscache_n_cookie_data;
41613-extern atomic_t fscache_n_cookie_special;
41614-
41615-extern atomic_t fscache_n_object_alloc;
41616-extern atomic_t fscache_n_object_no_alloc;
41617-extern atomic_t fscache_n_object_lookups;
41618-extern atomic_t fscache_n_object_lookups_negative;
41619-extern atomic_t fscache_n_object_lookups_positive;
41620-extern atomic_t fscache_n_object_lookups_timed_out;
41621-extern atomic_t fscache_n_object_created;
41622-extern atomic_t fscache_n_object_avail;
41623-extern atomic_t fscache_n_object_dead;
41624-
41625-extern atomic_t fscache_n_checkaux_none;
41626-extern atomic_t fscache_n_checkaux_okay;
41627-extern atomic_t fscache_n_checkaux_update;
41628-extern atomic_t fscache_n_checkaux_obsolete;
41629+extern atomic_unchecked_t fscache_n_op_pend;
41630+extern atomic_unchecked_t fscache_n_op_run;
41631+extern atomic_unchecked_t fscache_n_op_enqueue;
41632+extern atomic_unchecked_t fscache_n_op_deferred_release;
41633+extern atomic_unchecked_t fscache_n_op_release;
41634+extern atomic_unchecked_t fscache_n_op_gc;
41635+extern atomic_unchecked_t fscache_n_op_cancelled;
41636+extern atomic_unchecked_t fscache_n_op_rejected;
41637+
41638+extern atomic_unchecked_t fscache_n_attr_changed;
41639+extern atomic_unchecked_t fscache_n_attr_changed_ok;
41640+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
41641+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
41642+extern atomic_unchecked_t fscache_n_attr_changed_calls;
41643+
41644+extern atomic_unchecked_t fscache_n_allocs;
41645+extern atomic_unchecked_t fscache_n_allocs_ok;
41646+extern atomic_unchecked_t fscache_n_allocs_wait;
41647+extern atomic_unchecked_t fscache_n_allocs_nobufs;
41648+extern atomic_unchecked_t fscache_n_allocs_intr;
41649+extern atomic_unchecked_t fscache_n_allocs_object_dead;
41650+extern atomic_unchecked_t fscache_n_alloc_ops;
41651+extern atomic_unchecked_t fscache_n_alloc_op_waits;
41652+
41653+extern atomic_unchecked_t fscache_n_retrievals;
41654+extern atomic_unchecked_t fscache_n_retrievals_ok;
41655+extern atomic_unchecked_t fscache_n_retrievals_wait;
41656+extern atomic_unchecked_t fscache_n_retrievals_nodata;
41657+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
41658+extern atomic_unchecked_t fscache_n_retrievals_intr;
41659+extern atomic_unchecked_t fscache_n_retrievals_nomem;
41660+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
41661+extern atomic_unchecked_t fscache_n_retrieval_ops;
41662+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
41663+
41664+extern atomic_unchecked_t fscache_n_stores;
41665+extern atomic_unchecked_t fscache_n_stores_ok;
41666+extern atomic_unchecked_t fscache_n_stores_again;
41667+extern atomic_unchecked_t fscache_n_stores_nobufs;
41668+extern atomic_unchecked_t fscache_n_stores_oom;
41669+extern atomic_unchecked_t fscache_n_store_ops;
41670+extern atomic_unchecked_t fscache_n_store_calls;
41671+extern atomic_unchecked_t fscache_n_store_pages;
41672+extern atomic_unchecked_t fscache_n_store_radix_deletes;
41673+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
41674+
41675+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
41676+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
41677+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
41678+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
41679+
41680+extern atomic_unchecked_t fscache_n_marks;
41681+extern atomic_unchecked_t fscache_n_uncaches;
41682+
41683+extern atomic_unchecked_t fscache_n_acquires;
41684+extern atomic_unchecked_t fscache_n_acquires_null;
41685+extern atomic_unchecked_t fscache_n_acquires_no_cache;
41686+extern atomic_unchecked_t fscache_n_acquires_ok;
41687+extern atomic_unchecked_t fscache_n_acquires_nobufs;
41688+extern atomic_unchecked_t fscache_n_acquires_oom;
41689+
41690+extern atomic_unchecked_t fscache_n_updates;
41691+extern atomic_unchecked_t fscache_n_updates_null;
41692+extern atomic_unchecked_t fscache_n_updates_run;
41693+
41694+extern atomic_unchecked_t fscache_n_relinquishes;
41695+extern atomic_unchecked_t fscache_n_relinquishes_null;
41696+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
41697+extern atomic_unchecked_t fscache_n_relinquishes_retire;
41698+
41699+extern atomic_unchecked_t fscache_n_cookie_index;
41700+extern atomic_unchecked_t fscache_n_cookie_data;
41701+extern atomic_unchecked_t fscache_n_cookie_special;
41702+
41703+extern atomic_unchecked_t fscache_n_object_alloc;
41704+extern atomic_unchecked_t fscache_n_object_no_alloc;
41705+extern atomic_unchecked_t fscache_n_object_lookups;
41706+extern atomic_unchecked_t fscache_n_object_lookups_negative;
41707+extern atomic_unchecked_t fscache_n_object_lookups_positive;
41708+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
41709+extern atomic_unchecked_t fscache_n_object_created;
41710+extern atomic_unchecked_t fscache_n_object_avail;
41711+extern atomic_unchecked_t fscache_n_object_dead;
41712+
41713+extern atomic_unchecked_t fscache_n_checkaux_none;
41714+extern atomic_unchecked_t fscache_n_checkaux_okay;
41715+extern atomic_unchecked_t fscache_n_checkaux_update;
41716+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
41717
41718 extern atomic_t fscache_n_cop_alloc_object;
41719 extern atomic_t fscache_n_cop_lookup_object;
41720@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
41721 atomic_inc(stat);
41722 }
41723
41724+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
41725+{
41726+ atomic_inc_unchecked(stat);
41727+}
41728+
41729 static inline void fscache_stat_d(atomic_t *stat)
41730 {
41731 atomic_dec(stat);
41732@@ -259,6 +264,7 @@ extern const struct file_operations fsca
41733
41734 #define __fscache_stat(stat) (NULL)
41735 #define fscache_stat(stat) do {} while (0)
41736+#define fscache_stat_unchecked(stat) do {} while (0)
41737 #define fscache_stat_d(stat) do {} while (0)
41738 #endif
41739
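The hunks above convert the FS-Cache statistics counters from atomic_t to atomic_unchecked_t and add fscache_stat_unchecked() next to fscache_stat(). The split exists because of PaX's reference-count overflow protection elsewhere in this patch: once atomic_t increments are overflow-checked, counters that are mere statistics and may legitimately wrap have to opt out, which is what the _unchecked type provides. The user-space sketch below only models that split; atomic_checked_t, the saturating behaviour and the printed message are illustrative stand-ins, not the real PaX per-architecture implementation.

/*
 * Stand-alone model of checked vs. unchecked atomic counters.  Purely an
 * illustration of the intended semantics; the real atomic_unchecked_t and
 * atomic_inc_unchecked() live in the architecture headers of this patch.
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } atomic_checked_t;   /* models atomic_t */
typedef struct { atomic_int counter; } atomic_unchecked_t; /* models the unchecked type */

/* Checked increment: refuse to wrap, as a reference count must never do. */
static void atomic_inc_checked(atomic_checked_t *v)
{
    int old = atomic_load(&v->counter);

    do {
        if (old == INT_MAX) {
            fprintf(stderr, "refcount overflow caught\n");
            return;
        }
    } while (!atomic_compare_exchange_weak(&v->counter, &old, old + 1));
}

/* Unchecked increment: silent wrap-around is acceptable for a statistic. */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    atomic_fetch_add(&v->counter, 1);
}

int main(void)
{
    atomic_checked_t refcount;
    atomic_unchecked_t n_stores;

    atomic_init(&refcount.counter, INT_MAX);
    atomic_init(&n_stores.counter, INT_MAX);

    atomic_inc_checked(&refcount);    /* reported, value left at INT_MAX */
    atomic_inc_unchecked(&n_stores);  /* wraps to INT_MIN without complaint */

    printf("refcount=%d stat=%d\n",
           atomic_load(&refcount.counter), atomic_load(&n_stores.counter));
    return 0;
}

The object.c, operation.c, page.c and stats.c hunks that follow are the mechanical consequence: every read and increment of one of these counters switches to the _unchecked accessor.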
41740diff -urNp linux-2.6.32.45/fs/fscache/object.c linux-2.6.32.45/fs/fscache/object.c
41741--- linux-2.6.32.45/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
41742+++ linux-2.6.32.45/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
41743@@ -144,7 +144,7 @@ static void fscache_object_state_machine
41744 /* update the object metadata on disk */
41745 case FSCACHE_OBJECT_UPDATING:
41746 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
41747- fscache_stat(&fscache_n_updates_run);
41748+ fscache_stat_unchecked(&fscache_n_updates_run);
41749 fscache_stat(&fscache_n_cop_update_object);
41750 object->cache->ops->update_object(object);
41751 fscache_stat_d(&fscache_n_cop_update_object);
41752@@ -233,7 +233,7 @@ static void fscache_object_state_machine
41753 spin_lock(&object->lock);
41754 object->state = FSCACHE_OBJECT_DEAD;
41755 spin_unlock(&object->lock);
41756- fscache_stat(&fscache_n_object_dead);
41757+ fscache_stat_unchecked(&fscache_n_object_dead);
41758 goto terminal_transit;
41759
41760 /* handle the parent cache of this object being withdrawn from
41761@@ -248,7 +248,7 @@ static void fscache_object_state_machine
41762 spin_lock(&object->lock);
41763 object->state = FSCACHE_OBJECT_DEAD;
41764 spin_unlock(&object->lock);
41765- fscache_stat(&fscache_n_object_dead);
41766+ fscache_stat_unchecked(&fscache_n_object_dead);
41767 goto terminal_transit;
41768
41769 /* complain about the object being woken up once it is
41770@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
41771 parent->cookie->def->name, cookie->def->name,
41772 object->cache->tag->name);
41773
41774- fscache_stat(&fscache_n_object_lookups);
41775+ fscache_stat_unchecked(&fscache_n_object_lookups);
41776 fscache_stat(&fscache_n_cop_lookup_object);
41777 ret = object->cache->ops->lookup_object(object);
41778 fscache_stat_d(&fscache_n_cop_lookup_object);
41779@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
41780 if (ret == -ETIMEDOUT) {
41781 /* probably stuck behind another object, so move this one to
41782 * the back of the queue */
41783- fscache_stat(&fscache_n_object_lookups_timed_out);
41784+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
41785 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
41786 }
41787
41788@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
41789
41790 spin_lock(&object->lock);
41791 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
41792- fscache_stat(&fscache_n_object_lookups_negative);
41793+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
41794
41795 /* transit here to allow write requests to begin stacking up
41796 * and read requests to begin returning ENODATA */
41797@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
41798 * result, in which case there may be data available */
41799 spin_lock(&object->lock);
41800 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
41801- fscache_stat(&fscache_n_object_lookups_positive);
41802+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
41803
41804 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
41805
41806@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
41807 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
41808 } else {
41809 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
41810- fscache_stat(&fscache_n_object_created);
41811+ fscache_stat_unchecked(&fscache_n_object_created);
41812
41813 object->state = FSCACHE_OBJECT_AVAILABLE;
41814 spin_unlock(&object->lock);
41815@@ -633,7 +633,7 @@ static void fscache_object_available(str
41816 fscache_enqueue_dependents(object);
41817
41818 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
41819- fscache_stat(&fscache_n_object_avail);
41820+ fscache_stat_unchecked(&fscache_n_object_avail);
41821
41822 _leave("");
41823 }
41824@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
41825 enum fscache_checkaux result;
41826
41827 if (!object->cookie->def->check_aux) {
41828- fscache_stat(&fscache_n_checkaux_none);
41829+ fscache_stat_unchecked(&fscache_n_checkaux_none);
41830 return FSCACHE_CHECKAUX_OKAY;
41831 }
41832
41833@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
41834 switch (result) {
41835 /* entry okay as is */
41836 case FSCACHE_CHECKAUX_OKAY:
41837- fscache_stat(&fscache_n_checkaux_okay);
41838+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
41839 break;
41840
41841 /* entry requires update */
41842 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
41843- fscache_stat(&fscache_n_checkaux_update);
41844+ fscache_stat_unchecked(&fscache_n_checkaux_update);
41845 break;
41846
41847 /* entry requires deletion */
41848 case FSCACHE_CHECKAUX_OBSOLETE:
41849- fscache_stat(&fscache_n_checkaux_obsolete);
41850+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
41851 break;
41852
41853 default:
41854diff -urNp linux-2.6.32.45/fs/fscache/operation.c linux-2.6.32.45/fs/fscache/operation.c
41855--- linux-2.6.32.45/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
41856+++ linux-2.6.32.45/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
41857@@ -16,7 +16,7 @@
41858 #include <linux/seq_file.h>
41859 #include "internal.h"
41860
41861-atomic_t fscache_op_debug_id;
41862+atomic_unchecked_t fscache_op_debug_id;
41863 EXPORT_SYMBOL(fscache_op_debug_id);
41864
41865 /**
41866@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
41867 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
41868 ASSERTCMP(atomic_read(&op->usage), >, 0);
41869
41870- fscache_stat(&fscache_n_op_enqueue);
41871+ fscache_stat_unchecked(&fscache_n_op_enqueue);
41872 switch (op->flags & FSCACHE_OP_TYPE) {
41873 case FSCACHE_OP_FAST:
41874 _debug("queue fast");
41875@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
41876 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
41877 if (op->processor)
41878 fscache_enqueue_operation(op);
41879- fscache_stat(&fscache_n_op_run);
41880+ fscache_stat_unchecked(&fscache_n_op_run);
41881 }
41882
41883 /*
41884@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
41885 if (object->n_ops > 0) {
41886 atomic_inc(&op->usage);
41887 list_add_tail(&op->pend_link, &object->pending_ops);
41888- fscache_stat(&fscache_n_op_pend);
41889+ fscache_stat_unchecked(&fscache_n_op_pend);
41890 } else if (!list_empty(&object->pending_ops)) {
41891 atomic_inc(&op->usage);
41892 list_add_tail(&op->pend_link, &object->pending_ops);
41893- fscache_stat(&fscache_n_op_pend);
41894+ fscache_stat_unchecked(&fscache_n_op_pend);
41895 fscache_start_operations(object);
41896 } else {
41897 ASSERTCMP(object->n_in_progress, ==, 0);
41898@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
41899 object->n_exclusive++; /* reads and writes must wait */
41900 atomic_inc(&op->usage);
41901 list_add_tail(&op->pend_link, &object->pending_ops);
41902- fscache_stat(&fscache_n_op_pend);
41903+ fscache_stat_unchecked(&fscache_n_op_pend);
41904 ret = 0;
41905 } else {
41906 /* not allowed to submit ops in any other state */
41907@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
41908 if (object->n_exclusive > 0) {
41909 atomic_inc(&op->usage);
41910 list_add_tail(&op->pend_link, &object->pending_ops);
41911- fscache_stat(&fscache_n_op_pend);
41912+ fscache_stat_unchecked(&fscache_n_op_pend);
41913 } else if (!list_empty(&object->pending_ops)) {
41914 atomic_inc(&op->usage);
41915 list_add_tail(&op->pend_link, &object->pending_ops);
41916- fscache_stat(&fscache_n_op_pend);
41917+ fscache_stat_unchecked(&fscache_n_op_pend);
41918 fscache_start_operations(object);
41919 } else {
41920 ASSERTCMP(object->n_exclusive, ==, 0);
41921@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
41922 object->n_ops++;
41923 atomic_inc(&op->usage);
41924 list_add_tail(&op->pend_link, &object->pending_ops);
41925- fscache_stat(&fscache_n_op_pend);
41926+ fscache_stat_unchecked(&fscache_n_op_pend);
41927 ret = 0;
41928 } else if (object->state == FSCACHE_OBJECT_DYING ||
41929 object->state == FSCACHE_OBJECT_LC_DYING ||
41930 object->state == FSCACHE_OBJECT_WITHDRAWING) {
41931- fscache_stat(&fscache_n_op_rejected);
41932+ fscache_stat_unchecked(&fscache_n_op_rejected);
41933 ret = -ENOBUFS;
41934 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
41935 fscache_report_unexpected_submission(object, op, ostate);
41936@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
41937
41938 ret = -EBUSY;
41939 if (!list_empty(&op->pend_link)) {
41940- fscache_stat(&fscache_n_op_cancelled);
41941+ fscache_stat_unchecked(&fscache_n_op_cancelled);
41942 list_del_init(&op->pend_link);
41943 object->n_ops--;
41944 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
41945@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
41946 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
41947 BUG();
41948
41949- fscache_stat(&fscache_n_op_release);
41950+ fscache_stat_unchecked(&fscache_n_op_release);
41951
41952 if (op->release) {
41953 op->release(op);
41954@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
41955 * lock, and defer it otherwise */
41956 if (!spin_trylock(&object->lock)) {
41957 _debug("defer put");
41958- fscache_stat(&fscache_n_op_deferred_release);
41959+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
41960
41961 cache = object->cache;
41962 spin_lock(&cache->op_gc_list_lock);
41963@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
41964
41965 _debug("GC DEFERRED REL OBJ%x OP%x",
41966 object->debug_id, op->debug_id);
41967- fscache_stat(&fscache_n_op_gc);
41968+ fscache_stat_unchecked(&fscache_n_op_gc);
41969
41970 ASSERTCMP(atomic_read(&op->usage), ==, 0);
41971
41972diff -urNp linux-2.6.32.45/fs/fscache/page.c linux-2.6.32.45/fs/fscache/page.c
41973--- linux-2.6.32.45/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
41974+++ linux-2.6.32.45/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
41975@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
41976 val = radix_tree_lookup(&cookie->stores, page->index);
41977 if (!val) {
41978 rcu_read_unlock();
41979- fscache_stat(&fscache_n_store_vmscan_not_storing);
41980+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
41981 __fscache_uncache_page(cookie, page);
41982 return true;
41983 }
41984@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
41985 spin_unlock(&cookie->stores_lock);
41986
41987 if (xpage) {
41988- fscache_stat(&fscache_n_store_vmscan_cancelled);
41989- fscache_stat(&fscache_n_store_radix_deletes);
41990+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
41991+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
41992 ASSERTCMP(xpage, ==, page);
41993 } else {
41994- fscache_stat(&fscache_n_store_vmscan_gone);
41995+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
41996 }
41997
41998 wake_up_bit(&cookie->flags, 0);
41999@@ -106,7 +106,7 @@ page_busy:
42000 /* we might want to wait here, but that could deadlock the allocator as
42001 * the slow-work threads writing to the cache may all end up sleeping
42002 * on memory allocation */
42003- fscache_stat(&fscache_n_store_vmscan_busy);
42004+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42005 return false;
42006 }
42007 EXPORT_SYMBOL(__fscache_maybe_release_page);
42008@@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
42009 FSCACHE_COOKIE_STORING_TAG);
42010 if (!radix_tree_tag_get(&cookie->stores, page->index,
42011 FSCACHE_COOKIE_PENDING_TAG)) {
42012- fscache_stat(&fscache_n_store_radix_deletes);
42013+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42014 xpage = radix_tree_delete(&cookie->stores, page->index);
42015 }
42016 spin_unlock(&cookie->stores_lock);
42017@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
42018
42019 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42020
42021- fscache_stat(&fscache_n_attr_changed_calls);
42022+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42023
42024 if (fscache_object_is_active(object)) {
42025 fscache_set_op_state(op, "CallFS");
42026@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
42027
42028 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42029
42030- fscache_stat(&fscache_n_attr_changed);
42031+ fscache_stat_unchecked(&fscache_n_attr_changed);
42032
42033 op = kzalloc(sizeof(*op), GFP_KERNEL);
42034 if (!op) {
42035- fscache_stat(&fscache_n_attr_changed_nomem);
42036+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42037 _leave(" = -ENOMEM");
42038 return -ENOMEM;
42039 }
42040@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
42041 if (fscache_submit_exclusive_op(object, op) < 0)
42042 goto nobufs;
42043 spin_unlock(&cookie->lock);
42044- fscache_stat(&fscache_n_attr_changed_ok);
42045+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42046 fscache_put_operation(op);
42047 _leave(" = 0");
42048 return 0;
42049@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
42050 nobufs:
42051 spin_unlock(&cookie->lock);
42052 kfree(op);
42053- fscache_stat(&fscache_n_attr_changed_nobufs);
42054+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42055 _leave(" = %d", -ENOBUFS);
42056 return -ENOBUFS;
42057 }
42058@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
42059 /* allocate a retrieval operation and attempt to submit it */
42060 op = kzalloc(sizeof(*op), GFP_NOIO);
42061 if (!op) {
42062- fscache_stat(&fscache_n_retrievals_nomem);
42063+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42064 return NULL;
42065 }
42066
42067@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
42068 return 0;
42069 }
42070
42071- fscache_stat(&fscache_n_retrievals_wait);
42072+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
42073
42074 jif = jiffies;
42075 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42076 fscache_wait_bit_interruptible,
42077 TASK_INTERRUPTIBLE) != 0) {
42078- fscache_stat(&fscache_n_retrievals_intr);
42079+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42080 _leave(" = -ERESTARTSYS");
42081 return -ERESTARTSYS;
42082 }
42083@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
42084 */
42085 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42086 struct fscache_retrieval *op,
42087- atomic_t *stat_op_waits,
42088- atomic_t *stat_object_dead)
42089+ atomic_unchecked_t *stat_op_waits,
42090+ atomic_unchecked_t *stat_object_dead)
42091 {
42092 int ret;
42093
42094@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
42095 goto check_if_dead;
42096
42097 _debug(">>> WT");
42098- fscache_stat(stat_op_waits);
42099+ fscache_stat_unchecked(stat_op_waits);
42100 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42101 fscache_wait_bit_interruptible,
42102 TASK_INTERRUPTIBLE) < 0) {
42103@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
42104
42105 check_if_dead:
42106 if (unlikely(fscache_object_is_dead(object))) {
42107- fscache_stat(stat_object_dead);
42108+ fscache_stat_unchecked(stat_object_dead);
42109 return -ENOBUFS;
42110 }
42111 return 0;
42112@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
42113
42114 _enter("%p,%p,,,", cookie, page);
42115
42116- fscache_stat(&fscache_n_retrievals);
42117+ fscache_stat_unchecked(&fscache_n_retrievals);
42118
42119 if (hlist_empty(&cookie->backing_objects))
42120 goto nobufs;
42121@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
42122 goto nobufs_unlock;
42123 spin_unlock(&cookie->lock);
42124
42125- fscache_stat(&fscache_n_retrieval_ops);
42126+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42127
42128 /* pin the netfs read context in case we need to do the actual netfs
42129 * read because we've encountered a cache read failure */
42130@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
42131
42132 error:
42133 if (ret == -ENOMEM)
42134- fscache_stat(&fscache_n_retrievals_nomem);
42135+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42136 else if (ret == -ERESTARTSYS)
42137- fscache_stat(&fscache_n_retrievals_intr);
42138+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42139 else if (ret == -ENODATA)
42140- fscache_stat(&fscache_n_retrievals_nodata);
42141+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42142 else if (ret < 0)
42143- fscache_stat(&fscache_n_retrievals_nobufs);
42144+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42145 else
42146- fscache_stat(&fscache_n_retrievals_ok);
42147+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42148
42149 fscache_put_retrieval(op);
42150 _leave(" = %d", ret);
42151@@ -453,7 +453,7 @@ nobufs_unlock:
42152 spin_unlock(&cookie->lock);
42153 kfree(op);
42154 nobufs:
42155- fscache_stat(&fscache_n_retrievals_nobufs);
42156+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42157 _leave(" = -ENOBUFS");
42158 return -ENOBUFS;
42159 }
42160@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
42161
42162 _enter("%p,,%d,,,", cookie, *nr_pages);
42163
42164- fscache_stat(&fscache_n_retrievals);
42165+ fscache_stat_unchecked(&fscache_n_retrievals);
42166
42167 if (hlist_empty(&cookie->backing_objects))
42168 goto nobufs;
42169@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
42170 goto nobufs_unlock;
42171 spin_unlock(&cookie->lock);
42172
42173- fscache_stat(&fscache_n_retrieval_ops);
42174+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42175
42176 /* pin the netfs read context in case we need to do the actual netfs
42177 * read because we've encountered a cache read failure */
42178@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
42179
42180 error:
42181 if (ret == -ENOMEM)
42182- fscache_stat(&fscache_n_retrievals_nomem);
42183+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42184 else if (ret == -ERESTARTSYS)
42185- fscache_stat(&fscache_n_retrievals_intr);
42186+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42187 else if (ret == -ENODATA)
42188- fscache_stat(&fscache_n_retrievals_nodata);
42189+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42190 else if (ret < 0)
42191- fscache_stat(&fscache_n_retrievals_nobufs);
42192+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42193 else
42194- fscache_stat(&fscache_n_retrievals_ok);
42195+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42196
42197 fscache_put_retrieval(op);
42198 _leave(" = %d", ret);
42199@@ -570,7 +570,7 @@ nobufs_unlock:
42200 spin_unlock(&cookie->lock);
42201 kfree(op);
42202 nobufs:
42203- fscache_stat(&fscache_n_retrievals_nobufs);
42204+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42205 _leave(" = -ENOBUFS");
42206 return -ENOBUFS;
42207 }
42208@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
42209
42210 _enter("%p,%p,,,", cookie, page);
42211
42212- fscache_stat(&fscache_n_allocs);
42213+ fscache_stat_unchecked(&fscache_n_allocs);
42214
42215 if (hlist_empty(&cookie->backing_objects))
42216 goto nobufs;
42217@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
42218 goto nobufs_unlock;
42219 spin_unlock(&cookie->lock);
42220
42221- fscache_stat(&fscache_n_alloc_ops);
42222+ fscache_stat_unchecked(&fscache_n_alloc_ops);
42223
42224 ret = fscache_wait_for_retrieval_activation(
42225 object, op,
42226@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
42227
42228 error:
42229 if (ret == -ERESTARTSYS)
42230- fscache_stat(&fscache_n_allocs_intr);
42231+ fscache_stat_unchecked(&fscache_n_allocs_intr);
42232 else if (ret < 0)
42233- fscache_stat(&fscache_n_allocs_nobufs);
42234+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42235 else
42236- fscache_stat(&fscache_n_allocs_ok);
42237+ fscache_stat_unchecked(&fscache_n_allocs_ok);
42238
42239 fscache_put_retrieval(op);
42240 _leave(" = %d", ret);
42241@@ -651,7 +651,7 @@ nobufs_unlock:
42242 spin_unlock(&cookie->lock);
42243 kfree(op);
42244 nobufs:
42245- fscache_stat(&fscache_n_allocs_nobufs);
42246+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42247 _leave(" = -ENOBUFS");
42248 return -ENOBUFS;
42249 }
42250@@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
42251
42252 spin_lock(&cookie->stores_lock);
42253
42254- fscache_stat(&fscache_n_store_calls);
42255+ fscache_stat_unchecked(&fscache_n_store_calls);
42256
42257 /* find a page to store */
42258 page = NULL;
42259@@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
42260 page = results[0];
42261 _debug("gang %d [%lx]", n, page->index);
42262 if (page->index > op->store_limit) {
42263- fscache_stat(&fscache_n_store_pages_over_limit);
42264+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
42265 goto superseded;
42266 }
42267
42268@@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
42269
42270 if (page) {
42271 fscache_set_op_state(&op->op, "Store");
42272- fscache_stat(&fscache_n_store_pages);
42273+ fscache_stat_unchecked(&fscache_n_store_pages);
42274 fscache_stat(&fscache_n_cop_write_page);
42275 ret = object->cache->ops->write_page(op, page);
42276 fscache_stat_d(&fscache_n_cop_write_page);
42277@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
42278 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42279 ASSERT(PageFsCache(page));
42280
42281- fscache_stat(&fscache_n_stores);
42282+ fscache_stat_unchecked(&fscache_n_stores);
42283
42284 op = kzalloc(sizeof(*op), GFP_NOIO);
42285 if (!op)
42286@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
42287 spin_unlock(&cookie->stores_lock);
42288 spin_unlock(&object->lock);
42289
42290- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
42291+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
42292 op->store_limit = object->store_limit;
42293
42294 if (fscache_submit_op(object, &op->op) < 0)
42295@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
42296
42297 spin_unlock(&cookie->lock);
42298 radix_tree_preload_end();
42299- fscache_stat(&fscache_n_store_ops);
42300- fscache_stat(&fscache_n_stores_ok);
42301+ fscache_stat_unchecked(&fscache_n_store_ops);
42302+ fscache_stat_unchecked(&fscache_n_stores_ok);
42303
42304 /* the slow work queue now carries its own ref on the object */
42305 fscache_put_operation(&op->op);
42306@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
42307 return 0;
42308
42309 already_queued:
42310- fscache_stat(&fscache_n_stores_again);
42311+ fscache_stat_unchecked(&fscache_n_stores_again);
42312 already_pending:
42313 spin_unlock(&cookie->stores_lock);
42314 spin_unlock(&object->lock);
42315 spin_unlock(&cookie->lock);
42316 radix_tree_preload_end();
42317 kfree(op);
42318- fscache_stat(&fscache_n_stores_ok);
42319+ fscache_stat_unchecked(&fscache_n_stores_ok);
42320 _leave(" = 0");
42321 return 0;
42322
42323@@ -886,14 +886,14 @@ nobufs:
42324 spin_unlock(&cookie->lock);
42325 radix_tree_preload_end();
42326 kfree(op);
42327- fscache_stat(&fscache_n_stores_nobufs);
42328+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
42329 _leave(" = -ENOBUFS");
42330 return -ENOBUFS;
42331
42332 nomem_free:
42333 kfree(op);
42334 nomem:
42335- fscache_stat(&fscache_n_stores_oom);
42336+ fscache_stat_unchecked(&fscache_n_stores_oom);
42337 _leave(" = -ENOMEM");
42338 return -ENOMEM;
42339 }
42340@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
42341 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42342 ASSERTCMP(page, !=, NULL);
42343
42344- fscache_stat(&fscache_n_uncaches);
42345+ fscache_stat_unchecked(&fscache_n_uncaches);
42346
42347 /* cache withdrawal may beat us to it */
42348 if (!PageFsCache(page))
42349@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
42350 unsigned long loop;
42351
42352 #ifdef CONFIG_FSCACHE_STATS
42353- atomic_add(pagevec->nr, &fscache_n_marks);
42354+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
42355 #endif
42356
42357 for (loop = 0; loop < pagevec->nr; loop++) {
42358diff -urNp linux-2.6.32.45/fs/fscache/stats.c linux-2.6.32.45/fs/fscache/stats.c
42359--- linux-2.6.32.45/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
42360+++ linux-2.6.32.45/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
42361@@ -18,95 +18,95 @@
42362 /*
42363 * operation counters
42364 */
42365-atomic_t fscache_n_op_pend;
42366-atomic_t fscache_n_op_run;
42367-atomic_t fscache_n_op_enqueue;
42368-atomic_t fscache_n_op_requeue;
42369-atomic_t fscache_n_op_deferred_release;
42370-atomic_t fscache_n_op_release;
42371-atomic_t fscache_n_op_gc;
42372-atomic_t fscache_n_op_cancelled;
42373-atomic_t fscache_n_op_rejected;
42374-
42375-atomic_t fscache_n_attr_changed;
42376-atomic_t fscache_n_attr_changed_ok;
42377-atomic_t fscache_n_attr_changed_nobufs;
42378-atomic_t fscache_n_attr_changed_nomem;
42379-atomic_t fscache_n_attr_changed_calls;
42380-
42381-atomic_t fscache_n_allocs;
42382-atomic_t fscache_n_allocs_ok;
42383-atomic_t fscache_n_allocs_wait;
42384-atomic_t fscache_n_allocs_nobufs;
42385-atomic_t fscache_n_allocs_intr;
42386-atomic_t fscache_n_allocs_object_dead;
42387-atomic_t fscache_n_alloc_ops;
42388-atomic_t fscache_n_alloc_op_waits;
42389-
42390-atomic_t fscache_n_retrievals;
42391-atomic_t fscache_n_retrievals_ok;
42392-atomic_t fscache_n_retrievals_wait;
42393-atomic_t fscache_n_retrievals_nodata;
42394-atomic_t fscache_n_retrievals_nobufs;
42395-atomic_t fscache_n_retrievals_intr;
42396-atomic_t fscache_n_retrievals_nomem;
42397-atomic_t fscache_n_retrievals_object_dead;
42398-atomic_t fscache_n_retrieval_ops;
42399-atomic_t fscache_n_retrieval_op_waits;
42400-
42401-atomic_t fscache_n_stores;
42402-atomic_t fscache_n_stores_ok;
42403-atomic_t fscache_n_stores_again;
42404-atomic_t fscache_n_stores_nobufs;
42405-atomic_t fscache_n_stores_oom;
42406-atomic_t fscache_n_store_ops;
42407-atomic_t fscache_n_store_calls;
42408-atomic_t fscache_n_store_pages;
42409-atomic_t fscache_n_store_radix_deletes;
42410-atomic_t fscache_n_store_pages_over_limit;
42411-
42412-atomic_t fscache_n_store_vmscan_not_storing;
42413-atomic_t fscache_n_store_vmscan_gone;
42414-atomic_t fscache_n_store_vmscan_busy;
42415-atomic_t fscache_n_store_vmscan_cancelled;
42416-
42417-atomic_t fscache_n_marks;
42418-atomic_t fscache_n_uncaches;
42419-
42420-atomic_t fscache_n_acquires;
42421-atomic_t fscache_n_acquires_null;
42422-atomic_t fscache_n_acquires_no_cache;
42423-atomic_t fscache_n_acquires_ok;
42424-atomic_t fscache_n_acquires_nobufs;
42425-atomic_t fscache_n_acquires_oom;
42426-
42427-atomic_t fscache_n_updates;
42428-atomic_t fscache_n_updates_null;
42429-atomic_t fscache_n_updates_run;
42430-
42431-atomic_t fscache_n_relinquishes;
42432-atomic_t fscache_n_relinquishes_null;
42433-atomic_t fscache_n_relinquishes_waitcrt;
42434-atomic_t fscache_n_relinquishes_retire;
42435-
42436-atomic_t fscache_n_cookie_index;
42437-atomic_t fscache_n_cookie_data;
42438-atomic_t fscache_n_cookie_special;
42439-
42440-atomic_t fscache_n_object_alloc;
42441-atomic_t fscache_n_object_no_alloc;
42442-atomic_t fscache_n_object_lookups;
42443-atomic_t fscache_n_object_lookups_negative;
42444-atomic_t fscache_n_object_lookups_positive;
42445-atomic_t fscache_n_object_lookups_timed_out;
42446-atomic_t fscache_n_object_created;
42447-atomic_t fscache_n_object_avail;
42448-atomic_t fscache_n_object_dead;
42449-
42450-atomic_t fscache_n_checkaux_none;
42451-atomic_t fscache_n_checkaux_okay;
42452-atomic_t fscache_n_checkaux_update;
42453-atomic_t fscache_n_checkaux_obsolete;
42454+atomic_unchecked_t fscache_n_op_pend;
42455+atomic_unchecked_t fscache_n_op_run;
42456+atomic_unchecked_t fscache_n_op_enqueue;
42457+atomic_unchecked_t fscache_n_op_requeue;
42458+atomic_unchecked_t fscache_n_op_deferred_release;
42459+atomic_unchecked_t fscache_n_op_release;
42460+atomic_unchecked_t fscache_n_op_gc;
42461+atomic_unchecked_t fscache_n_op_cancelled;
42462+atomic_unchecked_t fscache_n_op_rejected;
42463+
42464+atomic_unchecked_t fscache_n_attr_changed;
42465+atomic_unchecked_t fscache_n_attr_changed_ok;
42466+atomic_unchecked_t fscache_n_attr_changed_nobufs;
42467+atomic_unchecked_t fscache_n_attr_changed_nomem;
42468+atomic_unchecked_t fscache_n_attr_changed_calls;
42469+
42470+atomic_unchecked_t fscache_n_allocs;
42471+atomic_unchecked_t fscache_n_allocs_ok;
42472+atomic_unchecked_t fscache_n_allocs_wait;
42473+atomic_unchecked_t fscache_n_allocs_nobufs;
42474+atomic_unchecked_t fscache_n_allocs_intr;
42475+atomic_unchecked_t fscache_n_allocs_object_dead;
42476+atomic_unchecked_t fscache_n_alloc_ops;
42477+atomic_unchecked_t fscache_n_alloc_op_waits;
42478+
42479+atomic_unchecked_t fscache_n_retrievals;
42480+atomic_unchecked_t fscache_n_retrievals_ok;
42481+atomic_unchecked_t fscache_n_retrievals_wait;
42482+atomic_unchecked_t fscache_n_retrievals_nodata;
42483+atomic_unchecked_t fscache_n_retrievals_nobufs;
42484+atomic_unchecked_t fscache_n_retrievals_intr;
42485+atomic_unchecked_t fscache_n_retrievals_nomem;
42486+atomic_unchecked_t fscache_n_retrievals_object_dead;
42487+atomic_unchecked_t fscache_n_retrieval_ops;
42488+atomic_unchecked_t fscache_n_retrieval_op_waits;
42489+
42490+atomic_unchecked_t fscache_n_stores;
42491+atomic_unchecked_t fscache_n_stores_ok;
42492+atomic_unchecked_t fscache_n_stores_again;
42493+atomic_unchecked_t fscache_n_stores_nobufs;
42494+atomic_unchecked_t fscache_n_stores_oom;
42495+atomic_unchecked_t fscache_n_store_ops;
42496+atomic_unchecked_t fscache_n_store_calls;
42497+atomic_unchecked_t fscache_n_store_pages;
42498+atomic_unchecked_t fscache_n_store_radix_deletes;
42499+atomic_unchecked_t fscache_n_store_pages_over_limit;
42500+
42501+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42502+atomic_unchecked_t fscache_n_store_vmscan_gone;
42503+atomic_unchecked_t fscache_n_store_vmscan_busy;
42504+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42505+
42506+atomic_unchecked_t fscache_n_marks;
42507+atomic_unchecked_t fscache_n_uncaches;
42508+
42509+atomic_unchecked_t fscache_n_acquires;
42510+atomic_unchecked_t fscache_n_acquires_null;
42511+atomic_unchecked_t fscache_n_acquires_no_cache;
42512+atomic_unchecked_t fscache_n_acquires_ok;
42513+atomic_unchecked_t fscache_n_acquires_nobufs;
42514+atomic_unchecked_t fscache_n_acquires_oom;
42515+
42516+atomic_unchecked_t fscache_n_updates;
42517+atomic_unchecked_t fscache_n_updates_null;
42518+atomic_unchecked_t fscache_n_updates_run;
42519+
42520+atomic_unchecked_t fscache_n_relinquishes;
42521+atomic_unchecked_t fscache_n_relinquishes_null;
42522+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42523+atomic_unchecked_t fscache_n_relinquishes_retire;
42524+
42525+atomic_unchecked_t fscache_n_cookie_index;
42526+atomic_unchecked_t fscache_n_cookie_data;
42527+atomic_unchecked_t fscache_n_cookie_special;
42528+
42529+atomic_unchecked_t fscache_n_object_alloc;
42530+atomic_unchecked_t fscache_n_object_no_alloc;
42531+atomic_unchecked_t fscache_n_object_lookups;
42532+atomic_unchecked_t fscache_n_object_lookups_negative;
42533+atomic_unchecked_t fscache_n_object_lookups_positive;
42534+atomic_unchecked_t fscache_n_object_lookups_timed_out;
42535+atomic_unchecked_t fscache_n_object_created;
42536+atomic_unchecked_t fscache_n_object_avail;
42537+atomic_unchecked_t fscache_n_object_dead;
42538+
42539+atomic_unchecked_t fscache_n_checkaux_none;
42540+atomic_unchecked_t fscache_n_checkaux_okay;
42541+atomic_unchecked_t fscache_n_checkaux_update;
42542+atomic_unchecked_t fscache_n_checkaux_obsolete;
42543
42544 atomic_t fscache_n_cop_alloc_object;
42545 atomic_t fscache_n_cop_lookup_object;
42546@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
42547 seq_puts(m, "FS-Cache statistics\n");
42548
42549 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
42550- atomic_read(&fscache_n_cookie_index),
42551- atomic_read(&fscache_n_cookie_data),
42552- atomic_read(&fscache_n_cookie_special));
42553+ atomic_read_unchecked(&fscache_n_cookie_index),
42554+ atomic_read_unchecked(&fscache_n_cookie_data),
42555+ atomic_read_unchecked(&fscache_n_cookie_special));
42556
42557 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
42558- atomic_read(&fscache_n_object_alloc),
42559- atomic_read(&fscache_n_object_no_alloc),
42560- atomic_read(&fscache_n_object_avail),
42561- atomic_read(&fscache_n_object_dead));
42562+ atomic_read_unchecked(&fscache_n_object_alloc),
42563+ atomic_read_unchecked(&fscache_n_object_no_alloc),
42564+ atomic_read_unchecked(&fscache_n_object_avail),
42565+ atomic_read_unchecked(&fscache_n_object_dead));
42566 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
42567- atomic_read(&fscache_n_checkaux_none),
42568- atomic_read(&fscache_n_checkaux_okay),
42569- atomic_read(&fscache_n_checkaux_update),
42570- atomic_read(&fscache_n_checkaux_obsolete));
42571+ atomic_read_unchecked(&fscache_n_checkaux_none),
42572+ atomic_read_unchecked(&fscache_n_checkaux_okay),
42573+ atomic_read_unchecked(&fscache_n_checkaux_update),
42574+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
42575
42576 seq_printf(m, "Pages : mrk=%u unc=%u\n",
42577- atomic_read(&fscache_n_marks),
42578- atomic_read(&fscache_n_uncaches));
42579+ atomic_read_unchecked(&fscache_n_marks),
42580+ atomic_read_unchecked(&fscache_n_uncaches));
42581
42582 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
42583 " oom=%u\n",
42584- atomic_read(&fscache_n_acquires),
42585- atomic_read(&fscache_n_acquires_null),
42586- atomic_read(&fscache_n_acquires_no_cache),
42587- atomic_read(&fscache_n_acquires_ok),
42588- atomic_read(&fscache_n_acquires_nobufs),
42589- atomic_read(&fscache_n_acquires_oom));
42590+ atomic_read_unchecked(&fscache_n_acquires),
42591+ atomic_read_unchecked(&fscache_n_acquires_null),
42592+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
42593+ atomic_read_unchecked(&fscache_n_acquires_ok),
42594+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
42595+ atomic_read_unchecked(&fscache_n_acquires_oom));
42596
42597 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
42598- atomic_read(&fscache_n_object_lookups),
42599- atomic_read(&fscache_n_object_lookups_negative),
42600- atomic_read(&fscache_n_object_lookups_positive),
42601- atomic_read(&fscache_n_object_lookups_timed_out),
42602- atomic_read(&fscache_n_object_created));
42603+ atomic_read_unchecked(&fscache_n_object_lookups),
42604+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
42605+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
42606+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
42607+ atomic_read_unchecked(&fscache_n_object_created));
42608
42609 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
42610- atomic_read(&fscache_n_updates),
42611- atomic_read(&fscache_n_updates_null),
42612- atomic_read(&fscache_n_updates_run));
42613+ atomic_read_unchecked(&fscache_n_updates),
42614+ atomic_read_unchecked(&fscache_n_updates_null),
42615+ atomic_read_unchecked(&fscache_n_updates_run));
42616
42617 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
42618- atomic_read(&fscache_n_relinquishes),
42619- atomic_read(&fscache_n_relinquishes_null),
42620- atomic_read(&fscache_n_relinquishes_waitcrt),
42621- atomic_read(&fscache_n_relinquishes_retire));
42622+ atomic_read_unchecked(&fscache_n_relinquishes),
42623+ atomic_read_unchecked(&fscache_n_relinquishes_null),
42624+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
42625+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
42626
42627 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
42628- atomic_read(&fscache_n_attr_changed),
42629- atomic_read(&fscache_n_attr_changed_ok),
42630- atomic_read(&fscache_n_attr_changed_nobufs),
42631- atomic_read(&fscache_n_attr_changed_nomem),
42632- atomic_read(&fscache_n_attr_changed_calls));
42633+ atomic_read_unchecked(&fscache_n_attr_changed),
42634+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
42635+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
42636+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
42637+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
42638
42639 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
42640- atomic_read(&fscache_n_allocs),
42641- atomic_read(&fscache_n_allocs_ok),
42642- atomic_read(&fscache_n_allocs_wait),
42643- atomic_read(&fscache_n_allocs_nobufs),
42644- atomic_read(&fscache_n_allocs_intr));
42645+ atomic_read_unchecked(&fscache_n_allocs),
42646+ atomic_read_unchecked(&fscache_n_allocs_ok),
42647+ atomic_read_unchecked(&fscache_n_allocs_wait),
42648+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
42649+ atomic_read_unchecked(&fscache_n_allocs_intr));
42650 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
42651- atomic_read(&fscache_n_alloc_ops),
42652- atomic_read(&fscache_n_alloc_op_waits),
42653- atomic_read(&fscache_n_allocs_object_dead));
42654+ atomic_read_unchecked(&fscache_n_alloc_ops),
42655+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
42656+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
42657
42658 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
42659 " int=%u oom=%u\n",
42660- atomic_read(&fscache_n_retrievals),
42661- atomic_read(&fscache_n_retrievals_ok),
42662- atomic_read(&fscache_n_retrievals_wait),
42663- atomic_read(&fscache_n_retrievals_nodata),
42664- atomic_read(&fscache_n_retrievals_nobufs),
42665- atomic_read(&fscache_n_retrievals_intr),
42666- atomic_read(&fscache_n_retrievals_nomem));
42667+ atomic_read_unchecked(&fscache_n_retrievals),
42668+ atomic_read_unchecked(&fscache_n_retrievals_ok),
42669+ atomic_read_unchecked(&fscache_n_retrievals_wait),
42670+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
42671+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
42672+ atomic_read_unchecked(&fscache_n_retrievals_intr),
42673+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
42674 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
42675- atomic_read(&fscache_n_retrieval_ops),
42676- atomic_read(&fscache_n_retrieval_op_waits),
42677- atomic_read(&fscache_n_retrievals_object_dead));
42678+ atomic_read_unchecked(&fscache_n_retrieval_ops),
42679+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
42680+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
42681
42682 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
42683- atomic_read(&fscache_n_stores),
42684- atomic_read(&fscache_n_stores_ok),
42685- atomic_read(&fscache_n_stores_again),
42686- atomic_read(&fscache_n_stores_nobufs),
42687- atomic_read(&fscache_n_stores_oom));
42688+ atomic_read_unchecked(&fscache_n_stores),
42689+ atomic_read_unchecked(&fscache_n_stores_ok),
42690+ atomic_read_unchecked(&fscache_n_stores_again),
42691+ atomic_read_unchecked(&fscache_n_stores_nobufs),
42692+ atomic_read_unchecked(&fscache_n_stores_oom));
42693 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
42694- atomic_read(&fscache_n_store_ops),
42695- atomic_read(&fscache_n_store_calls),
42696- atomic_read(&fscache_n_store_pages),
42697- atomic_read(&fscache_n_store_radix_deletes),
42698- atomic_read(&fscache_n_store_pages_over_limit));
42699+ atomic_read_unchecked(&fscache_n_store_ops),
42700+ atomic_read_unchecked(&fscache_n_store_calls),
42701+ atomic_read_unchecked(&fscache_n_store_pages),
42702+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
42703+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
42704
42705 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
42706- atomic_read(&fscache_n_store_vmscan_not_storing),
42707- atomic_read(&fscache_n_store_vmscan_gone),
42708- atomic_read(&fscache_n_store_vmscan_busy),
42709- atomic_read(&fscache_n_store_vmscan_cancelled));
42710+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
42711+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
42712+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
42713+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
42714
42715 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
42716- atomic_read(&fscache_n_op_pend),
42717- atomic_read(&fscache_n_op_run),
42718- atomic_read(&fscache_n_op_enqueue),
42719- atomic_read(&fscache_n_op_cancelled),
42720- atomic_read(&fscache_n_op_rejected));
42721+ atomic_read_unchecked(&fscache_n_op_pend),
42722+ atomic_read_unchecked(&fscache_n_op_run),
42723+ atomic_read_unchecked(&fscache_n_op_enqueue),
42724+ atomic_read_unchecked(&fscache_n_op_cancelled),
42725+ atomic_read_unchecked(&fscache_n_op_rejected));
42726 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
42727- atomic_read(&fscache_n_op_deferred_release),
42728- atomic_read(&fscache_n_op_release),
42729- atomic_read(&fscache_n_op_gc));
42730+ atomic_read_unchecked(&fscache_n_op_deferred_release),
42731+ atomic_read_unchecked(&fscache_n_op_release),
42732+ atomic_read_unchecked(&fscache_n_op_gc));
42733
42734 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
42735 atomic_read(&fscache_n_cop_alloc_object),
42736diff -urNp linux-2.6.32.45/fs/fs_struct.c linux-2.6.32.45/fs/fs_struct.c
42737--- linux-2.6.32.45/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
42738+++ linux-2.6.32.45/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
42739@@ -4,6 +4,7 @@
42740 #include <linux/path.h>
42741 #include <linux/slab.h>
42742 #include <linux/fs_struct.h>
42743+#include <linux/grsecurity.h>
42744
42745 /*
42746 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
42747@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
42748 old_root = fs->root;
42749 fs->root = *path;
42750 path_get(path);
42751+ gr_set_chroot_entries(current, path);
42752 write_unlock(&fs->lock);
42753 if (old_root.dentry)
42754 path_put(&old_root);
42755@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
42756 && fs->root.mnt == old_root->mnt) {
42757 path_get(new_root);
42758 fs->root = *new_root;
42759+ gr_set_chroot_entries(p, new_root);
42760 count++;
42761 }
42762 if (fs->pwd.dentry == old_root->dentry
42763@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
42764 task_lock(tsk);
42765 write_lock(&fs->lock);
42766 tsk->fs = NULL;
42767- kill = !--fs->users;
42768+ gr_clear_chroot_entries(tsk);
42769+ kill = !atomic_dec_return(&fs->users);
42770 write_unlock(&fs->lock);
42771 task_unlock(tsk);
42772 if (kill)
42773@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
42774 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42775 /* We don't need to lock fs - think why ;-) */
42776 if (fs) {
42777- fs->users = 1;
42778+ atomic_set(&fs->users, 1);
42779 fs->in_exec = 0;
42780 rwlock_init(&fs->lock);
42781 fs->umask = old->umask;
42782@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
42783
42784 task_lock(current);
42785 write_lock(&fs->lock);
42786- kill = !--fs->users;
42787+ kill = !atomic_dec_return(&fs->users);
42788 current->fs = new_fs;
42789+ gr_set_chroot_entries(current, &new_fs->root);
42790 write_unlock(&fs->lock);
42791 task_unlock(current);
42792
42793@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
42794
42795 /* to be mentioned only in INIT_TASK */
42796 struct fs_struct init_fs = {
42797- .users = 1,
42798+ .users = ATOMIC_INIT(1),
42799 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
42800 .umask = 0022,
42801 };
42802@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
42803 task_lock(current);
42804
42805 write_lock(&init_fs.lock);
42806- init_fs.users++;
42807+ atomic_inc(&init_fs.users);
42808 write_unlock(&init_fs.lock);
42809
42810 write_lock(&fs->lock);
42811 current->fs = &init_fs;
42812- kill = !--fs->users;
42813+ gr_set_chroot_entries(current, &current->fs->root);
42814+ kill = !atomic_dec_return(&fs->users);
42815 write_unlock(&fs->lock);
42816
42817 task_unlock(current);
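The fs_struct hunks above turn fs->users from a plain integer decremented under fs->lock into an atomic_t, presumably so the added grsecurity chroot bookkeeping can take and drop references without widening the lock; kill = !--fs->users becomes kill = !atomic_dec_return(&fs->users), preserving the "last user tears everything down" logic. A minimal user-space sketch of that drop-last-reference pattern follows, with a made-up fs_like structure standing in for the kernel's fs_struct:

/* Sketch of the "last reference frees" pattern used above. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fs_like {
    atomic_int users;   /* models the atomic_t users field */
    int umask;
};

static struct fs_like *fs_alloc(void)
{
    struct fs_like *fs = malloc(sizeof(*fs));
    if (!fs)
        return NULL;
    atomic_init(&fs->users, 1);   /* models .users = ATOMIC_INIT(1) */
    fs->umask = 022;
    return fs;
}

static void fs_get(struct fs_like *fs)
{
    atomic_fetch_add(&fs->users, 1);
}

static void fs_put(struct fs_like *fs)
{
    /* atomic_dec_return() in the patch; fetch_sub returns the old value,
     * so old == 1 means this caller dropped the last reference. */
    if (atomic_fetch_sub(&fs->users, 1) == 1) {
        printf("last user gone, freeing\n");
        free(fs);
    }
}

int main(void)
{
    struct fs_like *fs = fs_alloc();
    if (!fs)
        return 1;
    fs_get(fs);   /* second user */
    fs_put(fs);   /* one user still left */
    fs_put(fs);   /* last put frees */
    return 0;
}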
42818diff -urNp linux-2.6.32.45/fs/fuse/cuse.c linux-2.6.32.45/fs/fuse/cuse.c
42819--- linux-2.6.32.45/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
42820+++ linux-2.6.32.45/fs/fuse/cuse.c 2011-08-05 20:33:55.000000000 -0400
42821@@ -576,10 +576,12 @@ static int __init cuse_init(void)
42822 INIT_LIST_HEAD(&cuse_conntbl[i]);
42823
42824 /* inherit and extend fuse_dev_operations */
42825- cuse_channel_fops = fuse_dev_operations;
42826- cuse_channel_fops.owner = THIS_MODULE;
42827- cuse_channel_fops.open = cuse_channel_open;
42828- cuse_channel_fops.release = cuse_channel_release;
42829+ pax_open_kernel();
42830+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
42831+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
42832+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
42833+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
42834+ pax_close_kernel();
42835
42836 cuse_class = class_create(THIS_MODULE, "cuse");
42837 if (IS_ERR(cuse_class))
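In the cuse.c hunk above, cuse_channel_fops is no longer assigned field by field; the whole fuse_dev_operations is memcpy'd into it and the three overrides are written through pax_open_kernel()/pax_close_kernel() with void ** casts. That is the pattern this patch uses wherever a table of function pointers has been made read-only (constified) at run time: briefly re-enable writes, patch the slots, re-protect. The sketch below models the same unlock, patch, relock idea in user space with mprotect(); the real kernel helpers toggle write protection by other means, so treat this purely as an analogy.

/* User-space model of patching a read-only ops table, in the spirit of
 * the pax_open_kernel()/pax_close_kernel() bracketing above. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops {
    int (*open)(void);
    int (*release)(void);
};

static int base_open(void)    { puts("base open");    return 0; }
static int cuse_open(void)    { puts("cuse open");    return 0; }
static int base_release(void) { puts("base release"); return 0; }

int main(void)
{
    long pg = sysconf(_SC_PAGESIZE);
    /* One page holding the "const" ops table. */
    struct ops *tbl = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (tbl == MAP_FAILED)
        return 1;

    struct ops base = { base_open, base_release };
    memcpy(tbl, &base, sizeof(base));        /* inherit the base operations */
    mprotect(tbl, pg, PROT_READ);            /* table is now read-only */

    /* "pax_open_kernel()": writable again, patch one slot,
     * then "pax_close_kernel()": re-protect. */
    mprotect(tbl, pg, PROT_READ | PROT_WRITE);
    tbl->open = cuse_open;
    mprotect(tbl, pg, PROT_READ);

    tbl->open();        /* cuse open */
    tbl->release();     /* base release */
    munmap(tbl, pg);
    return 0;
}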
42838diff -urNp linux-2.6.32.45/fs/fuse/dev.c linux-2.6.32.45/fs/fuse/dev.c
42839--- linux-2.6.32.45/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
42840+++ linux-2.6.32.45/fs/fuse/dev.c 2011-08-05 20:33:55.000000000 -0400
42841@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
42842 {
42843 struct fuse_notify_inval_entry_out outarg;
42844 int err = -EINVAL;
42845- char buf[FUSE_NAME_MAX+1];
42846+ char *buf = NULL;
42847 struct qstr name;
42848
42849 if (size < sizeof(outarg))
42850@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
42851 if (outarg.namelen > FUSE_NAME_MAX)
42852 goto err;
42853
42854+ err = -ENOMEM;
42855+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
42856+ if (!buf)
42857+ goto err;
42858+
42859 name.name = buf;
42860 name.len = outarg.namelen;
42861 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
42862@@ -910,17 +915,15 @@ static int fuse_notify_inval_entry(struc
42863
42864 down_read(&fc->killsb);
42865 err = -ENOENT;
42866- if (!fc->sb)
42867- goto err_unlock;
42868-
42869- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
42870-
42871-err_unlock:
42872+ if (fc->sb)
42873+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
42874 up_read(&fc->killsb);
42875+ kfree(buf);
42876 return err;
42877
42878 err:
42879 fuse_copy_finish(cs);
42880+ kfree(buf);
42881 return err;
42882 }
42883
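The fuse_notify_inval_entry() hunk above replaces a FUSE_NAME_MAX+1 byte name buffer on the kernel stack (roughly 1 KB) with a kmalloc()ed one and reworks the exit paths so both the error and success legs free it; the oversized stack frame was the problem, not the logic. Below is a compact user-space sketch of the same stack-to-heap conversion with a single cleanup path; NAME_MAX_DEMO and handle_name() are invented stand-ins for the FUSE constant and function.

/* Sketch of moving a large on-stack name buffer to the heap. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_DEMO 1024

static int handle_name(const char *src, size_t namelen)
{
    char *buf = NULL;
    int err;

    err = -EINVAL;
    if (namelen > NAME_MAX_DEMO)
        goto out;

    err = -ENOMEM;
    buf = malloc(NAME_MAX_DEMO + 1);   /* was: char buf[NAME_MAX_DEMO + 1]; */
    if (!buf)
        goto out;

    memcpy(buf, src, namelen);
    buf[namelen] = '\0';
    printf("invalidate entry \"%s\"\n", buf);
    err = 0;
out:
    free(buf);          /* free(NULL) is a no-op, so one exit path suffices */
    return err;
}

int main(void)
{
    const char name[] = "example";
    return handle_name(name, sizeof(name) - 1) ? 1 : 0;
}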
42884diff -urNp linux-2.6.32.45/fs/fuse/dir.c linux-2.6.32.45/fs/fuse/dir.c
42885--- linux-2.6.32.45/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
42886+++ linux-2.6.32.45/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
42887@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
42888 return link;
42889 }
42890
42891-static void free_link(char *link)
42892+static void free_link(const char *link)
42893 {
42894 if (!IS_ERR(link))
42895 free_page((unsigned long) link);
42896diff -urNp linux-2.6.32.45/fs/gfs2/ops_inode.c linux-2.6.32.45/fs/gfs2/ops_inode.c
42897--- linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
42898+++ linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
42899@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
42900 unsigned int x;
42901 int error;
42902
42903+ pax_track_stack();
42904+
42905 if (ndentry->d_inode) {
42906 nip = GFS2_I(ndentry->d_inode);
42907 if (ip == nip)
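The pax_track_stack() calls that start appearing here (gfs2_rename() above, and the hfsplus, jbd and jffs2 routines that follow) are added to functions with unusually large stack frames. As far as this patch is concerned they appear to feed PaX's stack-depth tracking (the PAX_MEMORY_STACKLEAK side of the patch), which records how deep the kernel stack has grown so the used region can be cleared before returning to user space; the call itself only samples the current stack pointer. The toy below models that low-water-mark idea in user space; track_stack() and the 4 KB buffer are illustrative only, and it assumes a downward-growing stack.

/* Toy model of stack-depth tracking: each instrumented function records
 * the deepest (lowest) stack address it reached, so a later pass could
 * wipe everything between that low-water mark and the stack top. */
#include <stdint.h>
#include <stdio.h>

static uintptr_t lowest_sp = UINTPTR_MAX;

static void track_stack(void)
{
    char marker;                        /* approximates the current stack pointer */
    uintptr_t sp = (uintptr_t)&marker;

    if (sp < lowest_sp)
        lowest_sp = sp;
}

static void big_frame_function(void)
{
    volatile char scratch[4096];        /* large local buffer forces a deep frame */

    track_stack();                      /* record how deep we are */
    scratch[0] = 1;
    scratch[sizeof(scratch) - 1] = 1;
}

int main(void)
{
    char top;

    track_stack();
    big_frame_function();
    printf("stack grew by at least %lu bytes\n",
           (unsigned long)((uintptr_t)&top - lowest_sp));
    return 0;
}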
42908diff -urNp linux-2.6.32.45/fs/gfs2/sys.c linux-2.6.32.45/fs/gfs2/sys.c
42909--- linux-2.6.32.45/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
42910+++ linux-2.6.32.45/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
42911@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
42912 return a->store ? a->store(sdp, buf, len) : len;
42913 }
42914
42915-static struct sysfs_ops gfs2_attr_ops = {
42916+static const struct sysfs_ops gfs2_attr_ops = {
42917 .show = gfs2_attr_show,
42918 .store = gfs2_attr_store,
42919 };
42920@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
42921 return 0;
42922 }
42923
42924-static struct kset_uevent_ops gfs2_uevent_ops = {
42925+static const struct kset_uevent_ops gfs2_uevent_ops = {
42926 .uevent = gfs2_uevent,
42927 };
42928
42929diff -urNp linux-2.6.32.45/fs/hfsplus/catalog.c linux-2.6.32.45/fs/hfsplus/catalog.c
42930--- linux-2.6.32.45/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
42931+++ linux-2.6.32.45/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
42932@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
42933 int err;
42934 u16 type;
42935
42936+ pax_track_stack();
42937+
42938 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
42939 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
42940 if (err)
42941@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
42942 int entry_size;
42943 int err;
42944
42945+ pax_track_stack();
42946+
42947 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
42948 sb = dir->i_sb;
42949 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
42950@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
42951 int entry_size, type;
42952 int err = 0;
42953
42954+ pax_track_stack();
42955+
42956 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
42957 dst_dir->i_ino, dst_name->name);
42958 sb = src_dir->i_sb;
42959diff -urNp linux-2.6.32.45/fs/hfsplus/dir.c linux-2.6.32.45/fs/hfsplus/dir.c
42960--- linux-2.6.32.45/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
42961+++ linux-2.6.32.45/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
42962@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
42963 struct hfsplus_readdir_data *rd;
42964 u16 type;
42965
42966+ pax_track_stack();
42967+
42968 if (filp->f_pos >= inode->i_size)
42969 return 0;
42970
42971diff -urNp linux-2.6.32.45/fs/hfsplus/inode.c linux-2.6.32.45/fs/hfsplus/inode.c
42972--- linux-2.6.32.45/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
42973+++ linux-2.6.32.45/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
42974@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
42975 int res = 0;
42976 u16 type;
42977
42978+ pax_track_stack();
42979+
42980 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
42981
42982 HFSPLUS_I(inode).dev = 0;
42983@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
42984 struct hfs_find_data fd;
42985 hfsplus_cat_entry entry;
42986
42987+ pax_track_stack();
42988+
42989 if (HFSPLUS_IS_RSRC(inode))
42990 main_inode = HFSPLUS_I(inode).rsrc_inode;
42991
42992diff -urNp linux-2.6.32.45/fs/hfsplus/ioctl.c linux-2.6.32.45/fs/hfsplus/ioctl.c
42993--- linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
42994+++ linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
42995@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
42996 struct hfsplus_cat_file *file;
42997 int res;
42998
42999+ pax_track_stack();
43000+
43001 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43002 return -EOPNOTSUPP;
43003
43004@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43005 struct hfsplus_cat_file *file;
43006 ssize_t res = 0;
43007
43008+ pax_track_stack();
43009+
43010 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43011 return -EOPNOTSUPP;
43012
43013diff -urNp linux-2.6.32.45/fs/hfsplus/super.c linux-2.6.32.45/fs/hfsplus/super.c
43014--- linux-2.6.32.45/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
43015+++ linux-2.6.32.45/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
43016@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
43017 struct nls_table *nls = NULL;
43018 int err = -EINVAL;
43019
43020+ pax_track_stack();
43021+
43022 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43023 if (!sbi)
43024 return -ENOMEM;
43025diff -urNp linux-2.6.32.45/fs/hugetlbfs/inode.c linux-2.6.32.45/fs/hugetlbfs/inode.c
43026--- linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43027+++ linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
43028@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
43029 .kill_sb = kill_litter_super,
43030 };
43031
43032-static struct vfsmount *hugetlbfs_vfsmount;
43033+struct vfsmount *hugetlbfs_vfsmount;
43034
43035 static int can_do_hugetlb_shm(void)
43036 {
43037diff -urNp linux-2.6.32.45/fs/ioctl.c linux-2.6.32.45/fs/ioctl.c
43038--- linux-2.6.32.45/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43039+++ linux-2.6.32.45/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
43040@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
43041 u64 phys, u64 len, u32 flags)
43042 {
43043 struct fiemap_extent extent;
43044- struct fiemap_extent *dest = fieinfo->fi_extents_start;
43045+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
43046
43047 /* only count the extents */
43048 if (fieinfo->fi_extents_max == 0) {
43049@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
43050
43051 fieinfo.fi_flags = fiemap.fm_flags;
43052 fieinfo.fi_extents_max = fiemap.fm_extent_count;
43053- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
43054+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
43055
43056 if (fiemap.fm_extent_count != 0 &&
43057 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
43058@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
43059 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
43060 fiemap.fm_flags = fieinfo.fi_flags;
43061 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
43062- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
43063+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
43064 error = -EFAULT;
43065
43066 return error;
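The fs/ioctl.c hunk above changes only annotations: fi_extents_start and the copy_to_user() destination are marked __user, with a __force cast where the code deliberately reinterprets the ioctl argument as a user pointer. The markers are invisible to gcc but let sparse verify that user-supplied addresses are only ever touched through access_ok()/copy_to_user()-style helpers. A small stand-alone sketch of how such address-space markers degrade to no-ops outside a sparse run; copy_to_user_model() and fiemap_like are invented for the example.

/* Sketch of the __user/__force address-space annotations used above. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__                 /* defined when sparse analyses the file */
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif

struct fiemap_like { unsigned long start, len; };

/* Stand-in for copy_to_user(): the only place a __user pointer may be
 * dereferenced (here just memcpy, since this model runs in one space). */
static unsigned long copy_to_user_model(void __user *dst, const void *src, size_t n)
{
    memcpy((__force void *)dst, src, n);
    return 0;                      /* 0 bytes left uncopied */
}

int main(void)
{
    struct fiemap_like kernel_copy = { 4096, 512 };
    struct fiemap_like user_buf;
    /* The __force cast documents a deliberate address-space crossing,
     * mirroring (__force char __user *)arg in the patch. */
    struct fiemap_like __user *uptr =
        (__force struct fiemap_like __user *)&user_buf;

    if (copy_to_user_model(uptr, &kernel_copy, sizeof(kernel_copy)))
        return 1;
    printf("start=%lu len=%lu\n", user_buf.start, user_buf.len);
    return 0;
}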
43067diff -urNp linux-2.6.32.45/fs/jbd/checkpoint.c linux-2.6.32.45/fs/jbd/checkpoint.c
43068--- linux-2.6.32.45/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
43069+++ linux-2.6.32.45/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
43070@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
43071 tid_t this_tid;
43072 int result;
43073
43074+ pax_track_stack();
43075+
43076 jbd_debug(1, "Start checkpoint\n");
43077
43078 /*
43079diff -urNp linux-2.6.32.45/fs/jffs2/compr_rtime.c linux-2.6.32.45/fs/jffs2/compr_rtime.c
43080--- linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
43081+++ linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
43082@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43083 int outpos = 0;
43084 int pos=0;
43085
43086+ pax_track_stack();
43087+
43088 memset(positions,0,sizeof(positions));
43089
43090 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43091@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
43092 int outpos = 0;
43093 int pos=0;
43094
43095+ pax_track_stack();
43096+
43097 memset(positions,0,sizeof(positions));
43098
43099 while (outpos<destlen) {
43100diff -urNp linux-2.6.32.45/fs/jffs2/compr_rubin.c linux-2.6.32.45/fs/jffs2/compr_rubin.c
43101--- linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
43102+++ linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
43103@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43104 int ret;
43105 uint32_t mysrclen, mydstlen;
43106
43107+ pax_track_stack();
43108+
43109 mysrclen = *sourcelen;
43110 mydstlen = *dstlen - 8;
43111
43112diff -urNp linux-2.6.32.45/fs/jffs2/erase.c linux-2.6.32.45/fs/jffs2/erase.c
43113--- linux-2.6.32.45/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
43114+++ linux-2.6.32.45/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
43115@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
43116 struct jffs2_unknown_node marker = {
43117 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43118 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43119- .totlen = cpu_to_je32(c->cleanmarker_size)
43120+ .totlen = cpu_to_je32(c->cleanmarker_size),
43121+ .hdr_crc = cpu_to_je32(0)
43122 };
43123
43124 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43125diff -urNp linux-2.6.32.45/fs/jffs2/wbuf.c linux-2.6.32.45/fs/jffs2/wbuf.c
43126--- linux-2.6.32.45/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
43127+++ linux-2.6.32.45/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
43128@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43129 {
43130 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43131 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43132- .totlen = constant_cpu_to_je32(8)
43133+ .totlen = constant_cpu_to_je32(8),
43134+ .hdr_crc = constant_cpu_to_je32(0)
43135 };
43136
43137 /*
43138diff -urNp linux-2.6.32.45/fs/jffs2/xattr.c linux-2.6.32.45/fs/jffs2/xattr.c
43139--- linux-2.6.32.45/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
43140+++ linux-2.6.32.45/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
43141@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43142
43143 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43144
43145+ pax_track_stack();
43146+
43147 /* Phase.1 : Merge same xref */
43148 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43149 xref_tmphash[i] = NULL;
43150diff -urNp linux-2.6.32.45/fs/jfs/super.c linux-2.6.32.45/fs/jfs/super.c
43151--- linux-2.6.32.45/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
43152+++ linux-2.6.32.45/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
43153@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
43154
43155 jfs_inode_cachep =
43156 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43157- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43158+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43159 init_once);
43160 if (jfs_inode_cachep == NULL)
43161 return -ENOMEM;
43162diff -urNp linux-2.6.32.45/fs/Kconfig.binfmt linux-2.6.32.45/fs/Kconfig.binfmt
43163--- linux-2.6.32.45/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
43164+++ linux-2.6.32.45/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
43165@@ -86,7 +86,7 @@ config HAVE_AOUT
43166
43167 config BINFMT_AOUT
43168 tristate "Kernel support for a.out and ECOFF binaries"
43169- depends on HAVE_AOUT
43170+ depends on HAVE_AOUT && BROKEN
43171 ---help---
43172 A.out (Assembler.OUTput) is a set of formats for libraries and
43173 executables used in the earliest versions of UNIX. Linux used
43174diff -urNp linux-2.6.32.45/fs/libfs.c linux-2.6.32.45/fs/libfs.c
43175--- linux-2.6.32.45/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
43176+++ linux-2.6.32.45/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
43177@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
43178
43179 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43180 struct dentry *next;
43181+ char d_name[sizeof(next->d_iname)];
43182+ const unsigned char *name;
43183+
43184 next = list_entry(p, struct dentry, d_u.d_child);
43185 if (d_unhashed(next) || !next->d_inode)
43186 continue;
43187
43188 spin_unlock(&dcache_lock);
43189- if (filldir(dirent, next->d_name.name,
43190+ name = next->d_name.name;
43191+ if (name == next->d_iname) {
43192+ memcpy(d_name, name, next->d_name.len);
43193+ name = d_name;
43194+ }
43195+ if (filldir(dirent, name,
43196 next->d_name.len, filp->f_pos,
43197 next->d_inode->i_ino,
43198 dt_type(next->d_inode)) < 0)
43199diff -urNp linux-2.6.32.45/fs/lockd/clntproc.c linux-2.6.32.45/fs/lockd/clntproc.c
43200--- linux-2.6.32.45/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
43201+++ linux-2.6.32.45/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
43202@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43203 /*
43204 * Cookie counter for NLM requests
43205 */
43206-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43207+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43208
43209 void nlmclnt_next_cookie(struct nlm_cookie *c)
43210 {
43211- u32 cookie = atomic_inc_return(&nlm_cookie);
43212+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43213
43214 memcpy(c->data, &cookie, 4);
43215 c->len=4;
43216@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43217 struct nlm_rqst reqst, *req;
43218 int status;
43219
43220+ pax_track_stack();
43221+
43222 req = &reqst;
43223 memset(req, 0, sizeof(*req));
43224 locks_init_lock(&req->a_args.lock.fl);
43225diff -urNp linux-2.6.32.45/fs/lockd/svc.c linux-2.6.32.45/fs/lockd/svc.c
43226--- linux-2.6.32.45/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
43227+++ linux-2.6.32.45/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
43228@@ -43,7 +43,7 @@
43229
43230 static struct svc_program nlmsvc_program;
43231
43232-struct nlmsvc_binding * nlmsvc_ops;
43233+const struct nlmsvc_binding * nlmsvc_ops;
43234 EXPORT_SYMBOL_GPL(nlmsvc_ops);
43235
43236 static DEFINE_MUTEX(nlmsvc_mutex);
43237diff -urNp linux-2.6.32.45/fs/locks.c linux-2.6.32.45/fs/locks.c
43238--- linux-2.6.32.45/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
43239+++ linux-2.6.32.45/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
43240@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
43241
43242 static struct kmem_cache *filelock_cache __read_mostly;
43243
43244+static void locks_init_lock_always(struct file_lock *fl)
43245+{
43246+ fl->fl_next = NULL;
43247+ fl->fl_fasync = NULL;
43248+ fl->fl_owner = NULL;
43249+ fl->fl_pid = 0;
43250+ fl->fl_nspid = NULL;
43251+ fl->fl_file = NULL;
43252+ fl->fl_flags = 0;
43253+ fl->fl_type = 0;
43254+ fl->fl_start = fl->fl_end = 0;
43255+}
43256+
43257 /* Allocate an empty lock structure. */
43258 static struct file_lock *locks_alloc_lock(void)
43259 {
43260- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43261+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43262+
43263+ if (fl)
43264+ locks_init_lock_always(fl);
43265+
43266+ return fl;
43267 }
43268
43269 void locks_release_private(struct file_lock *fl)
43270@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
43271 INIT_LIST_HEAD(&fl->fl_link);
43272 INIT_LIST_HEAD(&fl->fl_block);
43273 init_waitqueue_head(&fl->fl_wait);
43274- fl->fl_next = NULL;
43275- fl->fl_fasync = NULL;
43276- fl->fl_owner = NULL;
43277- fl->fl_pid = 0;
43278- fl->fl_nspid = NULL;
43279- fl->fl_file = NULL;
43280- fl->fl_flags = 0;
43281- fl->fl_type = 0;
43282- fl->fl_start = fl->fl_end = 0;
43283 fl->fl_ops = NULL;
43284 fl->fl_lmops = NULL;
43285+ locks_init_lock_always(fl);
43286 }
43287
43288 EXPORT_SYMBOL(locks_init_lock);
43289@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
43290 return;
43291
43292 if (filp->f_op && filp->f_op->flock) {
43293- struct file_lock fl = {
43294+ struct file_lock flock = {
43295 .fl_pid = current->tgid,
43296 .fl_file = filp,
43297 .fl_flags = FL_FLOCK,
43298 .fl_type = F_UNLCK,
43299 .fl_end = OFFSET_MAX,
43300 };
43301- filp->f_op->flock(filp, F_SETLKW, &fl);
43302- if (fl.fl_ops && fl.fl_ops->fl_release_private)
43303- fl.fl_ops->fl_release_private(&fl);
43304+ filp->f_op->flock(filp, F_SETLKW, &flock);
43305+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
43306+ flock.fl_ops->fl_release_private(&flock);
43307 }
43308
43309 lock_kernel();
43310diff -urNp linux-2.6.32.45/fs/mbcache.c linux-2.6.32.45/fs/mbcache.c
43311--- linux-2.6.32.45/fs/mbcache.c 2011-03-27 14:31:47.000000000 -0400
43312+++ linux-2.6.32.45/fs/mbcache.c 2011-08-05 20:33:55.000000000 -0400
43313@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct
43314 if (!cache)
43315 goto fail;
43316 cache->c_name = name;
43317- cache->c_op.free = NULL;
43318+ *(void **)&cache->c_op.free = NULL;
43319 if (cache_op)
43320- cache->c_op.free = cache_op->free;
43321+ *(void **)&cache->c_op.free = cache_op->free;
43322 atomic_set(&cache->c_entry_count, 0);
43323 cache->c_bucket_bits = bucket_bits;
43324 #ifdef MB_CACHE_INDEXES_COUNT
43325diff -urNp linux-2.6.32.45/fs/namei.c linux-2.6.32.45/fs/namei.c
43326--- linux-2.6.32.45/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
43327+++ linux-2.6.32.45/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
43328@@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
43329 return ret;
43330
43331 /*
43332- * Read/write DACs are always overridable.
43333- * Executable DACs are overridable if at least one exec bit is set.
43334- */
43335- if (!(mask & MAY_EXEC) || execute_ok(inode))
43336- if (capable(CAP_DAC_OVERRIDE))
43337- return 0;
43338-
43339- /*
43340 * Searching includes executable on directories, else just read.
43341 */
43342 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43343@@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
43344 if (capable(CAP_DAC_READ_SEARCH))
43345 return 0;
43346
43347+ /*
43348+ * Read/write DACs are always overridable.
43349+ * Executable DACs are overridable if at least one exec bit is set.
43350+ */
43351+ if (!(mask & MAY_EXEC) || execute_ok(inode))
43352+ if (capable(CAP_DAC_OVERRIDE))
43353+ return 0;
43354+
43355 return -EACCES;
43356 }
43357
43358@@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
43359 if (!ret)
43360 goto ok;
43361
43362- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
43363+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
43364+ capable(CAP_DAC_OVERRIDE))
43365 goto ok;
43366
43367 return ret;
43368@@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
43369 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
43370 error = PTR_ERR(cookie);
43371 if (!IS_ERR(cookie)) {
43372- char *s = nd_get_link(nd);
43373+ const char *s = nd_get_link(nd);
43374 error = 0;
43375 if (s)
43376 error = __vfs_follow_link(nd, s);
43377@@ -669,6 +670,13 @@ static inline int do_follow_link(struct
43378 err = security_inode_follow_link(path->dentry, nd);
43379 if (err)
43380 goto loop;
43381+
43382+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
43383+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
43384+ err = -EACCES;
43385+ goto loop;
43386+ }
43387+
43388 current->link_count++;
43389 current->total_link_count++;
43390 nd->depth++;
43391@@ -1016,11 +1024,18 @@ return_reval:
43392 break;
43393 }
43394 return_base:
43395+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43396+ path_put(&nd->path);
43397+ return -ENOENT;
43398+ }
43399 return 0;
43400 out_dput:
43401 path_put_conditional(&next, nd);
43402 break;
43403 }
43404+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
43405+ err = -ENOENT;
43406+
43407 path_put(&nd->path);
43408 return_err:
43409 return err;
43410@@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
43411 int retval = path_init(dfd, name, flags, nd);
43412 if (!retval)
43413 retval = path_walk(name, nd);
43414- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
43415- nd->path.dentry->d_inode))
43416- audit_inode(name, nd->path.dentry);
43417+
43418+ if (likely(!retval)) {
43419+ if (nd->path.dentry && nd->path.dentry->d_inode) {
43420+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43421+ retval = -ENOENT;
43422+ if (!audit_dummy_context())
43423+ audit_inode(name, nd->path.dentry);
43424+ }
43425+ }
43426 if (nd->root.mnt) {
43427 path_put(&nd->root);
43428 nd->root.mnt = NULL;
43429 }
43430+
43431 return retval;
43432 }
43433
43434@@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
43435 if (error)
43436 goto err_out;
43437
43438+
43439+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
43440+ error = -EPERM;
43441+ goto err_out;
43442+ }
43443+ if (gr_handle_rawio(inode)) {
43444+ error = -EPERM;
43445+ goto err_out;
43446+ }
43447+ if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
43448+ error = -EACCES;
43449+ goto err_out;
43450+ }
43451+
43452 if (flag & O_TRUNC) {
43453 error = get_write_access(inode);
43454 if (error)
43455@@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
43456 int error;
43457 struct dentry *dir = nd->path.dentry;
43458
43459+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
43460+ error = -EACCES;
43461+ goto out_unlock;
43462+ }
43463+
43464 if (!IS_POSIXACL(dir->d_inode))
43465 mode &= ~current_umask();
43466 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
43467 if (error)
43468 goto out_unlock;
43469 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
43470+ if (!error)
43471+ gr_handle_create(path->dentry, nd->path.mnt);
43472 out_unlock:
43473 mutex_unlock(&dir->d_inode->i_mutex);
43474 dput(nd->path.dentry);
43475@@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
43476 &nd, flag);
43477 if (error)
43478 return ERR_PTR(error);
43479+
43480+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
43481+ error = -EPERM;
43482+ goto exit;
43483+ }
43484+
43485+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
43486+ error = -EPERM;
43487+ goto exit;
43488+ }
43489+
43490+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
43491+ error = -EACCES;
43492+ goto exit;
43493+ }
43494+
43495 goto ok;
43496 }
43497
43498@@ -1795,6 +1854,14 @@ do_last:
43499 /*
43500 * It already exists.
43501 */
43502+
43503+ /* only check if O_CREAT is specified, all other checks need
43504+ to go into may_open */
43505+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
43506+ error = -EACCES;
43507+ goto exit_mutex_unlock;
43508+ }
43509+
43510 mutex_unlock(&dir->d_inode->i_mutex);
43511 audit_inode(pathname, path.dentry);
43512
43513@@ -1887,6 +1954,13 @@ do_link:
43514 error = security_inode_follow_link(path.dentry, &nd);
43515 if (error)
43516 goto exit_dput;
43517+
43518+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
43519+ path.dentry, nd.path.mnt)) {
43520+ error = -EACCES;
43521+ goto exit_dput;
43522+ }
43523+
43524 error = __do_follow_link(&path, &nd);
43525 if (error) {
43526 /* Does someone understand code flow here? Or it is only
43527@@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43528 error = may_mknod(mode);
43529 if (error)
43530 goto out_dput;
43531+
43532+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
43533+ error = -EPERM;
43534+ goto out_dput;
43535+ }
43536+
43537+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
43538+ error = -EACCES;
43539+ goto out_dput;
43540+ }
43541+
43542 error = mnt_want_write(nd.path.mnt);
43543 if (error)
43544 goto out_dput;
43545@@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43546 }
43547 out_drop_write:
43548 mnt_drop_write(nd.path.mnt);
43549+
43550+ if (!error)
43551+ gr_handle_create(dentry, nd.path.mnt);
43552 out_dput:
43553 dput(dentry);
43554 out_unlock:
43555@@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43556 if (IS_ERR(dentry))
43557 goto out_unlock;
43558
43559+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
43560+ error = -EACCES;
43561+ goto out_dput;
43562+ }
43563+
43564 if (!IS_POSIXACL(nd.path.dentry->d_inode))
43565 mode &= ~current_umask();
43566 error = mnt_want_write(nd.path.mnt);
43567@@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43568 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
43569 out_drop_write:
43570 mnt_drop_write(nd.path.mnt);
43571+
43572+ if (!error)
43573+ gr_handle_create(dentry, nd.path.mnt);
43574+
43575 out_dput:
43576 dput(dentry);
43577 out_unlock:
43578@@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
43579 char * name;
43580 struct dentry *dentry;
43581 struct nameidata nd;
43582+ ino_t saved_ino = 0;
43583+ dev_t saved_dev = 0;
43584
43585 error = user_path_parent(dfd, pathname, &nd, &name);
43586 if (error)
43587@@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
43588 error = PTR_ERR(dentry);
43589 if (IS_ERR(dentry))
43590 goto exit2;
43591+
43592+ if (dentry->d_inode != NULL) {
43593+ if (dentry->d_inode->i_nlink <= 1) {
43594+ saved_ino = dentry->d_inode->i_ino;
43595+ saved_dev = gr_get_dev_from_dentry(dentry);
43596+ }
43597+
43598+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
43599+ error = -EACCES;
43600+ goto exit3;
43601+ }
43602+ }
43603+
43604 error = mnt_want_write(nd.path.mnt);
43605 if (error)
43606 goto exit3;
43607@@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
43608 if (error)
43609 goto exit4;
43610 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
43611+ if (!error && (saved_dev || saved_ino))
43612+ gr_handle_delete(saved_ino, saved_dev);
43613 exit4:
43614 mnt_drop_write(nd.path.mnt);
43615 exit3:
43616@@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
43617 struct dentry *dentry;
43618 struct nameidata nd;
43619 struct inode *inode = NULL;
43620+ ino_t saved_ino = 0;
43621+ dev_t saved_dev = 0;
43622
43623 error = user_path_parent(dfd, pathname, &nd, &name);
43624 if (error)
43625@@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
43626 if (nd.last.name[nd.last.len])
43627 goto slashes;
43628 inode = dentry->d_inode;
43629- if (inode)
43630+ if (inode) {
43631+ if (inode->i_nlink <= 1) {
43632+ saved_ino = inode->i_ino;
43633+ saved_dev = gr_get_dev_from_dentry(dentry);
43634+ }
43635+
43636 atomic_inc(&inode->i_count);
43637+
43638+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
43639+ error = -EACCES;
43640+ goto exit2;
43641+ }
43642+ }
43643 error = mnt_want_write(nd.path.mnt);
43644 if (error)
43645 goto exit2;
43646@@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
43647 if (error)
43648 goto exit3;
43649 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
43650+ if (!error && (saved_ino || saved_dev))
43651+ gr_handle_delete(saved_ino, saved_dev);
43652 exit3:
43653 mnt_drop_write(nd.path.mnt);
43654 exit2:
43655@@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
43656 if (IS_ERR(dentry))
43657 goto out_unlock;
43658
43659+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
43660+ error = -EACCES;
43661+ goto out_dput;
43662+ }
43663+
43664 error = mnt_want_write(nd.path.mnt);
43665 if (error)
43666 goto out_dput;
43667@@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
43668 if (error)
43669 goto out_drop_write;
43670 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
43671+ if (!error)
43672+ gr_handle_create(dentry, nd.path.mnt);
43673 out_drop_write:
43674 mnt_drop_write(nd.path.mnt);
43675 out_dput:
43676@@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
43677 error = PTR_ERR(new_dentry);
43678 if (IS_ERR(new_dentry))
43679 goto out_unlock;
43680+
43681+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
43682+ old_path.dentry->d_inode,
43683+ old_path.dentry->d_inode->i_mode, to)) {
43684+ error = -EACCES;
43685+ goto out_dput;
43686+ }
43687+
43688+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
43689+ old_path.dentry, old_path.mnt, to)) {
43690+ error = -EACCES;
43691+ goto out_dput;
43692+ }
43693+
43694 error = mnt_want_write(nd.path.mnt);
43695 if (error)
43696 goto out_dput;
43697@@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
43698 if (error)
43699 goto out_drop_write;
43700 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
43701+ if (!error)
43702+ gr_handle_create(new_dentry, nd.path.mnt);
43703 out_drop_write:
43704 mnt_drop_write(nd.path.mnt);
43705 out_dput:
43706@@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43707 char *to;
43708 int error;
43709
43710+ pax_track_stack();
43711+
43712 error = user_path_parent(olddfd, oldname, &oldnd, &from);
43713 if (error)
43714 goto exit;
43715@@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43716 if (new_dentry == trap)
43717 goto exit5;
43718
43719+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
43720+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
43721+ to);
43722+ if (error)
43723+ goto exit5;
43724+
43725 error = mnt_want_write(oldnd.path.mnt);
43726 if (error)
43727 goto exit5;
43728@@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43729 goto exit6;
43730 error = vfs_rename(old_dir->d_inode, old_dentry,
43731 new_dir->d_inode, new_dentry);
43732+ if (!error)
43733+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
43734+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
43735 exit6:
43736 mnt_drop_write(oldnd.path.mnt);
43737 exit5:
43738@@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
43739
43740 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
43741 {
43742+ char tmpbuf[64];
43743+ const char *newlink;
43744 int len;
43745
43746 len = PTR_ERR(link);
43747@@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
43748 len = strlen(link);
43749 if (len > (unsigned) buflen)
43750 len = buflen;
43751- if (copy_to_user(buffer, link, len))
43752+
43753+ if (len < sizeof(tmpbuf)) {
43754+ memcpy(tmpbuf, link, len);
43755+ newlink = tmpbuf;
43756+ } else
43757+ newlink = link;
43758+
43759+ if (copy_to_user(buffer, newlink, len))
43760 len = -EFAULT;
43761 out:
43762 return len;
43763diff -urNp linux-2.6.32.45/fs/namespace.c linux-2.6.32.45/fs/namespace.c
43764--- linux-2.6.32.45/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
43765+++ linux-2.6.32.45/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
43766@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
43767 if (!(sb->s_flags & MS_RDONLY))
43768 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
43769 up_write(&sb->s_umount);
43770+
43771+ gr_log_remount(mnt->mnt_devname, retval);
43772+
43773 return retval;
43774 }
43775
43776@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
43777 security_sb_umount_busy(mnt);
43778 up_write(&namespace_sem);
43779 release_mounts(&umount_list);
43780+
43781+ gr_log_unmount(mnt->mnt_devname, retval);
43782+
43783 return retval;
43784 }
43785
43786@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
43787 if (retval)
43788 goto dput_out;
43789
43790+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
43791+ retval = -EPERM;
43792+ goto dput_out;
43793+ }
43794+
43795+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
43796+ retval = -EPERM;
43797+ goto dput_out;
43798+ }
43799+
43800 if (flags & MS_REMOUNT)
43801 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
43802 data_page);
43803@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
43804 dev_name, data_page);
43805 dput_out:
43806 path_put(&path);
43807+
43808+ gr_log_mount(dev_name, dir_name, retval);
43809+
43810 return retval;
43811 }
43812
43813@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
43814 goto out1;
43815 }
43816
43817+ if (gr_handle_chroot_pivot()) {
43818+ error = -EPERM;
43819+ path_put(&old);
43820+ goto out1;
43821+ }
43822+
43823 read_lock(&current->fs->lock);
43824 root = current->fs->root;
43825 path_get(&current->fs->root);
43826diff -urNp linux-2.6.32.45/fs/ncpfs/dir.c linux-2.6.32.45/fs/ncpfs/dir.c
43827--- linux-2.6.32.45/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
43828+++ linux-2.6.32.45/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
43829@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
43830 int res, val = 0, len;
43831 __u8 __name[NCP_MAXPATHLEN + 1];
43832
43833+ pax_track_stack();
43834+
43835 parent = dget_parent(dentry);
43836 dir = parent->d_inode;
43837
43838@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
43839 int error, res, len;
43840 __u8 __name[NCP_MAXPATHLEN + 1];
43841
43842+ pax_track_stack();
43843+
43844 lock_kernel();
43845 error = -EIO;
43846 if (!ncp_conn_valid(server))
43847@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
43848 int error, result, len;
43849 int opmode;
43850 __u8 __name[NCP_MAXPATHLEN + 1];
43851-
43852+
43853 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
43854 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
43855
43856+ pax_track_stack();
43857+
43858 error = -EIO;
43859 lock_kernel();
43860 if (!ncp_conn_valid(server))
43861@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
43862 int error, len;
43863 __u8 __name[NCP_MAXPATHLEN + 1];
43864
43865+ pax_track_stack();
43866+
43867 DPRINTK("ncp_mkdir: making %s/%s\n",
43868 dentry->d_parent->d_name.name, dentry->d_name.name);
43869
43870@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
43871 if (!ncp_conn_valid(server))
43872 goto out;
43873
43874+ pax_track_stack();
43875+
43876 ncp_age_dentry(server, dentry);
43877 len = sizeof(__name);
43878 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
43879@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
43880 int old_len, new_len;
43881 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
43882
43883+ pax_track_stack();
43884+
43885 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
43886 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
43887 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
43888diff -urNp linux-2.6.32.45/fs/ncpfs/inode.c linux-2.6.32.45/fs/ncpfs/inode.c
43889--- linux-2.6.32.45/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43890+++ linux-2.6.32.45/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
43891@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
43892 #endif
43893 struct ncp_entry_info finfo;
43894
43895+ pax_track_stack();
43896+
43897 data.wdog_pid = NULL;
43898 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
43899 if (!server)
43900diff -urNp linux-2.6.32.45/fs/nfs/inode.c linux-2.6.32.45/fs/nfs/inode.c
43901--- linux-2.6.32.45/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
43902+++ linux-2.6.32.45/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
43903@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
43904 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
43905 nfsi->attrtimeo_timestamp = jiffies;
43906
43907- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
43908+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
43909 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
43910 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
43911 else
43912@@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
43913 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
43914 }
43915
43916-static atomic_long_t nfs_attr_generation_counter;
43917+static atomic_long_unchecked_t nfs_attr_generation_counter;
43918
43919 static unsigned long nfs_read_attr_generation_counter(void)
43920 {
43921- return atomic_long_read(&nfs_attr_generation_counter);
43922+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
43923 }
43924
43925 unsigned long nfs_inc_attr_generation_counter(void)
43926 {
43927- return atomic_long_inc_return(&nfs_attr_generation_counter);
43928+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
43929 }
43930
43931 void nfs_fattr_init(struct nfs_fattr *fattr)
43932diff -urNp linux-2.6.32.45/fs/nfsd/lockd.c linux-2.6.32.45/fs/nfsd/lockd.c
43933--- linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
43934+++ linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
43935@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
43936 fput(filp);
43937 }
43938
43939-static struct nlmsvc_binding nfsd_nlm_ops = {
43940+static const struct nlmsvc_binding nfsd_nlm_ops = {
43941 .fopen = nlm_fopen, /* open file for locking */
43942 .fclose = nlm_fclose, /* close file */
43943 };
43944diff -urNp linux-2.6.32.45/fs/nfsd/nfs4state.c linux-2.6.32.45/fs/nfsd/nfs4state.c
43945--- linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
43946+++ linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
43947@@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
43948 unsigned int cmd;
43949 int err;
43950
43951+ pax_track_stack();
43952+
43953 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
43954 (long long) lock->lk_offset,
43955 (long long) lock->lk_length);
43956diff -urNp linux-2.6.32.45/fs/nfsd/nfs4xdr.c linux-2.6.32.45/fs/nfsd/nfs4xdr.c
43957--- linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
43958+++ linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
43959@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
43960 struct nfsd4_compoundres *resp = rqstp->rq_resp;
43961 u32 minorversion = resp->cstate.minorversion;
43962
43963+ pax_track_stack();
43964+
43965 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
43966 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
43967 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
43968diff -urNp linux-2.6.32.45/fs/nfsd/vfs.c linux-2.6.32.45/fs/nfsd/vfs.c
43969--- linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
43970+++ linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
43971@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
43972 } else {
43973 oldfs = get_fs();
43974 set_fs(KERNEL_DS);
43975- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
43976+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
43977 set_fs(oldfs);
43978 }
43979
43980@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
43981
43982 /* Write the data. */
43983 oldfs = get_fs(); set_fs(KERNEL_DS);
43984- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
43985+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
43986 set_fs(oldfs);
43987 if (host_err < 0)
43988 goto out_nfserr;
43989@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
43990 */
43991
43992 oldfs = get_fs(); set_fs(KERNEL_DS);
43993- host_err = inode->i_op->readlink(dentry, buf, *lenp);
43994+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
43995 set_fs(oldfs);
43996
43997 if (host_err < 0)
43998diff -urNp linux-2.6.32.45/fs/nilfs2/ioctl.c linux-2.6.32.45/fs/nilfs2/ioctl.c
43999--- linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
44000+++ linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
44001@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
44002 unsigned int cmd, void __user *argp)
44003 {
44004 struct nilfs_argv argv[5];
44005- const static size_t argsz[5] = {
44006+ static const size_t argsz[5] = {
44007 sizeof(struct nilfs_vdesc),
44008 sizeof(struct nilfs_period),
44009 sizeof(__u64),
44010diff -urNp linux-2.6.32.45/fs/notify/dnotify/dnotify.c linux-2.6.32.45/fs/notify/dnotify/dnotify.c
44011--- linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
44012+++ linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
44013@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
44014 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
44015 }
44016
44017-static struct fsnotify_ops dnotify_fsnotify_ops = {
44018+static const struct fsnotify_ops dnotify_fsnotify_ops = {
44019 .handle_event = dnotify_handle_event,
44020 .should_send_event = dnotify_should_send_event,
44021 .free_group_priv = NULL,
44022diff -urNp linux-2.6.32.45/fs/notify/notification.c linux-2.6.32.45/fs/notify/notification.c
44023--- linux-2.6.32.45/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
44024+++ linux-2.6.32.45/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
44025@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44026 * get set to 0 so it will never get 'freed'
44027 */
44028 static struct fsnotify_event q_overflow_event;
44029-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44030+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44031
44032 /**
44033 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44034@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44035 */
44036 u32 fsnotify_get_cookie(void)
44037 {
44038- return atomic_inc_return(&fsnotify_sync_cookie);
44039+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44040 }
44041 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44042
44043diff -urNp linux-2.6.32.45/fs/ntfs/dir.c linux-2.6.32.45/fs/ntfs/dir.c
44044--- linux-2.6.32.45/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44045+++ linux-2.6.32.45/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
44046@@ -1328,7 +1328,7 @@ find_next_index_buffer:
44047 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44048 ~(s64)(ndir->itype.index.block_size - 1)));
44049 /* Bounds checks. */
44050- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44051+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44052 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44053 "inode 0x%lx or driver bug.", vdir->i_ino);
44054 goto err_out;
44055diff -urNp linux-2.6.32.45/fs/ntfs/file.c linux-2.6.32.45/fs/ntfs/file.c
44056--- linux-2.6.32.45/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
44057+++ linux-2.6.32.45/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
44058@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
44059 #endif /* NTFS_RW */
44060 };
44061
44062-const struct file_operations ntfs_empty_file_ops = {};
44063+const struct file_operations ntfs_empty_file_ops __read_only;
44064
44065-const struct inode_operations ntfs_empty_inode_ops = {};
44066+const struct inode_operations ntfs_empty_inode_ops __read_only;
44067diff -urNp linux-2.6.32.45/fs/ocfs2/cluster/masklog.c linux-2.6.32.45/fs/ocfs2/cluster/masklog.c
44068--- linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
44069+++ linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
44070@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
44071 return mlog_mask_store(mlog_attr->mask, buf, count);
44072 }
44073
44074-static struct sysfs_ops mlog_attr_ops = {
44075+static const struct sysfs_ops mlog_attr_ops = {
44076 .show = mlog_show,
44077 .store = mlog_store,
44078 };
44079diff -urNp linux-2.6.32.45/fs/ocfs2/localalloc.c linux-2.6.32.45/fs/ocfs2/localalloc.c
44080--- linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
44081+++ linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
44082@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
44083 goto bail;
44084 }
44085
44086- atomic_inc(&osb->alloc_stats.moves);
44087+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44088
44089 status = 0;
44090 bail:
44091diff -urNp linux-2.6.32.45/fs/ocfs2/namei.c linux-2.6.32.45/fs/ocfs2/namei.c
44092--- linux-2.6.32.45/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
44093+++ linux-2.6.32.45/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
44094@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
44095 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44096 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44097
44098+ pax_track_stack();
44099+
44100 /* At some point it might be nice to break this function up a
44101 * bit. */
44102
44103diff -urNp linux-2.6.32.45/fs/ocfs2/ocfs2.h linux-2.6.32.45/fs/ocfs2/ocfs2.h
44104--- linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
44105+++ linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
44106@@ -217,11 +217,11 @@ enum ocfs2_vol_state
44107
44108 struct ocfs2_alloc_stats
44109 {
44110- atomic_t moves;
44111- atomic_t local_data;
44112- atomic_t bitmap_data;
44113- atomic_t bg_allocs;
44114- atomic_t bg_extends;
44115+ atomic_unchecked_t moves;
44116+ atomic_unchecked_t local_data;
44117+ atomic_unchecked_t bitmap_data;
44118+ atomic_unchecked_t bg_allocs;
44119+ atomic_unchecked_t bg_extends;
44120 };
44121
44122 enum ocfs2_local_alloc_state
44123diff -urNp linux-2.6.32.45/fs/ocfs2/suballoc.c linux-2.6.32.45/fs/ocfs2/suballoc.c
44124--- linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
44125+++ linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
44126@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
44127 mlog_errno(status);
44128 goto bail;
44129 }
44130- atomic_inc(&osb->alloc_stats.bg_extends);
44131+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44132
44133 /* You should never ask for this much metadata */
44134 BUG_ON(bits_wanted >
44135@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
44136 mlog_errno(status);
44137 goto bail;
44138 }
44139- atomic_inc(&osb->alloc_stats.bg_allocs);
44140+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44141
44142 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
44143 ac->ac_bits_given += (*num_bits);
44144@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
44145 mlog_errno(status);
44146 goto bail;
44147 }
44148- atomic_inc(&osb->alloc_stats.bg_allocs);
44149+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44150
44151 BUG_ON(num_bits != 1);
44152
44153@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44154 cluster_start,
44155 num_clusters);
44156 if (!status)
44157- atomic_inc(&osb->alloc_stats.local_data);
44158+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44159 } else {
44160 if (min_clusters > (osb->bitmap_cpg - 1)) {
44161 /* The only paths asking for contiguousness
44162@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44163 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44164 bg_blkno,
44165 bg_bit_off);
44166- atomic_inc(&osb->alloc_stats.bitmap_data);
44167+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44168 }
44169 }
44170 if (status < 0) {
44171diff -urNp linux-2.6.32.45/fs/ocfs2/super.c linux-2.6.32.45/fs/ocfs2/super.c
44172--- linux-2.6.32.45/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
44173+++ linux-2.6.32.45/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
44174@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44175 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44176 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44177 "Stats",
44178- atomic_read(&osb->alloc_stats.bitmap_data),
44179- atomic_read(&osb->alloc_stats.local_data),
44180- atomic_read(&osb->alloc_stats.bg_allocs),
44181- atomic_read(&osb->alloc_stats.moves),
44182- atomic_read(&osb->alloc_stats.bg_extends));
44183+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44184+ atomic_read_unchecked(&osb->alloc_stats.local_data),
44185+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44186+ atomic_read_unchecked(&osb->alloc_stats.moves),
44187+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44188
44189 out += snprintf(buf + out, len - out,
44190 "%10s => State: %u Descriptor: %llu Size: %u bits "
44191@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
44192 spin_lock_init(&osb->osb_xattr_lock);
44193 ocfs2_init_inode_steal_slot(osb);
44194
44195- atomic_set(&osb->alloc_stats.moves, 0);
44196- atomic_set(&osb->alloc_stats.local_data, 0);
44197- atomic_set(&osb->alloc_stats.bitmap_data, 0);
44198- atomic_set(&osb->alloc_stats.bg_allocs, 0);
44199- atomic_set(&osb->alloc_stats.bg_extends, 0);
44200+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44201+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44202+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44203+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44204+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44205
44206 /* Copy the blockcheck stats from the superblock probe */
44207 osb->osb_ecc_stats = *stats;
44208diff -urNp linux-2.6.32.45/fs/open.c linux-2.6.32.45/fs/open.c
44209--- linux-2.6.32.45/fs/open.c 2011-03-27 14:31:47.000000000 -0400
44210+++ linux-2.6.32.45/fs/open.c 2011-04-17 15:56:46.000000000 -0400
44211@@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
44212 error = locks_verify_truncate(inode, NULL, length);
44213 if (!error)
44214 error = security_path_truncate(&path, length, 0);
44215+
44216+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44217+ error = -EACCES;
44218+
44219 if (!error) {
44220 vfs_dq_init(inode);
44221 error = do_truncate(path.dentry, length, 0, NULL);
44222@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44223 if (__mnt_is_readonly(path.mnt))
44224 res = -EROFS;
44225
44226+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44227+ res = -EACCES;
44228+
44229 out_path_release:
44230 path_put(&path);
44231 out:
44232@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44233 if (error)
44234 goto dput_and_out;
44235
44236+ gr_log_chdir(path.dentry, path.mnt);
44237+
44238 set_fs_pwd(current->fs, &path);
44239
44240 dput_and_out:
44241@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44242 goto out_putf;
44243
44244 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
44245+
44246+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44247+ error = -EPERM;
44248+
44249+ if (!error)
44250+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44251+
44252 if (!error)
44253 set_fs_pwd(current->fs, &file->f_path);
44254 out_putf:
44255@@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
44256 if (!capable(CAP_SYS_CHROOT))
44257 goto dput_and_out;
44258
44259+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44260+ goto dput_and_out;
44261+
44262+ if (gr_handle_chroot_caps(&path)) {
44263+ error = -ENOMEM;
44264+ goto dput_and_out;
44265+ }
44266+
44267 set_fs_root(current->fs, &path);
44268+
44269+ gr_handle_chroot_chdir(&path);
44270+
44271 error = 0;
44272 dput_and_out:
44273 path_put(&path);
44274@@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44275 err = mnt_want_write_file(file);
44276 if (err)
44277 goto out_putf;
44278+
44279 mutex_lock(&inode->i_mutex);
44280+
44281+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
44282+ err = -EACCES;
44283+ goto out_unlock;
44284+ }
44285+
44286 if (mode == (mode_t) -1)
44287 mode = inode->i_mode;
44288+
44289+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
44290+ err = -EPERM;
44291+ goto out_unlock;
44292+ }
44293+
44294 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44295 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44296 err = notify_change(dentry, &newattrs);
44297+
44298+out_unlock:
44299 mutex_unlock(&inode->i_mutex);
44300 mnt_drop_write(file->f_path.mnt);
44301 out_putf:
44302@@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44303 error = mnt_want_write(path.mnt);
44304 if (error)
44305 goto dput_and_out;
44306+
44307 mutex_lock(&inode->i_mutex);
44308+
44309+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44310+ error = -EACCES;
44311+ goto out_unlock;
44312+ }
44313+
44314 if (mode == (mode_t) -1)
44315 mode = inode->i_mode;
44316+
44317+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44318+ error = -EACCES;
44319+ goto out_unlock;
44320+ }
44321+
44322 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44323 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44324 error = notify_change(path.dentry, &newattrs);
44325+
44326+out_unlock:
44327 mutex_unlock(&inode->i_mutex);
44328 mnt_drop_write(path.mnt);
44329 dput_and_out:
44330@@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
44331 return sys_fchmodat(AT_FDCWD, filename, mode);
44332 }
44333
44334-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
44335+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
44336 {
44337 struct inode *inode = dentry->d_inode;
44338 int error;
44339 struct iattr newattrs;
44340
44341+ if (!gr_acl_handle_chown(dentry, mnt))
44342+ return -EACCES;
44343+
44344 newattrs.ia_valid = ATTR_CTIME;
44345 if (user != (uid_t) -1) {
44346 newattrs.ia_valid |= ATTR_UID;
44347@@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
44348 error = mnt_want_write(path.mnt);
44349 if (error)
44350 goto out_release;
44351- error = chown_common(path.dentry, user, group);
44352+ error = chown_common(path.dentry, user, group, path.mnt);
44353 mnt_drop_write(path.mnt);
44354 out_release:
44355 path_put(&path);
44356@@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
44357 error = mnt_want_write(path.mnt);
44358 if (error)
44359 goto out_release;
44360- error = chown_common(path.dentry, user, group);
44361+ error = chown_common(path.dentry, user, group, path.mnt);
44362 mnt_drop_write(path.mnt);
44363 out_release:
44364 path_put(&path);
44365@@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
44366 error = mnt_want_write(path.mnt);
44367 if (error)
44368 goto out_release;
44369- error = chown_common(path.dentry, user, group);
44370+ error = chown_common(path.dentry, user, group, path.mnt);
44371 mnt_drop_write(path.mnt);
44372 out_release:
44373 path_put(&path);
44374@@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
44375 goto out_fput;
44376 dentry = file->f_path.dentry;
44377 audit_inode(NULL, dentry);
44378- error = chown_common(dentry, user, group);
44379+ error = chown_common(dentry, user, group, file->f_path.mnt);
44380 mnt_drop_write(file->f_path.mnt);
44381 out_fput:
44382 fput(file);
44383@@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
44384 if (!IS_ERR(tmp)) {
44385 fd = get_unused_fd_flags(flags);
44386 if (fd >= 0) {
44387- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
44388+ struct file *f;
44389+ /* don't allow to be set by userland */
44390+ flags &= ~FMODE_GREXEC;
44391+ f = do_filp_open(dfd, tmp, flags, mode, 0);
44392 if (IS_ERR(f)) {
44393 put_unused_fd(fd);
44394 fd = PTR_ERR(f);
44395diff -urNp linux-2.6.32.45/fs/partitions/ldm.c linux-2.6.32.45/fs/partitions/ldm.c
44396--- linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
44397+++ linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
44398@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44399 ldm_error ("A VBLK claims to have %d parts.", num);
44400 return false;
44401 }
44402+
44403 if (rec >= num) {
44404 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44405 return false;
44406@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44407 goto found;
44408 }
44409
44410- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44411+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44412 if (!f) {
44413 ldm_crit ("Out of memory.");
44414 return false;
44415diff -urNp linux-2.6.32.45/fs/partitions/mac.c linux-2.6.32.45/fs/partitions/mac.c
44416--- linux-2.6.32.45/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
44417+++ linux-2.6.32.45/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
44418@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
44419 return 0; /* not a MacOS disk */
44420 }
44421 blocks_in_map = be32_to_cpu(part->map_count);
44422+ printk(" [mac]");
44423 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
44424 put_dev_sector(sect);
44425 return 0;
44426 }
44427- printk(" [mac]");
44428 for (slot = 1; slot <= blocks_in_map; ++slot) {
44429 int pos = slot * secsize;
44430 put_dev_sector(sect);
44431diff -urNp linux-2.6.32.45/fs/pipe.c linux-2.6.32.45/fs/pipe.c
44432--- linux-2.6.32.45/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
44433+++ linux-2.6.32.45/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
44434@@ -401,9 +401,9 @@ redo:
44435 }
44436 if (bufs) /* More to do? */
44437 continue;
44438- if (!pipe->writers)
44439+ if (!atomic_read(&pipe->writers))
44440 break;
44441- if (!pipe->waiting_writers) {
44442+ if (!atomic_read(&pipe->waiting_writers)) {
44443 /* syscall merging: Usually we must not sleep
44444 * if O_NONBLOCK is set, or if we got some data.
44445 * But if a writer sleeps in kernel space, then
44446@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
44447 mutex_lock(&inode->i_mutex);
44448 pipe = inode->i_pipe;
44449
44450- if (!pipe->readers) {
44451+ if (!atomic_read(&pipe->readers)) {
44452 send_sig(SIGPIPE, current, 0);
44453 ret = -EPIPE;
44454 goto out;
44455@@ -511,7 +511,7 @@ redo1:
44456 for (;;) {
44457 int bufs;
44458
44459- if (!pipe->readers) {
44460+ if (!atomic_read(&pipe->readers)) {
44461 send_sig(SIGPIPE, current, 0);
44462 if (!ret)
44463 ret = -EPIPE;
44464@@ -597,9 +597,9 @@ redo2:
44465 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44466 do_wakeup = 0;
44467 }
44468- pipe->waiting_writers++;
44469+ atomic_inc(&pipe->waiting_writers);
44470 pipe_wait(pipe);
44471- pipe->waiting_writers--;
44472+ atomic_dec(&pipe->waiting_writers);
44473 }
44474 out:
44475 mutex_unlock(&inode->i_mutex);
44476@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
44477 mask = 0;
44478 if (filp->f_mode & FMODE_READ) {
44479 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44480- if (!pipe->writers && filp->f_version != pipe->w_counter)
44481+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44482 mask |= POLLHUP;
44483 }
44484
44485@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
44486 * Most Unices do not set POLLERR for FIFOs but on Linux they
44487 * behave exactly like pipes for poll().
44488 */
44489- if (!pipe->readers)
44490+ if (!atomic_read(&pipe->readers))
44491 mask |= POLLERR;
44492 }
44493
44494@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
44495
44496 mutex_lock(&inode->i_mutex);
44497 pipe = inode->i_pipe;
44498- pipe->readers -= decr;
44499- pipe->writers -= decw;
44500+ atomic_sub(decr, &pipe->readers);
44501+ atomic_sub(decw, &pipe->writers);
44502
44503- if (!pipe->readers && !pipe->writers) {
44504+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
44505 free_pipe_info(inode);
44506 } else {
44507 wake_up_interruptible_sync(&pipe->wait);
44508@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
44509
44510 if (inode->i_pipe) {
44511 ret = 0;
44512- inode->i_pipe->readers++;
44513+ atomic_inc(&inode->i_pipe->readers);
44514 }
44515
44516 mutex_unlock(&inode->i_mutex);
44517@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
44518
44519 if (inode->i_pipe) {
44520 ret = 0;
44521- inode->i_pipe->writers++;
44522+ atomic_inc(&inode->i_pipe->writers);
44523 }
44524
44525 mutex_unlock(&inode->i_mutex);
44526@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
44527 if (inode->i_pipe) {
44528 ret = 0;
44529 if (filp->f_mode & FMODE_READ)
44530- inode->i_pipe->readers++;
44531+ atomic_inc(&inode->i_pipe->readers);
44532 if (filp->f_mode & FMODE_WRITE)
44533- inode->i_pipe->writers++;
44534+ atomic_inc(&inode->i_pipe->writers);
44535 }
44536
44537 mutex_unlock(&inode->i_mutex);
44538@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
44539 inode->i_pipe = NULL;
44540 }
44541
44542-static struct vfsmount *pipe_mnt __read_mostly;
44543+struct vfsmount *pipe_mnt __read_mostly;
44544 static int pipefs_delete_dentry(struct dentry *dentry)
44545 {
44546 /*
44547@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
44548 goto fail_iput;
44549 inode->i_pipe = pipe;
44550
44551- pipe->readers = pipe->writers = 1;
44552+ atomic_set(&pipe->readers, 1);
44553+ atomic_set(&pipe->writers, 1);
44554 inode->i_fop = &rdwr_pipefifo_fops;
44555
44556 /*
44557diff -urNp linux-2.6.32.45/fs/proc/array.c linux-2.6.32.45/fs/proc/array.c
44558--- linux-2.6.32.45/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
44559+++ linux-2.6.32.45/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
44560@@ -60,6 +60,7 @@
44561 #include <linux/tty.h>
44562 #include <linux/string.h>
44563 #include <linux/mman.h>
44564+#include <linux/grsecurity.h>
44565 #include <linux/proc_fs.h>
44566 #include <linux/ioport.h>
44567 #include <linux/uaccess.h>
44568@@ -321,6 +322,21 @@ static inline void task_context_switch_c
44569 p->nivcsw);
44570 }
44571
44572+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44573+static inline void task_pax(struct seq_file *m, struct task_struct *p)
44574+{
44575+ if (p->mm)
44576+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
44577+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
44578+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
44579+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
44580+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
44581+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
44582+ else
44583+ seq_printf(m, "PaX:\t-----\n");
44584+}
44585+#endif
44586+
44587 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
44588 struct pid *pid, struct task_struct *task)
44589 {
44590@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
44591 task_cap(m, task);
44592 cpuset_task_status_allowed(m, task);
44593 task_context_switch_counts(m, task);
44594+
44595+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44596+ task_pax(m, task);
44597+#endif
44598+
44599+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
44600+ task_grsec_rbac(m, task);
44601+#endif
44602+
44603 return 0;
44604 }
44605
44606+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44607+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44608+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
44609+ _mm->pax_flags & MF_PAX_SEGMEXEC))
44610+#endif
44611+
44612 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
44613 struct pid *pid, struct task_struct *task, int whole)
44614 {
44615@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
44616 cputime_t cutime, cstime, utime, stime;
44617 cputime_t cgtime, gtime;
44618 unsigned long rsslim = 0;
44619- char tcomm[sizeof(task->comm)];
44620+ char tcomm[sizeof(task->comm)] = { 0 };
44621 unsigned long flags;
44622
44623+ pax_track_stack();
44624+
44625 state = *get_task_state(task);
44626 vsize = eip = esp = 0;
44627 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
44628@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
44629 gtime = task_gtime(task);
44630 }
44631
44632+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44633+ if (PAX_RAND_FLAGS(mm)) {
44634+ eip = 0;
44635+ esp = 0;
44636+ wchan = 0;
44637+ }
44638+#endif
44639+#ifdef CONFIG_GRKERNSEC_HIDESYM
44640+ wchan = 0;
44641+ eip =0;
44642+ esp =0;
44643+#endif
44644+
44645 /* scale priority and nice values from timeslices to -20..20 */
44646 /* to make it look like a "normal" Unix priority/nice value */
44647 priority = task_prio(task);
44648@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
44649 vsize,
44650 mm ? get_mm_rss(mm) : 0,
44651 rsslim,
44652+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44653+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
44654+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
44655+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
44656+#else
44657 mm ? (permitted ? mm->start_code : 1) : 0,
44658 mm ? (permitted ? mm->end_code : 1) : 0,
44659 (permitted && mm) ? mm->start_stack : 0,
44660+#endif
44661 esp,
44662 eip,
44663 /* The signal information here is obsolete.
44664@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
44665
44666 return 0;
44667 }
44668+
44669+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
44670+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
44671+{
44672+ u32 curr_ip = 0;
44673+ unsigned long flags;
44674+
44675+ if (lock_task_sighand(task, &flags)) {
44676+ curr_ip = task->signal->curr_ip;
44677+ unlock_task_sighand(task, &flags);
44678+ }
44679+
44680+ return sprintf(buffer, "%pI4\n", &curr_ip);
44681+}
44682+#endif
44683diff -urNp linux-2.6.32.45/fs/proc/base.c linux-2.6.32.45/fs/proc/base.c
44684--- linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:35:30.000000000 -0400
44685+++ linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:34:33.000000000 -0400
44686@@ -102,6 +102,22 @@ struct pid_entry {
44687 union proc_op op;
44688 };
44689
44690+struct getdents_callback {
44691+ struct linux_dirent __user * current_dir;
44692+ struct linux_dirent __user * previous;
44693+ struct file * file;
44694+ int count;
44695+ int error;
44696+};
44697+
44698+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
44699+ loff_t offset, u64 ino, unsigned int d_type)
44700+{
44701+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
44702+ buf->error = -EINVAL;
44703+ return 0;
44704+}
44705+
44706 #define NOD(NAME, MODE, IOP, FOP, OP) { \
44707 .name = (NAME), \
44708 .len = sizeof(NAME) - 1, \
44709@@ -213,6 +229,9 @@ static int check_mem_permission(struct t
44710 if (task == current)
44711 return 0;
44712
44713+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
44714+ return -EPERM;
44715+
44716 /*
44717 * If current is actively ptrace'ing, and would also be
44718 * permitted to freshly attach with ptrace now, permit it.
44719@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
44720 if (!mm->arg_end)
44721 goto out_mm; /* Shh! No looking before we're done */
44722
44723+ if (gr_acl_handle_procpidmem(task))
44724+ goto out_mm;
44725+
44726 len = mm->arg_end - mm->arg_start;
44727
44728 if (len > PAGE_SIZE)
44729@@ -287,12 +309,28 @@ out:
44730 return res;
44731 }
44732
44733+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44734+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44735+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
44736+ _mm->pax_flags & MF_PAX_SEGMEXEC))
44737+#endif
44738+
44739 static int proc_pid_auxv(struct task_struct *task, char *buffer)
44740 {
44741 int res = 0;
44742 struct mm_struct *mm = get_task_mm(task);
44743 if (mm) {
44744 unsigned int nwords = 0;
44745+
44746+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44747+ /* allow if we're currently ptracing this task */
44748+ if (PAX_RAND_FLAGS(mm) &&
44749+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
44750+ mmput(mm);
44751+ return res;
44752+ }
44753+#endif
44754+
44755 do {
44756 nwords += 2;
44757 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
44758@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
44759 }
44760
44761
44762-#ifdef CONFIG_KALLSYMS
44763+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44764 /*
44765 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
44766 * Returns the resolved symbol. If that fails, simply return the address.
44767@@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
44768 }
44769 #endif /* CONFIG_KALLSYMS */
44770
44771-#ifdef CONFIG_STACKTRACE
44772+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44773
44774 #define MAX_STACK_TRACE_DEPTH 64
44775
44776@@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
44777 return count;
44778 }
44779
44780-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
44781+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44782 static int proc_pid_syscall(struct task_struct *task, char *buffer)
44783 {
44784 long nr;
44785@@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
44786 /************************************************************************/
44787
44788 /* permission checks */
44789-static int proc_fd_access_allowed(struct inode *inode)
44790+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
44791 {
44792 struct task_struct *task;
44793 int allowed = 0;
44794@@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
44795 */
44796 task = get_proc_task(inode);
44797 if (task) {
44798- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
44799+ if (log)
44800+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
44801+ else
44802+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
44803 put_task_struct(task);
44804 }
44805 return allowed;
44806@@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
44807 if (!task)
44808 goto out_no_task;
44809
44810+ if (gr_acl_handle_procpidmem(task))
44811+ goto out;
44812+
44813 if (!ptrace_may_access(task, PTRACE_MODE_READ))
44814 goto out;
44815
44816@@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
44817 path_put(&nd->path);
44818
44819 /* Are we allowed to snoop on the tasks file descriptors? */
44820- if (!proc_fd_access_allowed(inode))
44821+ if (!proc_fd_access_allowed(inode,0))
44822 goto out;
44823
44824 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
44825@@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
44826 struct path path;
44827
44828 /* Are we allowed to snoop on the tasks file descriptors? */
44829- if (!proc_fd_access_allowed(inode))
44830- goto out;
44831+ /* logging this is needed for learning on chromium to work properly,
44832+ but we don't want to flood the logs from 'ps' which does a readlink
44833+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
44834+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
44835+ */
44836+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
44837+ if (!proc_fd_access_allowed(inode,0))
44838+ goto out;
44839+ } else {
44840+ if (!proc_fd_access_allowed(inode,1))
44841+ goto out;
44842+ }
44843
44844 error = PROC_I(inode)->op.proc_get_link(inode, &path);
44845 if (error)
44846@@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
44847 rcu_read_lock();
44848 cred = __task_cred(task);
44849 inode->i_uid = cred->euid;
44850+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44851+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44852+#else
44853 inode->i_gid = cred->egid;
44854+#endif
44855 rcu_read_unlock();
44856 }
44857 security_task_to_inode(task, inode);
44858@@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
44859 struct inode *inode = dentry->d_inode;
44860 struct task_struct *task;
44861 const struct cred *cred;
44862+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44863+ const struct cred *tmpcred = current_cred();
44864+#endif
44865
44866 generic_fillattr(inode, stat);
44867
44868@@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
44869 stat->uid = 0;
44870 stat->gid = 0;
44871 task = pid_task(proc_pid(inode), PIDTYPE_PID);
44872+
44873+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
44874+ rcu_read_unlock();
44875+ return -ENOENT;
44876+ }
44877+
44878 if (task) {
44879+ cred = __task_cred(task);
44880+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44881+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
44882+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44883+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
44884+#endif
44885+ ) {
44886+#endif
44887 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
44888+#ifdef CONFIG_GRKERNSEC_PROC_USER
44889+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
44890+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44891+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
44892+#endif
44893 task_dumpable(task)) {
44894- cred = __task_cred(task);
44895 stat->uid = cred->euid;
44896+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44897+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
44898+#else
44899 stat->gid = cred->egid;
44900+#endif
44901 }
44902+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44903+ } else {
44904+ rcu_read_unlock();
44905+ return -ENOENT;
44906+ }
44907+#endif
44908 }
44909 rcu_read_unlock();
44910 return 0;
44911@@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
44912
44913 if (task) {
44914 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
44915+#ifdef CONFIG_GRKERNSEC_PROC_USER
44916+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
44917+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44918+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
44919+#endif
44920 task_dumpable(task)) {
44921 rcu_read_lock();
44922 cred = __task_cred(task);
44923 inode->i_uid = cred->euid;
44924+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44925+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44926+#else
44927 inode->i_gid = cred->egid;
44928+#endif
44929 rcu_read_unlock();
44930 } else {
44931 inode->i_uid = 0;
44932@@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
44933 int fd = proc_fd(inode);
44934
44935 if (task) {
44936- files = get_files_struct(task);
44937+ if (!gr_acl_handle_procpidmem(task))
44938+ files = get_files_struct(task);
44939 put_task_struct(task);
44940 }
44941 if (files) {
44942@@ -1895,12 +1994,22 @@ static const struct file_operations proc
44943 static int proc_fd_permission(struct inode *inode, int mask)
44944 {
44945 int rv;
44946+ struct task_struct *task;
44947
44948 rv = generic_permission(inode, mask, NULL);
44949- if (rv == 0)
44950- return 0;
44951+
44952 if (task_pid(current) == proc_pid(inode))
44953 rv = 0;
44954+
44955+ task = get_proc_task(inode);
44956+ if (task == NULL)
44957+ return rv;
44958+
44959+ if (gr_acl_handle_procpidmem(task))
44960+ rv = -EACCES;
44961+
44962+ put_task_struct(task);
44963+
44964 return rv;
44965 }
44966
44967@@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
44968 if (!task)
44969 goto out_no_task;
44970
44971+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
44972+ goto out;
44973+
44974 /*
44975 * Yes, it does not scale. And it should not. Don't add
44976 * new entries into /proc/<tgid>/ without very good reasons.
44977@@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
44978 if (!task)
44979 goto out_no_task;
44980
44981+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
44982+ goto out;
44983+
44984 ret = 0;
44985 i = filp->f_pos;
44986 switch (i) {
44987@@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
44988 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
44989 void *cookie)
44990 {
44991- char *s = nd_get_link(nd);
44992+ const char *s = nd_get_link(nd);
44993 if (!IS_ERR(s))
44994 __putname(s);
44995 }
44996@@ -2522,7 +2637,7 @@ static const struct pid_entry tgid_base_
44997 #ifdef CONFIG_SCHED_DEBUG
44998 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
44999 #endif
45000-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45001+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45002 INF("syscall", S_IRUSR, proc_pid_syscall),
45003 #endif
45004 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45005@@ -2547,10 +2662,10 @@ static const struct pid_entry tgid_base_
45006 #ifdef CONFIG_SECURITY
45007 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45008 #endif
45009-#ifdef CONFIG_KALLSYMS
45010+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45011 INF("wchan", S_IRUGO, proc_pid_wchan),
45012 #endif
45013-#ifdef CONFIG_STACKTRACE
45014+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45015 ONE("stack", S_IRUSR, proc_pid_stack),
45016 #endif
45017 #ifdef CONFIG_SCHEDSTATS
45018@@ -2580,6 +2695,9 @@ static const struct pid_entry tgid_base_
45019 #ifdef CONFIG_TASK_IO_ACCOUNTING
45020 INF("io", S_IRUSR, proc_tgid_io_accounting),
45021 #endif
45022+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45023+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45024+#endif
45025 };
45026
45027 static int proc_tgid_base_readdir(struct file * filp,
45028@@ -2704,7 +2822,14 @@ static struct dentry *proc_pid_instantia
45029 if (!inode)
45030 goto out;
45031
45032+#ifdef CONFIG_GRKERNSEC_PROC_USER
45033+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45034+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45035+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45036+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45037+#else
45038 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45039+#endif
45040 inode->i_op = &proc_tgid_base_inode_operations;
45041 inode->i_fop = &proc_tgid_base_operations;
45042 inode->i_flags|=S_IMMUTABLE;
45043@@ -2746,7 +2871,11 @@ struct dentry *proc_pid_lookup(struct in
45044 if (!task)
45045 goto out;
45046
45047+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45048+ goto out_put_task;
45049+
45050 result = proc_pid_instantiate(dir, dentry, task, NULL);
45051+out_put_task:
45052 put_task_struct(task);
45053 out:
45054 return result;
45055@@ -2811,6 +2940,11 @@ int proc_pid_readdir(struct file * filp,
45056 {
45057 unsigned int nr;
45058 struct task_struct *reaper;
45059+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45060+ const struct cred *tmpcred = current_cred();
45061+ const struct cred *itercred;
45062+#endif
45063+ filldir_t __filldir = filldir;
45064 struct tgid_iter iter;
45065 struct pid_namespace *ns;
45066
45067@@ -2834,8 +2968,27 @@ int proc_pid_readdir(struct file * filp,
45068 for (iter = next_tgid(ns, iter);
45069 iter.task;
45070 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45071+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45072+ rcu_read_lock();
45073+ itercred = __task_cred(iter.task);
45074+#endif
45075+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45076+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45077+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45078+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45079+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45080+#endif
45081+ )
45082+#endif
45083+ )
45084+ __filldir = &gr_fake_filldir;
45085+ else
45086+ __filldir = filldir;
45087+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45088+ rcu_read_unlock();
45089+#endif
45090 filp->f_pos = iter.tgid + TGID_OFFSET;
45091- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45092+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45093 put_task_struct(iter.task);
45094 goto out;
45095 }
45096@@ -2861,7 +3014,7 @@ static const struct pid_entry tid_base_s
45097 #ifdef CONFIG_SCHED_DEBUG
45098 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45099 #endif
45100-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45101+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45102 INF("syscall", S_IRUSR, proc_pid_syscall),
45103 #endif
45104 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45105@@ -2885,10 +3038,10 @@ static const struct pid_entry tid_base_s
45106 #ifdef CONFIG_SECURITY
45107 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45108 #endif
45109-#ifdef CONFIG_KALLSYMS
45110+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45111 INF("wchan", S_IRUGO, proc_pid_wchan),
45112 #endif
45113-#ifdef CONFIG_STACKTRACE
45114+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45115 ONE("stack", S_IRUSR, proc_pid_stack),
45116 #endif
45117 #ifdef CONFIG_SCHEDSTATS
45118diff -urNp linux-2.6.32.45/fs/proc/cmdline.c linux-2.6.32.45/fs/proc/cmdline.c
45119--- linux-2.6.32.45/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
45120+++ linux-2.6.32.45/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
45121@@ -23,7 +23,11 @@ static const struct file_operations cmdl
45122
45123 static int __init proc_cmdline_init(void)
45124 {
45125+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45126+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45127+#else
45128 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45129+#endif
45130 return 0;
45131 }
45132 module_init(proc_cmdline_init);
45133diff -urNp linux-2.6.32.45/fs/proc/devices.c linux-2.6.32.45/fs/proc/devices.c
45134--- linux-2.6.32.45/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
45135+++ linux-2.6.32.45/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
45136@@ -64,7 +64,11 @@ static const struct file_operations proc
45137
45138 static int __init proc_devices_init(void)
45139 {
45140+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45141+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45142+#else
45143 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45144+#endif
45145 return 0;
45146 }
45147 module_init(proc_devices_init);
45148diff -urNp linux-2.6.32.45/fs/proc/inode.c linux-2.6.32.45/fs/proc/inode.c
45149--- linux-2.6.32.45/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
45150+++ linux-2.6.32.45/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
45151@@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
45152 if (de->mode) {
45153 inode->i_mode = de->mode;
45154 inode->i_uid = de->uid;
45155+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45156+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45157+#else
45158 inode->i_gid = de->gid;
45159+#endif
45160 }
45161 if (de->size)
45162 inode->i_size = de->size;
45163diff -urNp linux-2.6.32.45/fs/proc/internal.h linux-2.6.32.45/fs/proc/internal.h
45164--- linux-2.6.32.45/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
45165+++ linux-2.6.32.45/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
45166@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45167 struct pid *pid, struct task_struct *task);
45168 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45169 struct pid *pid, struct task_struct *task);
45170+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45171+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45172+#endif
45173 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45174
45175 extern const struct file_operations proc_maps_operations;
45176diff -urNp linux-2.6.32.45/fs/proc/Kconfig linux-2.6.32.45/fs/proc/Kconfig
45177--- linux-2.6.32.45/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
45178+++ linux-2.6.32.45/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
45179@@ -30,12 +30,12 @@ config PROC_FS
45180
45181 config PROC_KCORE
45182 bool "/proc/kcore support" if !ARM
45183- depends on PROC_FS && MMU
45184+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45185
45186 config PROC_VMCORE
45187 bool "/proc/vmcore support (EXPERIMENTAL)"
45188- depends on PROC_FS && CRASH_DUMP
45189- default y
45190+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45191+ default n
45192 help
45193 Exports the dump image of crashed kernel in ELF format.
45194
45195@@ -59,8 +59,8 @@ config PROC_SYSCTL
45196 limited in memory.
45197
45198 config PROC_PAGE_MONITOR
45199- default y
45200- depends on PROC_FS && MMU
45201+ default n
45202+ depends on PROC_FS && MMU && !GRKERNSEC
45203 bool "Enable /proc page monitoring" if EMBEDDED
45204 help
45205 Various /proc files exist to monitor process memory utilization:
45206diff -urNp linux-2.6.32.45/fs/proc/kcore.c linux-2.6.32.45/fs/proc/kcore.c
45207--- linux-2.6.32.45/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
45208+++ linux-2.6.32.45/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
45209@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
45210 off_t offset = 0;
45211 struct kcore_list *m;
45212
45213+ pax_track_stack();
45214+
45215 /* setup ELF header */
45216 elf = (struct elfhdr *) bufp;
45217 bufp += sizeof(struct elfhdr);
45218@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
45219 * the addresses in the elf_phdr on our list.
45220 */
45221 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45222- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45223+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45224+ if (tsz > buflen)
45225 tsz = buflen;
45226-
45227+
45228 while (buflen) {
45229 struct kcore_list *m;
45230
45231@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
45232 kfree(elf_buf);
45233 } else {
45234 if (kern_addr_valid(start)) {
45235- unsigned long n;
45236+ char *elf_buf;
45237+ mm_segment_t oldfs;
45238
45239- n = copy_to_user(buffer, (char *)start, tsz);
45240- /*
45241- * We cannot distingush between fault on source
45242- * and fault on destination. When this happens
45243- * we clear too and hope it will trigger the
45244- * EFAULT again.
45245- */
45246- if (n) {
45247- if (clear_user(buffer + tsz - n,
45248- n))
45249+ elf_buf = kmalloc(tsz, GFP_KERNEL);
45250+ if (!elf_buf)
45251+ return -ENOMEM;
45252+ oldfs = get_fs();
45253+ set_fs(KERNEL_DS);
45254+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45255+ set_fs(oldfs);
45256+ if (copy_to_user(buffer, elf_buf, tsz)) {
45257+ kfree(elf_buf);
45258 return -EFAULT;
45259+ }
45260 }
45261+ set_fs(oldfs);
45262+ kfree(elf_buf);
45263 } else {
45264 if (clear_user(buffer, tsz))
45265 return -EFAULT;
45266@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
45267
45268 static int open_kcore(struct inode *inode, struct file *filp)
45269 {
45270+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45271+ return -EPERM;
45272+#endif
45273 if (!capable(CAP_SYS_RAWIO))
45274 return -EPERM;
45275 if (kcore_need_update)
45276diff -urNp linux-2.6.32.45/fs/proc/meminfo.c linux-2.6.32.45/fs/proc/meminfo.c
45277--- linux-2.6.32.45/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
45278+++ linux-2.6.32.45/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
45279@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45280 unsigned long pages[NR_LRU_LISTS];
45281 int lru;
45282
45283+ pax_track_stack();
45284+
45285 /*
45286 * display in kilobytes.
45287 */
45288@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
45289 vmi.used >> 10,
45290 vmi.largest_chunk >> 10
45291 #ifdef CONFIG_MEMORY_FAILURE
45292- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45293+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45294 #endif
45295 );
45296
45297diff -urNp linux-2.6.32.45/fs/proc/nommu.c linux-2.6.32.45/fs/proc/nommu.c
45298--- linux-2.6.32.45/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
45299+++ linux-2.6.32.45/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
45300@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
45301 if (len < 1)
45302 len = 1;
45303 seq_printf(m, "%*c", len, ' ');
45304- seq_path(m, &file->f_path, "");
45305+ seq_path(m, &file->f_path, "\n\\");
45306 }
45307
45308 seq_putc(m, '\n');
45309diff -urNp linux-2.6.32.45/fs/proc/proc_net.c linux-2.6.32.45/fs/proc/proc_net.c
45310--- linux-2.6.32.45/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
45311+++ linux-2.6.32.45/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
45312@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
45313 struct task_struct *task;
45314 struct nsproxy *ns;
45315 struct net *net = NULL;
45316+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45317+ const struct cred *cred = current_cred();
45318+#endif
45319+
45320+#ifdef CONFIG_GRKERNSEC_PROC_USER
45321+ if (cred->fsuid)
45322+ return net;
45323+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45324+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45325+ return net;
45326+#endif
45327
45328 rcu_read_lock();
45329 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45330diff -urNp linux-2.6.32.45/fs/proc/proc_sysctl.c linux-2.6.32.45/fs/proc/proc_sysctl.c
45331--- linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
45332+++ linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
45333@@ -7,6 +7,8 @@
45334 #include <linux/security.h>
45335 #include "internal.h"
45336
45337+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45338+
45339 static const struct dentry_operations proc_sys_dentry_operations;
45340 static const struct file_operations proc_sys_file_operations;
45341 static const struct inode_operations proc_sys_inode_operations;
45342@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
45343 if (!p)
45344 goto out;
45345
45346+ if (gr_handle_sysctl(p, MAY_EXEC))
45347+ goto out;
45348+
45349 err = ERR_PTR(-ENOMEM);
45350 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
45351 if (h)
45352@@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
45353 if (*pos < file->f_pos)
45354 continue;
45355
45356+ if (gr_handle_sysctl(table, 0))
45357+ continue;
45358+
45359 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45360 if (res)
45361 return res;
45362@@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
45363 if (IS_ERR(head))
45364 return PTR_ERR(head);
45365
45366+ if (table && gr_handle_sysctl(table, MAY_EXEC))
45367+ return -ENOENT;
45368+
45369 generic_fillattr(inode, stat);
45370 if (table)
45371 stat->mode = (stat->mode & S_IFMT) | table->mode;
45372diff -urNp linux-2.6.32.45/fs/proc/root.c linux-2.6.32.45/fs/proc/root.c
45373--- linux-2.6.32.45/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
45374+++ linux-2.6.32.45/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
45375@@ -134,7 +134,15 @@ void __init proc_root_init(void)
45376 #ifdef CONFIG_PROC_DEVICETREE
45377 proc_device_tree_init();
45378 #endif
45379+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45380+#ifdef CONFIG_GRKERNSEC_PROC_USER
45381+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45382+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45383+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45384+#endif
45385+#else
45386 proc_mkdir("bus", NULL);
45387+#endif
45388 proc_sys_init();
45389 }
45390
45391diff -urNp linux-2.6.32.45/fs/proc/task_mmu.c linux-2.6.32.45/fs/proc/task_mmu.c
45392--- linux-2.6.32.45/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
45393+++ linux-2.6.32.45/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
45394@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
45395 "VmStk:\t%8lu kB\n"
45396 "VmExe:\t%8lu kB\n"
45397 "VmLib:\t%8lu kB\n"
45398- "VmPTE:\t%8lu kB\n",
45399- hiwater_vm << (PAGE_SHIFT-10),
45400+ "VmPTE:\t%8lu kB\n"
45401+
45402+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45403+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45404+#endif
45405+
45406+ ,hiwater_vm << (PAGE_SHIFT-10),
45407 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45408 mm->locked_vm << (PAGE_SHIFT-10),
45409 hiwater_rss << (PAGE_SHIFT-10),
45410 total_rss << (PAGE_SHIFT-10),
45411 data << (PAGE_SHIFT-10),
45412 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
45413- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
45414+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
45415+
45416+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45417+ , mm->context.user_cs_base, mm->context.user_cs_limit
45418+#endif
45419+
45420+ );
45421 }
45422
45423 unsigned long task_vsize(struct mm_struct *mm)
45424@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
45425 struct proc_maps_private *priv = m->private;
45426 struct vm_area_struct *vma = v;
45427
45428- vma_stop(priv, vma);
45429+ if (!IS_ERR(vma))
45430+ vma_stop(priv, vma);
45431 if (priv->task)
45432 put_task_struct(priv->task);
45433 }
45434@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
45435 return ret;
45436 }
45437
45438+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45439+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45440+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45441+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45442+#endif
45443+
45444 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
45445 {
45446 struct mm_struct *mm = vma->vm_mm;
45447@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
45448 int flags = vma->vm_flags;
45449 unsigned long ino = 0;
45450 unsigned long long pgoff = 0;
45451- unsigned long start;
45452 dev_t dev = 0;
45453 int len;
45454
45455@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
45456 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
45457 }
45458
45459- /* We don't show the stack guard page in /proc/maps */
45460- start = vma->vm_start;
45461- if (vma->vm_flags & VM_GROWSDOWN)
45462- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
45463- start += PAGE_SIZE;
45464-
45465 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
45466- start,
45467+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45468+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
45469+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
45470+#else
45471+ vma->vm_start,
45472 vma->vm_end,
45473+#endif
45474 flags & VM_READ ? 'r' : '-',
45475 flags & VM_WRITE ? 'w' : '-',
45476 flags & VM_EXEC ? 'x' : '-',
45477 flags & VM_MAYSHARE ? 's' : 'p',
45478+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45479+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
45480+#else
45481 pgoff,
45482+#endif
45483 MAJOR(dev), MINOR(dev), ino, &len);
45484
45485 /*
45486@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
45487 */
45488 if (file) {
45489 pad_len_spaces(m, len);
45490- seq_path(m, &file->f_path, "\n");
45491+ seq_path(m, &file->f_path, "\n\\");
45492 } else {
45493 const char *name = arch_vma_name(vma);
45494 if (!name) {
45495@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
45496 if (vma->vm_start <= mm->brk &&
45497 vma->vm_end >= mm->start_brk) {
45498 name = "[heap]";
45499- } else if (vma->vm_start <= mm->start_stack &&
45500- vma->vm_end >= mm->start_stack) {
45501+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
45502+ (vma->vm_start <= mm->start_stack &&
45503+ vma->vm_end >= mm->start_stack)) {
45504 name = "[stack]";
45505 }
45506 } else {
45507@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
45508 };
45509
45510 memset(&mss, 0, sizeof mss);
45511- mss.vma = vma;
45512- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45513- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45514+
45515+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45516+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
45517+#endif
45518+ mss.vma = vma;
45519+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45520+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45521+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45522+ }
45523+#endif
45524
45525 show_map_vma(m, vma);
45526
45527@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
45528 "Swap: %8lu kB\n"
45529 "KernelPageSize: %8lu kB\n"
45530 "MMUPageSize: %8lu kB\n",
45531+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45532+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
45533+#else
45534 (vma->vm_end - vma->vm_start) >> 10,
45535+#endif
45536 mss.resident >> 10,
45537 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
45538 mss.shared_clean >> 10,
45539diff -urNp linux-2.6.32.45/fs/proc/task_nommu.c linux-2.6.32.45/fs/proc/task_nommu.c
45540--- linux-2.6.32.45/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
45541+++ linux-2.6.32.45/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
45542@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
45543 else
45544 bytes += kobjsize(mm);
45545
45546- if (current->fs && current->fs->users > 1)
45547+ if (current->fs && atomic_read(&current->fs->users) > 1)
45548 sbytes += kobjsize(current->fs);
45549 else
45550 bytes += kobjsize(current->fs);
45551@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
45552 if (len < 1)
45553 len = 1;
45554 seq_printf(m, "%*c", len, ' ');
45555- seq_path(m, &file->f_path, "");
45556+ seq_path(m, &file->f_path, "\n\\");
45557 }
45558
45559 seq_putc(m, '\n');
45560diff -urNp linux-2.6.32.45/fs/readdir.c linux-2.6.32.45/fs/readdir.c
45561--- linux-2.6.32.45/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
45562+++ linux-2.6.32.45/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
45563@@ -16,6 +16,7 @@
45564 #include <linux/security.h>
45565 #include <linux/syscalls.h>
45566 #include <linux/unistd.h>
45567+#include <linux/namei.h>
45568
45569 #include <asm/uaccess.h>
45570
45571@@ -67,6 +68,7 @@ struct old_linux_dirent {
45572
45573 struct readdir_callback {
45574 struct old_linux_dirent __user * dirent;
45575+ struct file * file;
45576 int result;
45577 };
45578
45579@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
45580 buf->result = -EOVERFLOW;
45581 return -EOVERFLOW;
45582 }
45583+
45584+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45585+ return 0;
45586+
45587 buf->result++;
45588 dirent = buf->dirent;
45589 if (!access_ok(VERIFY_WRITE, dirent,
45590@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
45591
45592 buf.result = 0;
45593 buf.dirent = dirent;
45594+ buf.file = file;
45595
45596 error = vfs_readdir(file, fillonedir, &buf);
45597 if (buf.result)
45598@@ -142,6 +149,7 @@ struct linux_dirent {
45599 struct getdents_callback {
45600 struct linux_dirent __user * current_dir;
45601 struct linux_dirent __user * previous;
45602+ struct file * file;
45603 int count;
45604 int error;
45605 };
45606@@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
45607 buf->error = -EOVERFLOW;
45608 return -EOVERFLOW;
45609 }
45610+
45611+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45612+ return 0;
45613+
45614 dirent = buf->previous;
45615 if (dirent) {
45616 if (__put_user(offset, &dirent->d_off))
45617@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
45618 buf.previous = NULL;
45619 buf.count = count;
45620 buf.error = 0;
45621+ buf.file = file;
45622
45623 error = vfs_readdir(file, filldir, &buf);
45624 if (error >= 0)
45625@@ -228,6 +241,7 @@ out:
45626 struct getdents_callback64 {
45627 struct linux_dirent64 __user * current_dir;
45628 struct linux_dirent64 __user * previous;
45629+ struct file *file;
45630 int count;
45631 int error;
45632 };
45633@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
45634 buf->error = -EINVAL; /* only used if we fail.. */
45635 if (reclen > buf->count)
45636 return -EINVAL;
45637+
45638+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45639+ return 0;
45640+
45641 dirent = buf->previous;
45642 if (dirent) {
45643 if (__put_user(offset, &dirent->d_off))
45644@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
45645
45646 buf.current_dir = dirent;
45647 buf.previous = NULL;
45648+ buf.file = file;
45649 buf.count = count;
45650 buf.error = 0;
45651
45652diff -urNp linux-2.6.32.45/fs/reiserfs/dir.c linux-2.6.32.45/fs/reiserfs/dir.c
45653--- linux-2.6.32.45/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
45654+++ linux-2.6.32.45/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
45655@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
45656 struct reiserfs_dir_entry de;
45657 int ret = 0;
45658
45659+ pax_track_stack();
45660+
45661 reiserfs_write_lock(inode->i_sb);
45662
45663 reiserfs_check_lock_depth(inode->i_sb, "readdir");
45664diff -urNp linux-2.6.32.45/fs/reiserfs/do_balan.c linux-2.6.32.45/fs/reiserfs/do_balan.c
45665--- linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
45666+++ linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
45667@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
45668 return;
45669 }
45670
45671- atomic_inc(&(fs_generation(tb->tb_sb)));
45672+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
45673 do_balance_starts(tb);
45674
45675 /* balance leaf returns 0 except if combining L R and S into
45676diff -urNp linux-2.6.32.45/fs/reiserfs/item_ops.c linux-2.6.32.45/fs/reiserfs/item_ops.c
45677--- linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
45678+++ linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
45679@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
45680 vi->vi_index, vi->vi_type, vi->vi_ih);
45681 }
45682
45683-static struct item_operations stat_data_ops = {
45684+static const struct item_operations stat_data_ops = {
45685 .bytes_number = sd_bytes_number,
45686 .decrement_key = sd_decrement_key,
45687 .is_left_mergeable = sd_is_left_mergeable,
45688@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
45689 vi->vi_index, vi->vi_type, vi->vi_ih);
45690 }
45691
45692-static struct item_operations direct_ops = {
45693+static const struct item_operations direct_ops = {
45694 .bytes_number = direct_bytes_number,
45695 .decrement_key = direct_decrement_key,
45696 .is_left_mergeable = direct_is_left_mergeable,
45697@@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
45698 vi->vi_index, vi->vi_type, vi->vi_ih);
45699 }
45700
45701-static struct item_operations indirect_ops = {
45702+static const struct item_operations indirect_ops = {
45703 .bytes_number = indirect_bytes_number,
45704 .decrement_key = indirect_decrement_key,
45705 .is_left_mergeable = indirect_is_left_mergeable,
45706@@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
45707 printk("\n");
45708 }
45709
45710-static struct item_operations direntry_ops = {
45711+static const struct item_operations direntry_ops = {
45712 .bytes_number = direntry_bytes_number,
45713 .decrement_key = direntry_decrement_key,
45714 .is_left_mergeable = direntry_is_left_mergeable,
45715@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
45716 "Invalid item type observed, run fsck ASAP");
45717 }
45718
45719-static struct item_operations errcatch_ops = {
45720+static const struct item_operations errcatch_ops = {
45721 errcatch_bytes_number,
45722 errcatch_decrement_key,
45723 errcatch_is_left_mergeable,
45724@@ -746,7 +746,7 @@ static struct item_operations errcatch_o
45725 #error Item types must use disk-format assigned values.
45726 #endif
45727
45728-struct item_operations *item_ops[TYPE_ANY + 1] = {
45729+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
45730 &stat_data_ops,
45731 &indirect_ops,
45732 &direct_ops,
45733diff -urNp linux-2.6.32.45/fs/reiserfs/journal.c linux-2.6.32.45/fs/reiserfs/journal.c
45734--- linux-2.6.32.45/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
45735+++ linux-2.6.32.45/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
45736@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
45737 struct buffer_head *bh;
45738 int i, j;
45739
45740+ pax_track_stack();
45741+
45742 bh = __getblk(dev, block, bufsize);
45743 if (buffer_uptodate(bh))
45744 return (bh);
45745diff -urNp linux-2.6.32.45/fs/reiserfs/namei.c linux-2.6.32.45/fs/reiserfs/namei.c
45746--- linux-2.6.32.45/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
45747+++ linux-2.6.32.45/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
45748@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
45749 unsigned long savelink = 1;
45750 struct timespec ctime;
45751
45752+ pax_track_stack();
45753+
45754 /* three balancings: (1) old name removal, (2) new name insertion
45755 and (3) maybe "save" link insertion
45756 stat data updates: (1) old directory,
45757diff -urNp linux-2.6.32.45/fs/reiserfs/procfs.c linux-2.6.32.45/fs/reiserfs/procfs.c
45758--- linux-2.6.32.45/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
45759+++ linux-2.6.32.45/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
45760@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
45761 "SMALL_TAILS " : "NO_TAILS ",
45762 replay_only(sb) ? "REPLAY_ONLY " : "",
45763 convert_reiserfs(sb) ? "CONV " : "",
45764- atomic_read(&r->s_generation_counter),
45765+ atomic_read_unchecked(&r->s_generation_counter),
45766 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
45767 SF(s_do_balance), SF(s_unneeded_left_neighbor),
45768 SF(s_good_search_by_key_reada), SF(s_bmaps),
45769@@ -309,6 +309,8 @@ static int show_journal(struct seq_file
45770 struct journal_params *jp = &rs->s_v1.s_journal;
45771 char b[BDEVNAME_SIZE];
45772
45773+ pax_track_stack();
45774+
45775 seq_printf(m, /* on-disk fields */
45776 "jp_journal_1st_block: \t%i\n"
45777 "jp_journal_dev: \t%s[%x]\n"
45778diff -urNp linux-2.6.32.45/fs/reiserfs/stree.c linux-2.6.32.45/fs/reiserfs/stree.c
45779--- linux-2.6.32.45/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
45780+++ linux-2.6.32.45/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
45781@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
45782 int iter = 0;
45783 #endif
45784
45785+ pax_track_stack();
45786+
45787 BUG_ON(!th->t_trans_id);
45788
45789 init_tb_struct(th, &s_del_balance, sb, path,
45790@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
45791 int retval;
45792 int quota_cut_bytes = 0;
45793
45794+ pax_track_stack();
45795+
45796 BUG_ON(!th->t_trans_id);
45797
45798 le_key2cpu_key(&cpu_key, key);
45799@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
45800 int quota_cut_bytes;
45801 loff_t tail_pos = 0;
45802
45803+ pax_track_stack();
45804+
45805 BUG_ON(!th->t_trans_id);
45806
45807 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
45808@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
45809 int retval;
45810 int fs_gen;
45811
45812+ pax_track_stack();
45813+
45814 BUG_ON(!th->t_trans_id);
45815
45816 fs_gen = get_generation(inode->i_sb);
45817@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
45818 int fs_gen = 0;
45819 int quota_bytes = 0;
45820
45821+ pax_track_stack();
45822+
45823 BUG_ON(!th->t_trans_id);
45824
45825 if (inode) { /* Do we count quotas for item? */
45826diff -urNp linux-2.6.32.45/fs/reiserfs/super.c linux-2.6.32.45/fs/reiserfs/super.c
45827--- linux-2.6.32.45/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
45828+++ linux-2.6.32.45/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
45829@@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
45830 {.option_name = NULL}
45831 };
45832
45833+ pax_track_stack();
45834+
45835 *blocks = 0;
45836 if (!options || !*options)
45837 /* use default configuration: create tails, journaling on, no
45838diff -urNp linux-2.6.32.45/fs/select.c linux-2.6.32.45/fs/select.c
45839--- linux-2.6.32.45/fs/select.c 2011-03-27 14:31:47.000000000 -0400
45840+++ linux-2.6.32.45/fs/select.c 2011-05-16 21:46:57.000000000 -0400
45841@@ -20,6 +20,7 @@
45842 #include <linux/module.h>
45843 #include <linux/slab.h>
45844 #include <linux/poll.h>
45845+#include <linux/security.h>
45846 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
45847 #include <linux/file.h>
45848 #include <linux/fdtable.h>
45849@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
45850 int retval, i, timed_out = 0;
45851 unsigned long slack = 0;
45852
45853+ pax_track_stack();
45854+
45855 rcu_read_lock();
45856 retval = max_select_fd(n, fds);
45857 rcu_read_unlock();
45858@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
45859 /* Allocate small arguments on the stack to save memory and be faster */
45860 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
45861
45862+ pax_track_stack();
45863+
45864 ret = -EINVAL;
45865 if (n < 0)
45866 goto out_nofds;
45867@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
45868 struct poll_list *walk = head;
45869 unsigned long todo = nfds;
45870
45871+ pax_track_stack();
45872+
45873+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
45874 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
45875 return -EINVAL;
45876
45877diff -urNp linux-2.6.32.45/fs/seq_file.c linux-2.6.32.45/fs/seq_file.c
45878--- linux-2.6.32.45/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
45879+++ linux-2.6.32.45/fs/seq_file.c 2011-08-23 21:22:32.000000000 -0400
45880@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
45881 return 0;
45882 }
45883 if (!m->buf) {
45884- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
45885+ m->size = PAGE_SIZE;
45886+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
45887 if (!m->buf)
45888 return -ENOMEM;
45889 }
45890@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
45891 Eoverflow:
45892 m->op->stop(m, p);
45893 kfree(m->buf);
45894- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
45895+ m->size <<= 1;
45896+ m->buf = kmalloc(m->size, GFP_KERNEL);
45897 return !m->buf ? -ENOMEM : -EAGAIN;
45898 }
45899
45900@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
45901 m->version = file->f_version;
45902 /* grab buffer if we didn't have one */
45903 if (!m->buf) {
45904- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
45905+ m->size = PAGE_SIZE;
45906+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
45907 if (!m->buf)
45908 goto Enomem;
45909 }
45910@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
45911 goto Fill;
45912 m->op->stop(m, p);
45913 kfree(m->buf);
45914- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
45915+ m->size <<= 1;
45916+ m->buf = kmalloc(m->size, GFP_KERNEL);
45917 if (!m->buf)
45918 goto Enomem;
45919 m->count = 0;
45920@@ -551,7 +555,7 @@ static void single_stop(struct seq_file
45921 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
45922 void *data)
45923 {
45924- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
45925+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
45926 int res = -ENOMEM;
45927
45928 if (op) {
45929diff -urNp linux-2.6.32.45/fs/smbfs/proc.c linux-2.6.32.45/fs/smbfs/proc.c
45930--- linux-2.6.32.45/fs/smbfs/proc.c 2011-03-27 14:31:47.000000000 -0400
45931+++ linux-2.6.32.45/fs/smbfs/proc.c 2011-08-05 20:33:55.000000000 -0400
45932@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *
45933
45934 out:
45935 if (server->local_nls != NULL && server->remote_nls != NULL)
45936- server->ops->convert = convert_cp;
45937+ *(void **)&server->ops->convert = convert_cp;
45938 else
45939- server->ops->convert = convert_memcpy;
45940+ *(void **)&server->ops->convert = convert_memcpy;
45941
45942 smb_unlock_server(server);
45943 return n;
45944@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server,
45945
45946 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
45947 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
45948- server->ops->getattr = smb_proc_getattr_core;
45949+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
45950 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
45951- server->ops->getattr = smb_proc_getattr_ff;
45952+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
45953 }
45954
45955 /* Decode server capabilities */
45956@@ -3439,7 +3439,7 @@ out:
45957 static void
45958 install_ops(struct smb_ops *dst, struct smb_ops *src)
45959 {
45960- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
45961+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
45962 }
45963
45964 /* < LANMAN2 */
45965diff -urNp linux-2.6.32.45/fs/smbfs/symlink.c linux-2.6.32.45/fs/smbfs/symlink.c
45966--- linux-2.6.32.45/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
45967+++ linux-2.6.32.45/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
45968@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
45969
45970 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45971 {
45972- char *s = nd_get_link(nd);
45973+ const char *s = nd_get_link(nd);
45974 if (!IS_ERR(s))
45975 __putname(s);
45976 }
45977diff -urNp linux-2.6.32.45/fs/splice.c linux-2.6.32.45/fs/splice.c
45978--- linux-2.6.32.45/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
45979+++ linux-2.6.32.45/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
45980@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
45981 pipe_lock(pipe);
45982
45983 for (;;) {
45984- if (!pipe->readers) {
45985+ if (!atomic_read(&pipe->readers)) {
45986 send_sig(SIGPIPE, current, 0);
45987 if (!ret)
45988 ret = -EPIPE;
45989@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
45990 do_wakeup = 0;
45991 }
45992
45993- pipe->waiting_writers++;
45994+ atomic_inc(&pipe->waiting_writers);
45995 pipe_wait(pipe);
45996- pipe->waiting_writers--;
45997+ atomic_dec(&pipe->waiting_writers);
45998 }
45999
46000 pipe_unlock(pipe);
46001@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
46002 .spd_release = spd_release_page,
46003 };
46004
46005+ pax_track_stack();
46006+
46007 index = *ppos >> PAGE_CACHE_SHIFT;
46008 loff = *ppos & ~PAGE_CACHE_MASK;
46009 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46010@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
46011 old_fs = get_fs();
46012 set_fs(get_ds());
46013 /* The cast to a user pointer is valid due to the set_fs() */
46014- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46015+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
46016 set_fs(old_fs);
46017
46018 return res;
46019@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
46020 old_fs = get_fs();
46021 set_fs(get_ds());
46022 /* The cast to a user pointer is valid due to the set_fs() */
46023- res = vfs_write(file, (const char __user *)buf, count, &pos);
46024+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
46025 set_fs(old_fs);
46026
46027 return res;
46028@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
46029 .spd_release = spd_release_page,
46030 };
46031
46032+ pax_track_stack();
46033+
46034 index = *ppos >> PAGE_CACHE_SHIFT;
46035 offset = *ppos & ~PAGE_CACHE_MASK;
46036 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46037@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
46038 goto err;
46039
46040 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46041- vec[i].iov_base = (void __user *) page_address(page);
46042+ vec[i].iov_base = (__force void __user *) page_address(page);
46043 vec[i].iov_len = this_len;
46044 pages[i] = page;
46045 spd.nr_pages++;
46046@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46047 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46048 {
46049 while (!pipe->nrbufs) {
46050- if (!pipe->writers)
46051+ if (!atomic_read(&pipe->writers))
46052 return 0;
46053
46054- if (!pipe->waiting_writers && sd->num_spliced)
46055+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46056 return 0;
46057
46058 if (sd->flags & SPLICE_F_NONBLOCK)
46059@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
46060 * out of the pipe right after the splice_to_pipe(). So set
46061 * PIPE_READERS appropriately.
46062 */
46063- pipe->readers = 1;
46064+ atomic_set(&pipe->readers, 1);
46065
46066 current->splice_pipe = pipe;
46067 }
46068@@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
46069 .spd_release = spd_release_page,
46070 };
46071
46072+ pax_track_stack();
46073+
46074 pipe = pipe_info(file->f_path.dentry->d_inode);
46075 if (!pipe)
46076 return -EBADF;
46077@@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
46078 ret = -ERESTARTSYS;
46079 break;
46080 }
46081- if (!pipe->writers)
46082+ if (!atomic_read(&pipe->writers))
46083 break;
46084- if (!pipe->waiting_writers) {
46085+ if (!atomic_read(&pipe->waiting_writers)) {
46086 if (flags & SPLICE_F_NONBLOCK) {
46087 ret = -EAGAIN;
46088 break;
46089@@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
46090 pipe_lock(pipe);
46091
46092 while (pipe->nrbufs >= PIPE_BUFFERS) {
46093- if (!pipe->readers) {
46094+ if (!atomic_read(&pipe->readers)) {
46095 send_sig(SIGPIPE, current, 0);
46096 ret = -EPIPE;
46097 break;
46098@@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
46099 ret = -ERESTARTSYS;
46100 break;
46101 }
46102- pipe->waiting_writers++;
46103+ atomic_inc(&pipe->waiting_writers);
46104 pipe_wait(pipe);
46105- pipe->waiting_writers--;
46106+ atomic_dec(&pipe->waiting_writers);
46107 }
46108
46109 pipe_unlock(pipe);
46110@@ -1785,14 +1791,14 @@ retry:
46111 pipe_double_lock(ipipe, opipe);
46112
46113 do {
46114- if (!opipe->readers) {
46115+ if (!atomic_read(&opipe->readers)) {
46116 send_sig(SIGPIPE, current, 0);
46117 if (!ret)
46118 ret = -EPIPE;
46119 break;
46120 }
46121
46122- if (!ipipe->nrbufs && !ipipe->writers)
46123+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46124 break;
46125
46126 /*
46127@@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
46128 pipe_double_lock(ipipe, opipe);
46129
46130 do {
46131- if (!opipe->readers) {
46132+ if (!atomic_read(&opipe->readers)) {
46133 send_sig(SIGPIPE, current, 0);
46134 if (!ret)
46135 ret = -EPIPE;
46136@@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
46137 * return EAGAIN if we have the potential of some data in the
46138 * future, otherwise just return 0
46139 */
46140- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46141+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46142 ret = -EAGAIN;
46143
46144 pipe_unlock(ipipe);
46145diff -urNp linux-2.6.32.45/fs/sysfs/file.c linux-2.6.32.45/fs/sysfs/file.c
46146--- linux-2.6.32.45/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
46147+++ linux-2.6.32.45/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
46148@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46149
46150 struct sysfs_open_dirent {
46151 atomic_t refcnt;
46152- atomic_t event;
46153+ atomic_unchecked_t event;
46154 wait_queue_head_t poll;
46155 struct list_head buffers; /* goes through sysfs_buffer.list */
46156 };
46157@@ -53,7 +53,7 @@ struct sysfs_buffer {
46158 size_t count;
46159 loff_t pos;
46160 char * page;
46161- struct sysfs_ops * ops;
46162+ const struct sysfs_ops * ops;
46163 struct mutex mutex;
46164 int needs_read_fill;
46165 int event;
46166@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
46167 {
46168 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46169 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46170- struct sysfs_ops * ops = buffer->ops;
46171+ const struct sysfs_ops * ops = buffer->ops;
46172 int ret = 0;
46173 ssize_t count;
46174
46175@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
46176 if (!sysfs_get_active_two(attr_sd))
46177 return -ENODEV;
46178
46179- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46180+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46181 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46182
46183 sysfs_put_active_two(attr_sd);
46184@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
46185 {
46186 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46187 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46188- struct sysfs_ops * ops = buffer->ops;
46189+ const struct sysfs_ops * ops = buffer->ops;
46190 int rc;
46191
46192 /* need attr_sd for attr and ops, its parent for kobj */
46193@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
46194 return -ENOMEM;
46195
46196 atomic_set(&new_od->refcnt, 0);
46197- atomic_set(&new_od->event, 1);
46198+ atomic_set_unchecked(&new_od->event, 1);
46199 init_waitqueue_head(&new_od->poll);
46200 INIT_LIST_HEAD(&new_od->buffers);
46201 goto retry;
46202@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
46203 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
46204 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46205 struct sysfs_buffer *buffer;
46206- struct sysfs_ops *ops;
46207+ const struct sysfs_ops *ops;
46208 int error = -EACCES;
46209 char *p;
46210
46211@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
46212
46213 sysfs_put_active_two(attr_sd);
46214
46215- if (buffer->event != atomic_read(&od->event))
46216+ if (buffer->event != atomic_read_unchecked(&od->event))
46217 goto trigger;
46218
46219 return DEFAULT_POLLMASK;
46220@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
46221
46222 od = sd->s_attr.open;
46223 if (od) {
46224- atomic_inc(&od->event);
46225+ atomic_inc_unchecked(&od->event);
46226 wake_up_interruptible(&od->poll);
46227 }
46228
46229diff -urNp linux-2.6.32.45/fs/sysfs/mount.c linux-2.6.32.45/fs/sysfs/mount.c
46230--- linux-2.6.32.45/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
46231+++ linux-2.6.32.45/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
46232@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46233 .s_name = "",
46234 .s_count = ATOMIC_INIT(1),
46235 .s_flags = SYSFS_DIR,
46236+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46237+ .s_mode = S_IFDIR | S_IRWXU,
46238+#else
46239 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46240+#endif
46241 .s_ino = 1,
46242 };
46243
46244diff -urNp linux-2.6.32.45/fs/sysfs/symlink.c linux-2.6.32.45/fs/sysfs/symlink.c
46245--- linux-2.6.32.45/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46246+++ linux-2.6.32.45/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46247@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
46248
46249 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46250 {
46251- char *page = nd_get_link(nd);
46252+ const char *page = nd_get_link(nd);
46253 if (!IS_ERR(page))
46254 free_page((unsigned long)page);
46255 }
46256diff -urNp linux-2.6.32.45/fs/udf/balloc.c linux-2.6.32.45/fs/udf/balloc.c
46257--- linux-2.6.32.45/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
46258+++ linux-2.6.32.45/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
46259@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
46260
46261 mutex_lock(&sbi->s_alloc_mutex);
46262 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46263- if (bloc->logicalBlockNum < 0 ||
46264- (bloc->logicalBlockNum + count) >
46265- partmap->s_partition_len) {
46266+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46267 udf_debug("%d < %d || %d + %d > %d\n",
46268 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
46269 count, partmap->s_partition_len);
46270@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
46271
46272 mutex_lock(&sbi->s_alloc_mutex);
46273 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46274- if (bloc->logicalBlockNum < 0 ||
46275- (bloc->logicalBlockNum + count) >
46276- partmap->s_partition_len) {
46277+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46278 udf_debug("%d < %d || %d + %d > %d\n",
46279 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
46280 partmap->s_partition_len);
46281diff -urNp linux-2.6.32.45/fs/udf/inode.c linux-2.6.32.45/fs/udf/inode.c
46282--- linux-2.6.32.45/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
46283+++ linux-2.6.32.45/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
46284@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
46285 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46286 int lastblock = 0;
46287
46288+ pax_track_stack();
46289+
46290 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46291 prev_epos.block = iinfo->i_location;
46292 prev_epos.bh = NULL;
46293diff -urNp linux-2.6.32.45/fs/udf/misc.c linux-2.6.32.45/fs/udf/misc.c
46294--- linux-2.6.32.45/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
46295+++ linux-2.6.32.45/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
46296@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46297
46298 u8 udf_tag_checksum(const struct tag *t)
46299 {
46300- u8 *data = (u8 *)t;
46301+ const u8 *data = (const u8 *)t;
46302 u8 checksum = 0;
46303 int i;
46304 for (i = 0; i < sizeof(struct tag); ++i)
46305diff -urNp linux-2.6.32.45/fs/utimes.c linux-2.6.32.45/fs/utimes.c
46306--- linux-2.6.32.45/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
46307+++ linux-2.6.32.45/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
46308@@ -1,6 +1,7 @@
46309 #include <linux/compiler.h>
46310 #include <linux/file.h>
46311 #include <linux/fs.h>
46312+#include <linux/security.h>
46313 #include <linux/linkage.h>
46314 #include <linux/mount.h>
46315 #include <linux/namei.h>
46316@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46317 goto mnt_drop_write_and_out;
46318 }
46319 }
46320+
46321+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46322+ error = -EACCES;
46323+ goto mnt_drop_write_and_out;
46324+ }
46325+
46326 mutex_lock(&inode->i_mutex);
46327 error = notify_change(path->dentry, &newattrs);
46328 mutex_unlock(&inode->i_mutex);
46329diff -urNp linux-2.6.32.45/fs/xattr_acl.c linux-2.6.32.45/fs/xattr_acl.c
46330--- linux-2.6.32.45/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
46331+++ linux-2.6.32.45/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
46332@@ -17,8 +17,8 @@
46333 struct posix_acl *
46334 posix_acl_from_xattr(const void *value, size_t size)
46335 {
46336- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46337- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46338+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46339+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46340 int count;
46341 struct posix_acl *acl;
46342 struct posix_acl_entry *acl_e;
46343diff -urNp linux-2.6.32.45/fs/xattr.c linux-2.6.32.45/fs/xattr.c
46344--- linux-2.6.32.45/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
46345+++ linux-2.6.32.45/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
46346@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46347 * Extended attribute SET operations
46348 */
46349 static long
46350-setxattr(struct dentry *d, const char __user *name, const void __user *value,
46351+setxattr(struct path *path, const char __user *name, const void __user *value,
46352 size_t size, int flags)
46353 {
46354 int error;
46355@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
46356 return PTR_ERR(kvalue);
46357 }
46358
46359- error = vfs_setxattr(d, kname, kvalue, size, flags);
46360+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46361+ error = -EACCES;
46362+ goto out;
46363+ }
46364+
46365+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46366+out:
46367 kfree(kvalue);
46368 return error;
46369 }
46370@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46371 return error;
46372 error = mnt_want_write(path.mnt);
46373 if (!error) {
46374- error = setxattr(path.dentry, name, value, size, flags);
46375+ error = setxattr(&path, name, value, size, flags);
46376 mnt_drop_write(path.mnt);
46377 }
46378 path_put(&path);
46379@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46380 return error;
46381 error = mnt_want_write(path.mnt);
46382 if (!error) {
46383- error = setxattr(path.dentry, name, value, size, flags);
46384+ error = setxattr(&path, name, value, size, flags);
46385 mnt_drop_write(path.mnt);
46386 }
46387 path_put(&path);
46388@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46389 const void __user *,value, size_t, size, int, flags)
46390 {
46391 struct file *f;
46392- struct dentry *dentry;
46393 int error = -EBADF;
46394
46395 f = fget(fd);
46396 if (!f)
46397 return error;
46398- dentry = f->f_path.dentry;
46399- audit_inode(NULL, dentry);
46400+ audit_inode(NULL, f->f_path.dentry);
46401 error = mnt_want_write_file(f);
46402 if (!error) {
46403- error = setxattr(dentry, name, value, size, flags);
46404+ error = setxattr(&f->f_path, name, value, size, flags);
46405 mnt_drop_write(f->f_path.mnt);
46406 }
46407 fput(f);
46408diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c
46409--- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
46410+++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
46411@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
46412 xfs_fsop_geom_t fsgeo;
46413 int error;
46414
46415+ memset(&fsgeo, 0, sizeof(fsgeo));
46416 error = xfs_fs_geometry(mp, &fsgeo, 3);
46417 if (error)
46418 return -error;
46419diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c
46420--- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
46421+++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
46422@@ -134,7 +134,7 @@ xfs_find_handle(
46423 }
46424
46425 error = -EFAULT;
46426- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46427+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46428 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46429 goto out_put;
46430
46431@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
46432 if (IS_ERR(dentry))
46433 return PTR_ERR(dentry);
46434
46435- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
46436+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
46437 if (!kbuf)
46438 goto out_dput;
46439
46440@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
46441 xfs_mount_t *mp,
46442 void __user *arg)
46443 {
46444- xfs_fsop_geom_t fsgeo;
46445+ xfs_fsop_geom_t fsgeo;
46446 int error;
46447
46448 error = xfs_fs_geometry(mp, &fsgeo, 3);
46449diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c
46450--- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
46451+++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
46452@@ -468,7 +468,7 @@ xfs_vn_put_link(
46453 struct nameidata *nd,
46454 void *p)
46455 {
46456- char *s = nd_get_link(nd);
46457+ const char *s = nd_get_link(nd);
46458
46459 if (!IS_ERR(s))
46460 kfree(s);
46461diff -urNp linux-2.6.32.45/fs/xfs/xfs_bmap.c linux-2.6.32.45/fs/xfs/xfs_bmap.c
46462--- linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
46463+++ linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
46464@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
46465 int nmap,
46466 int ret_nmap);
46467 #else
46468-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46469+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46470 #endif /* DEBUG */
46471
46472 #if defined(XFS_RW_TRACE)
46473diff -urNp linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c
46474--- linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
46475+++ linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
46476@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
46477 }
46478
46479 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46480- if (filldir(dirent, sfep->name, sfep->namelen,
46481+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46482+ char name[sfep->namelen];
46483+ memcpy(name, sfep->name, sfep->namelen);
46484+ if (filldir(dirent, name, sfep->namelen,
46485+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
46486+ *offset = off & 0x7fffffff;
46487+ return 0;
46488+ }
46489+ } else if (filldir(dirent, sfep->name, sfep->namelen,
46490 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46491 *offset = off & 0x7fffffff;
46492 return 0;
46493diff -urNp linux-2.6.32.45/grsecurity/gracl_alloc.c linux-2.6.32.45/grsecurity/gracl_alloc.c
46494--- linux-2.6.32.45/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
46495+++ linux-2.6.32.45/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
46496@@ -0,0 +1,105 @@
46497+#include <linux/kernel.h>
46498+#include <linux/mm.h>
46499+#include <linux/slab.h>
46500+#include <linux/vmalloc.h>
46501+#include <linux/gracl.h>
46502+#include <linux/grsecurity.h>
46503+
46504+static unsigned long alloc_stack_next = 1;
46505+static unsigned long alloc_stack_size = 1;
46506+static void **alloc_stack;
46507+
46508+static __inline__ int
46509+alloc_pop(void)
46510+{
46511+ if (alloc_stack_next == 1)
46512+ return 0;
46513+
46514+ kfree(alloc_stack[alloc_stack_next - 2]);
46515+
46516+ alloc_stack_next--;
46517+
46518+ return 1;
46519+}
46520+
46521+static __inline__ int
46522+alloc_push(void *buf)
46523+{
46524+ if (alloc_stack_next >= alloc_stack_size)
46525+ return 1;
46526+
46527+ alloc_stack[alloc_stack_next - 1] = buf;
46528+
46529+ alloc_stack_next++;
46530+
46531+ return 0;
46532+}
46533+
46534+void *
46535+acl_alloc(unsigned long len)
46536+{
46537+ void *ret = NULL;
46538+
46539+ if (!len || len > PAGE_SIZE)
46540+ goto out;
46541+
46542+ ret = kmalloc(len, GFP_KERNEL);
46543+
46544+ if (ret) {
46545+ if (alloc_push(ret)) {
46546+ kfree(ret);
46547+ ret = NULL;
46548+ }
46549+ }
46550+
46551+out:
46552+ return ret;
46553+}
46554+
46555+void *
46556+acl_alloc_num(unsigned long num, unsigned long len)
46557+{
46558+ if (!len || (num > (PAGE_SIZE / len)))
46559+ return NULL;
46560+
46561+ return acl_alloc(num * len);
46562+}
46563+
46564+void
46565+acl_free_all(void)
46566+{
46567+ if (gr_acl_is_enabled() || !alloc_stack)
46568+ return;
46569+
46570+ while (alloc_pop()) ;
46571+
46572+ if (alloc_stack) {
46573+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
46574+ kfree(alloc_stack);
46575+ else
46576+ vfree(alloc_stack);
46577+ }
46578+
46579+ alloc_stack = NULL;
46580+ alloc_stack_size = 1;
46581+ alloc_stack_next = 1;
46582+
46583+ return;
46584+}
46585+
46586+int
46587+acl_alloc_stack_init(unsigned long size)
46588+{
46589+ if ((size * sizeof (void *)) <= PAGE_SIZE)
46590+ alloc_stack =
46591+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
46592+ else
46593+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
46594+
46595+ alloc_stack_size = size;
46596+
46597+ if (!alloc_stack)
46598+ return 0;
46599+ else
46600+ return 1;
46601+}
46602diff -urNp linux-2.6.32.45/grsecurity/gracl.c linux-2.6.32.45/grsecurity/gracl.c
46603--- linux-2.6.32.45/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
46604+++ linux-2.6.32.45/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
46605@@ -0,0 +1,4082 @@
46606+#include <linux/kernel.h>
46607+#include <linux/module.h>
46608+#include <linux/sched.h>
46609+#include <linux/mm.h>
46610+#include <linux/file.h>
46611+#include <linux/fs.h>
46612+#include <linux/namei.h>
46613+#include <linux/mount.h>
46614+#include <linux/tty.h>
46615+#include <linux/proc_fs.h>
46616+#include <linux/smp_lock.h>
46617+#include <linux/slab.h>
46618+#include <linux/vmalloc.h>
46619+#include <linux/types.h>
46620+#include <linux/sysctl.h>
46621+#include <linux/netdevice.h>
46622+#include <linux/ptrace.h>
46623+#include <linux/gracl.h>
46624+#include <linux/gralloc.h>
46625+#include <linux/grsecurity.h>
46626+#include <linux/grinternal.h>
46627+#include <linux/pid_namespace.h>
46628+#include <linux/fdtable.h>
46629+#include <linux/percpu.h>
46630+
46631+#include <asm/uaccess.h>
46632+#include <asm/errno.h>
46633+#include <asm/mman.h>
46634+
46635+static struct acl_role_db acl_role_set;
46636+static struct name_db name_set;
46637+static struct inodev_db inodev_set;
46638+
46639+/* for keeping track of userspace pointers used for subjects, so we
46640+ can share references in the kernel as well
46641+*/
46642+
46643+static struct dentry *real_root;
46644+static struct vfsmount *real_root_mnt;
46645+
46646+static struct acl_subj_map_db subj_map_set;
46647+
46648+static struct acl_role_label *default_role;
46649+
46650+static struct acl_role_label *role_list;
46651+
46652+static u16 acl_sp_role_value;
46653+
46654+extern char *gr_shared_page[4];
46655+static DEFINE_MUTEX(gr_dev_mutex);
46656+DEFINE_RWLOCK(gr_inode_lock);
46657+
46658+struct gr_arg *gr_usermode;
46659+
46660+static unsigned int gr_status __read_only = GR_STATUS_INIT;
46661+
46662+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
46663+extern void gr_clear_learn_entries(void);
46664+
46665+#ifdef CONFIG_GRKERNSEC_RESLOG
46666+extern void gr_log_resource(const struct task_struct *task,
46667+ const int res, const unsigned long wanted, const int gt);
46668+#endif
46669+
46670+unsigned char *gr_system_salt;
46671+unsigned char *gr_system_sum;
46672+
46673+static struct sprole_pw **acl_special_roles = NULL;
46674+static __u16 num_sprole_pws = 0;
46675+
46676+static struct acl_role_label *kernel_role = NULL;
46677+
46678+static unsigned int gr_auth_attempts = 0;
46679+static unsigned long gr_auth_expires = 0UL;
46680+
46681+#ifdef CONFIG_NET
46682+extern struct vfsmount *sock_mnt;
46683+#endif
46684+extern struct vfsmount *pipe_mnt;
46685+extern struct vfsmount *shm_mnt;
46686+#ifdef CONFIG_HUGETLBFS
46687+extern struct vfsmount *hugetlbfs_vfsmount;
46688+#endif
46689+
46690+static struct acl_object_label *fakefs_obj_rw;
46691+static struct acl_object_label *fakefs_obj_rwx;
46692+
46693+extern int gr_init_uidset(void);
46694+extern void gr_free_uidset(void);
46695+extern void gr_remove_uid(uid_t uid);
46696+extern int gr_find_uid(uid_t uid);
46697+
46698+__inline__ int
46699+gr_acl_is_enabled(void)
46700+{
46701+ return (gr_status & GR_READY);
46702+}
46703+
46704+#ifdef CONFIG_BTRFS_FS
46705+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46706+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46707+#endif
46708+
46709+static inline dev_t __get_dev(const struct dentry *dentry)
46710+{
46711+#ifdef CONFIG_BTRFS_FS
46712+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46713+ return get_btrfs_dev_from_inode(dentry->d_inode);
46714+ else
46715+#endif
46716+ return dentry->d_inode->i_sb->s_dev;
46717+}
46718+
46719+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
46720+{
46721+ return __get_dev(dentry);
46722+}
46723+
46724+static char gr_task_roletype_to_char(struct task_struct *task)
46725+{
46726+ switch (task->role->roletype &
46727+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
46728+ GR_ROLE_SPECIAL)) {
46729+ case GR_ROLE_DEFAULT:
46730+ return 'D';
46731+ case GR_ROLE_USER:
46732+ return 'U';
46733+ case GR_ROLE_GROUP:
46734+ return 'G';
46735+ case GR_ROLE_SPECIAL:
46736+ return 'S';
46737+ }
46738+
46739+ return 'X';
46740+}
46741+
46742+char gr_roletype_to_char(void)
46743+{
46744+ return gr_task_roletype_to_char(current);
46745+}
46746+
46747+__inline__ int
46748+gr_acl_tpe_check(void)
46749+{
46750+ if (unlikely(!(gr_status & GR_READY)))
46751+ return 0;
46752+ if (current->role->roletype & GR_ROLE_TPE)
46753+ return 1;
46754+ else
46755+ return 0;
46756+}
46757+
46758+int
46759+gr_handle_rawio(const struct inode *inode)
46760+{
46761+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
46762+ if (inode && S_ISBLK(inode->i_mode) &&
46763+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
46764+ !capable(CAP_SYS_RAWIO))
46765+ return 1;
46766+#endif
46767+ return 0;
46768+}
46769+
46770+static int
46771+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
46772+{
46773+ if (likely(lena != lenb))
46774+ return 0;
46775+
46776+ return !memcmp(a, b, lena);
46777+}
46778+
46779+/* this must be called with vfsmount_lock and dcache_lock held */
46780+
46781+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
46782+ struct dentry *root, struct vfsmount *rootmnt,
46783+ char *buffer, int buflen)
46784+{
46785+ char * end = buffer+buflen;
46786+ char * retval;
46787+ int namelen;
46788+
46789+ *--end = '\0';
46790+ buflen--;
46791+
46792+ if (buflen < 1)
46793+ goto Elong;
46794+ /* Get '/' right */
46795+ retval = end-1;
46796+ *retval = '/';
46797+
46798+ for (;;) {
46799+ struct dentry * parent;
46800+
46801+ if (dentry == root && vfsmnt == rootmnt)
46802+ break;
46803+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
46804+ /* Global root? */
46805+ if (vfsmnt->mnt_parent == vfsmnt)
46806+ goto global_root;
46807+ dentry = vfsmnt->mnt_mountpoint;
46808+ vfsmnt = vfsmnt->mnt_parent;
46809+ continue;
46810+ }
46811+ parent = dentry->d_parent;
46812+ prefetch(parent);
46813+ namelen = dentry->d_name.len;
46814+ buflen -= namelen + 1;
46815+ if (buflen < 0)
46816+ goto Elong;
46817+ end -= namelen;
46818+ memcpy(end, dentry->d_name.name, namelen);
46819+ *--end = '/';
46820+ retval = end;
46821+ dentry = parent;
46822+ }
46823+
46824+out:
46825+ return retval;
46826+
46827+global_root:
46828+ namelen = dentry->d_name.len;
46829+ buflen -= namelen;
46830+ if (buflen < 0)
46831+ goto Elong;
46832+ retval -= namelen-1; /* hit the slash */
46833+ memcpy(retval, dentry->d_name.name, namelen);
46834+ goto out;
46835+Elong:
46836+ retval = ERR_PTR(-ENAMETOOLONG);
46837+ goto out;
46838+}
46839+
46840+static char *
46841+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
46842+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
46843+{
46844+ char *retval;
46845+
46846+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
46847+ if (unlikely(IS_ERR(retval)))
46848+ retval = strcpy(buf, "<path too long>");
46849+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
46850+ retval[1] = '\0';
46851+
46852+ return retval;
46853+}
46854+
46855+static char *
46856+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
46857+ char *buf, int buflen)
46858+{
46859+ char *res;
46860+
46861+ /* we can use real_root, real_root_mnt, because this is only called
46862+ by the RBAC system */
46863+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
46864+
46865+ return res;
46866+}
46867+
46868+static char *
46869+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
46870+ char *buf, int buflen)
46871+{
46872+ char *res;
46873+ struct dentry *root;
46874+ struct vfsmount *rootmnt;
46875+ struct task_struct *reaper = &init_task;
46876+
46877+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
46878+ read_lock(&reaper->fs->lock);
46879+ root = dget(reaper->fs->root.dentry);
46880+ rootmnt = mntget(reaper->fs->root.mnt);
46881+ read_unlock(&reaper->fs->lock);
46882+
46883+ spin_lock(&dcache_lock);
46884+ spin_lock(&vfsmount_lock);
46885+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
46886+ spin_unlock(&vfsmount_lock);
46887+ spin_unlock(&dcache_lock);
46888+
46889+ dput(root);
46890+ mntput(rootmnt);
46891+ return res;
46892+}
46893+
46894+static char *
46895+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
46896+{
46897+ char *ret;
46898+ spin_lock(&dcache_lock);
46899+ spin_lock(&vfsmount_lock);
46900+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
46901+ PAGE_SIZE);
46902+ spin_unlock(&vfsmount_lock);
46903+ spin_unlock(&dcache_lock);
46904+ return ret;
46905+}
46906+
46907+char *
46908+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
46909+{
46910+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
46911+ PAGE_SIZE);
46912+}
46913+
46914+char *
46915+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
46916+{
46917+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
46918+ PAGE_SIZE);
46919+}
46920+
46921+char *
46922+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
46923+{
46924+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
46925+ PAGE_SIZE);
46926+}
46927+
46928+char *
46929+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
46930+{
46931+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
46932+ PAGE_SIZE);
46933+}
46934+
46935+char *
46936+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
46937+{
46938+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
46939+ PAGE_SIZE);
46940+}
46941+
46942+__inline__ __u32
46943+to_gr_audit(const __u32 reqmode)
46944+{
46945+ /* masks off auditable permission flags, then shifts them to create
46946+ auditing flags, and adds the special case of append auditing if
46947+ we're requesting write */
46948+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
46949+}
46950+
46951+struct acl_subject_label *
46952+lookup_subject_map(const struct acl_subject_label *userp)
46953+{
46954+ unsigned int index = shash(userp, subj_map_set.s_size);
46955+ struct subject_map *match;
46956+
46957+ match = subj_map_set.s_hash[index];
46958+
46959+ while (match && match->user != userp)
46960+ match = match->next;
46961+
46962+ if (match != NULL)
46963+ return match->kernel;
46964+ else
46965+ return NULL;
46966+}
46967+
46968+static void
46969+insert_subj_map_entry(struct subject_map *subjmap)
46970+{
46971+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
46972+ struct subject_map **curr;
46973+
46974+ subjmap->prev = NULL;
46975+
46976+ curr = &subj_map_set.s_hash[index];
46977+ if (*curr != NULL)
46978+ (*curr)->prev = subjmap;
46979+
46980+ subjmap->next = *curr;
46981+ *curr = subjmap;
46982+
46983+ return;
46984+}
46985+
46986+static struct acl_role_label *
46987+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
46988+ const gid_t gid)
46989+{
46990+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
46991+ struct acl_role_label *match;
46992+ struct role_allowed_ip *ipp;
46993+ unsigned int x;
46994+ u32 curr_ip = task->signal->curr_ip;
46995+
46996+ task->signal->saved_ip = curr_ip;
46997+
46998+ match = acl_role_set.r_hash[index];
46999+
47000+ while (match) {
47001+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47002+ for (x = 0; x < match->domain_child_num; x++) {
47003+ if (match->domain_children[x] == uid)
47004+ goto found;
47005+ }
47006+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47007+ break;
47008+ match = match->next;
47009+ }
47010+found:
47011+ if (match == NULL) {
47012+ try_group:
47013+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47014+ match = acl_role_set.r_hash[index];
47015+
47016+ while (match) {
47017+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47018+ for (x = 0; x < match->domain_child_num; x++) {
47019+ if (match->domain_children[x] == gid)
47020+ goto found2;
47021+ }
47022+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47023+ break;
47024+ match = match->next;
47025+ }
47026+found2:
47027+ if (match == NULL)
47028+ match = default_role;
47029+ if (match->allowed_ips == NULL)
47030+ return match;
47031+ else {
47032+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47033+ if (likely
47034+ ((ntohl(curr_ip) & ipp->netmask) ==
47035+ (ntohl(ipp->addr) & ipp->netmask)))
47036+ return match;
47037+ }
47038+ match = default_role;
47039+ }
47040+ } else if (match->allowed_ips == NULL) {
47041+ return match;
47042+ } else {
47043+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47044+ if (likely
47045+ ((ntohl(curr_ip) & ipp->netmask) ==
47046+ (ntohl(ipp->addr) & ipp->netmask)))
47047+ return match;
47048+ }
47049+ goto try_group;
47050+ }
47051+
47052+ return match;
47053+}
47054+
47055+struct acl_subject_label *
47056+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47057+ const struct acl_role_label *role)
47058+{
47059+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47060+ struct acl_subject_label *match;
47061+
47062+ match = role->subj_hash[index];
47063+
47064+ while (match && (match->inode != ino || match->device != dev ||
47065+ (match->mode & GR_DELETED))) {
47066+ match = match->next;
47067+ }
47068+
47069+ if (match && !(match->mode & GR_DELETED))
47070+ return match;
47071+ else
47072+ return NULL;
47073+}
47074+
47075+struct acl_subject_label *
47076+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47077+ const struct acl_role_label *role)
47078+{
47079+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47080+ struct acl_subject_label *match;
47081+
47082+ match = role->subj_hash[index];
47083+
47084+ while (match && (match->inode != ino || match->device != dev ||
47085+ !(match->mode & GR_DELETED))) {
47086+ match = match->next;
47087+ }
47088+
47089+ if (match && (match->mode & GR_DELETED))
47090+ return match;
47091+ else
47092+ return NULL;
47093+}
47094+
47095+static struct acl_object_label *
47096+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47097+ const struct acl_subject_label *subj)
47098+{
47099+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47100+ struct acl_object_label *match;
47101+
47102+ match = subj->obj_hash[index];
47103+
47104+ while (match && (match->inode != ino || match->device != dev ||
47105+ (match->mode & GR_DELETED))) {
47106+ match = match->next;
47107+ }
47108+
47109+ if (match && !(match->mode & GR_DELETED))
47110+ return match;
47111+ else
47112+ return NULL;
47113+}
47114+
47115+static struct acl_object_label *
47116+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47117+ const struct acl_subject_label *subj)
47118+{
47119+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47120+ struct acl_object_label *match;
47121+
47122+ match = subj->obj_hash[index];
47123+
47124+ while (match && (match->inode != ino || match->device != dev ||
47125+ !(match->mode & GR_DELETED))) {
47126+ match = match->next;
47127+ }
47128+
47129+ if (match && (match->mode & GR_DELETED))
47130+ return match;
47131+
47132+ match = subj->obj_hash[index];
47133+
47134+ while (match && (match->inode != ino || match->device != dev ||
47135+ (match->mode & GR_DELETED))) {
47136+ match = match->next;
47137+ }
47138+
47139+ if (match && !(match->mode & GR_DELETED))
47140+ return match;
47141+ else
47142+ return NULL;
47143+}
47144+
47145+static struct name_entry *
47146+lookup_name_entry(const char *name)
47147+{
47148+ unsigned int len = strlen(name);
47149+ unsigned int key = full_name_hash(name, len);
47150+ unsigned int index = key % name_set.n_size;
47151+ struct name_entry *match;
47152+
47153+ match = name_set.n_hash[index];
47154+
47155+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47156+ match = match->next;
47157+
47158+ return match;
47159+}
47160+
47161+static struct name_entry *
47162+lookup_name_entry_create(const char *name)
47163+{
47164+ unsigned int len = strlen(name);
47165+ unsigned int key = full_name_hash(name, len);
47166+ unsigned int index = key % name_set.n_size;
47167+ struct name_entry *match;
47168+
47169+ match = name_set.n_hash[index];
47170+
47171+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47172+ !match->deleted))
47173+ match = match->next;
47174+
47175+ if (match && match->deleted)
47176+ return match;
47177+
47178+ match = name_set.n_hash[index];
47179+
47180+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47181+ match->deleted))
47182+ match = match->next;
47183+
47184+ if (match && !match->deleted)
47185+ return match;
47186+ else
47187+ return NULL;
47188+}
47189+
47190+static struct inodev_entry *
47191+lookup_inodev_entry(const ino_t ino, const dev_t dev)
47192+{
47193+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
47194+ struct inodev_entry *match;
47195+
47196+ match = inodev_set.i_hash[index];
47197+
47198+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47199+ match = match->next;
47200+
47201+ return match;
47202+}
47203+
47204+static void
47205+insert_inodev_entry(struct inodev_entry *entry)
47206+{
47207+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47208+ inodev_set.i_size);
47209+ struct inodev_entry **curr;
47210+
47211+ entry->prev = NULL;
47212+
47213+ curr = &inodev_set.i_hash[index];
47214+ if (*curr != NULL)
47215+ (*curr)->prev = entry;
47216+
47217+ entry->next = *curr;
47218+ *curr = entry;
47219+
47220+ return;
47221+}
47222+
47223+static void
47224+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47225+{
47226+ unsigned int index =
47227+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47228+ struct acl_role_label **curr;
47229+ struct acl_role_label *tmp;
47230+
47231+ curr = &acl_role_set.r_hash[index];
47232+
47233+ /* if role was already inserted due to domains and already has
47234+ a role in the same bucket as it attached, then we need to
47235+ combine these two buckets
47236+ */
47237+ if (role->next) {
47238+ tmp = role->next;
47239+ while (tmp->next)
47240+ tmp = tmp->next;
47241+ tmp->next = *curr;
47242+ } else
47243+ role->next = *curr;
47244+ *curr = role;
47245+
47246+ return;
47247+}
47248+
47249+static void
47250+insert_acl_role_label(struct acl_role_label *role)
47251+{
47252+ int i;
47253+
47254+ if (role_list == NULL) {
47255+ role_list = role;
47256+ role->prev = NULL;
47257+ } else {
47258+ role->prev = role_list;
47259+ role_list = role;
47260+ }
47261+
47262+ /* used for hash chains */
47263+ role->next = NULL;
47264+
47265+ if (role->roletype & GR_ROLE_DOMAIN) {
47266+ for (i = 0; i < role->domain_child_num; i++)
47267+ __insert_acl_role_label(role, role->domain_children[i]);
47268+ } else
47269+ __insert_acl_role_label(role, role->uidgid);
47270+}
47271+
47272+static int
47273+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47274+{
47275+ struct name_entry **curr, *nentry;
47276+ struct inodev_entry *ientry;
47277+ unsigned int len = strlen(name);
47278+ unsigned int key = full_name_hash(name, len);
47279+ unsigned int index = key % name_set.n_size;
47280+
47281+ curr = &name_set.n_hash[index];
47282+
47283+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47284+ curr = &((*curr)->next);
47285+
47286+ if (*curr != NULL)
47287+ return 1;
47288+
47289+ nentry = acl_alloc(sizeof (struct name_entry));
47290+ if (nentry == NULL)
47291+ return 0;
47292+ ientry = acl_alloc(sizeof (struct inodev_entry));
47293+ if (ientry == NULL)
47294+ return 0;
47295+ ientry->nentry = nentry;
47296+
47297+ nentry->key = key;
47298+ nentry->name = name;
47299+ nentry->inode = inode;
47300+ nentry->device = device;
47301+ nentry->len = len;
47302+ nentry->deleted = deleted;
47303+
47304+ nentry->prev = NULL;
47305+ curr = &name_set.n_hash[index];
47306+ if (*curr != NULL)
47307+ (*curr)->prev = nentry;
47308+ nentry->next = *curr;
47309+ *curr = nentry;
47310+
47311+ /* insert us into the table searchable by inode/dev */
47312+ insert_inodev_entry(ientry);
47313+
47314+ return 1;
47315+}
47316+
47317+static void
47318+insert_acl_obj_label(struct acl_object_label *obj,
47319+ struct acl_subject_label *subj)
47320+{
47321+ unsigned int index =
47322+ fhash(obj->inode, obj->device, subj->obj_hash_size);
47323+ struct acl_object_label **curr;
47324+
47325+
47326+ obj->prev = NULL;
47327+
47328+ curr = &subj->obj_hash[index];
47329+ if (*curr != NULL)
47330+ (*curr)->prev = obj;
47331+
47332+ obj->next = *curr;
47333+ *curr = obj;
47334+
47335+ return;
47336+}
47337+
47338+static void
47339+insert_acl_subj_label(struct acl_subject_label *obj,
47340+ struct acl_role_label *role)
47341+{
47342+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47343+ struct acl_subject_label **curr;
47344+
47345+ obj->prev = NULL;
47346+
47347+ curr = &role->subj_hash[index];
47348+ if (*curr != NULL)
47349+ (*curr)->prev = obj;
47350+
47351+ obj->next = *curr;
47352+ *curr = obj;
47353+
47354+ return;
47355+}
47356+
47357+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
47358+
47359+static void *
47360+create_table(__u32 * len, int elementsize)
47361+{
47362+ unsigned int table_sizes[] = {
47363+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
47364+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
47365+ 4194301, 8388593, 16777213, 33554393, 67108859
47366+ };
47367+ void *newtable = NULL;
47368+ unsigned int pwr = 0;
47369+
47370+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
47371+ table_sizes[pwr] <= *len)
47372+ pwr++;
47373+
47374+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
47375+ return newtable;
47376+
47377+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
47378+ newtable =
47379+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
47380+ else
47381+ newtable = vmalloc(table_sizes[pwr] * elementsize);
47382+
47383+ *len = table_sizes[pwr];
47384+
47385+ return newtable;
47386+}
47387+
47388+static int
47389+init_variables(const struct gr_arg *arg)
47390+{
47391+ struct task_struct *reaper = &init_task;
47392+ unsigned int stacksize;
47393+
47394+ subj_map_set.s_size = arg->role_db.num_subjects;
47395+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
47396+ name_set.n_size = arg->role_db.num_objects;
47397+ inodev_set.i_size = arg->role_db.num_objects;
47398+
47399+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
47400+ !name_set.n_size || !inodev_set.i_size)
47401+ return 1;
47402+
47403+ if (!gr_init_uidset())
47404+ return 1;
47405+
47406+ /* set up the stack that holds allocation info */
47407+
47408+ stacksize = arg->role_db.num_pointers + 5;
47409+
47410+ if (!acl_alloc_stack_init(stacksize))
47411+ return 1;
47412+
47413+ /* grab reference for the real root dentry and vfsmount */
47414+ read_lock(&reaper->fs->lock);
47415+ real_root = dget(reaper->fs->root.dentry);
47416+ real_root_mnt = mntget(reaper->fs->root.mnt);
47417+ read_unlock(&reaper->fs->lock);
47418+
47419+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47420+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
47421+#endif
47422+
47423+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
47424+ if (fakefs_obj_rw == NULL)
47425+ return 1;
47426+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
47427+
47428+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
47429+ if (fakefs_obj_rwx == NULL)
47430+ return 1;
47431+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
47432+
47433+ subj_map_set.s_hash =
47434+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
47435+ acl_role_set.r_hash =
47436+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
47437+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
47438+ inodev_set.i_hash =
47439+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
47440+
47441+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
47442+ !name_set.n_hash || !inodev_set.i_hash)
47443+ return 1;
47444+
47445+ memset(subj_map_set.s_hash, 0,
47446+ sizeof(struct subject_map *) * subj_map_set.s_size);
47447+ memset(acl_role_set.r_hash, 0,
47448+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
47449+ memset(name_set.n_hash, 0,
47450+ sizeof (struct name_entry *) * name_set.n_size);
47451+ memset(inodev_set.i_hash, 0,
47452+ sizeof (struct inodev_entry *) * inodev_set.i_size);
47453+
47454+ return 0;
47455+}
47456+
47457+/* free information not needed after startup
47458+ currently contains user->kernel pointer mappings for subjects
47459+*/
47460+
47461+static void
47462+free_init_variables(void)
47463+{
47464+ __u32 i;
47465+
47466+ if (subj_map_set.s_hash) {
47467+ for (i = 0; i < subj_map_set.s_size; i++) {
47468+ if (subj_map_set.s_hash[i]) {
47469+ kfree(subj_map_set.s_hash[i]);
47470+ subj_map_set.s_hash[i] = NULL;
47471+ }
47472+ }
47473+
47474+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
47475+ PAGE_SIZE)
47476+ kfree(subj_map_set.s_hash);
47477+ else
47478+ vfree(subj_map_set.s_hash);
47479+ }
47480+
47481+ return;
47482+}
47483+
47484+static void
47485+free_variables(void)
47486+{
47487+ struct acl_subject_label *s;
47488+ struct acl_role_label *r;
47489+ struct task_struct *task, *task2;
47490+ unsigned int x;
47491+
47492+ gr_clear_learn_entries();
47493+
47494+ read_lock(&tasklist_lock);
47495+ do_each_thread(task2, task) {
47496+ task->acl_sp_role = 0;
47497+ task->acl_role_id = 0;
47498+ task->acl = NULL;
47499+ task->role = NULL;
47500+ } while_each_thread(task2, task);
47501+ read_unlock(&tasklist_lock);
47502+
47503+ /* release the reference to the real root dentry and vfsmount */
47504+ if (real_root)
47505+ dput(real_root);
47506+ real_root = NULL;
47507+ if (real_root_mnt)
47508+ mntput(real_root_mnt);
47509+ real_root_mnt = NULL;
47510+
47511+ /* free all object hash tables */
47512+
47513+ FOR_EACH_ROLE_START(r)
47514+ if (r->subj_hash == NULL)
47515+ goto next_role;
47516+ FOR_EACH_SUBJECT_START(r, s, x)
47517+ if (s->obj_hash == NULL)
47518+ break;
47519+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47520+ kfree(s->obj_hash);
47521+ else
47522+ vfree(s->obj_hash);
47523+ FOR_EACH_SUBJECT_END(s, x)
47524+ FOR_EACH_NESTED_SUBJECT_START(r, s)
47525+ if (s->obj_hash == NULL)
47526+ break;
47527+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47528+ kfree(s->obj_hash);
47529+ else
47530+ vfree(s->obj_hash);
47531+ FOR_EACH_NESTED_SUBJECT_END(s)
47532+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
47533+ kfree(r->subj_hash);
47534+ else
47535+ vfree(r->subj_hash);
47536+ r->subj_hash = NULL;
47537+next_role:
47538+ FOR_EACH_ROLE_END(r)
47539+
47540+ acl_free_all();
47541+
47542+ if (acl_role_set.r_hash) {
47543+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
47544+ PAGE_SIZE)
47545+ kfree(acl_role_set.r_hash);
47546+ else
47547+ vfree(acl_role_set.r_hash);
47548+ }
47549+ if (name_set.n_hash) {
47550+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
47551+ PAGE_SIZE)
47552+ kfree(name_set.n_hash);
47553+ else
47554+ vfree(name_set.n_hash);
47555+ }
47556+
47557+ if (inodev_set.i_hash) {
47558+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
47559+ PAGE_SIZE)
47560+ kfree(inodev_set.i_hash);
47561+ else
47562+ vfree(inodev_set.i_hash);
47563+ }
47564+
47565+ gr_free_uidset();
47566+
47567+ memset(&name_set, 0, sizeof (struct name_db));
47568+ memset(&inodev_set, 0, sizeof (struct inodev_db));
47569+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
47570+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
47571+
47572+ default_role = NULL;
47573+ role_list = NULL;
47574+
47575+ return;
47576+}
47577+
47578+static __u32
47579+count_user_objs(struct acl_object_label *userp)
47580+{
47581+ struct acl_object_label o_tmp;
47582+ __u32 num = 0;
47583+
47584+ while (userp) {
47585+ if (copy_from_user(&o_tmp, userp,
47586+ sizeof (struct acl_object_label)))
47587+ break;
47588+
47589+ userp = o_tmp.prev;
47590+ num++;
47591+ }
47592+
47593+ return num;
47594+}
47595+
47596+static struct acl_subject_label *
47597+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
47598+
47599+static int
47600+copy_user_glob(struct acl_object_label *obj)
47601+{
47602+ struct acl_object_label *g_tmp, **guser;
47603+ unsigned int len;
47604+ char *tmp;
47605+
47606+ if (obj->globbed == NULL)
47607+ return 0;
47608+
47609+ guser = &obj->globbed;
47610+ while (*guser) {
47611+ g_tmp = (struct acl_object_label *)
47612+ acl_alloc(sizeof (struct acl_object_label));
47613+ if (g_tmp == NULL)
47614+ return -ENOMEM;
47615+
47616+ if (copy_from_user(g_tmp, *guser,
47617+ sizeof (struct acl_object_label)))
47618+ return -EFAULT;
47619+
47620+ len = strnlen_user(g_tmp->filename, PATH_MAX);
47621+
47622+ if (!len || len >= PATH_MAX)
47623+ return -EINVAL;
47624+
47625+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47626+ return -ENOMEM;
47627+
47628+ if (copy_from_user(tmp, g_tmp->filename, len))
47629+ return -EFAULT;
47630+ tmp[len-1] = '\0';
47631+ g_tmp->filename = tmp;
47632+
47633+ *guser = g_tmp;
47634+ guser = &(g_tmp->next);
47635+ }
47636+
47637+ return 0;
47638+}
47639+
47640+static int
47641+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
47642+ struct acl_role_label *role)
47643+{
47644+ struct acl_object_label *o_tmp;
47645+ unsigned int len;
47646+ int ret;
47647+ char *tmp;
47648+
47649+ while (userp) {
47650+ if ((o_tmp = (struct acl_object_label *)
47651+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
47652+ return -ENOMEM;
47653+
47654+ if (copy_from_user(o_tmp, userp,
47655+ sizeof (struct acl_object_label)))
47656+ return -EFAULT;
47657+
47658+ userp = o_tmp->prev;
47659+
47660+ len = strnlen_user(o_tmp->filename, PATH_MAX);
47661+
47662+ if (!len || len >= PATH_MAX)
47663+ return -EINVAL;
47664+
47665+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47666+ return -ENOMEM;
47667+
47668+ if (copy_from_user(tmp, o_tmp->filename, len))
47669+ return -EFAULT;
47670+ tmp[len-1] = '\0';
47671+ o_tmp->filename = tmp;
47672+
47673+ insert_acl_obj_label(o_tmp, subj);
47674+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
47675+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
47676+ return -ENOMEM;
47677+
47678+ ret = copy_user_glob(o_tmp);
47679+ if (ret)
47680+ return ret;
47681+
47682+ if (o_tmp->nested) {
47683+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
47684+ if (IS_ERR(o_tmp->nested))
47685+ return PTR_ERR(o_tmp->nested);
47686+
47687+ /* insert into nested subject list */
47688+ o_tmp->nested->next = role->hash->first;
47689+ role->hash->first = o_tmp->nested;
47690+ }
47691+ }
47692+
47693+ return 0;
47694+}
47695+
47696+static __u32
47697+count_user_subjs(struct acl_subject_label *userp)
47698+{
47699+ struct acl_subject_label s_tmp;
47700+ __u32 num = 0;
47701+
47702+ while (userp) {
47703+ if (copy_from_user(&s_tmp, userp,
47704+ sizeof (struct acl_subject_label)))
47705+ break;
47706+
47707+ userp = s_tmp.prev;
47708+ /* do not count nested subjects against this count, since
47709+ they are not included in the hash table, but are
47710+ attached to objects. We have already counted
47711+ the subjects in userspace for the allocation
47712+ stack
47713+ */
47714+ if (!(s_tmp.mode & GR_NESTED))
47715+ num++;
47716+ }
47717+
47718+ return num;
47719+}
47720+
47721+static int
47722+copy_user_allowedips(struct acl_role_label *rolep)
47723+{
47724+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
47725+
47726+ ruserip = rolep->allowed_ips;
47727+
47728+ while (ruserip) {
47729+ rlast = rtmp;
47730+
47731+ if ((rtmp = (struct role_allowed_ip *)
47732+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
47733+ return -ENOMEM;
47734+
47735+ if (copy_from_user(rtmp, ruserip,
47736+ sizeof (struct role_allowed_ip)))
47737+ return -EFAULT;
47738+
47739+ ruserip = rtmp->prev;
47740+
47741+ if (!rlast) {
47742+ rtmp->prev = NULL;
47743+ rolep->allowed_ips = rtmp;
47744+ } else {
47745+ rlast->next = rtmp;
47746+ rtmp->prev = rlast;
47747+ }
47748+
47749+ if (!ruserip)
47750+ rtmp->next = NULL;
47751+ }
47752+
47753+ return 0;
47754+}
47755+
47756+static int
47757+copy_user_transitions(struct acl_role_label *rolep)
47758+{
47759+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
47760+
47761+ unsigned int len;
47762+ char *tmp;
47763+
47764+ rusertp = rolep->transitions;
47765+
47766+ while (rusertp) {
47767+ rlast = rtmp;
47768+
47769+ if ((rtmp = (struct role_transition *)
47770+ acl_alloc(sizeof (struct role_transition))) == NULL)
47771+ return -ENOMEM;
47772+
47773+ if (copy_from_user(rtmp, rusertp,
47774+ sizeof (struct role_transition)))
47775+ return -EFAULT;
47776+
47777+ rusertp = rtmp->prev;
47778+
47779+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
47780+
47781+ if (!len || len >= GR_SPROLE_LEN)
47782+ return -EINVAL;
47783+
47784+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47785+ return -ENOMEM;
47786+
47787+ if (copy_from_user(tmp, rtmp->rolename, len))
47788+ return -EFAULT;
47789+ tmp[len-1] = '\0';
47790+ rtmp->rolename = tmp;
47791+
47792+ if (!rlast) {
47793+ rtmp->prev = NULL;
47794+ rolep->transitions = rtmp;
47795+ } else {
47796+ rlast->next = rtmp;
47797+ rtmp->prev = rlast;
47798+ }
47799+
47800+ if (!rusertp)
47801+ rtmp->next = NULL;
47802+ }
47803+
47804+ return 0;
47805+}
47806+
47807+static struct acl_subject_label *
47808+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
47809+{
47810+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
47811+ unsigned int len;
47812+ char *tmp;
47813+ __u32 num_objs;
47814+ struct acl_ip_label **i_tmp, *i_utmp2;
47815+ struct gr_hash_struct ghash;
47816+ struct subject_map *subjmap;
47817+ unsigned int i_num;
47818+ int err;
47819+
47820+ s_tmp = lookup_subject_map(userp);
47821+
47822+ /* we've already copied this subject into the kernel, just return
47823+ the reference to it, and don't copy it over again
47824+ */
47825+ if (s_tmp)
47826+ return(s_tmp);
47827+
47828+ if ((s_tmp = (struct acl_subject_label *)
47829+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
47830+ return ERR_PTR(-ENOMEM);
47831+
47832+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
47833+ if (subjmap == NULL)
47834+ return ERR_PTR(-ENOMEM);
47835+
47836+ subjmap->user = userp;
47837+ subjmap->kernel = s_tmp;
47838+ insert_subj_map_entry(subjmap);
47839+
47840+ if (copy_from_user(s_tmp, userp,
47841+ sizeof (struct acl_subject_label)))
47842+ return ERR_PTR(-EFAULT);
47843+
47844+ len = strnlen_user(s_tmp->filename, PATH_MAX);
47845+
47846+ if (!len || len >= PATH_MAX)
47847+ return ERR_PTR(-EINVAL);
47848+
47849+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47850+ return ERR_PTR(-ENOMEM);
47851+
47852+ if (copy_from_user(tmp, s_tmp->filename, len))
47853+ return ERR_PTR(-EFAULT);
47854+ tmp[len-1] = '\0';
47855+ s_tmp->filename = tmp;
47856+
47857+ if (!strcmp(s_tmp->filename, "/"))
47858+ role->root_label = s_tmp;
47859+
47860+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
47861+ return ERR_PTR(-EFAULT);
47862+
47863+ /* copy user and group transition tables */
47864+
47865+ if (s_tmp->user_trans_num) {
47866+ uid_t *uidlist;
47867+
47868+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
47869+ if (uidlist == NULL)
47870+ return ERR_PTR(-ENOMEM);
47871+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
47872+ return ERR_PTR(-EFAULT);
47873+
47874+ s_tmp->user_transitions = uidlist;
47875+ }
47876+
47877+ if (s_tmp->group_trans_num) {
47878+ gid_t *gidlist;
47879+
47880+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
47881+ if (gidlist == NULL)
47882+ return ERR_PTR(-ENOMEM);
47883+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
47884+ return ERR_PTR(-EFAULT);
47885+
47886+ s_tmp->group_transitions = gidlist;
47887+ }
47888+
47889+ /* set up object hash table */
47890+ num_objs = count_user_objs(ghash.first);
47891+
47892+ s_tmp->obj_hash_size = num_objs;
47893+ s_tmp->obj_hash =
47894+ (struct acl_object_label **)
47895+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
47896+
47897+ if (!s_tmp->obj_hash)
47898+ return ERR_PTR(-ENOMEM);
47899+
47900+ memset(s_tmp->obj_hash, 0,
47901+ s_tmp->obj_hash_size *
47902+ sizeof (struct acl_object_label *));
47903+
47904+ /* add in objects */
47905+ err = copy_user_objs(ghash.first, s_tmp, role);
47906+
47907+ if (err)
47908+ return ERR_PTR(err);
47909+
47910+ /* set pointer for parent subject */
47911+ if (s_tmp->parent_subject) {
47912+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
47913+
47914+ if (IS_ERR(s_tmp2))
47915+ return s_tmp2;
47916+
47917+ s_tmp->parent_subject = s_tmp2;
47918+ }
47919+
47920+ /* add in ip acls */
47921+
47922+ if (!s_tmp->ip_num) {
47923+ s_tmp->ips = NULL;
47924+ goto insert;
47925+ }
47926+
47927+ i_tmp =
47928+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
47929+ sizeof (struct acl_ip_label *));
47930+
47931+ if (!i_tmp)
47932+ return ERR_PTR(-ENOMEM);
47933+
47934+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
47935+ *(i_tmp + i_num) =
47936+ (struct acl_ip_label *)
47937+ acl_alloc(sizeof (struct acl_ip_label));
47938+ if (!*(i_tmp + i_num))
47939+ return ERR_PTR(-ENOMEM);
47940+
47941+ if (copy_from_user
47942+ (&i_utmp2, s_tmp->ips + i_num,
47943+ sizeof (struct acl_ip_label *)))
47944+ return ERR_PTR(-EFAULT);
47945+
47946+ if (copy_from_user
47947+ (*(i_tmp + i_num), i_utmp2,
47948+ sizeof (struct acl_ip_label)))
47949+ return ERR_PTR(-EFAULT);
47950+
47951+ if ((*(i_tmp + i_num))->iface == NULL)
47952+ continue;
47953+
47954+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
47955+ if (!len || len >= IFNAMSIZ)
47956+ return ERR_PTR(-EINVAL);
47957+ tmp = acl_alloc(len);
47958+ if (tmp == NULL)
47959+ return ERR_PTR(-ENOMEM);
47960+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
47961+ return ERR_PTR(-EFAULT);
47962+ (*(i_tmp + i_num))->iface = tmp;
47963+ }
47964+
47965+ s_tmp->ips = i_tmp;
47966+
47967+insert:
47968+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
47969+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
47970+ return ERR_PTR(-ENOMEM);
47971+
47972+ return s_tmp;
47973+}
47974+
47975+static int
47976+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
47977+{
47978+ struct acl_subject_label s_pre;
47979+ struct acl_subject_label * ret;
47980+ int err;
47981+
47982+ while (userp) {
47983+ if (copy_from_user(&s_pre, userp,
47984+ sizeof (struct acl_subject_label)))
47985+ return -EFAULT;
47986+
47987+ /* do not add nested subjects here, add
47988+ while parsing objects
47989+ */
47990+
47991+ if (s_pre.mode & GR_NESTED) {
47992+ userp = s_pre.prev;
47993+ continue;
47994+ }
47995+
47996+ ret = do_copy_user_subj(userp, role);
47997+
47998+ err = PTR_ERR(ret);
47999+ if (IS_ERR(ret))
48000+ return err;
48001+
48002+ insert_acl_subj_label(ret, role);
48003+
48004+ userp = s_pre.prev;
48005+ }
48006+
48007+ return 0;
48008+}
48009+
48010+static int
48011+copy_user_acl(struct gr_arg *arg)
48012+{
48013+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48014+ struct sprole_pw *sptmp;
48015+ struct gr_hash_struct *ghash;
48016+ uid_t *domainlist;
48017+ unsigned int r_num;
48018+ unsigned int len;
48019+ char *tmp;
48020+ int err = 0;
48021+ __u16 i;
48022+ __u32 num_subjs;
48023+
48024+ /* we need a default and kernel role */
48025+ if (arg->role_db.num_roles < 2)
48026+ return -EINVAL;
48027+
48028+ /* copy special role authentication info from userspace */
48029+
48030+ num_sprole_pws = arg->num_sprole_pws;
48031+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48032+
48033+ if (!acl_special_roles) {
48034+ err = -ENOMEM;
48035+ goto cleanup;
48036+ }
48037+
48038+ for (i = 0; i < num_sprole_pws; i++) {
48039+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48040+ if (!sptmp) {
48041+ err = -ENOMEM;
48042+ goto cleanup;
48043+ }
48044+ if (copy_from_user(sptmp, arg->sprole_pws + i,
48045+ sizeof (struct sprole_pw))) {
48046+ err = -EFAULT;
48047+ goto cleanup;
48048+ }
48049+
48050+ len =
48051+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48052+
48053+ if (!len || len >= GR_SPROLE_LEN) {
48054+ err = -EINVAL;
48055+ goto cleanup;
48056+ }
48057+
48058+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48059+ err = -ENOMEM;
48060+ goto cleanup;
48061+ }
48062+
48063+ if (copy_from_user(tmp, sptmp->rolename, len)) {
48064+ err = -EFAULT;
48065+ goto cleanup;
48066+ }
48067+ tmp[len-1] = '\0';
48068+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48069+ printk(KERN_ALERT "Copying special role %s\n", tmp);
48070+#endif
48071+ sptmp->rolename = tmp;
48072+ acl_special_roles[i] = sptmp;
48073+ }
48074+
48075+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48076+
48077+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48078+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
48079+
48080+ if (!r_tmp) {
48081+ err = -ENOMEM;
48082+ goto cleanup;
48083+ }
48084+
48085+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
48086+ sizeof (struct acl_role_label *))) {
48087+ err = -EFAULT;
48088+ goto cleanup;
48089+ }
48090+
48091+ if (copy_from_user(r_tmp, r_utmp2,
48092+ sizeof (struct acl_role_label))) {
48093+ err = -EFAULT;
48094+ goto cleanup;
48095+ }
48096+
48097+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48098+
48099+ if (!len || len >= PATH_MAX) {
48100+ err = -EINVAL;
48101+ goto cleanup;
48102+ }
48103+
48104+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48105+ err = -ENOMEM;
48106+ goto cleanup;
48107+ }
48108+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
48109+ err = -EFAULT;
48110+ goto cleanup;
48111+ }
48112+ tmp[len-1] = '\0';
48113+ r_tmp->rolename = tmp;
48114+
48115+ if (!strcmp(r_tmp->rolename, "default")
48116+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48117+ default_role = r_tmp;
48118+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48119+ kernel_role = r_tmp;
48120+ }
48121+
48122+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48123+ err = -ENOMEM;
48124+ goto cleanup;
48125+ }
48126+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48127+ err = -EFAULT;
48128+ goto cleanup;
48129+ }
48130+
48131+ r_tmp->hash = ghash;
48132+
48133+ num_subjs = count_user_subjs(r_tmp->hash->first);
48134+
48135+ r_tmp->subj_hash_size = num_subjs;
48136+ r_tmp->subj_hash =
48137+ (struct acl_subject_label **)
48138+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48139+
48140+ if (!r_tmp->subj_hash) {
48141+ err = -ENOMEM;
48142+ goto cleanup;
48143+ }
48144+
48145+ err = copy_user_allowedips(r_tmp);
48146+ if (err)
48147+ goto cleanup;
48148+
48149+ /* copy domain info */
48150+ if (r_tmp->domain_children != NULL) {
48151+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48152+ if (domainlist == NULL) {
48153+ err = -ENOMEM;
48154+ goto cleanup;
48155+ }
48156+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48157+ err = -EFAULT;
48158+ goto cleanup;
48159+ }
48160+ r_tmp->domain_children = domainlist;
48161+ }
48162+
48163+ err = copy_user_transitions(r_tmp);
48164+ if (err)
48165+ goto cleanup;
48166+
48167+ memset(r_tmp->subj_hash, 0,
48168+ r_tmp->subj_hash_size *
48169+ sizeof (struct acl_subject_label *));
48170+
48171+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48172+
48173+ if (err)
48174+ goto cleanup;
48175+
48176+ /* set nested subject list to null */
48177+ r_tmp->hash->first = NULL;
48178+
48179+ insert_acl_role_label(r_tmp);
48180+ }
48181+
48182+ goto return_err;
48183+ cleanup:
48184+ free_variables();
48185+ return_err:
48186+ return err;
48187+
48188+}
48189+
48190+static int
48191+gracl_init(struct gr_arg *args)
48192+{
48193+ int error = 0;
48194+
48195+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48196+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48197+
48198+ if (init_variables(args)) {
48199+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48200+ error = -ENOMEM;
48201+ free_variables();
48202+ goto out;
48203+ }
48204+
48205+ error = copy_user_acl(args);
48206+ free_init_variables();
48207+ if (error) {
48208+ free_variables();
48209+ goto out;
48210+ }
48211+
48212+ if ((error = gr_set_acls(0))) {
48213+ free_variables();
48214+ goto out;
48215+ }
48216+
48217+ pax_open_kernel();
48218+ gr_status |= GR_READY;
48219+ pax_close_kernel();
48220+
48221+ out:
48222+ return error;
48223+}
48224+
48225+/* derived from glibc fnmatch() 0: match, 1: no match*/
48226+
48227+static int
48228+glob_match(const char *p, const char *n)
48229+{
48230+ char c;
48231+
48232+ while ((c = *p++) != '\0') {
48233+ switch (c) {
48234+ case '?':
48235+ if (*n == '\0')
48236+ return 1;
48237+ else if (*n == '/')
48238+ return 1;
48239+ break;
48240+ case '\\':
48241+ if (*n != c)
48242+ return 1;
48243+ break;
48244+ case '*':
48245+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
48246+ if (*n == '/')
48247+ return 1;
48248+ else if (c == '?') {
48249+ if (*n == '\0')
48250+ return 1;
48251+ else
48252+ ++n;
48253+ }
48254+ }
48255+ if (c == '\0') {
48256+ return 0;
48257+ } else {
48258+ const char *endp;
48259+
48260+ if ((endp = strchr(n, '/')) == NULL)
48261+ endp = n + strlen(n);
48262+
48263+ if (c == '[') {
48264+ for (--p; n < endp; ++n)
48265+ if (!glob_match(p, n))
48266+ return 0;
48267+ } else if (c == '/') {
48268+ while (*n != '\0' && *n != '/')
48269+ ++n;
48270+ if (*n == '/' && !glob_match(p, n + 1))
48271+ return 0;
48272+ } else {
48273+ for (--p; n < endp; ++n)
48274+ if (*n == c && !glob_match(p, n))
48275+ return 0;
48276+ }
48277+
48278+ return 1;
48279+ }
48280+ case '[':
48281+ {
48282+ int not;
48283+ char cold;
48284+
48285+ if (*n == '\0' || *n == '/')
48286+ return 1;
48287+
48288+ not = (*p == '!' || *p == '^');
48289+ if (not)
48290+ ++p;
48291+
48292+ c = *p++;
48293+ for (;;) {
48294+ unsigned char fn = (unsigned char)*n;
48295+
48296+ if (c == '\0')
48297+ return 1;
48298+ else {
48299+ if (c == fn)
48300+ goto matched;
48301+ cold = c;
48302+ c = *p++;
48303+
48304+ if (c == '-' && *p != ']') {
48305+ unsigned char cend = *p++;
48306+
48307+ if (cend == '\0')
48308+ return 1;
48309+
48310+ if (cold <= fn && fn <= cend)
48311+ goto matched;
48312+
48313+ c = *p++;
48314+ }
48315+ }
48316+
48317+ if (c == ']')
48318+ break;
48319+ }
48320+ if (!not)
48321+ return 1;
48322+ break;
48323+ matched:
48324+ while (c != ']') {
48325+ if (c == '\0')
48326+ return 1;
48327+
48328+ c = *p++;
48329+ }
48330+ if (not)
48331+ return 1;
48332+ }
48333+ break;
48334+ default:
48335+ if (c != *n)
48336+ return 1;
48337+ }
48338+
48339+ ++n;
48340+ }
48341+
48342+ if (*n == '\0')
48343+ return 0;
48344+
48345+ if (*n == '/')
48346+ return 0;
48347+
48348+ return 1;
48349+}
48350+
48351+static struct acl_object_label *
48352+chk_glob_label(struct acl_object_label *globbed,
48353+ struct dentry *dentry, struct vfsmount *mnt, char **path)
48354+{
48355+ struct acl_object_label *tmp;
48356+
48357+ if (*path == NULL)
48358+ *path = gr_to_filename_nolock(dentry, mnt);
48359+
48360+ tmp = globbed;
48361+
48362+ while (tmp) {
48363+ if (!glob_match(tmp->filename, *path))
48364+ return tmp;
48365+ tmp = tmp->next;
48366+ }
48367+
48368+ return NULL;
48369+}
48370+
48371+static struct acl_object_label *
48372+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48373+ const ino_t curr_ino, const dev_t curr_dev,
48374+ const struct acl_subject_label *subj, char **path, const int checkglob)
48375+{
48376+ struct acl_subject_label *tmpsubj;
48377+ struct acl_object_label *retval;
48378+ struct acl_object_label *retval2;
48379+
48380+ tmpsubj = (struct acl_subject_label *) subj;
48381+ read_lock(&gr_inode_lock);
48382+ do {
48383+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
48384+ if (retval) {
48385+ if (checkglob && retval->globbed) {
48386+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
48387+ (struct vfsmount *)orig_mnt, path);
48388+ if (retval2)
48389+ retval = retval2;
48390+ }
48391+ break;
48392+ }
48393+ } while ((tmpsubj = tmpsubj->parent_subject));
48394+ read_unlock(&gr_inode_lock);
48395+
48396+ return retval;
48397+}
48398+
48399+static __inline__ struct acl_object_label *
48400+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48401+ const struct dentry *curr_dentry,
48402+ const struct acl_subject_label *subj, char **path, const int checkglob)
48403+{
48404+ int newglob = checkglob;
48405+
48406+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
48407+ as we don't want a / * rule to match instead of the / object
48408+ don't do this for create lookups that call this function though, since they're looking up
48409+ on the parent and thus need globbing checks on all paths
48410+ */
48411+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
48412+ newglob = GR_NO_GLOB;
48413+
48414+ return __full_lookup(orig_dentry, orig_mnt,
48415+ curr_dentry->d_inode->i_ino,
48416+ __get_dev(curr_dentry), subj, path, newglob);
48417+}
48418+
48419+static struct acl_object_label *
48420+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48421+ const struct acl_subject_label *subj, char *path, const int checkglob)
48422+{
48423+ struct dentry *dentry = (struct dentry *) l_dentry;
48424+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48425+ struct acl_object_label *retval;
48426+
48427+ spin_lock(&dcache_lock);
48428+ spin_lock(&vfsmount_lock);
48429+
48430+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
48431+#ifdef CONFIG_NET
48432+ mnt == sock_mnt ||
48433+#endif
48434+#ifdef CONFIG_HUGETLBFS
48435+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
48436+#endif
48437+ /* ignore Eric Biederman */
48438+ IS_PRIVATE(l_dentry->d_inode))) {
48439+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
48440+ goto out;
48441+ }
48442+
48443+ for (;;) {
48444+ if (dentry == real_root && mnt == real_root_mnt)
48445+ break;
48446+
48447+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48448+ if (mnt->mnt_parent == mnt)
48449+ break;
48450+
48451+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48452+ if (retval != NULL)
48453+ goto out;
48454+
48455+ dentry = mnt->mnt_mountpoint;
48456+ mnt = mnt->mnt_parent;
48457+ continue;
48458+ }
48459+
48460+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48461+ if (retval != NULL)
48462+ goto out;
48463+
48464+ dentry = dentry->d_parent;
48465+ }
48466+
48467+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48468+
48469+ if (retval == NULL)
48470+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
48471+out:
48472+ spin_unlock(&vfsmount_lock);
48473+ spin_unlock(&dcache_lock);
48474+
48475+ BUG_ON(retval == NULL);
48476+
48477+ return retval;
48478+}
48479+
48480+static __inline__ struct acl_object_label *
48481+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48482+ const struct acl_subject_label *subj)
48483+{
48484+ char *path = NULL;
48485+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
48486+}
48487+
48488+static __inline__ struct acl_object_label *
48489+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48490+ const struct acl_subject_label *subj)
48491+{
48492+ char *path = NULL;
48493+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
48494+}
48495+
48496+static __inline__ struct acl_object_label *
48497+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48498+ const struct acl_subject_label *subj, char *path)
48499+{
48500+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
48501+}
48502+
48503+static struct acl_subject_label *
48504+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48505+ const struct acl_role_label *role)
48506+{
48507+ struct dentry *dentry = (struct dentry *) l_dentry;
48508+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48509+ struct acl_subject_label *retval;
48510+
48511+ spin_lock(&dcache_lock);
48512+ spin_lock(&vfsmount_lock);
48513+
48514+ for (;;) {
48515+ if (dentry == real_root && mnt == real_root_mnt)
48516+ break;
48517+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48518+ if (mnt->mnt_parent == mnt)
48519+ break;
48520+
48521+ read_lock(&gr_inode_lock);
48522+ retval =
48523+ lookup_acl_subj_label(dentry->d_inode->i_ino,
48524+ __get_dev(dentry), role);
48525+ read_unlock(&gr_inode_lock);
48526+ if (retval != NULL)
48527+ goto out;
48528+
48529+ dentry = mnt->mnt_mountpoint;
48530+ mnt = mnt->mnt_parent;
48531+ continue;
48532+ }
48533+
48534+ read_lock(&gr_inode_lock);
48535+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48536+ __get_dev(dentry), role);
48537+ read_unlock(&gr_inode_lock);
48538+ if (retval != NULL)
48539+ goto out;
48540+
48541+ dentry = dentry->d_parent;
48542+ }
48543+
48544+ read_lock(&gr_inode_lock);
48545+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48546+ __get_dev(dentry), role);
48547+ read_unlock(&gr_inode_lock);
48548+
48549+ if (unlikely(retval == NULL)) {
48550+ read_lock(&gr_inode_lock);
48551+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
48552+ __get_dev(real_root), role);
48553+ read_unlock(&gr_inode_lock);
48554+ }
48555+out:
48556+ spin_unlock(&vfsmount_lock);
48557+ spin_unlock(&dcache_lock);
48558+
48559+ BUG_ON(retval == NULL);
48560+
48561+ return retval;
48562+}
48563+
48564+static void
48565+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
48566+{
48567+ struct task_struct *task = current;
48568+ const struct cred *cred = current_cred();
48569+
48570+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48571+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48572+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48573+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
48574+
48575+ return;
48576+}
48577+
48578+static void
48579+gr_log_learn_sysctl(const char *path, const __u32 mode)
48580+{
48581+ struct task_struct *task = current;
48582+ const struct cred *cred = current_cred();
48583+
48584+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48585+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48586+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48587+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
48588+
48589+ return;
48590+}
48591+
48592+static void
48593+gr_log_learn_id_change(const char type, const unsigned int real,
48594+ const unsigned int effective, const unsigned int fs)
48595+{
48596+ struct task_struct *task = current;
48597+ const struct cred *cred = current_cred();
48598+
48599+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
48600+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48601+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48602+ type, real, effective, fs, &task->signal->saved_ip);
48603+
48604+ return;
48605+}
48606+
48607+__u32
48608+gr_check_link(const struct dentry * new_dentry,
48609+ const struct dentry * parent_dentry,
48610+ const struct vfsmount * parent_mnt,
48611+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
48612+{
48613+ struct acl_object_label *obj;
48614+ __u32 oldmode, newmode;
48615+ __u32 needmode;
48616+
48617+ if (unlikely(!(gr_status & GR_READY)))
48618+ return (GR_CREATE | GR_LINK);
48619+
48620+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
48621+ oldmode = obj->mode;
48622+
48623+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48624+ oldmode |= (GR_CREATE | GR_LINK);
48625+
48626+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
48627+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
48628+ needmode |= GR_SETID | GR_AUDIT_SETID;
48629+
48630+ newmode =
48631+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
48632+ oldmode | needmode);
48633+
48634+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
48635+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
48636+ GR_INHERIT | GR_AUDIT_INHERIT);
48637+
48638+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
48639+ goto bad;
48640+
48641+ if ((oldmode & needmode) != needmode)
48642+ goto bad;
48643+
48644+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
48645+ if ((newmode & needmode) != needmode)
48646+ goto bad;
48647+
48648+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
48649+ return newmode;
48650+bad:
48651+ needmode = oldmode;
48652+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
48653+ needmode |= GR_SETID;
48654+
48655+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
48656+ gr_log_learn(old_dentry, old_mnt, needmode);
48657+ return (GR_CREATE | GR_LINK);
48658+ } else if (newmode & GR_SUPPRESS)
48659+ return GR_SUPPRESS;
48660+ else
48661+ return 0;
48662+}
48663+
48664+__u32
48665+gr_search_file(const struct dentry * dentry, const __u32 mode,
48666+ const struct vfsmount * mnt)
48667+{
48668+ __u32 retval = mode;
48669+ struct acl_subject_label *curracl;
48670+ struct acl_object_label *currobj;
48671+
48672+ if (unlikely(!(gr_status & GR_READY)))
48673+ return (mode & ~GR_AUDITS);
48674+
48675+ curracl = current->acl;
48676+
48677+ currobj = chk_obj_label(dentry, mnt, curracl);
48678+ retval = currobj->mode & mode;
48679+
48680+ /* if we're opening a specified transfer file for writing
48681+ (e.g. /dev/initctl), then transfer our role to init
48682+ */
48683+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
48684+ current->role->roletype & GR_ROLE_PERSIST)) {
48685+ struct task_struct *task = init_pid_ns.child_reaper;
48686+
48687+ if (task->role != current->role) {
48688+ task->acl_sp_role = 0;
48689+ task->acl_role_id = current->acl_role_id;
48690+ task->role = current->role;
48691+ rcu_read_lock();
48692+ read_lock(&grsec_exec_file_lock);
48693+ gr_apply_subject_to_task(task);
48694+ read_unlock(&grsec_exec_file_lock);
48695+ rcu_read_unlock();
48696+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
48697+ }
48698+ }
48699+
48700+ if (unlikely
48701+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
48702+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
48703+ __u32 new_mode = mode;
48704+
48705+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48706+
48707+ retval = new_mode;
48708+
48709+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
48710+ new_mode |= GR_INHERIT;
48711+
48712+ if (!(mode & GR_NOLEARN))
48713+ gr_log_learn(dentry, mnt, new_mode);
48714+ }
48715+
48716+ return retval;
48717+}
48718+
48719+__u32
48720+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
48721+ const struct vfsmount * mnt, const __u32 mode)
48722+{
48723+ struct name_entry *match;
48724+ struct acl_object_label *matchpo;
48725+ struct acl_subject_label *curracl;
48726+ char *path;
48727+ __u32 retval;
48728+
48729+ if (unlikely(!(gr_status & GR_READY)))
48730+ return (mode & ~GR_AUDITS);
48731+
48732+ preempt_disable();
48733+ path = gr_to_filename_rbac(new_dentry, mnt);
48734+ match = lookup_name_entry_create(path);
48735+
48736+ if (!match)
48737+ goto check_parent;
48738+
48739+ curracl = current->acl;
48740+
48741+ read_lock(&gr_inode_lock);
48742+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
48743+ read_unlock(&gr_inode_lock);
48744+
48745+ if (matchpo) {
48746+ if ((matchpo->mode & mode) !=
48747+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
48748+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
48749+ __u32 new_mode = mode;
48750+
48751+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48752+
48753+ gr_log_learn(new_dentry, mnt, new_mode);
48754+
48755+ preempt_enable();
48756+ return new_mode;
48757+ }
48758+ preempt_enable();
48759+ return (matchpo->mode & mode);
48760+ }
48761+
48762+ check_parent:
48763+ curracl = current->acl;
48764+
48765+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
48766+ retval = matchpo->mode & mode;
48767+
48768+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
48769+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
48770+ __u32 new_mode = mode;
48771+
48772+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48773+
48774+ gr_log_learn(new_dentry, mnt, new_mode);
48775+ preempt_enable();
48776+ return new_mode;
48777+ }
48778+
48779+ preempt_enable();
48780+ return retval;
48781+}
48782+
48783+int
48784+gr_check_hidden_task(const struct task_struct *task)
48785+{
48786+ if (unlikely(!(gr_status & GR_READY)))
48787+ return 0;
48788+
48789+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
48790+ return 1;
48791+
48792+ return 0;
48793+}
48794+
48795+int
48796+gr_check_protected_task(const struct task_struct *task)
48797+{
48798+ if (unlikely(!(gr_status & GR_READY) || !task))
48799+ return 0;
48800+
48801+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
48802+ task->acl != current->acl)
48803+ return 1;
48804+
48805+ return 0;
48806+}
48807+
48808+int
48809+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
48810+{
48811+ struct task_struct *p;
48812+ int ret = 0;
48813+
48814+ if (unlikely(!(gr_status & GR_READY) || !pid))
48815+ return ret;
48816+
48817+ read_lock(&tasklist_lock);
48818+ do_each_pid_task(pid, type, p) {
48819+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
48820+ p->acl != current->acl) {
48821+ ret = 1;
48822+ goto out;
48823+ }
48824+ } while_each_pid_task(pid, type, p);
48825+out:
48826+ read_unlock(&tasklist_lock);
48827+
48828+ return ret;
48829+}
48830+
48831+void
48832+gr_copy_label(struct task_struct *tsk)
48833+{
48834+ tsk->signal->used_accept = 0;
48835+ tsk->acl_sp_role = 0;
48836+ tsk->acl_role_id = current->acl_role_id;
48837+ tsk->acl = current->acl;
48838+ tsk->role = current->role;
48839+ tsk->signal->curr_ip = current->signal->curr_ip;
48840+ tsk->signal->saved_ip = current->signal->saved_ip;
48841+ if (current->exec_file)
48842+ get_file(current->exec_file);
48843+ tsk->exec_file = current->exec_file;
48844+ tsk->is_writable = current->is_writable;
48845+ if (unlikely(current->signal->used_accept)) {
48846+ current->signal->curr_ip = 0;
48847+ current->signal->saved_ip = 0;
48848+ }
48849+
48850+ return;
48851+}
48852+
48853+static void
48854+gr_set_proc_res(struct task_struct *task)
48855+{
48856+ struct acl_subject_label *proc;
48857+ unsigned short i;
48858+
48859+ proc = task->acl;
48860+
48861+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
48862+ return;
48863+
48864+ for (i = 0; i < RLIM_NLIMITS; i++) {
48865+ if (!(proc->resmask & (1 << i)))
48866+ continue;
48867+
48868+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
48869+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
48870+ }
48871+
48872+ return;
48873+}
48874+
48875+extern int __gr_process_user_ban(struct user_struct *user);
48876+
48877+int
48878+gr_check_user_change(int real, int effective, int fs)
48879+{
48880+ unsigned int i;
48881+ __u16 num;
48882+ uid_t *uidlist;
48883+ int curuid;
48884+ int realok = 0;
48885+ int effectiveok = 0;
48886+ int fsok = 0;
48887+
48888+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
48889+ struct user_struct *user;
48890+
48891+ if (real == -1)
48892+ goto skipit;
48893+
48894+ user = find_user(real);
48895+ if (user == NULL)
48896+ goto skipit;
48897+
48898+ if (__gr_process_user_ban(user)) {
48899+ /* for find_user */
48900+ free_uid(user);
48901+ return 1;
48902+ }
48903+
48904+ /* for find_user */
48905+ free_uid(user);
48906+
48907+skipit:
48908+#endif
48909+
48910+ if (unlikely(!(gr_status & GR_READY)))
48911+ return 0;
48912+
48913+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48914+ gr_log_learn_id_change('u', real, effective, fs);
48915+
48916+ num = current->acl->user_trans_num;
48917+ uidlist = current->acl->user_transitions;
48918+
48919+ if (uidlist == NULL)
48920+ return 0;
48921+
48922+ if (real == -1)
48923+ realok = 1;
48924+ if (effective == -1)
48925+ effectiveok = 1;
48926+ if (fs == -1)
48927+ fsok = 1;
48928+
48929+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
48930+ for (i = 0; i < num; i++) {
48931+ curuid = (int)uidlist[i];
48932+ if (real == curuid)
48933+ realok = 1;
48934+ if (effective == curuid)
48935+ effectiveok = 1;
48936+ if (fs == curuid)
48937+ fsok = 1;
48938+ }
48939+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
48940+ for (i = 0; i < num; i++) {
48941+ curuid = (int)uidlist[i];
48942+ if (real == curuid)
48943+ break;
48944+ if (effective == curuid)
48945+ break;
48946+ if (fs == curuid)
48947+ break;
48948+ }
48949+ /* not in deny list */
48950+ if (i == num) {
48951+ realok = 1;
48952+ effectiveok = 1;
48953+ fsok = 1;
48954+ }
48955+ }
48956+
48957+ if (realok && effectiveok && fsok)
48958+ return 0;
48959+ else {
48960+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
48961+ return 1;
48962+ }
48963+}
48964+
48965+int
48966+gr_check_group_change(int real, int effective, int fs)
48967+{
48968+ unsigned int i;
48969+ __u16 num;
48970+ gid_t *gidlist;
48971+ int curgid;
48972+ int realok = 0;
48973+ int effectiveok = 0;
48974+ int fsok = 0;
48975+
48976+ if (unlikely(!(gr_status & GR_READY)))
48977+ return 0;
48978+
48979+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48980+ gr_log_learn_id_change('g', real, effective, fs);
48981+
48982+ num = current->acl->group_trans_num;
48983+ gidlist = current->acl->group_transitions;
48984+
48985+ if (gidlist == NULL)
48986+ return 0;
48987+
48988+ if (real == -1)
48989+ realok = 1;
48990+ if (effective == -1)
48991+ effectiveok = 1;
48992+ if (fs == -1)
48993+ fsok = 1;
48994+
48995+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
48996+ for (i = 0; i < num; i++) {
48997+ curgid = (int)gidlist[i];
48998+ if (real == curgid)
48999+ realok = 1;
49000+ if (effective == curgid)
49001+ effectiveok = 1;
49002+ if (fs == curgid)
49003+ fsok = 1;
49004+ }
49005+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
49006+ for (i = 0; i < num; i++) {
49007+ curgid = (int)gidlist[i];
49008+ if (real == curgid)
49009+ break;
49010+ if (effective == curgid)
49011+ break;
49012+ if (fs == curgid)
49013+ break;
49014+ }
49015+ /* not in deny list */
49016+ if (i == num) {
49017+ realok = 1;
49018+ effectiveok = 1;
49019+ fsok = 1;
49020+ }
49021+ }
49022+
49023+ if (realok && effectiveok && fsok)
49024+ return 0;
49025+ else {
49026+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49027+ return 1;
49028+ }
49029+}
49030+
49031+void
49032+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49033+{
49034+ struct acl_role_label *role = task->role;
49035+ struct acl_subject_label *subj = NULL;
49036+ struct acl_object_label *obj;
49037+ struct file *filp;
49038+
49039+ if (unlikely(!(gr_status & GR_READY)))
49040+ return;
49041+
49042+ filp = task->exec_file;
49043+
49044+ /* kernel process, we'll give them the kernel role */
49045+ if (unlikely(!filp)) {
49046+ task->role = kernel_role;
49047+ task->acl = kernel_role->root_label;
49048+ return;
49049+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49050+ role = lookup_acl_role_label(task, uid, gid);
49051+
49052+ /* perform subject lookup in possibly new role
49053+ we can use this result below in the case where role == task->role
49054+ */
49055+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49056+
49057+ /* if we changed uid/gid, but result in the same role
49058+ and are using inheritance, don't lose the inherited subject
49059+ if current subject is other than what normal lookup
49060+ would result in, we arrived via inheritance, don't
49061+ lose subject
49062+ */
49063+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49064+ (subj == task->acl)))
49065+ task->acl = subj;
49066+
49067+ task->role = role;
49068+
49069+ task->is_writable = 0;
49070+
49071+ /* ignore additional mmap checks for processes that are writable
49072+ by the default ACL */
49073+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49074+ if (unlikely(obj->mode & GR_WRITE))
49075+ task->is_writable = 1;
49076+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49077+ if (unlikely(obj->mode & GR_WRITE))
49078+ task->is_writable = 1;
49079+
49080+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49081+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49082+#endif
49083+
49084+ gr_set_proc_res(task);
49085+
49086+ return;
49087+}
49088+
49089+int
49090+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49091+ const int unsafe_share)
49092+{
49093+ struct task_struct *task = current;
49094+ struct acl_subject_label *newacl;
49095+ struct acl_object_label *obj;
49096+ __u32 retmode;
49097+
49098+ if (unlikely(!(gr_status & GR_READY)))
49099+ return 0;
49100+
49101+ newacl = chk_subj_label(dentry, mnt, task->role);
49102+
49103+ task_lock(task);
49104+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49105+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49106+ !(task->role->roletype & GR_ROLE_GOD) &&
49107+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49108+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49109+ task_unlock(task);
49110+ if (unsafe_share)
49111+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49112+ else
49113+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49114+ return -EACCES;
49115+ }
49116+ task_unlock(task);
49117+
49118+ obj = chk_obj_label(dentry, mnt, task->acl);
49119+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49120+
49121+ if (!(task->acl->mode & GR_INHERITLEARN) &&
49122+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49123+ if (obj->nested)
49124+ task->acl = obj->nested;
49125+ else
49126+ task->acl = newacl;
49127+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49128+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49129+
49130+ task->is_writable = 0;
49131+
49132+ /* ignore additional mmap checks for processes that are writable
49133+ by the default ACL */
49134+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
49135+ if (unlikely(obj->mode & GR_WRITE))
49136+ task->is_writable = 1;
49137+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
49138+ if (unlikely(obj->mode & GR_WRITE))
49139+ task->is_writable = 1;
49140+
49141+ gr_set_proc_res(task);
49142+
49143+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49144+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49145+#endif
49146+ return 0;
49147+}
49148+
49149+/* always called with valid inodev ptr */
49150+static void
49151+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49152+{
49153+ struct acl_object_label *matchpo;
49154+ struct acl_subject_label *matchps;
49155+ struct acl_subject_label *subj;
49156+ struct acl_role_label *role;
49157+ unsigned int x;
49158+
49159+ FOR_EACH_ROLE_START(role)
49160+ FOR_EACH_SUBJECT_START(role, subj, x)
49161+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49162+ matchpo->mode |= GR_DELETED;
49163+ FOR_EACH_SUBJECT_END(subj,x)
49164+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49165+ if (subj->inode == ino && subj->device == dev)
49166+ subj->mode |= GR_DELETED;
49167+ FOR_EACH_NESTED_SUBJECT_END(subj)
49168+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49169+ matchps->mode |= GR_DELETED;
49170+ FOR_EACH_ROLE_END(role)
49171+
49172+ inodev->nentry->deleted = 1;
49173+
49174+ return;
49175+}
49176+
49177+void
49178+gr_handle_delete(const ino_t ino, const dev_t dev)
49179+{
49180+ struct inodev_entry *inodev;
49181+
49182+ if (unlikely(!(gr_status & GR_READY)))
49183+ return;
49184+
49185+ write_lock(&gr_inode_lock);
49186+ inodev = lookup_inodev_entry(ino, dev);
49187+ if (inodev != NULL)
49188+ do_handle_delete(inodev, ino, dev);
49189+ write_unlock(&gr_inode_lock);
49190+
49191+ return;
49192+}
49193+
49194+static void
49195+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49196+ const ino_t newinode, const dev_t newdevice,
49197+ struct acl_subject_label *subj)
49198+{
49199+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49200+ struct acl_object_label *match;
49201+
49202+ match = subj->obj_hash[index];
49203+
49204+ while (match && (match->inode != oldinode ||
49205+ match->device != olddevice ||
49206+ !(match->mode & GR_DELETED)))
49207+ match = match->next;
49208+
49209+ if (match && (match->inode == oldinode)
49210+ && (match->device == olddevice)
49211+ && (match->mode & GR_DELETED)) {
49212+ if (match->prev == NULL) {
49213+ subj->obj_hash[index] = match->next;
49214+ if (match->next != NULL)
49215+ match->next->prev = NULL;
49216+ } else {
49217+ match->prev->next = match->next;
49218+ if (match->next != NULL)
49219+ match->next->prev = match->prev;
49220+ }
49221+ match->prev = NULL;
49222+ match->next = NULL;
49223+ match->inode = newinode;
49224+ match->device = newdevice;
49225+ match->mode &= ~GR_DELETED;
49226+
49227+ insert_acl_obj_label(match, subj);
49228+ }
49229+
49230+ return;
49231+}
49232+
49233+static void
49234+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49235+ const ino_t newinode, const dev_t newdevice,
49236+ struct acl_role_label *role)
49237+{
49238+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49239+ struct acl_subject_label *match;
49240+
49241+ match = role->subj_hash[index];
49242+
49243+ while (match && (match->inode != oldinode ||
49244+ match->device != olddevice ||
49245+ !(match->mode & GR_DELETED)))
49246+ match = match->next;
49247+
49248+ if (match && (match->inode == oldinode)
49249+ && (match->device == olddevice)
49250+ && (match->mode & GR_DELETED)) {
49251+ if (match->prev == NULL) {
49252+ role->subj_hash[index] = match->next;
49253+ if (match->next != NULL)
49254+ match->next->prev = NULL;
49255+ } else {
49256+ match->prev->next = match->next;
49257+ if (match->next != NULL)
49258+ match->next->prev = match->prev;
49259+ }
49260+ match->prev = NULL;
49261+ match->next = NULL;
49262+ match->inode = newinode;
49263+ match->device = newdevice;
49264+ match->mode &= ~GR_DELETED;
49265+
49266+ insert_acl_subj_label(match, role);
49267+ }
49268+
49269+ return;
49270+}
49271+
49272+static void
49273+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49274+ const ino_t newinode, const dev_t newdevice)
49275+{
49276+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49277+ struct inodev_entry *match;
49278+
49279+ match = inodev_set.i_hash[index];
49280+
49281+ while (match && (match->nentry->inode != oldinode ||
49282+ match->nentry->device != olddevice || !match->nentry->deleted))
49283+ match = match->next;
49284+
49285+ if (match && (match->nentry->inode == oldinode)
49286+ && (match->nentry->device == olddevice) &&
49287+ match->nentry->deleted) {
49288+ if (match->prev == NULL) {
49289+ inodev_set.i_hash[index] = match->next;
49290+ if (match->next != NULL)
49291+ match->next->prev = NULL;
49292+ } else {
49293+ match->prev->next = match->next;
49294+ if (match->next != NULL)
49295+ match->next->prev = match->prev;
49296+ }
49297+ match->prev = NULL;
49298+ match->next = NULL;
49299+ match->nentry->inode = newinode;
49300+ match->nentry->device = newdevice;
49301+ match->nentry->deleted = 0;
49302+
49303+ insert_inodev_entry(match);
49304+ }
49305+
49306+ return;
49307+}
49308+
49309+static void
49310+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49311+ const struct vfsmount *mnt)
49312+{
49313+ struct acl_subject_label *subj;
49314+ struct acl_role_label *role;
49315+ unsigned int x;
49316+ ino_t inode = dentry->d_inode->i_ino;
49317+ dev_t dev = __get_dev(dentry);
49318+
49319+ FOR_EACH_ROLE_START(role)
49320+ update_acl_subj_label(matchn->inode, matchn->device,
49321+ inode, dev, role);
49322+
49323+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49324+ if ((subj->inode == inode) && (subj->device == dev)) {
49325+ subj->inode = inode;
49326+ subj->device = dev;
49327+ }
49328+ FOR_EACH_NESTED_SUBJECT_END(subj)
49329+ FOR_EACH_SUBJECT_START(role, subj, x)
49330+ update_acl_obj_label(matchn->inode, matchn->device,
49331+ inode, dev, subj);
49332+ FOR_EACH_SUBJECT_END(subj,x)
49333+ FOR_EACH_ROLE_END(role)
49334+
49335+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
49336+
49337+ return;
49338+}
49339+
49340+void
49341+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49342+{
49343+ struct name_entry *matchn;
49344+
49345+ if (unlikely(!(gr_status & GR_READY)))
49346+ return;
49347+
49348+ preempt_disable();
49349+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
49350+
49351+ if (unlikely((unsigned long)matchn)) {
49352+ write_lock(&gr_inode_lock);
49353+ do_handle_create(matchn, dentry, mnt);
49354+ write_unlock(&gr_inode_lock);
49355+ }
49356+ preempt_enable();
49357+
49358+ return;
49359+}
49360+
49361+void
49362+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49363+ struct dentry *old_dentry,
49364+ struct dentry *new_dentry,
49365+ struct vfsmount *mnt, const __u8 replace)
49366+{
49367+ struct name_entry *matchn;
49368+ struct inodev_entry *inodev;
49369+ ino_t oldinode = old_dentry->d_inode->i_ino;
49370+ dev_t olddev = __get_dev(old_dentry);
49371+
49372+ /* vfs_rename swaps the name and parent link for old_dentry and
49373+ new_dentry
49374+ at this point, old_dentry has the new name, parent link, and inode
49375+ for the renamed file
49376+ if a file is being replaced by a rename, new_dentry has the inode
49377+ and name for the replaced file
49378+ */
49379+
49380+ if (unlikely(!(gr_status & GR_READY)))
49381+ return;
49382+
49383+ preempt_disable();
49384+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
49385+
49386+ /* we wouldn't have to check d_inode if it weren't for
49387+ NFS silly-renaming
49388+ */
49389+
49390+ write_lock(&gr_inode_lock);
49391+ if (unlikely(replace && new_dentry->d_inode)) {
49392+ ino_t newinode = new_dentry->d_inode->i_ino;
49393+ dev_t newdev = __get_dev(new_dentry);
49394+ inodev = lookup_inodev_entry(newinode, newdev);
49395+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
49396+ do_handle_delete(inodev, newinode, newdev);
49397+ }
49398+
49399+ inodev = lookup_inodev_entry(oldinode, olddev);
49400+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
49401+ do_handle_delete(inodev, oldinode, olddev);
49402+
49403+ if (unlikely((unsigned long)matchn))
49404+ do_handle_create(matchn, old_dentry, mnt);
49405+
49406+ write_unlock(&gr_inode_lock);
49407+ preempt_enable();
49408+
49409+ return;
49410+}
49411+
49412+static int
49413+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
49414+ unsigned char **sum)
49415+{
49416+ struct acl_role_label *r;
49417+ struct role_allowed_ip *ipp;
49418+ struct role_transition *trans;
49419+ unsigned int i;
49420+ int found = 0;
49421+ u32 curr_ip = current->signal->curr_ip;
49422+
49423+ current->signal->saved_ip = curr_ip;
49424+
49425+ /* check transition table */
49426+
49427+ for (trans = current->role->transitions; trans; trans = trans->next) {
49428+ if (!strcmp(rolename, trans->rolename)) {
49429+ found = 1;
49430+ break;
49431+ }
49432+ }
49433+
49434+ if (!found)
49435+ return 0;
49436+
49437+ /* handle special roles that do not require authentication
49438+ and check ip */
49439+
49440+ FOR_EACH_ROLE_START(r)
49441+ if (!strcmp(rolename, r->rolename) &&
49442+ (r->roletype & GR_ROLE_SPECIAL)) {
49443+ found = 0;
49444+ if (r->allowed_ips != NULL) {
49445+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
49446+ if ((ntohl(curr_ip) & ipp->netmask) ==
49447+ (ntohl(ipp->addr) & ipp->netmask))
49448+ found = 1;
49449+ }
49450+ } else
49451+ found = 2;
49452+ if (!found)
49453+ return 0;
49454+
49455+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
49456+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
49457+ *salt = NULL;
49458+ *sum = NULL;
49459+ return 1;
49460+ }
49461+ }
49462+ FOR_EACH_ROLE_END(r)
49463+
49464+ for (i = 0; i < num_sprole_pws; i++) {
49465+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
49466+ *salt = acl_special_roles[i]->salt;
49467+ *sum = acl_special_roles[i]->sum;
49468+ return 1;
49469+ }
49470+ }
49471+
49472+ return 0;
49473+}
49474+
49475+static void
49476+assign_special_role(char *rolename)
49477+{
49478+ struct acl_object_label *obj;
49479+ struct acl_role_label *r;
49480+ struct acl_role_label *assigned = NULL;
49481+ struct task_struct *tsk;
49482+ struct file *filp;
49483+
49484+ FOR_EACH_ROLE_START(r)
49485+ if (!strcmp(rolename, r->rolename) &&
49486+ (r->roletype & GR_ROLE_SPECIAL)) {
49487+ assigned = r;
49488+ break;
49489+ }
49490+ FOR_EACH_ROLE_END(r)
49491+
49492+ if (!assigned)
49493+ return;
49494+
49495+ read_lock(&tasklist_lock);
49496+ read_lock(&grsec_exec_file_lock);
49497+
49498+ tsk = current->real_parent;
49499+ if (tsk == NULL)
49500+ goto out_unlock;
49501+
49502+ filp = tsk->exec_file;
49503+ if (filp == NULL)
49504+ goto out_unlock;
49505+
49506+ tsk->is_writable = 0;
49507+
49508+ tsk->acl_sp_role = 1;
49509+ tsk->acl_role_id = ++acl_sp_role_value;
49510+ tsk->role = assigned;
49511+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
49512+
49513+ /* ignore additional mmap checks for processes that are writable
49514+ by the default ACL */
49515+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49516+ if (unlikely(obj->mode & GR_WRITE))
49517+ tsk->is_writable = 1;
49518+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
49519+ if (unlikely(obj->mode & GR_WRITE))
49520+ tsk->is_writable = 1;
49521+
49522+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49523+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
49524+#endif
49525+
49526+out_unlock:
49527+ read_unlock(&grsec_exec_file_lock);
49528+ read_unlock(&tasklist_lock);
49529+ return;
49530+}
49531+
49532+int gr_check_secure_terminal(struct task_struct *task)
49533+{
49534+ struct task_struct *p, *p2, *p3;
49535+ struct files_struct *files;
49536+ struct fdtable *fdt;
49537+ struct file *our_file = NULL, *file;
49538+ int i;
49539+
49540+ if (task->signal->tty == NULL)
49541+ return 1;
49542+
49543+ files = get_files_struct(task);
49544+ if (files != NULL) {
49545+ rcu_read_lock();
49546+ fdt = files_fdtable(files);
49547+ for (i=0; i < fdt->max_fds; i++) {
49548+ file = fcheck_files(files, i);
49549+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
49550+ get_file(file);
49551+ our_file = file;
49552+ }
49553+ }
49554+ rcu_read_unlock();
49555+ put_files_struct(files);
49556+ }
49557+
49558+ if (our_file == NULL)
49559+ return 1;
49560+
49561+ read_lock(&tasklist_lock);
49562+ do_each_thread(p2, p) {
49563+ files = get_files_struct(p);
49564+ if (files == NULL ||
49565+ (p->signal && p->signal->tty == task->signal->tty)) {
49566+ if (files != NULL)
49567+ put_files_struct(files);
49568+ continue;
49569+ }
49570+ rcu_read_lock();
49571+ fdt = files_fdtable(files);
49572+ for (i=0; i < fdt->max_fds; i++) {
49573+ file = fcheck_files(files, i);
49574+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
49575+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
49576+ p3 = task;
49577+ while (p3->pid > 0) {
49578+ if (p3 == p)
49579+ break;
49580+ p3 = p3->real_parent;
49581+ }
49582+ if (p3 == p)
49583+ break;
49584+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
49585+ gr_handle_alertkill(p);
49586+ rcu_read_unlock();
49587+ put_files_struct(files);
49588+ read_unlock(&tasklist_lock);
49589+ fput(our_file);
49590+ return 0;
49591+ }
49592+ }
49593+ rcu_read_unlock();
49594+ put_files_struct(files);
49595+ } while_each_thread(p2, p);
49596+ read_unlock(&tasklist_lock);
49597+
49598+ fput(our_file);
49599+ return 1;
49600+}
49601+
49602+ssize_t
49603+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
49604+{
49605+ struct gr_arg_wrapper uwrap;
49606+ unsigned char *sprole_salt = NULL;
49607+ unsigned char *sprole_sum = NULL;
49608+ int error = sizeof (struct gr_arg_wrapper);
49609+ int error2 = 0;
49610+
49611+ mutex_lock(&gr_dev_mutex);
49612+
49613+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
49614+ error = -EPERM;
49615+ goto out;
49616+ }
49617+
49618+ if (count != sizeof (struct gr_arg_wrapper)) {
49619+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
49620+ error = -EINVAL;
49621+ goto out;
49622+ }
49623+
49624+
49625+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
49626+ gr_auth_expires = 0;
49627+ gr_auth_attempts = 0;
49628+ }
49629+
49630+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
49631+ error = -EFAULT;
49632+ goto out;
49633+ }
49634+
49635+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
49636+ error = -EINVAL;
49637+ goto out;
49638+ }
49639+
49640+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
49641+ error = -EFAULT;
49642+ goto out;
49643+ }
49644+
49645+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49646+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49647+ time_after(gr_auth_expires, get_seconds())) {
49648+ error = -EBUSY;
49649+ goto out;
49650+ }
49651+
49652+ /* if non-root trying to do anything other than use a special role,
49653+ do not attempt authentication, do not count towards authentication
49654+ locking
49655+ */
49656+
49657+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
49658+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49659+ current_uid()) {
49660+ error = -EPERM;
49661+ goto out;
49662+ }
49663+
49664+ /* ensure pw and special role name are null terminated */
49665+
49666+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
49667+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
49668+
49669+ /* Okay.
49670+ * We have our enough of the argument structure..(we have yet
49671+ * to copy_from_user the tables themselves) . Copy the tables
49672+ * only if we need them, i.e. for loading operations. */
49673+
49674+ switch (gr_usermode->mode) {
49675+ case GR_STATUS:
49676+ if (gr_status & GR_READY) {
49677+ error = 1;
49678+ if (!gr_check_secure_terminal(current))
49679+ error = 3;
49680+ } else
49681+ error = 2;
49682+ goto out;
49683+ case GR_SHUTDOWN:
49684+ if ((gr_status & GR_READY)
49685+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49686+ pax_open_kernel();
49687+ gr_status &= ~GR_READY;
49688+ pax_close_kernel();
49689+
49690+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
49691+ free_variables();
49692+ memset(gr_usermode, 0, sizeof (struct gr_arg));
49693+ memset(gr_system_salt, 0, GR_SALT_LEN);
49694+ memset(gr_system_sum, 0, GR_SHA_LEN);
49695+ } else if (gr_status & GR_READY) {
49696+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
49697+ error = -EPERM;
49698+ } else {
49699+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
49700+ error = -EAGAIN;
49701+ }
49702+ break;
49703+ case GR_ENABLE:
49704+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
49705+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
49706+ else {
49707+ if (gr_status & GR_READY)
49708+ error = -EAGAIN;
49709+ else
49710+ error = error2;
49711+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
49712+ }
49713+ break;
49714+ case GR_RELOAD:
49715+ if (!(gr_status & GR_READY)) {
49716+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
49717+ error = -EAGAIN;
49718+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49719+ lock_kernel();
49720+
49721+ pax_open_kernel();
49722+ gr_status &= ~GR_READY;
49723+ pax_close_kernel();
49724+
49725+ free_variables();
49726+ if (!(error2 = gracl_init(gr_usermode))) {
49727+ unlock_kernel();
49728+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
49729+ } else {
49730+ unlock_kernel();
49731+ error = error2;
49732+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
49733+ }
49734+ } else {
49735+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
49736+ error = -EPERM;
49737+ }
49738+ break;
49739+ case GR_SEGVMOD:
49740+ if (unlikely(!(gr_status & GR_READY))) {
49741+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
49742+ error = -EAGAIN;
49743+ break;
49744+ }
49745+
49746+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49747+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
49748+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
49749+ struct acl_subject_label *segvacl;
49750+ segvacl =
49751+ lookup_acl_subj_label(gr_usermode->segv_inode,
49752+ gr_usermode->segv_device,
49753+ current->role);
49754+ if (segvacl) {
49755+ segvacl->crashes = 0;
49756+ segvacl->expires = 0;
49757+ }
49758+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
49759+ gr_remove_uid(gr_usermode->segv_uid);
49760+ }
49761+ } else {
49762+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
49763+ error = -EPERM;
49764+ }
49765+ break;
49766+ case GR_SPROLE:
49767+ case GR_SPROLEPAM:
49768+ if (unlikely(!(gr_status & GR_READY))) {
49769+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
49770+ error = -EAGAIN;
49771+ break;
49772+ }
49773+
49774+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
49775+ current->role->expires = 0;
49776+ current->role->auth_attempts = 0;
49777+ }
49778+
49779+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49780+ time_after(current->role->expires, get_seconds())) {
49781+ error = -EBUSY;
49782+ goto out;
49783+ }
49784+
49785+ if (lookup_special_role_auth
49786+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
49787+ && ((!sprole_salt && !sprole_sum)
49788+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
49789+ char *p = "";
49790+ assign_special_role(gr_usermode->sp_role);
49791+ read_lock(&tasklist_lock);
49792+ if (current->real_parent)
49793+ p = current->real_parent->role->rolename;
49794+ read_unlock(&tasklist_lock);
49795+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
49796+ p, acl_sp_role_value);
49797+ } else {
49798+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
49799+ error = -EPERM;
49800+ if(!(current->role->auth_attempts++))
49801+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
49802+
49803+ goto out;
49804+ }
49805+ break;
49806+ case GR_UNSPROLE:
49807+ if (unlikely(!(gr_status & GR_READY))) {
49808+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
49809+ error = -EAGAIN;
49810+ break;
49811+ }
49812+
49813+ if (current->role->roletype & GR_ROLE_SPECIAL) {
49814+ char *p = "";
49815+ int i = 0;
49816+
49817+ read_lock(&tasklist_lock);
49818+ if (current->real_parent) {
49819+ p = current->real_parent->role->rolename;
49820+ i = current->real_parent->acl_role_id;
49821+ }
49822+ read_unlock(&tasklist_lock);
49823+
49824+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
49825+ gr_set_acls(1);
49826+ } else {
49827+ error = -EPERM;
49828+ goto out;
49829+ }
49830+ break;
49831+ default:
49832+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
49833+ error = -EINVAL;
49834+ break;
49835+ }
49836+
49837+ if (error != -EPERM)
49838+ goto out;
49839+
49840+ if(!(gr_auth_attempts++))
49841+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
49842+
49843+ out:
49844+ mutex_unlock(&gr_dev_mutex);
49845+ return error;
49846+}
49847+
49848+/* must be called with
49849+ rcu_read_lock();
49850+ read_lock(&tasklist_lock);
49851+ read_lock(&grsec_exec_file_lock);
49852+*/
49853+int gr_apply_subject_to_task(struct task_struct *task)
49854+{
49855+ struct acl_object_label *obj;
49856+ char *tmpname;
49857+ struct acl_subject_label *tmpsubj;
49858+ struct file *filp;
49859+ struct name_entry *nmatch;
49860+
49861+ filp = task->exec_file;
49862+ if (filp == NULL)
49863+ return 0;
49864+
49865+ /* the following is to apply the correct subject
49866+ on binaries running when the RBAC system
49867+ is enabled, when the binaries have been
49868+ replaced or deleted since their execution
49869+ -----
49870+ when the RBAC system starts, the inode/dev
49871+ from exec_file will be one the RBAC system
49872+ is unaware of. It only knows the inode/dev
49873+ of the present file on disk, or the absence
49874+ of it.
49875+ */
49876+ preempt_disable();
49877+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
49878+
49879+ nmatch = lookup_name_entry(tmpname);
49880+ preempt_enable();
49881+ tmpsubj = NULL;
49882+ if (nmatch) {
49883+ if (nmatch->deleted)
49884+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
49885+ else
49886+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
49887+ if (tmpsubj != NULL)
49888+ task->acl = tmpsubj;
49889+ }
49890+ if (tmpsubj == NULL)
49891+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
49892+ task->role);
49893+ if (task->acl) {
49894+ task->is_writable = 0;
49895+ /* ignore additional mmap checks for processes that are writable
49896+ by the default ACL */
49897+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49898+ if (unlikely(obj->mode & GR_WRITE))
49899+ task->is_writable = 1;
49900+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49901+ if (unlikely(obj->mode & GR_WRITE))
49902+ task->is_writable = 1;
49903+
49904+ gr_set_proc_res(task);
49905+
49906+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49907+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49908+#endif
49909+ } else {
49910+ return 1;
49911+ }
49912+
49913+ return 0;
49914+}
49915+
49916+int
49917+gr_set_acls(const int type)
49918+{
49919+ struct task_struct *task, *task2;
49920+ struct acl_role_label *role = current->role;
49921+ __u16 acl_role_id = current->acl_role_id;
49922+ const struct cred *cred;
49923+ int ret;
49924+
49925+ rcu_read_lock();
49926+ read_lock(&tasklist_lock);
49927+ read_lock(&grsec_exec_file_lock);
49928+ do_each_thread(task2, task) {
49929+ /* check to see if we're called from the exit handler,
49930+ if so, only replace ACLs that have inherited the admin
49931+ ACL */
49932+
49933+ if (type && (task->role != role ||
49934+ task->acl_role_id != acl_role_id))
49935+ continue;
49936+
49937+ task->acl_role_id = 0;
49938+ task->acl_sp_role = 0;
49939+
49940+ if (task->exec_file) {
49941+ cred = __task_cred(task);
49942+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
49943+
49944+ ret = gr_apply_subject_to_task(task);
49945+ if (ret) {
49946+ read_unlock(&grsec_exec_file_lock);
49947+ read_unlock(&tasklist_lock);
49948+ rcu_read_unlock();
49949+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
49950+ return ret;
49951+ }
49952+ } else {
49953+ // it's a kernel process
49954+ task->role = kernel_role;
49955+ task->acl = kernel_role->root_label;
49956+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
49957+ task->acl->mode &= ~GR_PROCFIND;
49958+#endif
49959+ }
49960+ } while_each_thread(task2, task);
49961+ read_unlock(&grsec_exec_file_lock);
49962+ read_unlock(&tasklist_lock);
49963+ rcu_read_unlock();
49964+
49965+ return 0;
49966+}
49967+
49968+void
49969+gr_learn_resource(const struct task_struct *task,
49970+ const int res, const unsigned long wanted, const int gt)
49971+{
49972+ struct acl_subject_label *acl;
49973+ const struct cred *cred;
49974+
49975+ if (unlikely((gr_status & GR_READY) &&
49976+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
49977+ goto skip_reslog;
49978+
49979+#ifdef CONFIG_GRKERNSEC_RESLOG
49980+ gr_log_resource(task, res, wanted, gt);
49981+#endif
49982+ skip_reslog:
49983+
49984+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
49985+ return;
49986+
49987+ acl = task->acl;
49988+
49989+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
49990+ !(acl->resmask & (1 << (unsigned short) res))))
49991+ return;
49992+
49993+ if (wanted >= acl->res[res].rlim_cur) {
49994+ unsigned long res_add;
49995+
49996+ res_add = wanted;
49997+ switch (res) {
49998+ case RLIMIT_CPU:
49999+ res_add += GR_RLIM_CPU_BUMP;
50000+ break;
50001+ case RLIMIT_FSIZE:
50002+ res_add += GR_RLIM_FSIZE_BUMP;
50003+ break;
50004+ case RLIMIT_DATA:
50005+ res_add += GR_RLIM_DATA_BUMP;
50006+ break;
50007+ case RLIMIT_STACK:
50008+ res_add += GR_RLIM_STACK_BUMP;
50009+ break;
50010+ case RLIMIT_CORE:
50011+ res_add += GR_RLIM_CORE_BUMP;
50012+ break;
50013+ case RLIMIT_RSS:
50014+ res_add += GR_RLIM_RSS_BUMP;
50015+ break;
50016+ case RLIMIT_NPROC:
50017+ res_add += GR_RLIM_NPROC_BUMP;
50018+ break;
50019+ case RLIMIT_NOFILE:
50020+ res_add += GR_RLIM_NOFILE_BUMP;
50021+ break;
50022+ case RLIMIT_MEMLOCK:
50023+ res_add += GR_RLIM_MEMLOCK_BUMP;
50024+ break;
50025+ case RLIMIT_AS:
50026+ res_add += GR_RLIM_AS_BUMP;
50027+ break;
50028+ case RLIMIT_LOCKS:
50029+ res_add += GR_RLIM_LOCKS_BUMP;
50030+ break;
50031+ case RLIMIT_SIGPENDING:
50032+ res_add += GR_RLIM_SIGPENDING_BUMP;
50033+ break;
50034+ case RLIMIT_MSGQUEUE:
50035+ res_add += GR_RLIM_MSGQUEUE_BUMP;
50036+ break;
50037+ case RLIMIT_NICE:
50038+ res_add += GR_RLIM_NICE_BUMP;
50039+ break;
50040+ case RLIMIT_RTPRIO:
50041+ res_add += GR_RLIM_RTPRIO_BUMP;
50042+ break;
50043+ case RLIMIT_RTTIME:
50044+ res_add += GR_RLIM_RTTIME_BUMP;
50045+ break;
50046+ }
50047+
50048+ acl->res[res].rlim_cur = res_add;
50049+
50050+ if (wanted > acl->res[res].rlim_max)
50051+ acl->res[res].rlim_max = res_add;
50052+
50053+ /* only log the subject filename, since resource logging is supported for
50054+ single-subject learning only */
50055+ rcu_read_lock();
50056+ cred = __task_cred(task);
50057+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50058+ task->role->roletype, cred->uid, cred->gid, acl->filename,
50059+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50060+ "", (unsigned long) res, &task->signal->saved_ip);
50061+ rcu_read_unlock();
50062+ }
50063+
50064+ return;
50065+}
50066+
50067+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50068+void
50069+pax_set_initial_flags(struct linux_binprm *bprm)
50070+{
50071+ struct task_struct *task = current;
50072+ struct acl_subject_label *proc;
50073+ unsigned long flags;
50074+
50075+ if (unlikely(!(gr_status & GR_READY)))
50076+ return;
50077+
50078+ flags = pax_get_flags(task);
50079+
50080+ proc = task->acl;
50081+
50082+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50083+ flags &= ~MF_PAX_PAGEEXEC;
50084+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50085+ flags &= ~MF_PAX_SEGMEXEC;
50086+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50087+ flags &= ~MF_PAX_RANDMMAP;
50088+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50089+ flags &= ~MF_PAX_EMUTRAMP;
50090+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50091+ flags &= ~MF_PAX_MPROTECT;
50092+
50093+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50094+ flags |= MF_PAX_PAGEEXEC;
50095+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50096+ flags |= MF_PAX_SEGMEXEC;
50097+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50098+ flags |= MF_PAX_RANDMMAP;
50099+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50100+ flags |= MF_PAX_EMUTRAMP;
50101+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50102+ flags |= MF_PAX_MPROTECT;
50103+
50104+ pax_set_flags(task, flags);
50105+
50106+ return;
50107+}
50108+#endif
50109+
50110+#ifdef CONFIG_SYSCTL
50111+/* Eric Biederman likes breaking userland ABI and every inode-based security
50112+ system to save 35kb of memory */
50113+
50114+/* we modify the passed in filename, but adjust it back before returning */
50115+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50116+{
50117+ struct name_entry *nmatch;
50118+ char *p, *lastp = NULL;
50119+ struct acl_object_label *obj = NULL, *tmp;
50120+ struct acl_subject_label *tmpsubj;
50121+ char c = '\0';
50122+
50123+ read_lock(&gr_inode_lock);
50124+
50125+ p = name + len - 1;
50126+ do {
50127+ nmatch = lookup_name_entry(name);
50128+ if (lastp != NULL)
50129+ *lastp = c;
50130+
50131+ if (nmatch == NULL)
50132+ goto next_component;
50133+ tmpsubj = current->acl;
50134+ do {
50135+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50136+ if (obj != NULL) {
50137+ tmp = obj->globbed;
50138+ while (tmp) {
50139+ if (!glob_match(tmp->filename, name)) {
50140+ obj = tmp;
50141+ goto found_obj;
50142+ }
50143+ tmp = tmp->next;
50144+ }
50145+ goto found_obj;
50146+ }
50147+ } while ((tmpsubj = tmpsubj->parent_subject));
50148+next_component:
50149+ /* end case */
50150+ if (p == name)
50151+ break;
50152+
50153+ while (*p != '/')
50154+ p--;
50155+ if (p == name)
50156+ lastp = p + 1;
50157+ else {
50158+ lastp = p;
50159+ p--;
50160+ }
50161+ c = *lastp;
50162+ *lastp = '\0';
50163+ } while (1);
50164+found_obj:
50165+ read_unlock(&gr_inode_lock);
50166+ /* obj returned will always be non-null */
50167+ return obj;
50168+}
50169+
50170+/* returns 0 when allowing, non-zero on error
50171+ op of 0 is used for readdir, so we don't log the names of hidden files
50172+*/
50173+__u32
50174+gr_handle_sysctl(const struct ctl_table *table, const int op)
50175+{
50176+ ctl_table *tmp;
50177+ const char *proc_sys = "/proc/sys";
50178+ char *path;
50179+ struct acl_object_label *obj;
50180+ unsigned short len = 0, pos = 0, depth = 0, i;
50181+ __u32 err = 0;
50182+ __u32 mode = 0;
50183+
50184+ if (unlikely(!(gr_status & GR_READY)))
50185+ return 0;
50186+
50187+ /* for now, ignore operations on non-sysctl entries if it's not a
50188+ readdir*/
50189+ if (table->child != NULL && op != 0)
50190+ return 0;
50191+
50192+ mode |= GR_FIND;
50193+ /* it's only a read if it's an entry, read on dirs is for readdir */
50194+ if (op & MAY_READ)
50195+ mode |= GR_READ;
50196+ if (op & MAY_WRITE)
50197+ mode |= GR_WRITE;
50198+
50199+ preempt_disable();
50200+
50201+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50202+
50203+ /* it's only a read/write if it's an actual entry, not a dir
50204+ (which are opened for readdir)
50205+ */
50206+
50207+ /* convert the requested sysctl entry into a pathname */
50208+
50209+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50210+ len += strlen(tmp->procname);
50211+ len++;
50212+ depth++;
50213+ }
50214+
50215+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50216+ /* deny */
50217+ goto out;
50218+ }
50219+
50220+ memset(path, 0, PAGE_SIZE);
50221+
50222+ memcpy(path, proc_sys, strlen(proc_sys));
50223+
50224+ pos += strlen(proc_sys);
50225+
50226+ for (; depth > 0; depth--) {
50227+ path[pos] = '/';
50228+ pos++;
50229+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50230+ if (depth == i) {
50231+ memcpy(path + pos, tmp->procname,
50232+ strlen(tmp->procname));
50233+ pos += strlen(tmp->procname);
50234+ }
50235+ i++;
50236+ }
50237+ }
50238+
50239+ obj = gr_lookup_by_name(path, pos);
50240+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50241+
50242+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50243+ ((err & mode) != mode))) {
50244+ __u32 new_mode = mode;
50245+
50246+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50247+
50248+ err = 0;
50249+ gr_log_learn_sysctl(path, new_mode);
50250+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50251+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50252+ err = -ENOENT;
50253+ } else if (!(err & GR_FIND)) {
50254+ err = -ENOENT;
50255+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50256+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50257+ path, (mode & GR_READ) ? " reading" : "",
50258+ (mode & GR_WRITE) ? " writing" : "");
50259+ err = -EACCES;
50260+ } else if ((err & mode) != mode) {
50261+ err = -EACCES;
50262+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50263+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50264+ path, (mode & GR_READ) ? " reading" : "",
50265+ (mode & GR_WRITE) ? " writing" : "");
50266+ err = 0;
50267+ } else
50268+ err = 0;
50269+
50270+ out:
50271+ preempt_enable();
50272+
50273+ return err;
50274+}
50275+#endif
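/*
 * A minimal userspace sketch of the two-pass ctl_table walk that
 * gr_handle_sysctl() above uses to rebuild the "/proc/sys/..." pathname:
 * one pass to measure, one pass to emit the components root-to-leaf.
 * The node type and names below are illustrative stand-ins, not
 * structures from the patch.
 */
#include <stdio.h>
#include <string.h>

struct node { const char *procname; struct node *parent; };

static void build_path(const struct node *leaf, char *buf, size_t buflen)
{
	size_t len = strlen("/proc/sys");
	unsigned int depth = 0, i;
	const struct node *tmp;

	for (tmp = leaf; tmp != NULL; tmp = tmp->parent) {	/* pass 1: measure */
		len += strlen(tmp->procname) + 1;		/* +1 for the '/' */
		depth++;
	}
	if (len + 1 > buflen)
		return;						/* too long: deny, as above */

	strcpy(buf, "/proc/sys");
	for (; depth > 0; depth--) {				/* pass 2: emit root-to-leaf */
		strcat(buf, "/");
		for (i = 1, tmp = leaf; tmp != NULL; tmp = tmp->parent, i++)
			if (i == depth)
				strcat(buf, tmp->procname);
	}
}

int main(void)
{
	struct node sys = { "kernel", NULL }, leaf = { "modules_disabled", &sys };
	char buf[256] = "";

	build_path(&leaf, buf, sizeof(buf));
	printf("%s\n", buf);	/* /proc/sys/kernel/modules_disabled */
	return 0;
}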
50276+
50277+int
50278+gr_handle_proc_ptrace(struct task_struct *task)
50279+{
50280+ struct file *filp;
50281+ struct task_struct *tmp = task;
50282+ struct task_struct *curtemp = current;
50283+ __u32 retmode;
50284+
50285+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50286+ if (unlikely(!(gr_status & GR_READY)))
50287+ return 0;
50288+#endif
50289+
50290+ read_lock(&tasklist_lock);
50291+ read_lock(&grsec_exec_file_lock);
50292+ filp = task->exec_file;
50293+
50294+ while (tmp->pid > 0) {
50295+ if (tmp == curtemp)
50296+ break;
50297+ tmp = tmp->real_parent;
50298+ }
50299+
50300+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50301+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50302+ read_unlock(&grsec_exec_file_lock);
50303+ read_unlock(&tasklist_lock);
50304+ return 1;
50305+ }
50306+
50307+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50308+ if (!(gr_status & GR_READY)) {
50309+ read_unlock(&grsec_exec_file_lock);
50310+ read_unlock(&tasklist_lock);
50311+ return 0;
50312+ }
50313+#endif
50314+
50315+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50316+ read_unlock(&grsec_exec_file_lock);
50317+ read_unlock(&tasklist_lock);
50318+
50319+ if (retmode & GR_NOPTRACE)
50320+ return 1;
50321+
50322+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
50323+ && (current->acl != task->acl || (current->acl != current->role->root_label
50324+ && current->pid != task->pid)))
50325+ return 1;
50326+
50327+ return 0;
50328+}
50329+
50330+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
50331+{
50332+ if (unlikely(!(gr_status & GR_READY)))
50333+ return;
50334+
50335+ if (!(current->role->roletype & GR_ROLE_GOD))
50336+ return;
50337+
50338+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
50339+ p->role->rolename, gr_task_roletype_to_char(p),
50340+ p->acl->filename);
50341+}
50342+
50343+int
50344+gr_handle_ptrace(struct task_struct *task, const long request)
50345+{
50346+ struct task_struct *tmp = task;
50347+ struct task_struct *curtemp = current;
50348+ __u32 retmode;
50349+
50350+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50351+ if (unlikely(!(gr_status & GR_READY)))
50352+ return 0;
50353+#endif
50354+
50355+ read_lock(&tasklist_lock);
50356+ while (tmp->pid > 0) {
50357+ if (tmp == curtemp)
50358+ break;
50359+ tmp = tmp->real_parent;
50360+ }
50361+
50362+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50363+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
50364+ read_unlock(&tasklist_lock);
50365+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50366+ return 1;
50367+ }
50368+ read_unlock(&tasklist_lock);
50369+
50370+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50371+ if (!(gr_status & GR_READY))
50372+ return 0;
50373+#endif
50374+
50375+ read_lock(&grsec_exec_file_lock);
50376+ if (unlikely(!task->exec_file)) {
50377+ read_unlock(&grsec_exec_file_lock);
50378+ return 0;
50379+ }
50380+
50381+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
50382+ read_unlock(&grsec_exec_file_lock);
50383+
50384+ if (retmode & GR_NOPTRACE) {
50385+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50386+ return 1;
50387+ }
50388+
50389+ if (retmode & GR_PTRACERD) {
50390+ switch (request) {
50391+ case PTRACE_POKETEXT:
50392+ case PTRACE_POKEDATA:
50393+ case PTRACE_POKEUSR:
50394+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
50395+ case PTRACE_SETREGS:
50396+ case PTRACE_SETFPREGS:
50397+#endif
50398+#ifdef CONFIG_X86
50399+ case PTRACE_SETFPXREGS:
50400+#endif
50401+#ifdef CONFIG_ALTIVEC
50402+ case PTRACE_SETVRREGS:
50403+#endif
50404+ return 1;
50405+ default:
50406+ return 0;
50407+ }
50408+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
50409+ !(current->role->roletype & GR_ROLE_GOD) &&
50410+ (current->acl != task->acl)) {
50411+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50412+ return 1;
50413+ }
50414+
50415+ return 0;
50416+}
50417+
50418+static int is_writable_mmap(const struct file *filp)
50419+{
50420+ struct task_struct *task = current;
50421+ struct acl_object_label *obj, *obj2;
50422+
50423+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
50424+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
50425+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50426+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
50427+ task->role->root_label);
50428+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
50429+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
50430+ return 1;
50431+ }
50432+ }
50433+ return 0;
50434+}
50435+
50436+int
50437+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
50438+{
50439+ __u32 mode;
50440+
50441+ if (unlikely(!file || !(prot & PROT_EXEC)))
50442+ return 1;
50443+
50444+ if (is_writable_mmap(file))
50445+ return 0;
50446+
50447+ mode =
50448+ gr_search_file(file->f_path.dentry,
50449+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50450+ file->f_path.mnt);
50451+
50452+ if (!gr_tpe_allow(file))
50453+ return 0;
50454+
50455+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50456+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50457+ return 0;
50458+ } else if (unlikely(!(mode & GR_EXEC))) {
50459+ return 0;
50460+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50461+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50462+ return 1;
50463+ }
50464+
50465+ return 1;
50466+}
50467+
50468+int
50469+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50470+{
50471+ __u32 mode;
50472+
50473+ if (unlikely(!file || !(prot & PROT_EXEC)))
50474+ return 1;
50475+
50476+ if (is_writable_mmap(file))
50477+ return 0;
50478+
50479+ mode =
50480+ gr_search_file(file->f_path.dentry,
50481+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50482+ file->f_path.mnt);
50483+
50484+ if (!gr_tpe_allow(file))
50485+ return 0;
50486+
50487+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50488+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50489+ return 0;
50490+ } else if (unlikely(!(mode & GR_EXEC))) {
50491+ return 0;
50492+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50493+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50494+ return 1;
50495+ }
50496+
50497+ return 1;
50498+}
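/*
 * gr_acl_handle_mmap() and gr_acl_handle_mprotect() above share one decision
 * ladder over the bits returned by gr_search_file(): no exec bit and no
 * suppress bit means a logged denial, no exec bit with suppress means a
 * silent denial, exec plus audit means a logged success, plain exec means a
 * quiet success.  A standalone sketch of that ladder; the bit values are
 * illustrative only, not the real GR_* flags from gracl.h.
 */
#include <stdio.h>

#define X_EXEC		0x1
#define X_AUDIT		0x2
#define X_SUPPRESS	0x4

static int decide(unsigned int mode, const char **log)
{
	*log = NULL;
	if (!(mode & X_EXEC) && !(mode & X_SUPPRESS)) {
		*log = "denied (logged)";
		return 0;
	}
	if (!(mode & X_EXEC))
		return 0;				/* suppressed denial */
	if (mode & X_AUDIT) {
		*log = "allowed (audited)";
		return 1;
	}
	return 1;					/* quiet success */
}

int main(void)
{
	unsigned int cases[] = { 0, X_SUPPRESS, X_EXEC, X_EXEC | X_AUDIT };
	unsigned int i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		const char *log;
		int ok = decide(cases[i], &log);

		printf("mode=0x%x -> %s %s\n", cases[i],
		       ok ? "allow" : "deny", log ? log : "(silent)");
	}
	return 0;
}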
50499+
50500+void
50501+gr_acl_handle_psacct(struct task_struct *task, const long code)
50502+{
50503+ unsigned long runtime;
50504+ unsigned long cputime;
50505+ unsigned int wday, cday;
50506+ __u8 whr, chr;
50507+ __u8 wmin, cmin;
50508+ __u8 wsec, csec;
50509+ struct timespec timeval;
50510+
50511+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
50512+ !(task->acl->mode & GR_PROCACCT)))
50513+ return;
50514+
50515+ do_posix_clock_monotonic_gettime(&timeval);
50516+ runtime = timeval.tv_sec - task->start_time.tv_sec;
50517+ wday = runtime / (3600 * 24);
50518+ runtime -= wday * (3600 * 24);
50519+ whr = runtime / 3600;
50520+ runtime -= whr * 3600;
50521+ wmin = runtime / 60;
50522+ runtime -= wmin * 60;
50523+ wsec = runtime;
50524+
50525+ cputime = (task->utime + task->stime) / HZ;
50526+ cday = cputime / (3600 * 24);
50527+ cputime -= cday * (3600 * 24);
50528+ chr = cputime / 3600;
50529+ cputime -= chr * 3600;
50530+ cmin = cputime / 60;
50531+ cputime -= cmin * 60;
50532+ csec = cputime;
50533+
50534+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
50535+
50536+ return;
50537+}
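/*
 * The process-accounting handler above reduces both wall-clock runtime and
 * consumed CPU time to days/hours/minutes/seconds by repeated division and
 * subtraction.  A short worked example of that arithmetic (standalone,
 * illustrative only):
 */
#include <stdio.h>

static void split_time(unsigned long secs, unsigned int *d, unsigned int *h,
			unsigned int *m, unsigned int *s)
{
	*d = secs / (3600 * 24);
	secs -= (unsigned long)*d * 3600 * 24;
	*h = secs / 3600;
	secs -= (unsigned long)*h * 3600;
	*m = secs / 60;
	secs -= (unsigned long)*m * 60;
	*s = secs;
}

int main(void)
{
	unsigned int d, h, m, s;

	split_time(93784UL, &d, &h, &m, &s);
	printf("%u-%02u:%02u:%02u\n", d, h, m, s);	/* 1-02:03:04 */
	return 0;
}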
50538+
50539+void gr_set_kernel_label(struct task_struct *task)
50540+{
50541+ if (gr_status & GR_READY) {
50542+ task->role = kernel_role;
50543+ task->acl = kernel_role->root_label;
50544+ }
50545+ return;
50546+}
50547+
50548+#ifdef CONFIG_TASKSTATS
50549+int gr_is_taskstats_denied(int pid)
50550+{
50551+ struct task_struct *task;
50552+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50553+ const struct cred *cred;
50554+#endif
50555+ int ret = 0;
50556+
50557+ /* restrict taskstats viewing to un-chrooted root users
50558+ who have the 'view' subject flag if the RBAC system is enabled
50559+ */
50560+
50561+ rcu_read_lock();
50562+ read_lock(&tasklist_lock);
50563+ task = find_task_by_vpid(pid);
50564+ if (task) {
50565+#ifdef CONFIG_GRKERNSEC_CHROOT
50566+ if (proc_is_chrooted(task))
50567+ ret = -EACCES;
50568+#endif
50569+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50570+ cred = __task_cred(task);
50571+#ifdef CONFIG_GRKERNSEC_PROC_USER
50572+ if (cred->uid != 0)
50573+ ret = -EACCES;
50574+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50575+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
50576+ ret = -EACCES;
50577+#endif
50578+#endif
50579+ if (gr_status & GR_READY) {
50580+ if (!(task->acl->mode & GR_VIEW))
50581+ ret = -EACCES;
50582+ }
50583+ } else
50584+ ret = -ENOENT;
50585+
50586+ read_unlock(&tasklist_lock);
50587+ rcu_read_unlock();
50588+
50589+ return ret;
50590+}
50591+#endif
50592+
50593+/* AUXV entries are filled via a descendant of search_binary_handler
50594+ after we've already applied the subject for the target
50595+*/
50596+int gr_acl_enable_at_secure(void)
50597+{
50598+ if (unlikely(!(gr_status & GR_READY)))
50599+ return 0;
50600+
50601+ if (current->acl->mode & GR_ATSECURE)
50602+ return 1;
50603+
50604+ return 0;
50605+}
50606+
50607+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
50608+{
50609+ struct task_struct *task = current;
50610+ struct dentry *dentry = file->f_path.dentry;
50611+ struct vfsmount *mnt = file->f_path.mnt;
50612+ struct acl_object_label *obj, *tmp;
50613+ struct acl_subject_label *subj;
50614+ unsigned int bufsize;
50615+ int is_not_root;
50616+ char *path;
50617+ dev_t dev = __get_dev(dentry);
50618+
50619+ if (unlikely(!(gr_status & GR_READY)))
50620+ return 1;
50621+
50622+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50623+ return 1;
50624+
50625+ /* ignore Eric Biederman */
50626+ if (IS_PRIVATE(dentry->d_inode))
50627+ return 1;
50628+
50629+ subj = task->acl;
50630+ do {
50631+ obj = lookup_acl_obj_label(ino, dev, subj);
50632+ if (obj != NULL)
50633+ return (obj->mode & GR_FIND) ? 1 : 0;
50634+ } while ((subj = subj->parent_subject));
50635+
50636+ /* this is purely an optimization since we're looking for an object
50637+	   for the directory we're doing a readdir on;
50638+ if it's possible for any globbed object to match the entry we're
50639+ filling into the directory, then the object we find here will be
50640+ an anchor point with attached globbed objects
50641+ */
50642+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
50643+ if (obj->globbed == NULL)
50644+ return (obj->mode & GR_FIND) ? 1 : 0;
50645+
50646+ is_not_root = ((obj->filename[0] == '/') &&
50647+ (obj->filename[1] == '\0')) ? 0 : 1;
50648+ bufsize = PAGE_SIZE - namelen - is_not_root;
50649+
50650+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
50651+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
50652+ return 1;
50653+
50654+ preempt_disable();
50655+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50656+ bufsize);
50657+
50658+ bufsize = strlen(path);
50659+
50660+ /* if base is "/", don't append an additional slash */
50661+ if (is_not_root)
50662+ *(path + bufsize) = '/';
50663+ memcpy(path + bufsize + is_not_root, name, namelen);
50664+ *(path + bufsize + namelen + is_not_root) = '\0';
50665+
50666+ tmp = obj->globbed;
50667+ while (tmp) {
50668+ if (!glob_match(tmp->filename, path)) {
50669+ preempt_enable();
50670+ return (tmp->mode & GR_FIND) ? 1 : 0;
50671+ }
50672+ tmp = tmp->next;
50673+ }
50674+ preempt_enable();
50675+ return (obj->mode & GR_FIND) ? 1 : 0;
50676+}
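/*
 * The bufsize check in gr_acl_handle_filldir() above folds two tests into one
 * unsigned comparison: (bufsize - 1) > (PAGE_SIZE - 1) is true both when
 * bufsize is zero (the subtraction wraps to UINT_MAX) and when bufsize
 * exceeds PAGE_SIZE.  A standalone illustration, using a stand-in page size:
 */
#include <stdio.h>

#define MY_PAGE_SIZE 4096u	/* illustrative stand-in for PAGE_SIZE */

static int bufsize_ok(unsigned int bufsize)
{
	return !((bufsize - 1) > (MY_PAGE_SIZE - 1));
}

int main(void)
{
	printf("%d %d %d %d\n",
	       bufsize_ok(0),			/* 0: wraps, rejected      */
	       bufsize_ok(1),			/* 1: accepted             */
	       bufsize_ok(MY_PAGE_SIZE),	/* == PAGE_SIZE: accepted  */
	       bufsize_ok(MY_PAGE_SIZE + 1));	/* too large: rejected     */
	return 0;
}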
50677+
50678+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
50679+EXPORT_SYMBOL(gr_acl_is_enabled);
50680+#endif
50681+EXPORT_SYMBOL(gr_learn_resource);
50682+EXPORT_SYMBOL(gr_set_kernel_label);
50683+#ifdef CONFIG_SECURITY
50684+EXPORT_SYMBOL(gr_check_user_change);
50685+EXPORT_SYMBOL(gr_check_group_change);
50686+#endif
50687+
50688diff -urNp linux-2.6.32.45/grsecurity/gracl_cap.c linux-2.6.32.45/grsecurity/gracl_cap.c
50689--- linux-2.6.32.45/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
50690+++ linux-2.6.32.45/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
50691@@ -0,0 +1,138 @@
50692+#include <linux/kernel.h>
50693+#include <linux/module.h>
50694+#include <linux/sched.h>
50695+#include <linux/gracl.h>
50696+#include <linux/grsecurity.h>
50697+#include <linux/grinternal.h>
50698+
50699+static const char *captab_log[] = {
50700+ "CAP_CHOWN",
50701+ "CAP_DAC_OVERRIDE",
50702+ "CAP_DAC_READ_SEARCH",
50703+ "CAP_FOWNER",
50704+ "CAP_FSETID",
50705+ "CAP_KILL",
50706+ "CAP_SETGID",
50707+ "CAP_SETUID",
50708+ "CAP_SETPCAP",
50709+ "CAP_LINUX_IMMUTABLE",
50710+ "CAP_NET_BIND_SERVICE",
50711+ "CAP_NET_BROADCAST",
50712+ "CAP_NET_ADMIN",
50713+ "CAP_NET_RAW",
50714+ "CAP_IPC_LOCK",
50715+ "CAP_IPC_OWNER",
50716+ "CAP_SYS_MODULE",
50717+ "CAP_SYS_RAWIO",
50718+ "CAP_SYS_CHROOT",
50719+ "CAP_SYS_PTRACE",
50720+ "CAP_SYS_PACCT",
50721+ "CAP_SYS_ADMIN",
50722+ "CAP_SYS_BOOT",
50723+ "CAP_SYS_NICE",
50724+ "CAP_SYS_RESOURCE",
50725+ "CAP_SYS_TIME",
50726+ "CAP_SYS_TTY_CONFIG",
50727+ "CAP_MKNOD",
50728+ "CAP_LEASE",
50729+ "CAP_AUDIT_WRITE",
50730+ "CAP_AUDIT_CONTROL",
50731+ "CAP_SETFCAP",
50732+ "CAP_MAC_OVERRIDE",
50733+ "CAP_MAC_ADMIN"
50734+};
50735+
50736+EXPORT_SYMBOL(gr_is_capable);
50737+EXPORT_SYMBOL(gr_is_capable_nolog);
50738+
50739+int
50740+gr_is_capable(const int cap)
50741+{
50742+ struct task_struct *task = current;
50743+ const struct cred *cred = current_cred();
50744+ struct acl_subject_label *curracl;
50745+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
50746+ kernel_cap_t cap_audit = __cap_empty_set;
50747+
50748+ if (!gr_acl_is_enabled())
50749+ return 1;
50750+
50751+ curracl = task->acl;
50752+
50753+ cap_drop = curracl->cap_lower;
50754+ cap_mask = curracl->cap_mask;
50755+ cap_audit = curracl->cap_invert_audit;
50756+
50757+ while ((curracl = curracl->parent_subject)) {
50758+ /* if the cap isn't specified in the current computed mask but is specified in the
50759+ current level subject, and is lowered in the current level subject, then add
50760+	   it to the set of dropped capabilities;
50761+ otherwise, add the current level subject's mask to the current computed mask
50762+ */
50763+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
50764+ cap_raise(cap_mask, cap);
50765+ if (cap_raised(curracl->cap_lower, cap))
50766+ cap_raise(cap_drop, cap);
50767+ if (cap_raised(curracl->cap_invert_audit, cap))
50768+ cap_raise(cap_audit, cap);
50769+ }
50770+ }
50771+
50772+ if (!cap_raised(cap_drop, cap)) {
50773+ if (cap_raised(cap_audit, cap))
50774+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
50775+ return 1;
50776+ }
50777+
50778+ curracl = task->acl;
50779+
50780+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
50781+ && cap_raised(cred->cap_effective, cap)) {
50782+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50783+ task->role->roletype, cred->uid,
50784+ cred->gid, task->exec_file ?
50785+ gr_to_filename(task->exec_file->f_path.dentry,
50786+ task->exec_file->f_path.mnt) : curracl->filename,
50787+ curracl->filename, 0UL,
50788+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
50789+ return 1;
50790+ }
50791+
50792+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
50793+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
50794+ return 0;
50795+}
50796+
50797+int
50798+gr_is_capable_nolog(const int cap)
50799+{
50800+ struct acl_subject_label *curracl;
50801+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
50802+
50803+ if (!gr_acl_is_enabled())
50804+ return 1;
50805+
50806+ curracl = current->acl;
50807+
50808+ cap_drop = curracl->cap_lower;
50809+ cap_mask = curracl->cap_mask;
50810+
50811+ while ((curracl = curracl->parent_subject)) {
50812+ /* if the cap isn't specified in the current computed mask but is specified in the
50813+ current level subject, and is lowered in the current level subject, then add
50814+ it to the set of dropped capabilities
50815+ otherwise, add the current level subject's mask to the current computed mask
50816+ */
50817+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
50818+ cap_raise(cap_mask, cap);
50819+ if (cap_raised(curracl->cap_lower, cap))
50820+ cap_raise(cap_drop, cap);
50821+ }
50822+ }
50823+
50824+ if (!cap_raised(cap_drop, cap))
50825+ return 1;
50826+
50827+ return 0;
50828+}
50829+
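/*
 * gr_is_capable() and gr_is_capable_nolog() above walk the parent_subject
 * chain so that the nearest subject which speaks for a capability decides
 * whether it is lowered.  A flattened, userspace sketch of that walk with
 * plain bitmasks; the struct and names are illustrative, not from the patch.
 */
#include <stdio.h>

struct level {
	unsigned int mask;		/* caps this level speaks for */
	unsigned int lower;		/* caps this level drops      */
	struct level *parent;
};

static int cap_allowed(const struct level *l, unsigned int cap)
{
	unsigned int mask = l->mask, drop = l->lower;

	while ((l = l->parent) != NULL) {
		if (!(mask & cap) && (l->mask & cap)) {
			mask |= cap;			/* nearest opinion wins */
			if (l->lower & cap)
				drop |= cap;
		}
	}
	return !(drop & cap);
}

int main(void)
{
	enum { CAP_A = 1, CAP_B = 2 };
	struct level root  = { CAP_A | CAP_B, CAP_B, NULL };	/* drops B        */
	struct level child = { CAP_A, 0, &root };		/* silent about B */

	printf("A: %d  B: %d\n",
	       cap_allowed(&child, CAP_A), cap_allowed(&child, CAP_B));
	/* A: 1  B: 0 -- B falls to the nearest ancestor that lowers it */
	return 0;
}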
50830diff -urNp linux-2.6.32.45/grsecurity/gracl_fs.c linux-2.6.32.45/grsecurity/gracl_fs.c
50831--- linux-2.6.32.45/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
50832+++ linux-2.6.32.45/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
50833@@ -0,0 +1,431 @@
50834+#include <linux/kernel.h>
50835+#include <linux/sched.h>
50836+#include <linux/types.h>
50837+#include <linux/fs.h>
50838+#include <linux/file.h>
50839+#include <linux/stat.h>
50840+#include <linux/grsecurity.h>
50841+#include <linux/grinternal.h>
50842+#include <linux/gracl.h>
50843+
50844+__u32
50845+gr_acl_handle_hidden_file(const struct dentry * dentry,
50846+ const struct vfsmount * mnt)
50847+{
50848+ __u32 mode;
50849+
50850+ if (unlikely(!dentry->d_inode))
50851+ return GR_FIND;
50852+
50853+ mode =
50854+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
50855+
50856+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
50857+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
50858+ return mode;
50859+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
50860+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
50861+ return 0;
50862+ } else if (unlikely(!(mode & GR_FIND)))
50863+ return 0;
50864+
50865+ return GR_FIND;
50866+}
50867+
50868+__u32
50869+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50870+ const int fmode)
50871+{
50872+ __u32 reqmode = GR_FIND;
50873+ __u32 mode;
50874+
50875+ if (unlikely(!dentry->d_inode))
50876+ return reqmode;
50877+
50878+ if (unlikely(fmode & O_APPEND))
50879+ reqmode |= GR_APPEND;
50880+ else if (unlikely(fmode & FMODE_WRITE))
50881+ reqmode |= GR_WRITE;
50882+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
50883+ reqmode |= GR_READ;
50884+ if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
50885+ reqmode &= ~GR_READ;
50886+ mode =
50887+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
50888+ mnt);
50889+
50890+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
50891+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
50892+ reqmode & GR_READ ? " reading" : "",
50893+ reqmode & GR_WRITE ? " writing" : reqmode &
50894+ GR_APPEND ? " appending" : "");
50895+ return reqmode;
50896+ } else
50897+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
50898+ {
50899+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
50900+ reqmode & GR_READ ? " reading" : "",
50901+ reqmode & GR_WRITE ? " writing" : reqmode &
50902+ GR_APPEND ? " appending" : "");
50903+ return 0;
50904+ } else if (unlikely((mode & reqmode) != reqmode))
50905+ return 0;
50906+
50907+ return reqmode;
50908+}
50909+
50910+__u32
50911+gr_acl_handle_creat(const struct dentry * dentry,
50912+ const struct dentry * p_dentry,
50913+ const struct vfsmount * p_mnt, const int fmode,
50914+ const int imode)
50915+{
50916+ __u32 reqmode = GR_WRITE | GR_CREATE;
50917+ __u32 mode;
50918+
50919+ if (unlikely(fmode & O_APPEND))
50920+ reqmode |= GR_APPEND;
50921+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
50922+ reqmode |= GR_READ;
50923+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
50924+ reqmode |= GR_SETID;
50925+
50926+ mode =
50927+ gr_check_create(dentry, p_dentry, p_mnt,
50928+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
50929+
50930+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
50931+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
50932+ reqmode & GR_READ ? " reading" : "",
50933+ reqmode & GR_WRITE ? " writing" : reqmode &
50934+ GR_APPEND ? " appending" : "");
50935+ return reqmode;
50936+ } else
50937+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
50938+ {
50939+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
50940+ reqmode & GR_READ ? " reading" : "",
50941+ reqmode & GR_WRITE ? " writing" : reqmode &
50942+ GR_APPEND ? " appending" : "");
50943+ return 0;
50944+ } else if (unlikely((mode & reqmode) != reqmode))
50945+ return 0;
50946+
50947+ return reqmode;
50948+}
50949+
50950+__u32
50951+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
50952+ const int fmode)
50953+{
50954+ __u32 mode, reqmode = GR_FIND;
50955+
50956+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
50957+ reqmode |= GR_EXEC;
50958+ if (fmode & S_IWOTH)
50959+ reqmode |= GR_WRITE;
50960+ if (fmode & S_IROTH)
50961+ reqmode |= GR_READ;
50962+
50963+ mode =
50964+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
50965+ mnt);
50966+
50967+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
50968+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
50969+ reqmode & GR_READ ? " reading" : "",
50970+ reqmode & GR_WRITE ? " writing" : "",
50971+ reqmode & GR_EXEC ? " executing" : "");
50972+ return reqmode;
50973+ } else
50974+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
50975+ {
50976+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
50977+ reqmode & GR_READ ? " reading" : "",
50978+ reqmode & GR_WRITE ? " writing" : "",
50979+ reqmode & GR_EXEC ? " executing" : "");
50980+ return 0;
50981+ } else if (unlikely((mode & reqmode) != reqmode))
50982+ return 0;
50983+
50984+ return reqmode;
50985+}
50986+
50987+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
50988+{
50989+ __u32 mode;
50990+
50991+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
50992+
50993+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
50994+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
50995+ return mode;
50996+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
50997+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
50998+ return 0;
50999+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51000+ return 0;
51001+
51002+ return (reqmode);
51003+}
51004+
51005+__u32
51006+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51007+{
51008+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51009+}
51010+
51011+__u32
51012+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51013+{
51014+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51015+}
51016+
51017+__u32
51018+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51019+{
51020+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51021+}
51022+
51023+__u32
51024+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51025+{
51026+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51027+}
51028+
51029+__u32
51030+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51031+ mode_t mode)
51032+{
51033+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51034+ return 1;
51035+
51036+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51037+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51038+ GR_FCHMOD_ACL_MSG);
51039+ } else {
51040+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51041+ }
51042+}
51043+
51044+__u32
51045+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51046+ mode_t mode)
51047+{
51048+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51049+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51050+ GR_CHMOD_ACL_MSG);
51051+ } else {
51052+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51053+ }
51054+}
51055+
51056+__u32
51057+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51058+{
51059+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51060+}
51061+
51062+__u32
51063+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51064+{
51065+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51066+}
51067+
51068+__u32
51069+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51070+{
51071+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51072+}
51073+
51074+__u32
51075+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51076+{
51077+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51078+ GR_UNIXCONNECT_ACL_MSG);
51079+}
51080+
51081+/* hardlinks require at minimum create permission,
51082+ any additional privilege required is based on the
51083+ privilege of the file being linked to
51084+*/
51085+__u32
51086+gr_acl_handle_link(const struct dentry * new_dentry,
51087+ const struct dentry * parent_dentry,
51088+ const struct vfsmount * parent_mnt,
51089+ const struct dentry * old_dentry,
51090+ const struct vfsmount * old_mnt, const char *to)
51091+{
51092+ __u32 mode;
51093+ __u32 needmode = GR_CREATE | GR_LINK;
51094+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51095+
51096+ mode =
51097+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51098+ old_mnt);
51099+
51100+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51101+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51102+ return mode;
51103+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51104+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51105+ return 0;
51106+ } else if (unlikely((mode & needmode) != needmode))
51107+ return 0;
51108+
51109+ return 1;
51110+}
51111+
51112+__u32
51113+gr_acl_handle_symlink(const struct dentry * new_dentry,
51114+ const struct dentry * parent_dentry,
51115+ const struct vfsmount * parent_mnt, const char *from)
51116+{
51117+ __u32 needmode = GR_WRITE | GR_CREATE;
51118+ __u32 mode;
51119+
51120+ mode =
51121+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
51122+ GR_CREATE | GR_AUDIT_CREATE |
51123+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51124+
51125+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51126+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51127+ return mode;
51128+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51129+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51130+ return 0;
51131+ } else if (unlikely((mode & needmode) != needmode))
51132+ return 0;
51133+
51134+ return (GR_WRITE | GR_CREATE);
51135+}
51136+
51137+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51138+{
51139+ __u32 mode;
51140+
51141+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51142+
51143+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51144+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51145+ return mode;
51146+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51147+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51148+ return 0;
51149+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51150+ return 0;
51151+
51152+ return (reqmode);
51153+}
51154+
51155+__u32
51156+gr_acl_handle_mknod(const struct dentry * new_dentry,
51157+ const struct dentry * parent_dentry,
51158+ const struct vfsmount * parent_mnt,
51159+ const int mode)
51160+{
51161+ __u32 reqmode = GR_WRITE | GR_CREATE;
51162+ if (unlikely(mode & (S_ISUID | S_ISGID)))
51163+ reqmode |= GR_SETID;
51164+
51165+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51166+ reqmode, GR_MKNOD_ACL_MSG);
51167+}
51168+
51169+__u32
51170+gr_acl_handle_mkdir(const struct dentry *new_dentry,
51171+ const struct dentry *parent_dentry,
51172+ const struct vfsmount *parent_mnt)
51173+{
51174+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51175+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51176+}
51177+
51178+#define RENAME_CHECK_SUCCESS(old, new) \
51179+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51180+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51181+
51182+int
51183+gr_acl_handle_rename(struct dentry *new_dentry,
51184+ struct dentry *parent_dentry,
51185+ const struct vfsmount *parent_mnt,
51186+ struct dentry *old_dentry,
51187+ struct inode *old_parent_inode,
51188+ struct vfsmount *old_mnt, const char *newname)
51189+{
51190+ __u32 comp1, comp2;
51191+ int error = 0;
51192+
51193+ if (unlikely(!gr_acl_is_enabled()))
51194+ return 0;
51195+
51196+ if (!new_dentry->d_inode) {
51197+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51198+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51199+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51200+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51201+ GR_DELETE | GR_AUDIT_DELETE |
51202+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51203+ GR_SUPPRESS, old_mnt);
51204+ } else {
51205+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51206+ GR_CREATE | GR_DELETE |
51207+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51208+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51209+ GR_SUPPRESS, parent_mnt);
51210+ comp2 =
51211+ gr_search_file(old_dentry,
51212+ GR_READ | GR_WRITE | GR_AUDIT_READ |
51213+ GR_DELETE | GR_AUDIT_DELETE |
51214+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51215+ }
51216+
51217+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51218+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51219+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51220+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51221+ && !(comp2 & GR_SUPPRESS)) {
51222+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51223+ error = -EACCES;
51224+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51225+ error = -EACCES;
51226+
51227+ return error;
51228+}
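/*
 * RENAME_CHECK_SUCCESS above only passes when both the source lookup and the
 * destination lookup granted read and write.  A tiny standalone evaluation of
 * the same shape; R and W are illustrative bits, not the real GR_* values.
 */
#include <stdio.h>

#define R 0x1
#define W 0x2

#define RENAME_OK(old, new) \
	((((old) & (R | W)) == (R | W)) && (((new) & (R | W)) == (R | W)))

int main(void)
{
	printf("%d\n", RENAME_OK(R | W, R | W));	/* 1: both sides granted      */
	printf("%d\n", RENAME_OK(R | W, R));		/* 0: destination lacks write */
	return 0;
}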
51229+
51230+void
51231+gr_acl_handle_exit(void)
51232+{
51233+ u16 id;
51234+ char *rolename;
51235+ struct file *exec_file;
51236+
51237+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51238+ !(current->role->roletype & GR_ROLE_PERSIST))) {
51239+ id = current->acl_role_id;
51240+ rolename = current->role->rolename;
51241+ gr_set_acls(1);
51242+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51243+ }
51244+
51245+ write_lock(&grsec_exec_file_lock);
51246+ exec_file = current->exec_file;
51247+ current->exec_file = NULL;
51248+ write_unlock(&grsec_exec_file_lock);
51249+
51250+ if (exec_file)
51251+ fput(exec_file);
51252+}
51253+
51254+int
51255+gr_acl_handle_procpidmem(const struct task_struct *task)
51256+{
51257+ if (unlikely(!gr_acl_is_enabled()))
51258+ return 0;
51259+
51260+ if (task != current && task->acl->mode & GR_PROTPROCFD)
51261+ return -EACCES;
51262+
51263+ return 0;
51264+}
51265diff -urNp linux-2.6.32.45/grsecurity/gracl_ip.c linux-2.6.32.45/grsecurity/gracl_ip.c
51266--- linux-2.6.32.45/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51267+++ linux-2.6.32.45/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
51268@@ -0,0 +1,382 @@
51269+#include <linux/kernel.h>
51270+#include <asm/uaccess.h>
51271+#include <asm/errno.h>
51272+#include <net/sock.h>
51273+#include <linux/file.h>
51274+#include <linux/fs.h>
51275+#include <linux/net.h>
51276+#include <linux/in.h>
51277+#include <linux/skbuff.h>
51278+#include <linux/ip.h>
51279+#include <linux/udp.h>
51280+#include <linux/smp_lock.h>
51281+#include <linux/types.h>
51282+#include <linux/sched.h>
51283+#include <linux/netdevice.h>
51284+#include <linux/inetdevice.h>
51285+#include <linux/gracl.h>
51286+#include <linux/grsecurity.h>
51287+#include <linux/grinternal.h>
51288+
51289+#define GR_BIND 0x01
51290+#define GR_CONNECT 0x02
51291+#define GR_INVERT 0x04
51292+#define GR_BINDOVERRIDE 0x08
51293+#define GR_CONNECTOVERRIDE 0x10
51294+#define GR_SOCK_FAMILY 0x20
51295+
51296+static const char * gr_protocols[IPPROTO_MAX] = {
51297+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51298+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51299+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51300+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51301+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51302+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51303+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51304+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51305+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51306+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51307+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51308+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51309+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51310+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51311+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51312+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51313+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
51314+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51315+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51316+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51317+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51318+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51319+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51320+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51321+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51322+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51323+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51324+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51325+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51326+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51327+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51328+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51329+ };
51330+
51331+static const char * gr_socktypes[SOCK_MAX] = {
51332+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51333+ "unknown:7", "unknown:8", "unknown:9", "packet"
51334+ };
51335+
51336+static const char * gr_sockfamilies[AF_MAX+1] = {
51337+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51338+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51339+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51340+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
51341+ };
51342+
51343+const char *
51344+gr_proto_to_name(unsigned char proto)
51345+{
51346+ return gr_protocols[proto];
51347+}
51348+
51349+const char *
51350+gr_socktype_to_name(unsigned char type)
51351+{
51352+ return gr_socktypes[type];
51353+}
51354+
51355+const char *
51356+gr_sockfamily_to_name(unsigned char family)
51357+{
51358+ return gr_sockfamilies[family];
51359+}
51360+
51361+int
51362+gr_search_socket(const int domain, const int type, const int protocol)
51363+{
51364+ struct acl_subject_label *curr;
51365+ const struct cred *cred = current_cred();
51366+
51367+ if (unlikely(!gr_acl_is_enabled()))
51368+ goto exit;
51369+
51370+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
51371+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
51372+ goto exit; // let the kernel handle it
51373+
51374+ curr = current->acl;
51375+
51376+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
51377+ /* the family is allowed, if this is PF_INET allow it only if
51378+ the extra sock type/protocol checks pass */
51379+ if (domain == PF_INET)
51380+ goto inet_check;
51381+ goto exit;
51382+ } else {
51383+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51384+ __u32 fakeip = 0;
51385+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51386+ current->role->roletype, cred->uid,
51387+ cred->gid, current->exec_file ?
51388+ gr_to_filename(current->exec_file->f_path.dentry,
51389+ current->exec_file->f_path.mnt) :
51390+ curr->filename, curr->filename,
51391+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
51392+ &current->signal->saved_ip);
51393+ goto exit;
51394+ }
51395+ goto exit_fail;
51396+ }
51397+
51398+inet_check:
51399+ /* the rest of this checking is for IPv4 only */
51400+ if (!curr->ips)
51401+ goto exit;
51402+
51403+ if ((curr->ip_type & (1 << type)) &&
51404+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
51405+ goto exit;
51406+
51407+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51408+	/* we don't place acls on raw sockets, and sometimes
51409+ dgram/ip sockets are opened for ioctl and not
51410+ bind/connect, so we'll fake a bind learn log */
51411+ if (type == SOCK_RAW || type == SOCK_PACKET) {
51412+ __u32 fakeip = 0;
51413+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51414+ current->role->roletype, cred->uid,
51415+ cred->gid, current->exec_file ?
51416+ gr_to_filename(current->exec_file->f_path.dentry,
51417+ current->exec_file->f_path.mnt) :
51418+ curr->filename, curr->filename,
51419+ &fakeip, 0, type,
51420+ protocol, GR_CONNECT, &current->signal->saved_ip);
51421+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
51422+ __u32 fakeip = 0;
51423+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51424+ current->role->roletype, cred->uid,
51425+ cred->gid, current->exec_file ?
51426+ gr_to_filename(current->exec_file->f_path.dentry,
51427+ current->exec_file->f_path.mnt) :
51428+ curr->filename, curr->filename,
51429+ &fakeip, 0, type,
51430+ protocol, GR_BIND, &current->signal->saved_ip);
51431+ }
51432+ /* we'll log when they use connect or bind */
51433+ goto exit;
51434+ }
51435+
51436+exit_fail:
51437+ if (domain == PF_INET)
51438+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
51439+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
51440+ else
51441+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
51442+ gr_socktype_to_name(type), protocol);
51443+
51444+ return 0;
51445+exit:
51446+ return 1;
51447+}
51448+
51449+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
51450+{
51451+ if ((ip->mode & mode) &&
51452+ (ip_port >= ip->low) &&
51453+ (ip_port <= ip->high) &&
51454+ ((ntohl(ip_addr) & our_netmask) ==
51455+ (ntohl(our_addr) & our_netmask))
51456+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
51457+ && (ip->type & (1 << type))) {
51458+ if (ip->mode & GR_INVERT)
51459+ return 2; // specifically denied
51460+ else
51461+ return 1; // allowed
51462+ }
51463+
51464+ return 0; // not specifically allowed, may continue parsing
51465+}
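/*
 * check_ip_policy() above answers with three values: 0 (the rule has no
 * opinion, keep scanning), 1 (explicitly allowed) or 2 (a GR_INVERT rule,
 * explicitly denied), and gr_search_connectbind() stops at the first
 * non-zero answer.  A standalone sketch of that first-match loop over a
 * simplified, port-only rule type (names are illustrative):
 */
#include <stdio.h>

enum verdict { NO_OPINION = 0, ALLOWED = 1, DENIED = 2 };

struct rule {
	unsigned short lo, hi;		/* port range this rule covers */
	int invert;			/* 1 = matching means deny     */
};

static enum verdict check_rule(const struct rule *r, unsigned short port)
{
	if (port < r->lo || port > r->hi)
		return NO_OPINION;
	return r->invert ? DENIED : ALLOWED;
}

static int port_allowed(const struct rule *rules, int n, unsigned short port)
{
	int i;

	for (i = 0; i < n; i++) {
		switch (check_rule(&rules[i], port)) {
		case ALLOWED:
			return 1;
		case DENIED:
			return 0;
		case NO_OPINION:
			break;
		}
	}
	return 0;			/* not specifically allowed -> deny */
}

int main(void)
{
	struct rule rules[] = {
		{ 22, 22, 1 },		/* deny port 22          */
		{ 1, 1023, 0 },		/* allow other low ports */
	};

	printf("22: %d  80: %d  8080: %d\n",
	       port_allowed(rules, 2, 22),
	       port_allowed(rules, 2, 80),
	       port_allowed(rules, 2, 8080));	/* 0  1  0 */
	return 0;
}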
51466+
51467+static int
51468+gr_search_connectbind(const int full_mode, struct sock *sk,
51469+ struct sockaddr_in *addr, const int type)
51470+{
51471+ char iface[IFNAMSIZ] = {0};
51472+ struct acl_subject_label *curr;
51473+ struct acl_ip_label *ip;
51474+ struct inet_sock *isk;
51475+ struct net_device *dev;
51476+ struct in_device *idev;
51477+ unsigned long i;
51478+ int ret;
51479+ int mode = full_mode & (GR_BIND | GR_CONNECT);
51480+ __u32 ip_addr = 0;
51481+ __u32 our_addr;
51482+ __u32 our_netmask;
51483+ char *p;
51484+ __u16 ip_port = 0;
51485+ const struct cred *cred = current_cred();
51486+
51487+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
51488+ return 0;
51489+
51490+ curr = current->acl;
51491+ isk = inet_sk(sk);
51492+
51493+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
51494+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
51495+ addr->sin_addr.s_addr = curr->inaddr_any_override;
51496+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
51497+ struct sockaddr_in saddr;
51498+ int err;
51499+
51500+ saddr.sin_family = AF_INET;
51501+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
51502+ saddr.sin_port = isk->sport;
51503+
51504+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51505+ if (err)
51506+ return err;
51507+
51508+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51509+ if (err)
51510+ return err;
51511+ }
51512+
51513+ if (!curr->ips)
51514+ return 0;
51515+
51516+ ip_addr = addr->sin_addr.s_addr;
51517+ ip_port = ntohs(addr->sin_port);
51518+
51519+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51520+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51521+ current->role->roletype, cred->uid,
51522+ cred->gid, current->exec_file ?
51523+ gr_to_filename(current->exec_file->f_path.dentry,
51524+ current->exec_file->f_path.mnt) :
51525+ curr->filename, curr->filename,
51526+ &ip_addr, ip_port, type,
51527+ sk->sk_protocol, mode, &current->signal->saved_ip);
51528+ return 0;
51529+ }
51530+
51531+ for (i = 0; i < curr->ip_num; i++) {
51532+ ip = *(curr->ips + i);
51533+ if (ip->iface != NULL) {
51534+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
51535+ p = strchr(iface, ':');
51536+ if (p != NULL)
51537+ *p = '\0';
51538+ dev = dev_get_by_name(sock_net(sk), iface);
51539+ if (dev == NULL)
51540+ continue;
51541+ idev = in_dev_get(dev);
51542+ if (idev == NULL) {
51543+ dev_put(dev);
51544+ continue;
51545+ }
51546+ rcu_read_lock();
51547+ for_ifa(idev) {
51548+ if (!strcmp(ip->iface, ifa->ifa_label)) {
51549+ our_addr = ifa->ifa_address;
51550+ our_netmask = 0xffffffff;
51551+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51552+ if (ret == 1) {
51553+ rcu_read_unlock();
51554+ in_dev_put(idev);
51555+ dev_put(dev);
51556+ return 0;
51557+ } else if (ret == 2) {
51558+ rcu_read_unlock();
51559+ in_dev_put(idev);
51560+ dev_put(dev);
51561+ goto denied;
51562+ }
51563+ }
51564+ } endfor_ifa(idev);
51565+ rcu_read_unlock();
51566+ in_dev_put(idev);
51567+ dev_put(dev);
51568+ } else {
51569+ our_addr = ip->addr;
51570+ our_netmask = ip->netmask;
51571+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51572+ if (ret == 1)
51573+ return 0;
51574+ else if (ret == 2)
51575+ goto denied;
51576+ }
51577+ }
51578+
51579+denied:
51580+ if (mode == GR_BIND)
51581+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51582+ else if (mode == GR_CONNECT)
51583+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51584+
51585+ return -EACCES;
51586+}
51587+
51588+int
51589+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
51590+{
51591+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
51592+}
51593+
51594+int
51595+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
51596+{
51597+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
51598+}
51599+
51600+int gr_search_listen(struct socket *sock)
51601+{
51602+ struct sock *sk = sock->sk;
51603+ struct sockaddr_in addr;
51604+
51605+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51606+ addr.sin_port = inet_sk(sk)->sport;
51607+
51608+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51609+}
51610+
51611+int gr_search_accept(struct socket *sock)
51612+{
51613+ struct sock *sk = sock->sk;
51614+ struct sockaddr_in addr;
51615+
51616+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51617+ addr.sin_port = inet_sk(sk)->sport;
51618+
51619+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51620+}
51621+
51622+int
51623+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
51624+{
51625+ if (addr)
51626+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
51627+ else {
51628+ struct sockaddr_in sin;
51629+ const struct inet_sock *inet = inet_sk(sk);
51630+
51631+ sin.sin_addr.s_addr = inet->daddr;
51632+ sin.sin_port = inet->dport;
51633+
51634+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51635+ }
51636+}
51637+
51638+int
51639+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
51640+{
51641+ struct sockaddr_in sin;
51642+
51643+ if (unlikely(skb->len < sizeof (struct udphdr)))
51644+ return 0; // skip this packet
51645+
51646+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
51647+ sin.sin_port = udp_hdr(skb)->source;
51648+
51649+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51650+}
51651diff -urNp linux-2.6.32.45/grsecurity/gracl_learn.c linux-2.6.32.45/grsecurity/gracl_learn.c
51652--- linux-2.6.32.45/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
51653+++ linux-2.6.32.45/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
51654@@ -0,0 +1,208 @@
51655+#include <linux/kernel.h>
51656+#include <linux/mm.h>
51657+#include <linux/sched.h>
51658+#include <linux/poll.h>
51659+#include <linux/smp_lock.h>
51660+#include <linux/string.h>
51661+#include <linux/file.h>
51662+#include <linux/types.h>
51663+#include <linux/vmalloc.h>
51664+#include <linux/grinternal.h>
51665+
51666+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
51667+ size_t count, loff_t *ppos);
51668+extern int gr_acl_is_enabled(void);
51669+
51670+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
51671+static int gr_learn_attached;
51672+
51673+/* use a 512k buffer */
51674+#define LEARN_BUFFER_SIZE (512 * 1024)
51675+
51676+static DEFINE_SPINLOCK(gr_learn_lock);
51677+static DEFINE_MUTEX(gr_learn_user_mutex);
51678+
51679+/* we need to maintain two buffers, so that the kernel context of grlearn
51680+   uses a mutex around the userspace copying, and the other kernel contexts
51681+ use a spinlock when copying into the buffer, since they cannot sleep
51682+*/
51683+static char *learn_buffer;
51684+static char *learn_buffer_user;
51685+static int learn_buffer_len;
51686+static int learn_buffer_user_len;
51687+
51688+static ssize_t
51689+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
51690+{
51691+ DECLARE_WAITQUEUE(wait, current);
51692+ ssize_t retval = 0;
51693+
51694+ add_wait_queue(&learn_wait, &wait);
51695+ set_current_state(TASK_INTERRUPTIBLE);
51696+ do {
51697+ mutex_lock(&gr_learn_user_mutex);
51698+ spin_lock(&gr_learn_lock);
51699+ if (learn_buffer_len)
51700+ break;
51701+ spin_unlock(&gr_learn_lock);
51702+ mutex_unlock(&gr_learn_user_mutex);
51703+ if (file->f_flags & O_NONBLOCK) {
51704+ retval = -EAGAIN;
51705+ goto out;
51706+ }
51707+ if (signal_pending(current)) {
51708+ retval = -ERESTARTSYS;
51709+ goto out;
51710+ }
51711+
51712+ schedule();
51713+ } while (1);
51714+
51715+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
51716+ learn_buffer_user_len = learn_buffer_len;
51717+ retval = learn_buffer_len;
51718+ learn_buffer_len = 0;
51719+
51720+ spin_unlock(&gr_learn_lock);
51721+
51722+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
51723+ retval = -EFAULT;
51724+
51725+ mutex_unlock(&gr_learn_user_mutex);
51726+out:
51727+ set_current_state(TASK_RUNNING);
51728+ remove_wait_queue(&learn_wait, &wait);
51729+ return retval;
51730+}
51731+
51732+static unsigned int
51733+poll_learn(struct file * file, poll_table * wait)
51734+{
51735+ poll_wait(file, &learn_wait, wait);
51736+
51737+ if (learn_buffer_len)
51738+ return (POLLIN | POLLRDNORM);
51739+
51740+ return 0;
51741+}
51742+
51743+void
51744+gr_clear_learn_entries(void)
51745+{
51746+ char *tmp;
51747+
51748+ mutex_lock(&gr_learn_user_mutex);
51749+ spin_lock(&gr_learn_lock);
51750+ tmp = learn_buffer;
51751+ learn_buffer = NULL;
51752+ spin_unlock(&gr_learn_lock);
51753+ if (tmp)
51754+ vfree(tmp);
51755+ if (learn_buffer_user != NULL) {
51756+ vfree(learn_buffer_user);
51757+ learn_buffer_user = NULL;
51758+ }
51759+ learn_buffer_len = 0;
51760+ mutex_unlock(&gr_learn_user_mutex);
51761+
51762+ return;
51763+}
51764+
51765+void
51766+gr_add_learn_entry(const char *fmt, ...)
51767+{
51768+ va_list args;
51769+ unsigned int len;
51770+
51771+ if (!gr_learn_attached)
51772+ return;
51773+
51774+ spin_lock(&gr_learn_lock);
51775+
51776+ /* leave a gap at the end so we know when it's "full" but don't have to
51777+ compute the exact length of the string we're trying to append
51778+ */
51779+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
51780+ spin_unlock(&gr_learn_lock);
51781+ wake_up_interruptible(&learn_wait);
51782+ return;
51783+ }
51784+ if (learn_buffer == NULL) {
51785+ spin_unlock(&gr_learn_lock);
51786+ return;
51787+ }
51788+
51789+ va_start(args, fmt);
51790+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
51791+ va_end(args);
51792+
51793+ learn_buffer_len += len + 1;
51794+
51795+ spin_unlock(&gr_learn_lock);
51796+ wake_up_interruptible(&learn_wait);
51797+
51798+ return;
51799+}
51800+
51801+static int
51802+open_learn(struct inode *inode, struct file *file)
51803+{
51804+ if (file->f_mode & FMODE_READ && gr_learn_attached)
51805+ return -EBUSY;
51806+ if (file->f_mode & FMODE_READ) {
51807+ int retval = 0;
51808+ mutex_lock(&gr_learn_user_mutex);
51809+ if (learn_buffer == NULL)
51810+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
51811+ if (learn_buffer_user == NULL)
51812+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
51813+ if (learn_buffer == NULL) {
51814+ retval = -ENOMEM;
51815+ goto out_error;
51816+ }
51817+ if (learn_buffer_user == NULL) {
51818+ retval = -ENOMEM;
51819+ goto out_error;
51820+ }
51821+ learn_buffer_len = 0;
51822+ learn_buffer_user_len = 0;
51823+ gr_learn_attached = 1;
51824+out_error:
51825+ mutex_unlock(&gr_learn_user_mutex);
51826+ return retval;
51827+ }
51828+ return 0;
51829+}
51830+
51831+static int
51832+close_learn(struct inode *inode, struct file *file)
51833+{
51834+ if (file->f_mode & FMODE_READ) {
51835+ char *tmp = NULL;
51836+ mutex_lock(&gr_learn_user_mutex);
51837+ spin_lock(&gr_learn_lock);
51838+ tmp = learn_buffer;
51839+ learn_buffer = NULL;
51840+ spin_unlock(&gr_learn_lock);
51841+ if (tmp)
51842+ vfree(tmp);
51843+ if (learn_buffer_user != NULL) {
51844+ vfree(learn_buffer_user);
51845+ learn_buffer_user = NULL;
51846+ }
51847+ learn_buffer_len = 0;
51848+ learn_buffer_user_len = 0;
51849+ gr_learn_attached = 0;
51850+ mutex_unlock(&gr_learn_user_mutex);
51851+ }
51852+
51853+ return 0;
51854+}
51855+
51856+const struct file_operations grsec_fops = {
51857+ .read = read_learn,
51858+ .write = write_grsec_handler,
51859+ .open = open_learn,
51860+ .release = close_learn,
51861+ .poll = poll_learn,
51862+};
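
The gracl_learn.c hunk above hands learn entries from contexts that cannot sleep (spinlock writers) to a single reader that copies them out to userspace under a mutex. Below is a minimal user-space sketch of that two-buffer hand-off, assuming pthreads; the file name learn_demo.c, the buffer size and the sample entries are illustrative stand-ins, not part of the patch.

/* learn_demo.c - user-space sketch of the two-buffer hand-off used by
 * gracl_learn.c: producers append under a short critical section, the
 * single reader snapshots the buffer and does the slow copy outside it.
 * Illustrative only.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 4096

static char learn_buffer[BUF_SIZE];
static char learn_buffer_user[BUF_SIZE];
static size_t learn_len;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;     /* plays the spinlock's role  */
static pthread_cond_t data_ready = PTHREAD_COND_INITIALIZER; /* plays the wait queue's role */

static void add_entry(const char *line)
{
    pthread_mutex_lock(&lock);
    /* leave slack so we never have to measure the string first */
    if (learn_len < BUF_SIZE - 256)
        learn_len += snprintf(learn_buffer + learn_len,
                              BUF_SIZE - learn_len, "%s\n", line);
    pthread_mutex_unlock(&lock);
    pthread_cond_signal(&data_ready);
}

static void *reader(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (learn_len == 0)
        pthread_cond_wait(&data_ready, &lock);
    /* snapshot, then drop the lock before the "slow" copy out */
    size_t n = learn_len;
    memcpy(learn_buffer_user, learn_buffer, n);
    learn_len = 0;
    pthread_mutex_unlock(&lock);
    fwrite(learn_buffer_user, 1, n, stdout);
    return NULL;
}

int main(void)
{
    pthread_t t;

    add_entry("subject /usr/bin/foo o");
    add_entry("subject /usr/bin/bar o");
    pthread_create(&t, NULL, reader, NULL);
    pthread_join(t, NULL);
    return 0;
}

Compile with "gcc -pthread learn_demo.c". The snapshot-then-copy structure mirrors why read_learn() drops gr_learn_lock before calling copy_to_user(): the copy may sleep, so it cannot run under the spinlock.
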
51863diff -urNp linux-2.6.32.45/grsecurity/gracl_res.c linux-2.6.32.45/grsecurity/gracl_res.c
51864--- linux-2.6.32.45/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
51865+++ linux-2.6.32.45/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
51866@@ -0,0 +1,67 @@
51867+#include <linux/kernel.h>
51868+#include <linux/sched.h>
51869+#include <linux/gracl.h>
51870+#include <linux/grinternal.h>
51871+
51872+static const char *restab_log[] = {
51873+ [RLIMIT_CPU] = "RLIMIT_CPU",
51874+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
51875+ [RLIMIT_DATA] = "RLIMIT_DATA",
51876+ [RLIMIT_STACK] = "RLIMIT_STACK",
51877+ [RLIMIT_CORE] = "RLIMIT_CORE",
51878+ [RLIMIT_RSS] = "RLIMIT_RSS",
51879+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
51880+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
51881+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
51882+ [RLIMIT_AS] = "RLIMIT_AS",
51883+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
51884+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
51885+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
51886+ [RLIMIT_NICE] = "RLIMIT_NICE",
51887+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
51888+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
51889+ [GR_CRASH_RES] = "RLIMIT_CRASH"
51890+};
51891+
51892+void
51893+gr_log_resource(const struct task_struct *task,
51894+ const int res, const unsigned long wanted, const int gt)
51895+{
51896+ const struct cred *cred;
51897+ unsigned long rlim;
51898+
51899+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
51900+ return;
51901+
51902+ // not yet supported resource
51903+ if (unlikely(!restab_log[res]))
51904+ return;
51905+
51906+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
51907+ rlim = task->signal->rlim[res].rlim_max;
51908+ else
51909+ rlim = task->signal->rlim[res].rlim_cur;
51910+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
51911+ return;
51912+
51913+ rcu_read_lock();
51914+ cred = __task_cred(task);
51915+
51916+ if (res == RLIMIT_NPROC &&
51917+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
51918+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
51919+ goto out_rcu_unlock;
51920+ else if (res == RLIMIT_MEMLOCK &&
51921+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
51922+ goto out_rcu_unlock;
51923+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
51924+ goto out_rcu_unlock;
51925+ rcu_read_unlock();
51926+
51927+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
51928+
51929+ return;
51930+out_rcu_unlock:
51931+ rcu_read_unlock();
51932+ return;
51933+}
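
A compact sketch of the within-limit test used by gr_log_resource() in the gracl_res.c hunk above: the gt argument records whether the caller treats wanted > rlim (rather than wanted >= rlim) as the violation, so logging is skipped while the request is still allowed. RLIM_INF below is a stand-in name for RLIM_INFINITY; illustrative only.

/* reslog_demo.c - sketch of the "still within the limit" test from
 * gr_log_resource(). Illustrative only.
 */
#include <stdio.h>
#include <limits.h>

#define RLIM_INF ULONG_MAX

static int within_limit(unsigned long wanted, unsigned long rlim, int gt)
{
    return rlim == RLIM_INF || (gt && wanted <= rlim) || (!gt && wanted < rlim);
}

int main(void)
{
    printf("%d\n", within_limit(100, 100, 1));    /* 1: only wanted > rlim violates  */
    printf("%d\n", within_limit(100, 100, 0));    /* 0: wanted >= rlim violates      */
    printf("%d\n", within_limit(5, RLIM_INF, 0)); /* 1: unlimited resource           */
    return 0;
}
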
51934diff -urNp linux-2.6.32.45/grsecurity/gracl_segv.c linux-2.6.32.45/grsecurity/gracl_segv.c
51935--- linux-2.6.32.45/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
51936+++ linux-2.6.32.45/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
51937@@ -0,0 +1,284 @@
51938+#include <linux/kernel.h>
51939+#include <linux/mm.h>
51940+#include <asm/uaccess.h>
51941+#include <asm/errno.h>
51942+#include <asm/mman.h>
51943+#include <net/sock.h>
51944+#include <linux/file.h>
51945+#include <linux/fs.h>
51946+#include <linux/net.h>
51947+#include <linux/in.h>
51948+#include <linux/smp_lock.h>
51949+#include <linux/slab.h>
51950+#include <linux/types.h>
51951+#include <linux/sched.h>
51952+#include <linux/timer.h>
51953+#include <linux/gracl.h>
51954+#include <linux/grsecurity.h>
51955+#include <linux/grinternal.h>
51956+
51957+static struct crash_uid *uid_set;
51958+static unsigned short uid_used;
51959+static DEFINE_SPINLOCK(gr_uid_lock);
51960+extern rwlock_t gr_inode_lock;
51961+extern struct acl_subject_label *
51962+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
51963+ struct acl_role_label *role);
51964+extern int gr_fake_force_sig(int sig, struct task_struct *t);
51965+
51966+int
51967+gr_init_uidset(void)
51968+{
51969+ uid_set =
51970+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
51971+ uid_used = 0;
51972+
51973+ return uid_set ? 1 : 0;
51974+}
51975+
51976+void
51977+gr_free_uidset(void)
51978+{
51979+ if (uid_set)
51980+ kfree(uid_set);
51981+
51982+ return;
51983+}
51984+
51985+int
51986+gr_find_uid(const uid_t uid)
51987+{
51988+ struct crash_uid *tmp = uid_set;
51989+ uid_t buid;
51990+ int low = 0, high = uid_used - 1, mid;
51991+
51992+ while (high >= low) {
51993+ mid = (low + high) >> 1;
51994+ buid = tmp[mid].uid;
51995+ if (buid == uid)
51996+ return mid;
51997+ if (buid > uid)
51998+ high = mid - 1;
51999+ if (buid < uid)
52000+ low = mid + 1;
52001+ }
52002+
52003+ return -1;
52004+}
52005+
52006+static __inline__ void
52007+gr_insertsort(void)
52008+{
52009+ unsigned short i, j;
52010+ struct crash_uid index;
52011+
52012+ for (i = 1; i < uid_used; i++) {
52013+ index = uid_set[i];
52014+ j = i;
52015+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52016+ uid_set[j] = uid_set[j - 1];
52017+ j--;
52018+ }
52019+ uid_set[j] = index;
52020+ }
52021+
52022+ return;
52023+}
52024+
52025+static __inline__ void
52026+gr_insert_uid(const uid_t uid, const unsigned long expires)
52027+{
52028+ int loc;
52029+
52030+ if (uid_used == GR_UIDTABLE_MAX)
52031+ return;
52032+
52033+ loc = gr_find_uid(uid);
52034+
52035+ if (loc >= 0) {
52036+ uid_set[loc].expires = expires;
52037+ return;
52038+ }
52039+
52040+ uid_set[uid_used].uid = uid;
52041+ uid_set[uid_used].expires = expires;
52042+ uid_used++;
52043+
52044+ gr_insertsort();
52045+
52046+ return;
52047+}
52048+
52049+void
52050+gr_remove_uid(const unsigned short loc)
52051+{
52052+ unsigned short i;
52053+
52054+ for (i = loc + 1; i < uid_used; i++)
52055+ uid_set[i - 1] = uid_set[i];
52056+
52057+ uid_used--;
52058+
52059+ return;
52060+}
52061+
52062+int
52063+gr_check_crash_uid(const uid_t uid)
52064+{
52065+ int loc;
52066+ int ret = 0;
52067+
52068+ if (unlikely(!gr_acl_is_enabled()))
52069+ return 0;
52070+
52071+ spin_lock(&gr_uid_lock);
52072+ loc = gr_find_uid(uid);
52073+
52074+ if (loc < 0)
52075+ goto out_unlock;
52076+
52077+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
52078+ gr_remove_uid(loc);
52079+ else
52080+ ret = 1;
52081+
52082+out_unlock:
52083+ spin_unlock(&gr_uid_lock);
52084+ return ret;
52085+}
52086+
52087+static __inline__ int
52088+proc_is_setxid(const struct cred *cred)
52089+{
52090+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
52091+ cred->uid != cred->fsuid)
52092+ return 1;
52093+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52094+ cred->gid != cred->fsgid)
52095+ return 1;
52096+
52097+ return 0;
52098+}
52099+
52100+void
52101+gr_handle_crash(struct task_struct *task, const int sig)
52102+{
52103+ struct acl_subject_label *curr;
52104+ struct acl_subject_label *curr2;
52105+ struct task_struct *tsk, *tsk2;
52106+ const struct cred *cred;
52107+ const struct cred *cred2;
52108+
52109+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52110+ return;
52111+
52112+ if (unlikely(!gr_acl_is_enabled()))
52113+ return;
52114+
52115+ curr = task->acl;
52116+
52117+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
52118+ return;
52119+
52120+ if (time_before_eq(curr->expires, get_seconds())) {
52121+ curr->expires = 0;
52122+ curr->crashes = 0;
52123+ }
52124+
52125+ curr->crashes++;
52126+
52127+ if (!curr->expires)
52128+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52129+
52130+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52131+ time_after(curr->expires, get_seconds())) {
52132+ rcu_read_lock();
52133+ cred = __task_cred(task);
52134+ if (cred->uid && proc_is_setxid(cred)) {
52135+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52136+ spin_lock(&gr_uid_lock);
52137+ gr_insert_uid(cred->uid, curr->expires);
52138+ spin_unlock(&gr_uid_lock);
52139+ curr->expires = 0;
52140+ curr->crashes = 0;
52141+ read_lock(&tasklist_lock);
52142+ do_each_thread(tsk2, tsk) {
52143+ cred2 = __task_cred(tsk);
52144+ if (tsk != task && cred2->uid == cred->uid)
52145+ gr_fake_force_sig(SIGKILL, tsk);
52146+ } while_each_thread(tsk2, tsk);
52147+ read_unlock(&tasklist_lock);
52148+ } else {
52149+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52150+ read_lock(&tasklist_lock);
52151+ do_each_thread(tsk2, tsk) {
52152+ if (likely(tsk != task)) {
52153+ curr2 = tsk->acl;
52154+
52155+ if (curr2->device == curr->device &&
52156+ curr2->inode == curr->inode)
52157+ gr_fake_force_sig(SIGKILL, tsk);
52158+ }
52159+ } while_each_thread(tsk2, tsk);
52160+ read_unlock(&tasklist_lock);
52161+ }
52162+ rcu_read_unlock();
52163+ }
52164+
52165+ return;
52166+}
52167+
52168+int
52169+gr_check_crash_exec(const struct file *filp)
52170+{
52171+ struct acl_subject_label *curr;
52172+
52173+ if (unlikely(!gr_acl_is_enabled()))
52174+ return 0;
52175+
52176+ read_lock(&gr_inode_lock);
52177+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52178+ filp->f_path.dentry->d_inode->i_sb->s_dev,
52179+ current->role);
52180+ read_unlock(&gr_inode_lock);
52181+
52182+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52183+ (!curr->crashes && !curr->expires))
52184+ return 0;
52185+
52186+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52187+ time_after(curr->expires, get_seconds()))
52188+ return 1;
52189+ else if (time_before_eq(curr->expires, get_seconds())) {
52190+ curr->crashes = 0;
52191+ curr->expires = 0;
52192+ }
52193+
52194+ return 0;
52195+}
52196+
52197+void
52198+gr_handle_alertkill(struct task_struct *task)
52199+{
52200+ struct acl_subject_label *curracl;
52201+ __u32 curr_ip;
52202+ struct task_struct *p, *p2;
52203+
52204+ if (unlikely(!gr_acl_is_enabled()))
52205+ return;
52206+
52207+ curracl = task->acl;
52208+ curr_ip = task->signal->curr_ip;
52209+
52210+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52211+ read_lock(&tasklist_lock);
52212+ do_each_thread(p2, p) {
52213+ if (p->signal->curr_ip == curr_ip)
52214+ gr_fake_force_sig(SIGKILL, p);
52215+ } while_each_thread(p2, p);
52216+ read_unlock(&tasklist_lock);
52217+ } else if (curracl->mode & GR_KILLPROC)
52218+ gr_fake_force_sig(SIGKILL, task);
52219+
52220+ return;
52221+}
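
The crash-uid table in gracl_segv.c above is a small sorted array: gr_insert_uid() appends and restores order with an insertion sort, gr_find_uid() looks entries up with a binary search, and the callers check the stored expiry times. A self-contained user-space sketch of that structure, with an arbitrary size and without the locking:

/* uidset_demo.c - user-space sketch of the sorted crash-uid table kept by
 * gracl_segv.c. Illustrative only; MAX_UIDS is arbitrary.
 */
#include <stdio.h>
#include <time.h>

#define MAX_UIDS 16

struct crash_uid { unsigned uid; time_t expires; };

static struct crash_uid set[MAX_UIDS];
static int used;

static int find_uid(unsigned uid)
{
    int low = 0, high = used - 1;

    while (high >= low) {
        int mid = (low + high) / 2;
        if (set[mid].uid == uid)
            return mid;
        if (set[mid].uid > uid)
            high = mid - 1;
        else
            low = mid + 1;
    }
    return -1;
}

static void insert_uid(unsigned uid, time_t expires)
{
    int loc = find_uid(uid);

    if (loc >= 0) {             /* already present: just refresh the expiry */
        set[loc].expires = expires;
        return;
    }
    if (used == MAX_UIDS)
        return;
    set[used].uid = uid;
    set[used].expires = expires;
    used++;
    /* one insertion-sort pass keeps the array ordered for find_uid() */
    for (int i = 1; i < used; i++) {
        struct crash_uid key = set[i];
        int j = i;
        while (j > 0 && set[j - 1].uid > key.uid) {
            set[j] = set[j - 1];
            j--;
        }
        set[j] = key;
    }
}

int main(void)
{
    insert_uid(1000, time(NULL) + 1800);
    insert_uid(33, time(NULL) + 1800);
    insert_uid(1000, time(NULL) + 3600);
    printf("uid 33 at index %d, uid 500 at index %d\n",
           find_uid(33), find_uid(500));
    return 0;
}

Keeping the array sorted after every append is what makes the binary search in gr_find_uid() valid.
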
52222diff -urNp linux-2.6.32.45/grsecurity/gracl_shm.c linux-2.6.32.45/grsecurity/gracl_shm.c
52223--- linux-2.6.32.45/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52224+++ linux-2.6.32.45/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
52225@@ -0,0 +1,40 @@
52226+#include <linux/kernel.h>
52227+#include <linux/mm.h>
52228+#include <linux/sched.h>
52229+#include <linux/file.h>
52230+#include <linux/ipc.h>
52231+#include <linux/gracl.h>
52232+#include <linux/grsecurity.h>
52233+#include <linux/grinternal.h>
52234+
52235+int
52236+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52237+ const time_t shm_createtime, const uid_t cuid, const int shmid)
52238+{
52239+ struct task_struct *task;
52240+
52241+ if (!gr_acl_is_enabled())
52242+ return 1;
52243+
52244+ rcu_read_lock();
52245+ read_lock(&tasklist_lock);
52246+
52247+ task = find_task_by_vpid(shm_cprid);
52248+
52249+ if (unlikely(!task))
52250+ task = find_task_by_vpid(shm_lapid);
52251+
52252+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52253+ (task->pid == shm_lapid)) &&
52254+ (task->acl->mode & GR_PROTSHM) &&
52255+ (task->acl != current->acl))) {
52256+ read_unlock(&tasklist_lock);
52257+ rcu_read_unlock();
52258+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52259+ return 0;
52260+ }
52261+ read_unlock(&tasklist_lock);
52262+ rcu_read_unlock();
52263+
52264+ return 1;
52265+}
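
One detail of gr_handle_shmat() above worth calling out is the pid-reuse guard: the creator pid recorded with a segment is only trusted if the task currently holding that pid started no later than the segment was created. A tiny sketch of that comparison (illustrative only):

/* shm_guard_demo.c - sketch of the pid-reuse guard in gr_handle_shmat(). */
#include <stdio.h>
#include <time.h>

static int pid_matches_creator(time_t task_start, time_t shm_createtime)
{
    /* a task that started after the segment existed cannot be its creator */
    return task_start <= shm_createtime;
}

int main(void)
{
    time_t created = 1000000;

    printf("%d %d\n", pid_matches_creator(999000, created),    /* 1: plausible creator */
                      pid_matches_creator(1000500, created));  /* 0: pid was recycled  */
    return 0;
}
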
52266diff -urNp linux-2.6.32.45/grsecurity/grsec_chdir.c linux-2.6.32.45/grsecurity/grsec_chdir.c
52267--- linux-2.6.32.45/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52268+++ linux-2.6.32.45/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
52269@@ -0,0 +1,19 @@
52270+#include <linux/kernel.h>
52271+#include <linux/sched.h>
52272+#include <linux/fs.h>
52273+#include <linux/file.h>
52274+#include <linux/grsecurity.h>
52275+#include <linux/grinternal.h>
52276+
52277+void
52278+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52279+{
52280+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52281+ if ((grsec_enable_chdir && grsec_enable_group &&
52282+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52283+ !grsec_enable_group)) {
52284+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52285+ }
52286+#endif
52287+ return;
52288+}
52289diff -urNp linux-2.6.32.45/grsecurity/grsec_chroot.c linux-2.6.32.45/grsecurity/grsec_chroot.c
52290--- linux-2.6.32.45/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52291+++ linux-2.6.32.45/grsecurity/grsec_chroot.c 2011-07-18 17:14:10.000000000 -0400
52292@@ -0,0 +1,384 @@
52293+#include <linux/kernel.h>
52294+#include <linux/module.h>
52295+#include <linux/sched.h>
52296+#include <linux/file.h>
52297+#include <linux/fs.h>
52298+#include <linux/mount.h>
52299+#include <linux/types.h>
52300+#include <linux/pid_namespace.h>
52301+#include <linux/grsecurity.h>
52302+#include <linux/grinternal.h>
52303+
52304+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52305+{
52306+#ifdef CONFIG_GRKERNSEC
52307+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52308+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52309+ task->gr_is_chrooted = 1;
52310+ else
52311+ task->gr_is_chrooted = 0;
52312+
52313+ task->gr_chroot_dentry = path->dentry;
52314+#endif
52315+ return;
52316+}
52317+
52318+void gr_clear_chroot_entries(struct task_struct *task)
52319+{
52320+#ifdef CONFIG_GRKERNSEC
52321+ task->gr_is_chrooted = 0;
52322+ task->gr_chroot_dentry = NULL;
52323+#endif
52324+ return;
52325+}
52326+
52327+int
52328+gr_handle_chroot_unix(const pid_t pid)
52329+{
52330+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52331+ struct task_struct *p;
52332+
52333+ if (unlikely(!grsec_enable_chroot_unix))
52334+ return 1;
52335+
52336+ if (likely(!proc_is_chrooted(current)))
52337+ return 1;
52338+
52339+ rcu_read_lock();
52340+ read_lock(&tasklist_lock);
52341+
52342+ p = find_task_by_vpid_unrestricted(pid);
52343+ if (unlikely(p && !have_same_root(current, p))) {
52344+ read_unlock(&tasklist_lock);
52345+ rcu_read_unlock();
52346+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
52347+ return 0;
52348+ }
52349+ read_unlock(&tasklist_lock);
52350+ rcu_read_unlock();
52351+#endif
52352+ return 1;
52353+}
52354+
52355+int
52356+gr_handle_chroot_nice(void)
52357+{
52358+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52359+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
52360+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
52361+ return -EPERM;
52362+ }
52363+#endif
52364+ return 0;
52365+}
52366+
52367+int
52368+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
52369+{
52370+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52371+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
52372+ && proc_is_chrooted(current)) {
52373+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
52374+ return -EACCES;
52375+ }
52376+#endif
52377+ return 0;
52378+}
52379+
52380+int
52381+gr_handle_chroot_rawio(const struct inode *inode)
52382+{
52383+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52384+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
52385+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
52386+ return 1;
52387+#endif
52388+ return 0;
52389+}
52390+
52391+int
52392+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
52393+{
52394+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52395+ struct task_struct *p;
52396+ int ret = 0;
52397+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
52398+ return ret;
52399+
52400+ read_lock(&tasklist_lock);
52401+ do_each_pid_task(pid, type, p) {
52402+ if (!have_same_root(current, p)) {
52403+ ret = 1;
52404+ goto out;
52405+ }
52406+ } while_each_pid_task(pid, type, p);
52407+out:
52408+ read_unlock(&tasklist_lock);
52409+ return ret;
52410+#endif
52411+ return 0;
52412+}
52413+
52414+int
52415+gr_pid_is_chrooted(struct task_struct *p)
52416+{
52417+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52418+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
52419+ return 0;
52420+
52421+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
52422+ !have_same_root(current, p)) {
52423+ return 1;
52424+ }
52425+#endif
52426+ return 0;
52427+}
52428+
52429+EXPORT_SYMBOL(gr_pid_is_chrooted);
52430+
52431+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
52432+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
52433+{
52434+ struct dentry *dentry = (struct dentry *)u_dentry;
52435+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
52436+ struct dentry *realroot;
52437+ struct vfsmount *realrootmnt;
52438+ struct dentry *currentroot;
52439+ struct vfsmount *currentmnt;
52440+ struct task_struct *reaper = &init_task;
52441+ int ret = 1;
52442+
52443+ read_lock(&reaper->fs->lock);
52444+ realrootmnt = mntget(reaper->fs->root.mnt);
52445+ realroot = dget(reaper->fs->root.dentry);
52446+ read_unlock(&reaper->fs->lock);
52447+
52448+ read_lock(&current->fs->lock);
52449+ currentmnt = mntget(current->fs->root.mnt);
52450+ currentroot = dget(current->fs->root.dentry);
52451+ read_unlock(&current->fs->lock);
52452+
52453+ spin_lock(&dcache_lock);
52454+ for (;;) {
52455+ if (unlikely((dentry == realroot && mnt == realrootmnt)
52456+ || (dentry == currentroot && mnt == currentmnt)))
52457+ break;
52458+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
52459+ if (mnt->mnt_parent == mnt)
52460+ break;
52461+ dentry = mnt->mnt_mountpoint;
52462+ mnt = mnt->mnt_parent;
52463+ continue;
52464+ }
52465+ dentry = dentry->d_parent;
52466+ }
52467+ spin_unlock(&dcache_lock);
52468+
52469+ dput(currentroot);
52470+ mntput(currentmnt);
52471+
52472+ /* access is outside of chroot */
52473+ if (dentry == realroot && mnt == realrootmnt)
52474+ ret = 0;
52475+
52476+ dput(realroot);
52477+ mntput(realrootmnt);
52478+ return ret;
52479+}
52480+#endif
52481+
52482+int
52483+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
52484+{
52485+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52486+ if (!grsec_enable_chroot_fchdir)
52487+ return 1;
52488+
52489+ if (!proc_is_chrooted(current))
52490+ return 1;
52491+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
52492+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
52493+ return 0;
52494+ }
52495+#endif
52496+ return 1;
52497+}
52498+
52499+int
52500+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52501+ const time_t shm_createtime)
52502+{
52503+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52504+ struct task_struct *p;
52505+ time_t starttime;
52506+
52507+ if (unlikely(!grsec_enable_chroot_shmat))
52508+ return 1;
52509+
52510+ if (likely(!proc_is_chrooted(current)))
52511+ return 1;
52512+
52513+ rcu_read_lock();
52514+ read_lock(&tasklist_lock);
52515+
52516+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
52517+ starttime = p->start_time.tv_sec;
52518+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
52519+ if (have_same_root(current, p)) {
52520+ goto allow;
52521+ } else {
52522+ read_unlock(&tasklist_lock);
52523+ rcu_read_unlock();
52524+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52525+ return 0;
52526+ }
52527+ }
52528+ /* creator exited, pid reuse, fall through to next check */
52529+ }
52530+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
52531+ if (unlikely(!have_same_root(current, p))) {
52532+ read_unlock(&tasklist_lock);
52533+ rcu_read_unlock();
52534+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52535+ return 0;
52536+ }
52537+ }
52538+
52539+allow:
52540+ read_unlock(&tasklist_lock);
52541+ rcu_read_unlock();
52542+#endif
52543+ return 1;
52544+}
52545+
52546+void
52547+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
52548+{
52549+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52550+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
52551+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
52552+#endif
52553+ return;
52554+}
52555+
52556+int
52557+gr_handle_chroot_mknod(const struct dentry *dentry,
52558+ const struct vfsmount *mnt, const int mode)
52559+{
52560+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52561+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
52562+ proc_is_chrooted(current)) {
52563+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
52564+ return -EPERM;
52565+ }
52566+#endif
52567+ return 0;
52568+}
52569+
52570+int
52571+gr_handle_chroot_mount(const struct dentry *dentry,
52572+ const struct vfsmount *mnt, const char *dev_name)
52573+{
52574+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52575+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
52576+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
52577+ return -EPERM;
52578+ }
52579+#endif
52580+ return 0;
52581+}
52582+
52583+int
52584+gr_handle_chroot_pivot(void)
52585+{
52586+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52587+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
52588+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
52589+ return -EPERM;
52590+ }
52591+#endif
52592+ return 0;
52593+}
52594+
52595+int
52596+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
52597+{
52598+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52599+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
52600+ !gr_is_outside_chroot(dentry, mnt)) {
52601+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
52602+ return -EPERM;
52603+ }
52604+#endif
52605+ return 0;
52606+}
52607+
52608+int
52609+gr_handle_chroot_caps(struct path *path)
52610+{
52611+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52612+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
52613+ (init_task.fs->root.dentry != path->dentry) &&
52614+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
52615+
52616+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52617+ const struct cred *old = current_cred();
52618+ struct cred *new = prepare_creds();
52619+ if (new == NULL)
52620+ return 1;
52621+
52622+ new->cap_permitted = cap_drop(old->cap_permitted,
52623+ chroot_caps);
52624+ new->cap_inheritable = cap_drop(old->cap_inheritable,
52625+ chroot_caps);
52626+ new->cap_effective = cap_drop(old->cap_effective,
52627+ chroot_caps);
52628+
52629+ commit_creds(new);
52630+
52631+ return 0;
52632+ }
52633+#endif
52634+ return 0;
52635+}
52636+
52637+int
52638+gr_handle_chroot_sysctl(const int op)
52639+{
52640+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52641+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
52642+ && (op & MAY_WRITE))
52643+ return -EACCES;
52644+#endif
52645+ return 0;
52646+}
52647+
52648+void
52649+gr_handle_chroot_chdir(struct path *path)
52650+{
52651+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52652+ if (grsec_enable_chroot_chdir)
52653+ set_fs_pwd(current->fs, path);
52654+#endif
52655+ return;
52656+}
52657+
52658+int
52659+gr_handle_chroot_chmod(const struct dentry *dentry,
52660+ const struct vfsmount *mnt, const int mode)
52661+{
52662+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52663+ /* allow chmod +s on directories, but not on files */
52664+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
52665+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
52666+ proc_is_chrooted(current)) {
52667+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
52668+ return -EPERM;
52669+ }
52670+#endif
52671+ return 0;
52672+}
52673+
52674+#ifdef CONFIG_SECURITY
52675+EXPORT_SYMBOL(gr_handle_chroot_caps);
52676+#endif
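
gr_is_outside_chroot() above decides containment by walking upward from the accessed object until it reaches either the caller's root (still inside the jail) or the real root (escaped). A simplified user-space sketch of that walk over a toy parent-pointer tree; it deliberately omits the vfsmount crossings and dcache locking of the real code, and all names are illustrative.

/* chroot_walk_demo.c - sketch of the upward walk in gr_is_outside_chroot(). */
#include <stdio.h>

struct node { const char *name; struct node *parent; };

static int outside_chroot(struct node *obj, struct node *chroot_root,
                          struct node *real_root)
{
    for (struct node *n = obj; ; n = n->parent) {
        if (n == chroot_root)
            return 0;    /* reached the jail's root first: inside        */
        if (n == real_root)
            return 1;    /* reached "/" without passing the jail: outside */
    }
}

int main(void)
{
    struct node root = { "/", NULL };
    struct node srv  = { "srv", &root };
    struct node jail = { "jail", &srv };
    struct node etc  = { "etc", &jail };
    struct node tmp  = { "tmp", &root };

    root.parent = &root;    /* "/" is its own parent, so the walk terminates */
    printf("%d %d\n", outside_chroot(&etc, &jail, &root),
                      outside_chroot(&tmp, &jail, &root));  /* prints: 0 1 */
    return 0;
}

In the kernel version the walk also has to hop across mount points (mnt->mnt_mountpoint / mnt->mnt_parent), which is why the real loop carries both a dentry and a vfsmount.
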
52677diff -urNp linux-2.6.32.45/grsecurity/grsec_disabled.c linux-2.6.32.45/grsecurity/grsec_disabled.c
52678--- linux-2.6.32.45/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
52679+++ linux-2.6.32.45/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
52680@@ -0,0 +1,447 @@
52681+#include <linux/kernel.h>
52682+#include <linux/module.h>
52683+#include <linux/sched.h>
52684+#include <linux/file.h>
52685+#include <linux/fs.h>
52686+#include <linux/kdev_t.h>
52687+#include <linux/net.h>
52688+#include <linux/in.h>
52689+#include <linux/ip.h>
52690+#include <linux/skbuff.h>
52691+#include <linux/sysctl.h>
52692+
52693+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
52694+void
52695+pax_set_initial_flags(struct linux_binprm *bprm)
52696+{
52697+ return;
52698+}
52699+#endif
52700+
52701+#ifdef CONFIG_SYSCTL
52702+__u32
52703+gr_handle_sysctl(const struct ctl_table * table, const int op)
52704+{
52705+ return 0;
52706+}
52707+#endif
52708+
52709+#ifdef CONFIG_TASKSTATS
52710+int gr_is_taskstats_denied(int pid)
52711+{
52712+ return 0;
52713+}
52714+#endif
52715+
52716+int
52717+gr_acl_is_enabled(void)
52718+{
52719+ return 0;
52720+}
52721+
52722+int
52723+gr_handle_rawio(const struct inode *inode)
52724+{
52725+ return 0;
52726+}
52727+
52728+void
52729+gr_acl_handle_psacct(struct task_struct *task, const long code)
52730+{
52731+ return;
52732+}
52733+
52734+int
52735+gr_handle_ptrace(struct task_struct *task, const long request)
52736+{
52737+ return 0;
52738+}
52739+
52740+int
52741+gr_handle_proc_ptrace(struct task_struct *task)
52742+{
52743+ return 0;
52744+}
52745+
52746+void
52747+gr_learn_resource(const struct task_struct *task,
52748+ const int res, const unsigned long wanted, const int gt)
52749+{
52750+ return;
52751+}
52752+
52753+int
52754+gr_set_acls(const int type)
52755+{
52756+ return 0;
52757+}
52758+
52759+int
52760+gr_check_hidden_task(const struct task_struct *tsk)
52761+{
52762+ return 0;
52763+}
52764+
52765+int
52766+gr_check_protected_task(const struct task_struct *task)
52767+{
52768+ return 0;
52769+}
52770+
52771+int
52772+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52773+{
52774+ return 0;
52775+}
52776+
52777+void
52778+gr_copy_label(struct task_struct *tsk)
52779+{
52780+ return;
52781+}
52782+
52783+void
52784+gr_set_pax_flags(struct task_struct *task)
52785+{
52786+ return;
52787+}
52788+
52789+int
52790+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52791+ const int unsafe_share)
52792+{
52793+ return 0;
52794+}
52795+
52796+void
52797+gr_handle_delete(const ino_t ino, const dev_t dev)
52798+{
52799+ return;
52800+}
52801+
52802+void
52803+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52804+{
52805+ return;
52806+}
52807+
52808+void
52809+gr_handle_crash(struct task_struct *task, const int sig)
52810+{
52811+ return;
52812+}
52813+
52814+int
52815+gr_check_crash_exec(const struct file *filp)
52816+{
52817+ return 0;
52818+}
52819+
52820+int
52821+gr_check_crash_uid(const uid_t uid)
52822+{
52823+ return 0;
52824+}
52825+
52826+void
52827+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52828+ struct dentry *old_dentry,
52829+ struct dentry *new_dentry,
52830+ struct vfsmount *mnt, const __u8 replace)
52831+{
52832+ return;
52833+}
52834+
52835+int
52836+gr_search_socket(const int family, const int type, const int protocol)
52837+{
52838+ return 1;
52839+}
52840+
52841+int
52842+gr_search_connectbind(const int mode, const struct socket *sock,
52843+ const struct sockaddr_in *addr)
52844+{
52845+ return 0;
52846+}
52847+
52848+int
52849+gr_is_capable(const int cap)
52850+{
52851+ return 1;
52852+}
52853+
52854+int
52855+gr_is_capable_nolog(const int cap)
52856+{
52857+ return 1;
52858+}
52859+
52860+void
52861+gr_handle_alertkill(struct task_struct *task)
52862+{
52863+ return;
52864+}
52865+
52866+__u32
52867+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
52868+{
52869+ return 1;
52870+}
52871+
52872+__u32
52873+gr_acl_handle_hidden_file(const struct dentry * dentry,
52874+ const struct vfsmount * mnt)
52875+{
52876+ return 1;
52877+}
52878+
52879+__u32
52880+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52881+ const int fmode)
52882+{
52883+ return 1;
52884+}
52885+
52886+__u32
52887+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52888+{
52889+ return 1;
52890+}
52891+
52892+__u32
52893+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
52894+{
52895+ return 1;
52896+}
52897+
52898+int
52899+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
52900+ unsigned int *vm_flags)
52901+{
52902+ return 1;
52903+}
52904+
52905+__u32
52906+gr_acl_handle_truncate(const struct dentry * dentry,
52907+ const struct vfsmount * mnt)
52908+{
52909+ return 1;
52910+}
52911+
52912+__u32
52913+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
52914+{
52915+ return 1;
52916+}
52917+
52918+__u32
52919+gr_acl_handle_access(const struct dentry * dentry,
52920+ const struct vfsmount * mnt, const int fmode)
52921+{
52922+ return 1;
52923+}
52924+
52925+__u32
52926+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
52927+ mode_t mode)
52928+{
52929+ return 1;
52930+}
52931+
52932+__u32
52933+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
52934+ mode_t mode)
52935+{
52936+ return 1;
52937+}
52938+
52939+__u32
52940+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
52941+{
52942+ return 1;
52943+}
52944+
52945+__u32
52946+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
52947+{
52948+ return 1;
52949+}
52950+
52951+void
52952+grsecurity_init(void)
52953+{
52954+ return;
52955+}
52956+
52957+__u32
52958+gr_acl_handle_mknod(const struct dentry * new_dentry,
52959+ const struct dentry * parent_dentry,
52960+ const struct vfsmount * parent_mnt,
52961+ const int mode)
52962+{
52963+ return 1;
52964+}
52965+
52966+__u32
52967+gr_acl_handle_mkdir(const struct dentry * new_dentry,
52968+ const struct dentry * parent_dentry,
52969+ const struct vfsmount * parent_mnt)
52970+{
52971+ return 1;
52972+}
52973+
52974+__u32
52975+gr_acl_handle_symlink(const struct dentry * new_dentry,
52976+ const struct dentry * parent_dentry,
52977+ const struct vfsmount * parent_mnt, const char *from)
52978+{
52979+ return 1;
52980+}
52981+
52982+__u32
52983+gr_acl_handle_link(const struct dentry * new_dentry,
52984+ const struct dentry * parent_dentry,
52985+ const struct vfsmount * parent_mnt,
52986+ const struct dentry * old_dentry,
52987+ const struct vfsmount * old_mnt, const char *to)
52988+{
52989+ return 1;
52990+}
52991+
52992+int
52993+gr_acl_handle_rename(const struct dentry *new_dentry,
52994+ const struct dentry *parent_dentry,
52995+ const struct vfsmount *parent_mnt,
52996+ const struct dentry *old_dentry,
52997+ const struct inode *old_parent_inode,
52998+ const struct vfsmount *old_mnt, const char *newname)
52999+{
53000+ return 0;
53001+}
53002+
53003+int
53004+gr_acl_handle_filldir(const struct file *file, const char *name,
53005+ const int namelen, const ino_t ino)
53006+{
53007+ return 1;
53008+}
53009+
53010+int
53011+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53012+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53013+{
53014+ return 1;
53015+}
53016+
53017+int
53018+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53019+{
53020+ return 0;
53021+}
53022+
53023+int
53024+gr_search_accept(const struct socket *sock)
53025+{
53026+ return 0;
53027+}
53028+
53029+int
53030+gr_search_listen(const struct socket *sock)
53031+{
53032+ return 0;
53033+}
53034+
53035+int
53036+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53037+{
53038+ return 0;
53039+}
53040+
53041+__u32
53042+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53043+{
53044+ return 1;
53045+}
53046+
53047+__u32
53048+gr_acl_handle_creat(const struct dentry * dentry,
53049+ const struct dentry * p_dentry,
53050+ const struct vfsmount * p_mnt, const int fmode,
53051+ const int imode)
53052+{
53053+ return 1;
53054+}
53055+
53056+void
53057+gr_acl_handle_exit(void)
53058+{
53059+ return;
53060+}
53061+
53062+int
53063+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53064+{
53065+ return 1;
53066+}
53067+
53068+void
53069+gr_set_role_label(const uid_t uid, const gid_t gid)
53070+{
53071+ return;
53072+}
53073+
53074+int
53075+gr_acl_handle_procpidmem(const struct task_struct *task)
53076+{
53077+ return 0;
53078+}
53079+
53080+int
53081+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53082+{
53083+ return 0;
53084+}
53085+
53086+int
53087+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53088+{
53089+ return 0;
53090+}
53091+
53092+void
53093+gr_set_kernel_label(struct task_struct *task)
53094+{
53095+ return;
53096+}
53097+
53098+int
53099+gr_check_user_change(int real, int effective, int fs)
53100+{
53101+ return 0;
53102+}
53103+
53104+int
53105+gr_check_group_change(int real, int effective, int fs)
53106+{
53107+ return 0;
53108+}
53109+
53110+int gr_acl_enable_at_secure(void)
53111+{
53112+ return 0;
53113+}
53114+
53115+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53116+{
53117+ return dentry->d_inode->i_sb->s_dev;
53118+}
53119+
53120+EXPORT_SYMBOL(gr_is_capable);
53121+EXPORT_SYMBOL(gr_is_capable_nolog);
53122+EXPORT_SYMBOL(gr_learn_resource);
53123+EXPORT_SYMBOL(gr_set_kernel_label);
53124+#ifdef CONFIG_SECURITY
53125+EXPORT_SYMBOL(gr_check_user_change);
53126+EXPORT_SYMBOL(gr_check_group_change);
53127+#endif
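
grsec_disabled.c exists so that call sites elsewhere in the kernel can invoke the gr_* hooks unconditionally: when the feature is compiled out, stubs returning the permissive defaults are linked in instead, and no #ifdefs are needed at the callers. A minimal sketch of that pattern (FEATURE_ENABLED and hook_allow_open are made-up names, not part of the patch):

/* hook_stub_demo.c - sketch of the always-declared hook / no-op stub pattern. */
#include <stdio.h>

/* normally declared in a header shared by callers and both implementations */
int hook_allow_open(const char *path);

#ifndef FEATURE_ENABLED
/* disabled build: permit everything, log nothing */
int hook_allow_open(const char *path)
{
    (void)path;
    return 1;
}
#endif

int main(void)
{
    if (hook_allow_open("/etc/shadow"))
        printf("open permitted by hook\n");
    return 0;
}
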
53128diff -urNp linux-2.6.32.45/grsecurity/grsec_exec.c linux-2.6.32.45/grsecurity/grsec_exec.c
53129--- linux-2.6.32.45/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53130+++ linux-2.6.32.45/grsecurity/grsec_exec.c 2011-08-11 19:57:19.000000000 -0400
53131@@ -0,0 +1,132 @@
53132+#include <linux/kernel.h>
53133+#include <linux/sched.h>
53134+#include <linux/file.h>
53135+#include <linux/binfmts.h>
53136+#include <linux/smp_lock.h>
53137+#include <linux/fs.h>
53138+#include <linux/types.h>
53139+#include <linux/grdefs.h>
53140+#include <linux/grinternal.h>
53141+#include <linux/capability.h>
53142+#include <linux/compat.h>
53143+
53144+#include <asm/uaccess.h>
53145+
53146+#ifdef CONFIG_GRKERNSEC_EXECLOG
53147+static char gr_exec_arg_buf[132];
53148+static DEFINE_MUTEX(gr_exec_arg_mutex);
53149+#endif
53150+
53151+void
53152+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
53153+{
53154+#ifdef CONFIG_GRKERNSEC_EXECLOG
53155+ char *grarg = gr_exec_arg_buf;
53156+ unsigned int i, x, execlen = 0;
53157+ char c;
53158+
53159+ if (!((grsec_enable_execlog && grsec_enable_group &&
53160+ in_group_p(grsec_audit_gid))
53161+ || (grsec_enable_execlog && !grsec_enable_group)))
53162+ return;
53163+
53164+ mutex_lock(&gr_exec_arg_mutex);
53165+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53166+
53167+ if (unlikely(argv == NULL))
53168+ goto log;
53169+
53170+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53171+ const char __user *p;
53172+ unsigned int len;
53173+
53174+ if (copy_from_user(&p, argv + i, sizeof(p)))
53175+ goto log;
53176+ if (!p)
53177+ goto log;
53178+ len = strnlen_user(p, 128 - execlen);
53179+ if (len > 128 - execlen)
53180+ len = 128 - execlen;
53181+ else if (len > 0)
53182+ len--;
53183+ if (copy_from_user(grarg + execlen, p, len))
53184+ goto log;
53185+
53186+ /* rewrite unprintable characters */
53187+ for (x = 0; x < len; x++) {
53188+ c = *(grarg + execlen + x);
53189+ if (c < 32 || c > 126)
53190+ *(grarg + execlen + x) = ' ';
53191+ }
53192+
53193+ execlen += len;
53194+ *(grarg + execlen) = ' ';
53195+ *(grarg + execlen + 1) = '\0';
53196+ execlen++;
53197+ }
53198+
53199+ log:
53200+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53201+ bprm->file->f_path.mnt, grarg);
53202+ mutex_unlock(&gr_exec_arg_mutex);
53203+#endif
53204+ return;
53205+}
53206+
53207+#ifdef CONFIG_COMPAT
53208+void
53209+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
53210+{
53211+#ifdef CONFIG_GRKERNSEC_EXECLOG
53212+ char *grarg = gr_exec_arg_buf;
53213+ unsigned int i, x, execlen = 0;
53214+ char c;
53215+
53216+ if (!((grsec_enable_execlog && grsec_enable_group &&
53217+ in_group_p(grsec_audit_gid))
53218+ || (grsec_enable_execlog && !grsec_enable_group)))
53219+ return;
53220+
53221+ mutex_lock(&gr_exec_arg_mutex);
53222+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53223+
53224+ if (unlikely(argv == NULL))
53225+ goto log;
53226+
53227+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53228+ compat_uptr_t p;
53229+ unsigned int len;
53230+
53231+ if (get_user(p, argv + i))
53232+ goto log;
53233+ len = strnlen_user(compat_ptr(p), 128 - execlen);
53234+ if (len > 128 - execlen)
53235+ len = 128 - execlen;
53236+ else if (len > 0)
53237+ len--;
53238+ else
53239+ goto log;
53240+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
53241+ goto log;
53242+
53243+ /* rewrite unprintable characters */
53244+ for (x = 0; x < len; x++) {
53245+ c = *(grarg + execlen + x);
53246+ if (c < 32 || c > 126)
53247+ *(grarg + execlen + x) = ' ';
53248+ }
53249+
53250+ execlen += len;
53251+ *(grarg + execlen) = ' ';
53252+ *(grarg + execlen + 1) = '\0';
53253+ execlen++;
53254+ }
53255+
53256+ log:
53257+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53258+ bprm->file->f_path.mnt, grarg);
53259+ mutex_unlock(&gr_exec_arg_mutex);
53260+#endif
53261+ return;
53262+}
53263+#endif
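
gr_handle_exec_args() above flattens argv into a small fixed buffer, bounding each copy with strnlen_user() and rewriting unprintable bytes to spaces so the log line stays clean. A user-space sketch of the same flattening over ordinary argv (illustrative only; the 132/128 sizes match the buffer and cap used above):

/* execlog_demo.c - sketch of the argv flattening done by gr_handle_exec_args(). */
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>

static char logbuf[132];

static void flatten_args(int argc, char **argv)
{
    unsigned int len = 0;

    memset(logbuf, 0, sizeof(logbuf));
    for (int i = 0; i < argc && len < 128; i++) {
        unsigned int n = strnlen(argv[i], 128 - len);

        memcpy(logbuf + len, argv[i], n);
        /* rewrite unprintable characters so the log stays on one line */
        for (unsigned int x = 0; x < n; x++)
            if (logbuf[len + x] < 32 || logbuf[len + x] > 126)
                logbuf[len + x] = ' ';
        len += n;
        logbuf[len++] = ' ';
    }
    logbuf[len] = '\0';
}

int main(int argc, char **argv)
{
    flatten_args(argc, argv);
    printf("exec: %s\n", logbuf);
    return 0;
}
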
53264diff -urNp linux-2.6.32.45/grsecurity/grsec_fifo.c linux-2.6.32.45/grsecurity/grsec_fifo.c
53265--- linux-2.6.32.45/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53266+++ linux-2.6.32.45/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
53267@@ -0,0 +1,24 @@
53268+#include <linux/kernel.h>
53269+#include <linux/sched.h>
53270+#include <linux/fs.h>
53271+#include <linux/file.h>
53272+#include <linux/grinternal.h>
53273+
53274+int
53275+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53276+ const struct dentry *dir, const int flag, const int acc_mode)
53277+{
53278+#ifdef CONFIG_GRKERNSEC_FIFO
53279+ const struct cred *cred = current_cred();
53280+
53281+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53282+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53283+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53284+ (cred->fsuid != dentry->d_inode->i_uid)) {
53285+ if (!inode_permission(dentry->d_inode, acc_mode))
53286+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53287+ return -EACCES;
53288+ }
53289+#endif
53290+ return 0;
53291+}
53292diff -urNp linux-2.6.32.45/grsecurity/grsec_fork.c linux-2.6.32.45/grsecurity/grsec_fork.c
53293--- linux-2.6.32.45/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53294+++ linux-2.6.32.45/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
53295@@ -0,0 +1,23 @@
53296+#include <linux/kernel.h>
53297+#include <linux/sched.h>
53298+#include <linux/grsecurity.h>
53299+#include <linux/grinternal.h>
53300+#include <linux/errno.h>
53301+
53302+void
53303+gr_log_forkfail(const int retval)
53304+{
53305+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53306+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53307+ switch (retval) {
53308+ case -EAGAIN:
53309+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53310+ break;
53311+ case -ENOMEM:
53312+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53313+ break;
53314+ }
53315+ }
53316+#endif
53317+ return;
53318+}
53319diff -urNp linux-2.6.32.45/grsecurity/grsec_init.c linux-2.6.32.45/grsecurity/grsec_init.c
53320--- linux-2.6.32.45/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53321+++ linux-2.6.32.45/grsecurity/grsec_init.c 2011-08-11 19:57:42.000000000 -0400
53322@@ -0,0 +1,270 @@
53323+#include <linux/kernel.h>
53324+#include <linux/sched.h>
53325+#include <linux/mm.h>
53326+#include <linux/smp_lock.h>
53327+#include <linux/gracl.h>
53328+#include <linux/slab.h>
53329+#include <linux/vmalloc.h>
53330+#include <linux/percpu.h>
53331+#include <linux/module.h>
53332+
53333+int grsec_enable_brute;
53334+int grsec_enable_link;
53335+int grsec_enable_dmesg;
53336+int grsec_enable_harden_ptrace;
53337+int grsec_enable_fifo;
53338+int grsec_enable_execlog;
53339+int grsec_enable_signal;
53340+int grsec_enable_forkfail;
53341+int grsec_enable_audit_ptrace;
53342+int grsec_enable_time;
53343+int grsec_enable_audit_textrel;
53344+int grsec_enable_group;
53345+int grsec_audit_gid;
53346+int grsec_enable_chdir;
53347+int grsec_enable_mount;
53348+int grsec_enable_rofs;
53349+int grsec_enable_chroot_findtask;
53350+int grsec_enable_chroot_mount;
53351+int grsec_enable_chroot_shmat;
53352+int grsec_enable_chroot_fchdir;
53353+int grsec_enable_chroot_double;
53354+int grsec_enable_chroot_pivot;
53355+int grsec_enable_chroot_chdir;
53356+int grsec_enable_chroot_chmod;
53357+int grsec_enable_chroot_mknod;
53358+int grsec_enable_chroot_nice;
53359+int grsec_enable_chroot_execlog;
53360+int grsec_enable_chroot_caps;
53361+int grsec_enable_chroot_sysctl;
53362+int grsec_enable_chroot_unix;
53363+int grsec_enable_tpe;
53364+int grsec_tpe_gid;
53365+int grsec_enable_blackhole;
53366+#ifdef CONFIG_IPV6_MODULE
53367+EXPORT_SYMBOL(grsec_enable_blackhole);
53368+#endif
53369+int grsec_lastack_retries;
53370+int grsec_enable_tpe_all;
53371+int grsec_enable_tpe_invert;
53372+int grsec_enable_socket_all;
53373+int grsec_socket_all_gid;
53374+int grsec_enable_socket_client;
53375+int grsec_socket_client_gid;
53376+int grsec_enable_socket_server;
53377+int grsec_socket_server_gid;
53378+int grsec_resource_logging;
53379+int grsec_disable_privio;
53380+int grsec_enable_log_rwxmaps;
53381+int grsec_lock;
53382+
53383+DEFINE_SPINLOCK(grsec_alert_lock);
53384+unsigned long grsec_alert_wtime = 0;
53385+unsigned long grsec_alert_fyet = 0;
53386+
53387+DEFINE_SPINLOCK(grsec_audit_lock);
53388+
53389+DEFINE_RWLOCK(grsec_exec_file_lock);
53390+
53391+char *gr_shared_page[4];
53392+
53393+char *gr_alert_log_fmt;
53394+char *gr_audit_log_fmt;
53395+char *gr_alert_log_buf;
53396+char *gr_audit_log_buf;
53397+
53398+extern struct gr_arg *gr_usermode;
53399+extern unsigned char *gr_system_salt;
53400+extern unsigned char *gr_system_sum;
53401+
53402+void __init
53403+grsecurity_init(void)
53404+{
53405+ int j;
53406+ /* create the per-cpu shared pages */
53407+
53408+#ifdef CONFIG_X86
53409+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
53410+#endif
53411+
53412+ for (j = 0; j < 4; j++) {
53413+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
53414+ if (gr_shared_page[j] == NULL) {
53415+ panic("Unable to allocate grsecurity shared page");
53416+ return;
53417+ }
53418+ }
53419+
53420+ /* allocate log buffers */
53421+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
53422+ if (!gr_alert_log_fmt) {
53423+ panic("Unable to allocate grsecurity alert log format buffer");
53424+ return;
53425+ }
53426+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
53427+ if (!gr_audit_log_fmt) {
53428+ panic("Unable to allocate grsecurity audit log format buffer");
53429+ return;
53430+ }
53431+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53432+ if (!gr_alert_log_buf) {
53433+ panic("Unable to allocate grsecurity alert log buffer");
53434+ return;
53435+ }
53436+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53437+ if (!gr_audit_log_buf) {
53438+ panic("Unable to allocate grsecurity audit log buffer");
53439+ return;
53440+ }
53441+
53442+ /* allocate memory for authentication structure */
53443+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
53444+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
53445+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
53446+
53447+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
53448+ panic("Unable to allocate grsecurity authentication structure");
53449+ return;
53450+ }
53451+
53452+
53453+#ifdef CONFIG_GRKERNSEC_IO
53454+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
53455+ grsec_disable_privio = 1;
53456+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53457+ grsec_disable_privio = 1;
53458+#else
53459+ grsec_disable_privio = 0;
53460+#endif
53461+#endif
53462+
53463+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53464+ /* for backward compatibility, tpe_invert always defaults to on if
53465+ enabled in the kernel
53466+ */
53467+ grsec_enable_tpe_invert = 1;
53468+#endif
53469+
53470+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53471+#ifndef CONFIG_GRKERNSEC_SYSCTL
53472+ grsec_lock = 1;
53473+#endif
53474+
53475+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53476+ grsec_enable_audit_textrel = 1;
53477+#endif
53478+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53479+ grsec_enable_log_rwxmaps = 1;
53480+#endif
53481+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53482+ grsec_enable_group = 1;
53483+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
53484+#endif
53485+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53486+ grsec_enable_chdir = 1;
53487+#endif
53488+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53489+ grsec_enable_harden_ptrace = 1;
53490+#endif
53491+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53492+ grsec_enable_mount = 1;
53493+#endif
53494+#ifdef CONFIG_GRKERNSEC_LINK
53495+ grsec_enable_link = 1;
53496+#endif
53497+#ifdef CONFIG_GRKERNSEC_BRUTE
53498+ grsec_enable_brute = 1;
53499+#endif
53500+#ifdef CONFIG_GRKERNSEC_DMESG
53501+ grsec_enable_dmesg = 1;
53502+#endif
53503+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53504+ grsec_enable_blackhole = 1;
53505+ grsec_lastack_retries = 4;
53506+#endif
53507+#ifdef CONFIG_GRKERNSEC_FIFO
53508+ grsec_enable_fifo = 1;
53509+#endif
53510+#ifdef CONFIG_GRKERNSEC_EXECLOG
53511+ grsec_enable_execlog = 1;
53512+#endif
53513+#ifdef CONFIG_GRKERNSEC_SIGNAL
53514+ grsec_enable_signal = 1;
53515+#endif
53516+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53517+ grsec_enable_forkfail = 1;
53518+#endif
53519+#ifdef CONFIG_GRKERNSEC_TIME
53520+ grsec_enable_time = 1;
53521+#endif
53522+#ifdef CONFIG_GRKERNSEC_RESLOG
53523+ grsec_resource_logging = 1;
53524+#endif
53525+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53526+ grsec_enable_chroot_findtask = 1;
53527+#endif
53528+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53529+ grsec_enable_chroot_unix = 1;
53530+#endif
53531+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53532+ grsec_enable_chroot_mount = 1;
53533+#endif
53534+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53535+ grsec_enable_chroot_fchdir = 1;
53536+#endif
53537+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53538+ grsec_enable_chroot_shmat = 1;
53539+#endif
53540+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53541+ grsec_enable_audit_ptrace = 1;
53542+#endif
53543+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53544+ grsec_enable_chroot_double = 1;
53545+#endif
53546+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53547+ grsec_enable_chroot_pivot = 1;
53548+#endif
53549+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53550+ grsec_enable_chroot_chdir = 1;
53551+#endif
53552+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53553+ grsec_enable_chroot_chmod = 1;
53554+#endif
53555+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53556+ grsec_enable_chroot_mknod = 1;
53557+#endif
53558+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53559+ grsec_enable_chroot_nice = 1;
53560+#endif
53561+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53562+ grsec_enable_chroot_execlog = 1;
53563+#endif
53564+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53565+ grsec_enable_chroot_caps = 1;
53566+#endif
53567+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53568+ grsec_enable_chroot_sysctl = 1;
53569+#endif
53570+#ifdef CONFIG_GRKERNSEC_TPE
53571+ grsec_enable_tpe = 1;
53572+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
53573+#ifdef CONFIG_GRKERNSEC_TPE_ALL
53574+ grsec_enable_tpe_all = 1;
53575+#endif
53576+#endif
53577+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53578+ grsec_enable_socket_all = 1;
53579+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
53580+#endif
53581+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53582+ grsec_enable_socket_client = 1;
53583+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
53584+#endif
53585+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53586+ grsec_enable_socket_server = 1;
53587+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
53588+#endif
53589+#endif
53590+
53591+ return;
53592+}
53593diff -urNp linux-2.6.32.45/grsecurity/grsec_link.c linux-2.6.32.45/grsecurity/grsec_link.c
53594--- linux-2.6.32.45/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
53595+++ linux-2.6.32.45/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
53596@@ -0,0 +1,43 @@
53597+#include <linux/kernel.h>
53598+#include <linux/sched.h>
53599+#include <linux/fs.h>
53600+#include <linux/file.h>
53601+#include <linux/grinternal.h>
53602+
53603+int
53604+gr_handle_follow_link(const struct inode *parent,
53605+ const struct inode *inode,
53606+ const struct dentry *dentry, const struct vfsmount *mnt)
53607+{
53608+#ifdef CONFIG_GRKERNSEC_LINK
53609+ const struct cred *cred = current_cred();
53610+
53611+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
53612+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
53613+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
53614+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
53615+ return -EACCES;
53616+ }
53617+#endif
53618+ return 0;
53619+}
53620+
53621+int
53622+gr_handle_hardlink(const struct dentry *dentry,
53623+ const struct vfsmount *mnt,
53624+ struct inode *inode, const int mode, const char *to)
53625+{
53626+#ifdef CONFIG_GRKERNSEC_LINK
53627+ const struct cred *cred = current_cred();
53628+
53629+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
53630+ (!S_ISREG(mode) || (mode & S_ISUID) ||
53631+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
53632+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
53633+ !capable(CAP_FOWNER) && cred->uid) {
53634+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
53635+ return -EPERM;
53636+ }
53637+#endif
53638+ return 0;
53639+}
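
The hardlink rule in gr_handle_hardlink() above lets a non-owner link only to a plain file that is neither setuid nor setgid-executable and that they could already read and write (inode_permission() returning nonzero means the access would be refused). A sketch of that predicate over plain arguments rather than inode/cred structures; the parameter names are illustrative, and is_privileged stands in for the CAP_FOWNER/root exemptions.

/* hardlink_demo.c - sketch of the non-owner hardlink rule. */
#include <stdio.h>
#include <sys/stat.h>

static int deny_hardlink(unsigned fsuid, unsigned file_uid, mode_t mode,
                         int can_read_write, int is_privileged)
{
    if (fsuid == file_uid || is_privileged)
        return 0;                           /* owner or privileged: allowed   */
    if (!S_ISREG(mode))
        return 1;                           /* devices, fifos, ...: denied    */
    if (mode & S_ISUID)
        return 1;                           /* setuid target: denied          */
    if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
        return 1;                           /* setgid executable: denied      */
    return can_read_write ? 0 : 1;          /* must already have rw access    */
}

int main(void)
{
    printf("%d\n", deny_hardlink(1000, 0, S_IFREG | 0644, 1, 0));  /* 0: allowed */
    printf("%d\n", deny_hardlink(1000, 0, S_IFREG | 04755, 1, 0)); /* 1: denied  */
    return 0;
}
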
53640diff -urNp linux-2.6.32.45/grsecurity/grsec_log.c linux-2.6.32.45/grsecurity/grsec_log.c
53641--- linux-2.6.32.45/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
53642+++ linux-2.6.32.45/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
53643@@ -0,0 +1,310 @@
53644+#include <linux/kernel.h>
53645+#include <linux/sched.h>
53646+#include <linux/file.h>
53647+#include <linux/tty.h>
53648+#include <linux/fs.h>
53649+#include <linux/grinternal.h>
53650+
53651+#ifdef CONFIG_TREE_PREEMPT_RCU
53652+#define DISABLE_PREEMPT() preempt_disable()
53653+#define ENABLE_PREEMPT() preempt_enable()
53654+#else
53655+#define DISABLE_PREEMPT()
53656+#define ENABLE_PREEMPT()
53657+#endif
53658+
53659+#define BEGIN_LOCKS(x) \
53660+ DISABLE_PREEMPT(); \
53661+ rcu_read_lock(); \
53662+ read_lock(&tasklist_lock); \
53663+ read_lock(&grsec_exec_file_lock); \
53664+ if (x != GR_DO_AUDIT) \
53665+ spin_lock(&grsec_alert_lock); \
53666+ else \
53667+ spin_lock(&grsec_audit_lock)
53668+
53669+#define END_LOCKS(x) \
53670+ if (x != GR_DO_AUDIT) \
53671+ spin_unlock(&grsec_alert_lock); \
53672+ else \
53673+ spin_unlock(&grsec_audit_lock); \
53674+ read_unlock(&grsec_exec_file_lock); \
53675+ read_unlock(&tasklist_lock); \
53676+ rcu_read_unlock(); \
53677+ ENABLE_PREEMPT(); \
53678+ if (x == GR_DONT_AUDIT) \
53679+ gr_handle_alertkill(current)
53680+
53681+enum {
53682+ FLOODING,
53683+ NO_FLOODING
53684+};
53685+
53686+extern char *gr_alert_log_fmt;
53687+extern char *gr_audit_log_fmt;
53688+extern char *gr_alert_log_buf;
53689+extern char *gr_audit_log_buf;
53690+
53691+static int gr_log_start(int audit)
53692+{
53693+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
53694+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
53695+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53696+
53697+ if (audit == GR_DO_AUDIT)
53698+ goto set_fmt;
53699+
53700+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
53701+ grsec_alert_wtime = jiffies;
53702+ grsec_alert_fyet = 0;
53703+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
53704+ grsec_alert_fyet++;
53705+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
53706+ grsec_alert_wtime = jiffies;
53707+ grsec_alert_fyet++;
53708+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
53709+ return FLOODING;
53710+ } else return FLOODING;
53711+
53712+set_fmt:
53713+ memset(buf, 0, PAGE_SIZE);
53714+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
53715+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
53716+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53717+ } else if (current->signal->curr_ip) {
53718+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
53719+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
53720+ } else if (gr_acl_is_enabled()) {
53721+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
53722+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53723+ } else {
53724+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
53725+ strcpy(buf, fmt);
53726+ }
53727+
53728+ return NO_FLOODING;
53729+}
53730+
53731+static void gr_log_middle(int audit, const char *msg, va_list ap)
53732+ __attribute__ ((format (printf, 2, 0)));
53733+
53734+static void gr_log_middle(int audit, const char *msg, va_list ap)
53735+{
53736+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53737+ unsigned int len = strlen(buf);
53738+
53739+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53740+
53741+ return;
53742+}
53743+
53744+static void gr_log_middle_varargs(int audit, const char *msg, ...)
53745+ __attribute__ ((format (printf, 2, 3)));
53746+
53747+static void gr_log_middle_varargs(int audit, const char *msg, ...)
53748+{
53749+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53750+ unsigned int len = strlen(buf);
53751+ va_list ap;
53752+
53753+ va_start(ap, msg);
53754+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53755+ va_end(ap);
53756+
53757+ return;
53758+}
53759+
53760+static void gr_log_end(int audit)
53761+{
53762+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53763+ unsigned int len = strlen(buf);
53764+
53765+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
53766+ printk("%s\n", buf);
53767+
53768+ return;
53769+}
53770+
53771+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
53772+{
53773+ int logtype;
53774+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
53775+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
53776+ void *voidptr = NULL;
53777+ int num1 = 0, num2 = 0;
53778+ unsigned long ulong1 = 0, ulong2 = 0;
53779+ struct dentry *dentry = NULL;
53780+ struct vfsmount *mnt = NULL;
53781+ struct file *file = NULL;
53782+ struct task_struct *task = NULL;
53783+ const struct cred *cred, *pcred;
53784+ va_list ap;
53785+
53786+ BEGIN_LOCKS(audit);
53787+ logtype = gr_log_start(audit);
53788+ if (logtype == FLOODING) {
53789+ END_LOCKS(audit);
53790+ return;
53791+ }
53792+ va_start(ap, argtypes);
53793+ switch (argtypes) {
53794+ case GR_TTYSNIFF:
53795+ task = va_arg(ap, struct task_struct *);
53796+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
53797+ break;
53798+ case GR_SYSCTL_HIDDEN:
53799+ str1 = va_arg(ap, char *);
53800+ gr_log_middle_varargs(audit, msg, result, str1);
53801+ break;
53802+ case GR_RBAC:
53803+ dentry = va_arg(ap, struct dentry *);
53804+ mnt = va_arg(ap, struct vfsmount *);
53805+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
53806+ break;
53807+ case GR_RBAC_STR:
53808+ dentry = va_arg(ap, struct dentry *);
53809+ mnt = va_arg(ap, struct vfsmount *);
53810+ str1 = va_arg(ap, char *);
53811+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
53812+ break;
53813+ case GR_STR_RBAC:
53814+ str1 = va_arg(ap, char *);
53815+ dentry = va_arg(ap, struct dentry *);
53816+ mnt = va_arg(ap, struct vfsmount *);
53817+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
53818+ break;
53819+ case GR_RBAC_MODE2:
53820+ dentry = va_arg(ap, struct dentry *);
53821+ mnt = va_arg(ap, struct vfsmount *);
53822+ str1 = va_arg(ap, char *);
53823+ str2 = va_arg(ap, char *);
53824+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
53825+ break;
53826+ case GR_RBAC_MODE3:
53827+ dentry = va_arg(ap, struct dentry *);
53828+ mnt = va_arg(ap, struct vfsmount *);
53829+ str1 = va_arg(ap, char *);
53830+ str2 = va_arg(ap, char *);
53831+ str3 = va_arg(ap, char *);
53832+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
53833+ break;
53834+ case GR_FILENAME:
53835+ dentry = va_arg(ap, struct dentry *);
53836+ mnt = va_arg(ap, struct vfsmount *);
53837+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
53838+ break;
53839+ case GR_STR_FILENAME:
53840+ str1 = va_arg(ap, char *);
53841+ dentry = va_arg(ap, struct dentry *);
53842+ mnt = va_arg(ap, struct vfsmount *);
53843+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
53844+ break;
53845+ case GR_FILENAME_STR:
53846+ dentry = va_arg(ap, struct dentry *);
53847+ mnt = va_arg(ap, struct vfsmount *);
53848+ str1 = va_arg(ap, char *);
53849+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
53850+ break;
53851+ case GR_FILENAME_TWO_INT:
53852+ dentry = va_arg(ap, struct dentry *);
53853+ mnt = va_arg(ap, struct vfsmount *);
53854+ num1 = va_arg(ap, int);
53855+ num2 = va_arg(ap, int);
53856+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
53857+ break;
53858+ case GR_FILENAME_TWO_INT_STR:
53859+ dentry = va_arg(ap, struct dentry *);
53860+ mnt = va_arg(ap, struct vfsmount *);
53861+ num1 = va_arg(ap, int);
53862+ num2 = va_arg(ap, int);
53863+ str1 = va_arg(ap, char *);
53864+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
53865+ break;
53866+ case GR_TEXTREL:
53867+ file = va_arg(ap, struct file *);
53868+ ulong1 = va_arg(ap, unsigned long);
53869+ ulong2 = va_arg(ap, unsigned long);
53870+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
53871+ break;
53872+ case GR_PTRACE:
53873+ task = va_arg(ap, struct task_struct *);
53874+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
53875+ break;
53876+ case GR_RESOURCE:
53877+ task = va_arg(ap, struct task_struct *);
53878+ cred = __task_cred(task);
53879+ pcred = __task_cred(task->real_parent);
53880+ ulong1 = va_arg(ap, unsigned long);
53881+ str1 = va_arg(ap, char *);
53882+ ulong2 = va_arg(ap, unsigned long);
53883+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53884+ break;
53885+ case GR_CAP:
53886+ task = va_arg(ap, struct task_struct *);
53887+ cred = __task_cred(task);
53888+ pcred = __task_cred(task->real_parent);
53889+ str1 = va_arg(ap, char *);
53890+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53891+ break;
53892+ case GR_SIG:
53893+ str1 = va_arg(ap, char *);
53894+ voidptr = va_arg(ap, void *);
53895+ gr_log_middle_varargs(audit, msg, str1, voidptr);
53896+ break;
53897+ case GR_SIG2:
53898+ task = va_arg(ap, struct task_struct *);
53899+ cred = __task_cred(task);
53900+ pcred = __task_cred(task->real_parent);
53901+ num1 = va_arg(ap, int);
53902+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53903+ break;
53904+ case GR_CRASH1:
53905+ task = va_arg(ap, struct task_struct *);
53906+ cred = __task_cred(task);
53907+ pcred = __task_cred(task->real_parent);
53908+ ulong1 = va_arg(ap, unsigned long);
53909+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
53910+ break;
53911+ case GR_CRASH2:
53912+ task = va_arg(ap, struct task_struct *);
53913+ cred = __task_cred(task);
53914+ pcred = __task_cred(task->real_parent);
53915+ ulong1 = va_arg(ap, unsigned long);
53916+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
53917+ break;
53918+ case GR_RWXMAP:
53919+ file = va_arg(ap, struct file *);
53920+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
53921+ break;
53922+ case GR_PSACCT:
53923+ {
53924+ unsigned int wday, cday;
53925+ __u8 whr, chr;
53926+ __u8 wmin, cmin;
53927+ __u8 wsec, csec;
53928+ char cur_tty[64] = { 0 };
53929+ char parent_tty[64] = { 0 };
53930+
53931+ task = va_arg(ap, struct task_struct *);
53932+ wday = va_arg(ap, unsigned int);
53933+ cday = va_arg(ap, unsigned int);
53934+ whr = va_arg(ap, int);
53935+ chr = va_arg(ap, int);
53936+ wmin = va_arg(ap, int);
53937+ cmin = va_arg(ap, int);
53938+ wsec = va_arg(ap, int);
53939+ csec = va_arg(ap, int);
53940+ ulong1 = va_arg(ap, unsigned long);
53941+ cred = __task_cred(task);
53942+ pcred = __task_cred(task->real_parent);
53943+
53944+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53945+ }
53946+ break;
53947+ default:
53948+ gr_log_middle(audit, msg, ap);
53949+ }
53950+ va_end(ap);
53951+ gr_log_end(audit);
53952+ END_LOCKS(audit);
53953+}
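
The flood control in gr_log_start() above amounts to a fixed window of CONFIG_GRKERNSEC_FLOODTIME seconds that admits at most CONFIG_GRKERNSEC_FLOODBURST further alerts, prints a single "logging disabled" notice on the first overflow, and drops everything else until a new window opens. The standalone sketch below mirrors that window arithmetic with illustrative constants and time() standing in for jiffies; may_log, FLOOD_TIME and FLOOD_BURST are names invented for this sketch, not symbols from the patch.

#include <stdio.h>
#include <time.h>

/* Illustrative stand-ins for CONFIG_GRKERNSEC_FLOODTIME / FLOODBURST. */
#define FLOOD_TIME  10   /* seconds per window */
#define FLOOD_BURST 4    /* alerts allowed per window */

static time_t window_start;     /* like grsec_alert_wtime */
static unsigned int in_window;  /* like grsec_alert_fyet  */

/* Returns 1 if the alert may be logged, 0 if it is suppressed. */
static int may_log(time_t now)
{
	if (!window_start || now - window_start > FLOOD_TIME) {
		window_start = now;      /* open a fresh window */
		in_window = 0;
		return 1;
	}
	if (in_window < FLOOD_BURST) {   /* still inside the burst budget */
		in_window++;
		return 1;
	}
	if (in_window == FLOOD_BURST) {  /* first drop: print the notice once */
		window_start = now;
		in_window++;
		printf("more alerts, logging disabled for %d seconds\n", FLOOD_TIME);
	}
	return 0;                        /* suppressed until the window expires */
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("alert %d: %s\n", i, may_log(time(NULL)) ? "logged" : "dropped");
	return 0;
}
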
53954diff -urNp linux-2.6.32.45/grsecurity/grsec_mem.c linux-2.6.32.45/grsecurity/grsec_mem.c
53955--- linux-2.6.32.45/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
53956+++ linux-2.6.32.45/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
53957@@ -0,0 +1,33 @@
53958+#include <linux/kernel.h>
53959+#include <linux/sched.h>
53960+#include <linux/mm.h>
53961+#include <linux/mman.h>
53962+#include <linux/grinternal.h>
53963+
53964+void
53965+gr_handle_ioperm(void)
53966+{
53967+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
53968+ return;
53969+}
53970+
53971+void
53972+gr_handle_iopl(void)
53973+{
53974+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
53975+ return;
53976+}
53977+
53978+void
53979+gr_handle_mem_readwrite(u64 from, u64 to)
53980+{
53981+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
53982+ return;
53983+}
53984+
53985+void
53986+gr_handle_vm86(void)
53987+{
53988+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
53989+ return;
53990+}
53991diff -urNp linux-2.6.32.45/grsecurity/grsec_mount.c linux-2.6.32.45/grsecurity/grsec_mount.c
53992--- linux-2.6.32.45/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
53993+++ linux-2.6.32.45/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
53994@@ -0,0 +1,62 @@
53995+#include <linux/kernel.h>
53996+#include <linux/sched.h>
53997+#include <linux/mount.h>
53998+#include <linux/grsecurity.h>
53999+#include <linux/grinternal.h>
54000+
54001+void
54002+gr_log_remount(const char *devname, const int retval)
54003+{
54004+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54005+ if (grsec_enable_mount && (retval >= 0))
54006+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54007+#endif
54008+ return;
54009+}
54010+
54011+void
54012+gr_log_unmount(const char *devname, const int retval)
54013+{
54014+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54015+ if (grsec_enable_mount && (retval >= 0))
54016+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54017+#endif
54018+ return;
54019+}
54020+
54021+void
54022+gr_log_mount(const char *from, const char *to, const int retval)
54023+{
54024+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54025+ if (grsec_enable_mount && (retval >= 0))
54026+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54027+#endif
54028+ return;
54029+}
54030+
54031+int
54032+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54033+{
54034+#ifdef CONFIG_GRKERNSEC_ROFS
54035+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54036+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54037+ return -EPERM;
54038+ } else
54039+ return 0;
54040+#endif
54041+ return 0;
54042+}
54043+
54044+int
54045+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54046+{
54047+#ifdef CONFIG_GRKERNSEC_ROFS
54048+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54049+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54050+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54051+ return -EPERM;
54052+ } else
54053+ return 0;
54054+#endif
54055+ return 0;
54056+}
54057diff -urNp linux-2.6.32.45/grsecurity/grsec_pax.c linux-2.6.32.45/grsecurity/grsec_pax.c
54058--- linux-2.6.32.45/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54059+++ linux-2.6.32.45/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
54060@@ -0,0 +1,36 @@
54061+#include <linux/kernel.h>
54062+#include <linux/sched.h>
54063+#include <linux/mm.h>
54064+#include <linux/file.h>
54065+#include <linux/grinternal.h>
54066+#include <linux/grsecurity.h>
54067+
54068+void
54069+gr_log_textrel(struct vm_area_struct * vma)
54070+{
54071+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54072+ if (grsec_enable_audit_textrel)
54073+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54074+#endif
54075+ return;
54076+}
54077+
54078+void
54079+gr_log_rwxmmap(struct file *file)
54080+{
54081+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54082+ if (grsec_enable_log_rwxmaps)
54083+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54084+#endif
54085+ return;
54086+}
54087+
54088+void
54089+gr_log_rwxmprotect(struct file *file)
54090+{
54091+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54092+ if (grsec_enable_log_rwxmaps)
54093+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54094+#endif
54095+ return;
54096+}
54097diff -urNp linux-2.6.32.45/grsecurity/grsec_ptrace.c linux-2.6.32.45/grsecurity/grsec_ptrace.c
54098--- linux-2.6.32.45/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54099+++ linux-2.6.32.45/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
54100@@ -0,0 +1,14 @@
54101+#include <linux/kernel.h>
54102+#include <linux/sched.h>
54103+#include <linux/grinternal.h>
54104+#include <linux/grsecurity.h>
54105+
54106+void
54107+gr_audit_ptrace(struct task_struct *task)
54108+{
54109+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54110+ if (grsec_enable_audit_ptrace)
54111+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54112+#endif
54113+ return;
54114+}
54115diff -urNp linux-2.6.32.45/grsecurity/grsec_sig.c linux-2.6.32.45/grsecurity/grsec_sig.c
54116--- linux-2.6.32.45/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54117+++ linux-2.6.32.45/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
54118@@ -0,0 +1,205 @@
54119+#include <linux/kernel.h>
54120+#include <linux/sched.h>
54121+#include <linux/delay.h>
54122+#include <linux/grsecurity.h>
54123+#include <linux/grinternal.h>
54124+#include <linux/hardirq.h>
54125+
54126+char *signames[] = {
54127+ [SIGSEGV] = "Segmentation fault",
54128+ [SIGILL] = "Illegal instruction",
54129+ [SIGABRT] = "Abort",
54130+ [SIGBUS] = "Invalid alignment/Bus error"
54131+};
54132+
54133+void
54134+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54135+{
54136+#ifdef CONFIG_GRKERNSEC_SIGNAL
54137+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54138+ (sig == SIGABRT) || (sig == SIGBUS))) {
54139+ if (t->pid == current->pid) {
54140+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54141+ } else {
54142+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54143+ }
54144+ }
54145+#endif
54146+ return;
54147+}
54148+
54149+int
54150+gr_handle_signal(const struct task_struct *p, const int sig)
54151+{
54152+#ifdef CONFIG_GRKERNSEC
54153+ if (current->pid > 1 && gr_check_protected_task(p)) {
54154+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54155+ return -EPERM;
54156+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54157+ return -EPERM;
54158+ }
54159+#endif
54160+ return 0;
54161+}
54162+
54163+#ifdef CONFIG_GRKERNSEC
54164+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54165+
54166+int gr_fake_force_sig(int sig, struct task_struct *t)
54167+{
54168+ unsigned long int flags;
54169+ int ret, blocked, ignored;
54170+ struct k_sigaction *action;
54171+
54172+ spin_lock_irqsave(&t->sighand->siglock, flags);
54173+ action = &t->sighand->action[sig-1];
54174+ ignored = action->sa.sa_handler == SIG_IGN;
54175+ blocked = sigismember(&t->blocked, sig);
54176+ if (blocked || ignored) {
54177+ action->sa.sa_handler = SIG_DFL;
54178+ if (blocked) {
54179+ sigdelset(&t->blocked, sig);
54180+ recalc_sigpending_and_wake(t);
54181+ }
54182+ }
54183+ if (action->sa.sa_handler == SIG_DFL)
54184+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
54185+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54186+
54187+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
54188+
54189+ return ret;
54190+}
54191+#endif
54192+
54193+#ifdef CONFIG_GRKERNSEC_BRUTE
54194+#define GR_USER_BAN_TIME (15 * 60)
54195+
54196+static int __get_dumpable(unsigned long mm_flags)
54197+{
54198+ int ret;
54199+
54200+ ret = mm_flags & MMF_DUMPABLE_MASK;
54201+ return (ret >= 2) ? 2 : ret;
54202+}
54203+#endif
54204+
54205+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54206+{
54207+#ifdef CONFIG_GRKERNSEC_BRUTE
54208+ uid_t uid = 0;
54209+
54210+ if (!grsec_enable_brute)
54211+ return;
54212+
54213+ rcu_read_lock();
54214+ read_lock(&tasklist_lock);
54215+ read_lock(&grsec_exec_file_lock);
54216+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54217+ p->real_parent->brute = 1;
54218+ else {
54219+ const struct cred *cred = __task_cred(p), *cred2;
54220+ struct task_struct *tsk, *tsk2;
54221+
54222+ if (!__get_dumpable(mm_flags) && cred->uid) {
54223+ struct user_struct *user;
54224+
54225+ uid = cred->uid;
54226+
54227+ /* this is put upon execution past expiration */
54228+ user = find_user(uid);
54229+ if (user == NULL)
54230+ goto unlock;
54231+ user->banned = 1;
54232+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54233+ if (user->ban_expires == ~0UL)
54234+ user->ban_expires--;
54235+
54236+ do_each_thread(tsk2, tsk) {
54237+ cred2 = __task_cred(tsk);
54238+ if (tsk != p && cred2->uid == uid)
54239+ gr_fake_force_sig(SIGKILL, tsk);
54240+ } while_each_thread(tsk2, tsk);
54241+ }
54242+ }
54243+unlock:
54244+ read_unlock(&grsec_exec_file_lock);
54245+ read_unlock(&tasklist_lock);
54246+ rcu_read_unlock();
54247+
54248+ if (uid)
54249+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54250+#endif
54251+ return;
54252+}
54253+
54254+void gr_handle_brute_check(void)
54255+{
54256+#ifdef CONFIG_GRKERNSEC_BRUTE
54257+ if (current->brute)
54258+ msleep(30 * 1000);
54259+#endif
54260+ return;
54261+}
54262+
54263+void gr_handle_kernel_exploit(void)
54264+{
54265+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54266+ const struct cred *cred;
54267+ struct task_struct *tsk, *tsk2;
54268+ struct user_struct *user;
54269+ uid_t uid;
54270+
54271+ if (in_irq() || in_serving_softirq() || in_nmi())
54272+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54273+
54274+ uid = current_uid();
54275+
54276+ if (uid == 0)
54277+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
54278+ else {
54279+ /* kill all the processes of this user, hold a reference
54280+ to their creds struct, and prevent them from creating
54281+ another process until system reset
54282+ */
54283+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54284+ /* we intentionally leak this ref */
54285+ user = get_uid(current->cred->user);
54286+ if (user) {
54287+ user->banned = 1;
54288+ user->ban_expires = ~0UL;
54289+ }
54290+
54291+ read_lock(&tasklist_lock);
54292+ do_each_thread(tsk2, tsk) {
54293+ cred = __task_cred(tsk);
54294+ if (cred->uid == uid)
54295+ gr_fake_force_sig(SIGKILL, tsk);
54296+ } while_each_thread(tsk2, tsk);
54297+ read_unlock(&tasklist_lock);
54298+ }
54299+#endif
54300+}
54301+
54302+int __gr_process_user_ban(struct user_struct *user)
54303+{
54304+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54305+ if (unlikely(user->banned)) {
54306+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54307+ user->banned = 0;
54308+ user->ban_expires = 0;
54309+ free_uid(user);
54310+ } else
54311+ return -EPERM;
54312+ }
54313+#endif
54314+ return 0;
54315+}
54316+
54317+int gr_process_user_ban(void)
54318+{
54319+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54320+ return __gr_process_user_ban(current->cred->user);
54321+#endif
54322+ return 0;
54323+}
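
The ban bookkeeping above hinges on two details: ban_expires == ~0UL is reserved for the permanent ban installed by gr_handle_kernel_exploit() (which is why gr_handle_brute_attach() nudges a 15-minute expiry down by one second if it happens to land on ~0UL), and expiry is tested with the kernel's wraparound-safe time_after_eq() comparison. A minimal standalone sketch of the same decision; process_ban, time_after_eq_ul and the plain integers are stand-ins invented here for __gr_process_user_ban() and the user_struct fields.

#include <stdio.h>

#define PERMANENT_BAN (~0UL)

/* Wrap-safe "a >= b" for free-running counters, like the kernel's time_after_eq(). */
static int time_after_eq_ul(unsigned long a, unsigned long b)
{
	return (long)(a - b) >= 0;
}

/* Sketch mirroring __gr_process_user_ban(): returns 0 if the user may proceed,
 * -1 (standing in for -EPERM) while the ban is still active. */
static int process_ban(int *banned, unsigned long *ban_expires, unsigned long now)
{
	if (*banned) {
		if (*ban_expires != PERMANENT_BAN && time_after_eq_ul(now, *ban_expires)) {
			*banned = 0;        /* temporary ban has expired */
			*ban_expires = 0;
		} else {
			return -1;          /* still banned (or banned forever) */
		}
	}
	return 0;
}

int main(void)
{
	int banned = 1;
	unsigned long expires = 1000 + 15 * 60;  /* "now" + 15 minutes, as GR_USER_BAN_TIME */

	printf("at t=1000: %d\n", process_ban(&banned, &expires, 1000));  /* -1: still banned */
	printf("at t=1900: %d\n", process_ban(&banned, &expires, 1900));  /* 0: ban lifted    */
	return 0;
}
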
54324diff -urNp linux-2.6.32.45/grsecurity/grsec_sock.c linux-2.6.32.45/grsecurity/grsec_sock.c
54325--- linux-2.6.32.45/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54326+++ linux-2.6.32.45/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
54327@@ -0,0 +1,275 @@
54328+#include <linux/kernel.h>
54329+#include <linux/module.h>
54330+#include <linux/sched.h>
54331+#include <linux/file.h>
54332+#include <linux/net.h>
54333+#include <linux/in.h>
54334+#include <linux/ip.h>
54335+#include <net/sock.h>
54336+#include <net/inet_sock.h>
54337+#include <linux/grsecurity.h>
54338+#include <linux/grinternal.h>
54339+#include <linux/gracl.h>
54340+
54341+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
54342+EXPORT_SYMBOL(gr_cap_rtnetlink);
54343+
54344+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54345+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54346+
54347+EXPORT_SYMBOL(gr_search_udp_recvmsg);
54348+EXPORT_SYMBOL(gr_search_udp_sendmsg);
54349+
54350+#ifdef CONFIG_UNIX_MODULE
54351+EXPORT_SYMBOL(gr_acl_handle_unix);
54352+EXPORT_SYMBOL(gr_acl_handle_mknod);
54353+EXPORT_SYMBOL(gr_handle_chroot_unix);
54354+EXPORT_SYMBOL(gr_handle_create);
54355+#endif
54356+
54357+#ifdef CONFIG_GRKERNSEC
54358+#define gr_conn_table_size 32749
54359+struct conn_table_entry {
54360+ struct conn_table_entry *next;
54361+ struct signal_struct *sig;
54362+};
54363+
54364+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
54365+DEFINE_SPINLOCK(gr_conn_table_lock);
54366+
54367+extern const char * gr_socktype_to_name(unsigned char type);
54368+extern const char * gr_proto_to_name(unsigned char proto);
54369+extern const char * gr_sockfamily_to_name(unsigned char family);
54370+
54371+static __inline__ int
54372+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
54373+{
54374+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
54375+}
54376+
54377+static __inline__ int
54378+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
54379+ __u16 sport, __u16 dport)
54380+{
54381+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
54382+ sig->gr_sport == sport && sig->gr_dport == dport))
54383+ return 1;
54384+ else
54385+ return 0;
54386+}
54387+
54388+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
54389+{
54390+ struct conn_table_entry **match;
54391+ unsigned int index;
54392+
54393+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54394+ sig->gr_sport, sig->gr_dport,
54395+ gr_conn_table_size);
54396+
54397+ newent->sig = sig;
54398+
54399+ match = &gr_conn_table[index];
54400+ newent->next = *match;
54401+ *match = newent;
54402+
54403+ return;
54404+}
54405+
54406+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
54407+{
54408+ struct conn_table_entry *match, *last = NULL;
54409+ unsigned int index;
54410+
54411+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54412+ sig->gr_sport, sig->gr_dport,
54413+ gr_conn_table_size);
54414+
54415+ match = gr_conn_table[index];
54416+ while (match && !conn_match(match->sig,
54417+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
54418+ sig->gr_dport)) {
54419+ last = match;
54420+ match = match->next;
54421+ }
54422+
54423+ if (match) {
54424+ if (last)
54425+ last->next = match->next;
54426+ else
54427+ gr_conn_table[index] = NULL;
54428+ kfree(match);
54429+ }
54430+
54431+ return;
54432+}
54433+
54434+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
54435+ __u16 sport, __u16 dport)
54436+{
54437+ struct conn_table_entry *match;
54438+ unsigned int index;
54439+
54440+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
54441+
54442+ match = gr_conn_table[index];
54443+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
54444+ match = match->next;
54445+
54446+ if (match)
54447+ return match->sig;
54448+ else
54449+ return NULL;
54450+}
54451+
54452+#endif
54453+
54454+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
54455+{
54456+#ifdef CONFIG_GRKERNSEC
54457+ struct signal_struct *sig = task->signal;
54458+ struct conn_table_entry *newent;
54459+
54460+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
54461+ if (newent == NULL)
54462+ return;
54463+ /* no bh lock needed since we are called with bh disabled */
54464+ spin_lock(&gr_conn_table_lock);
54465+ gr_del_task_from_ip_table_nolock(sig);
54466+ sig->gr_saddr = inet->rcv_saddr;
54467+ sig->gr_daddr = inet->daddr;
54468+ sig->gr_sport = inet->sport;
54469+ sig->gr_dport = inet->dport;
54470+ gr_add_to_task_ip_table_nolock(sig, newent);
54471+ spin_unlock(&gr_conn_table_lock);
54472+#endif
54473+ return;
54474+}
54475+
54476+void gr_del_task_from_ip_table(struct task_struct *task)
54477+{
54478+#ifdef CONFIG_GRKERNSEC
54479+ spin_lock_bh(&gr_conn_table_lock);
54480+ gr_del_task_from_ip_table_nolock(task->signal);
54481+ spin_unlock_bh(&gr_conn_table_lock);
54482+#endif
54483+ return;
54484+}
54485+
54486+void
54487+gr_attach_curr_ip(const struct sock *sk)
54488+{
54489+#ifdef CONFIG_GRKERNSEC
54490+ struct signal_struct *p, *set;
54491+ const struct inet_sock *inet = inet_sk(sk);
54492+
54493+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
54494+ return;
54495+
54496+ set = current->signal;
54497+
54498+ spin_lock_bh(&gr_conn_table_lock);
54499+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
54500+ inet->dport, inet->sport);
54501+ if (unlikely(p != NULL)) {
54502+ set->curr_ip = p->curr_ip;
54503+ set->used_accept = 1;
54504+ gr_del_task_from_ip_table_nolock(p);
54505+ spin_unlock_bh(&gr_conn_table_lock);
54506+ return;
54507+ }
54508+ spin_unlock_bh(&gr_conn_table_lock);
54509+
54510+ set->curr_ip = inet->daddr;
54511+ set->used_accept = 1;
54512+#endif
54513+ return;
54514+}
54515+
54516+int
54517+gr_handle_sock_all(const int family, const int type, const int protocol)
54518+{
54519+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54520+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
54521+ (family != AF_UNIX)) {
54522+ if (family == AF_INET)
54523+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
54524+ else
54525+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
54526+ return -EACCES;
54527+ }
54528+#endif
54529+ return 0;
54530+}
54531+
54532+int
54533+gr_handle_sock_server(const struct sockaddr *sck)
54534+{
54535+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54536+ if (grsec_enable_socket_server &&
54537+ in_group_p(grsec_socket_server_gid) &&
54538+ sck && (sck->sa_family != AF_UNIX) &&
54539+ (sck->sa_family != AF_LOCAL)) {
54540+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54541+ return -EACCES;
54542+ }
54543+#endif
54544+ return 0;
54545+}
54546+
54547+int
54548+gr_handle_sock_server_other(const struct sock *sck)
54549+{
54550+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54551+ if (grsec_enable_socket_server &&
54552+ in_group_p(grsec_socket_server_gid) &&
54553+ sck && (sck->sk_family != AF_UNIX) &&
54554+ (sck->sk_family != AF_LOCAL)) {
54555+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54556+ return -EACCES;
54557+ }
54558+#endif
54559+ return 0;
54560+}
54561+
54562+int
54563+gr_handle_sock_client(const struct sockaddr *sck)
54564+{
54565+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54566+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
54567+ sck && (sck->sa_family != AF_UNIX) &&
54568+ (sck->sa_family != AF_LOCAL)) {
54569+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
54570+ return -EACCES;
54571+ }
54572+#endif
54573+ return 0;
54574+}
54575+
54576+kernel_cap_t
54577+gr_cap_rtnetlink(struct sock *sock)
54578+{
54579+#ifdef CONFIG_GRKERNSEC
54580+ if (!gr_acl_is_enabled())
54581+ return current_cap();
54582+ else if (sock->sk_protocol == NETLINK_ISCSI &&
54583+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
54584+ gr_is_capable(CAP_SYS_ADMIN))
54585+ return current_cap();
54586+ else if (sock->sk_protocol == NETLINK_AUDIT &&
54587+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
54588+ gr_is_capable(CAP_AUDIT_WRITE) &&
54589+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
54590+ gr_is_capable(CAP_AUDIT_CONTROL))
54591+ return current_cap();
54592+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
54593+ ((sock->sk_protocol == NETLINK_ROUTE) ?
54594+ gr_is_capable_nolog(CAP_NET_ADMIN) :
54595+ gr_is_capable(CAP_NET_ADMIN)))
54596+ return current_cap();
54597+ else
54598+ return __cap_empty_set;
54599+#else
54600+ return current_cap();
54601+#endif
54602+}
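
The connection table above is a plain chained hash table keyed on the (saddr, daddr, sport, dport) tuple of a TCP connection. The lookup in gr_attach_curr_ip() passes the tuple from the peer's point of view (daddr/rcv_saddr and dport/sport swapped), which reproduces exactly the tuple the connecting task stored, so both sides compute the same bucket with conn_hash(). A small standalone check of that property; the addresses, ports and variable names are arbitrary illustrations.

#include <stdio.h>

#define TABLE_SIZE 32749   /* matches gr_conn_table_size above */

/* Same formula as conn_hash() in grsec_sock.c. */
static unsigned int conn_hash(unsigned int saddr, unsigned int daddr,
			      unsigned short sport, unsigned short dport,
			      unsigned int size)
{
	return (daddr + saddr + (sport << 8) + (dport << 16)) % size;
}

int main(void)
{
	/* Example: client 10.0.0.1:40000 connecting to server 10.0.0.2:80,
	 * addresses written as raw 32-bit values for the sketch. */
	unsigned int client_addr = 0x0a000001, server_addr = 0x0a000002;
	unsigned short client_port = 40000, server_port = 80;

	/* Bucket used when the connecting task stores its own tuple
	 * (rcv_saddr, daddr, sport, dport). */
	unsigned int stored = conn_hash(client_addr, server_addr,
					client_port, server_port, TABLE_SIZE);

	/* On the accepting socket the fields are mirrored (rcv_saddr = server,
	 * daddr = client, sport = server port, dport = client port); the lookup
	 * in gr_attach_curr_ip() passes them as (daddr, rcv_saddr, dport, sport),
	 * which reproduces the stored tuple and therefore the same bucket. */
	unsigned int srv_rcv_saddr = server_addr, srv_daddr = client_addr;
	unsigned short srv_sport = server_port, srv_dport = client_port;
	unsigned int looked_up = conn_hash(srv_daddr, srv_rcv_saddr,
					   srv_dport, srv_sport, TABLE_SIZE);

	printf("stored bucket: %u, lookup bucket: %u\n", stored, looked_up);
	return 0;
}
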
54603diff -urNp linux-2.6.32.45/grsecurity/grsec_sysctl.c linux-2.6.32.45/grsecurity/grsec_sysctl.c
54604--- linux-2.6.32.45/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
54605+++ linux-2.6.32.45/grsecurity/grsec_sysctl.c 2011-08-11 19:57:54.000000000 -0400
54606@@ -0,0 +1,479 @@
54607+#include <linux/kernel.h>
54608+#include <linux/sched.h>
54609+#include <linux/sysctl.h>
54610+#include <linux/grsecurity.h>
54611+#include <linux/grinternal.h>
54612+
54613+int
54614+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
54615+{
54616+#ifdef CONFIG_GRKERNSEC_SYSCTL
54617+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
54618+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
54619+ return -EACCES;
54620+ }
54621+#endif
54622+ return 0;
54623+}
54624+
54625+#ifdef CONFIG_GRKERNSEC_ROFS
54626+static int __maybe_unused one = 1;
54627+#endif
54628+
54629+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
54630+ctl_table grsecurity_table[] = {
54631+#ifdef CONFIG_GRKERNSEC_SYSCTL
54632+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
54633+#ifdef CONFIG_GRKERNSEC_IO
54634+ {
54635+ .ctl_name = CTL_UNNUMBERED,
54636+ .procname = "disable_priv_io",
54637+ .data = &grsec_disable_privio,
54638+ .maxlen = sizeof(int),
54639+ .mode = 0600,
54640+ .proc_handler = &proc_dointvec,
54641+ },
54642+#endif
54643+#endif
54644+#ifdef CONFIG_GRKERNSEC_LINK
54645+ {
54646+ .ctl_name = CTL_UNNUMBERED,
54647+ .procname = "linking_restrictions",
54648+ .data = &grsec_enable_link,
54649+ .maxlen = sizeof(int),
54650+ .mode = 0600,
54651+ .proc_handler = &proc_dointvec,
54652+ },
54653+#endif
54654+#ifdef CONFIG_GRKERNSEC_BRUTE
54655+ {
54656+ .ctl_name = CTL_UNNUMBERED,
54657+ .procname = "deter_bruteforce",
54658+ .data = &grsec_enable_brute,
54659+ .maxlen = sizeof(int),
54660+ .mode = 0600,
54661+ .proc_handler = &proc_dointvec,
54662+ },
54663+#endif
54664+#ifdef CONFIG_GRKERNSEC_FIFO
54665+ {
54666+ .ctl_name = CTL_UNNUMBERED,
54667+ .procname = "fifo_restrictions",
54668+ .data = &grsec_enable_fifo,
54669+ .maxlen = sizeof(int),
54670+ .mode = 0600,
54671+ .proc_handler = &proc_dointvec,
54672+ },
54673+#endif
54674+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54675+ {
54676+ .ctl_name = CTL_UNNUMBERED,
54677+ .procname = "ip_blackhole",
54678+ .data = &grsec_enable_blackhole,
54679+ .maxlen = sizeof(int),
54680+ .mode = 0600,
54681+ .proc_handler = &proc_dointvec,
54682+ },
54683+ {
54684+ .ctl_name = CTL_UNNUMBERED,
54685+ .procname = "lastack_retries",
54686+ .data = &grsec_lastack_retries,
54687+ .maxlen = sizeof(int),
54688+ .mode = 0600,
54689+ .proc_handler = &proc_dointvec,
54690+ },
54691+#endif
54692+#ifdef CONFIG_GRKERNSEC_EXECLOG
54693+ {
54694+ .ctl_name = CTL_UNNUMBERED,
54695+ .procname = "exec_logging",
54696+ .data = &grsec_enable_execlog,
54697+ .maxlen = sizeof(int),
54698+ .mode = 0600,
54699+ .proc_handler = &proc_dointvec,
54700+ },
54701+#endif
54702+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54703+ {
54704+ .ctl_name = CTL_UNNUMBERED,
54705+ .procname = "rwxmap_logging",
54706+ .data = &grsec_enable_log_rwxmaps,
54707+ .maxlen = sizeof(int),
54708+ .mode = 0600,
54709+ .proc_handler = &proc_dointvec,
54710+ },
54711+#endif
54712+#ifdef CONFIG_GRKERNSEC_SIGNAL
54713+ {
54714+ .ctl_name = CTL_UNNUMBERED,
54715+ .procname = "signal_logging",
54716+ .data = &grsec_enable_signal,
54717+ .maxlen = sizeof(int),
54718+ .mode = 0600,
54719+ .proc_handler = &proc_dointvec,
54720+ },
54721+#endif
54722+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54723+ {
54724+ .ctl_name = CTL_UNNUMBERED,
54725+ .procname = "forkfail_logging",
54726+ .data = &grsec_enable_forkfail,
54727+ .maxlen = sizeof(int),
54728+ .mode = 0600,
54729+ .proc_handler = &proc_dointvec,
54730+ },
54731+#endif
54732+#ifdef CONFIG_GRKERNSEC_TIME
54733+ {
54734+ .ctl_name = CTL_UNNUMBERED,
54735+ .procname = "timechange_logging",
54736+ .data = &grsec_enable_time,
54737+ .maxlen = sizeof(int),
54738+ .mode = 0600,
54739+ .proc_handler = &proc_dointvec,
54740+ },
54741+#endif
54742+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54743+ {
54744+ .ctl_name = CTL_UNNUMBERED,
54745+ .procname = "chroot_deny_shmat",
54746+ .data = &grsec_enable_chroot_shmat,
54747+ .maxlen = sizeof(int),
54748+ .mode = 0600,
54749+ .proc_handler = &proc_dointvec,
54750+ },
54751+#endif
54752+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54753+ {
54754+ .ctl_name = CTL_UNNUMBERED,
54755+ .procname = "chroot_deny_unix",
54756+ .data = &grsec_enable_chroot_unix,
54757+ .maxlen = sizeof(int),
54758+ .mode = 0600,
54759+ .proc_handler = &proc_dointvec,
54760+ },
54761+#endif
54762+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54763+ {
54764+ .ctl_name = CTL_UNNUMBERED,
54765+ .procname = "chroot_deny_mount",
54766+ .data = &grsec_enable_chroot_mount,
54767+ .maxlen = sizeof(int),
54768+ .mode = 0600,
54769+ .proc_handler = &proc_dointvec,
54770+ },
54771+#endif
54772+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54773+ {
54774+ .ctl_name = CTL_UNNUMBERED,
54775+ .procname = "chroot_deny_fchdir",
54776+ .data = &grsec_enable_chroot_fchdir,
54777+ .maxlen = sizeof(int),
54778+ .mode = 0600,
54779+ .proc_handler = &proc_dointvec,
54780+ },
54781+#endif
54782+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54783+ {
54784+ .ctl_name = CTL_UNNUMBERED,
54785+ .procname = "chroot_deny_chroot",
54786+ .data = &grsec_enable_chroot_double,
54787+ .maxlen = sizeof(int),
54788+ .mode = 0600,
54789+ .proc_handler = &proc_dointvec,
54790+ },
54791+#endif
54792+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54793+ {
54794+ .ctl_name = CTL_UNNUMBERED,
54795+ .procname = "chroot_deny_pivot",
54796+ .data = &grsec_enable_chroot_pivot,
54797+ .maxlen = sizeof(int),
54798+ .mode = 0600,
54799+ .proc_handler = &proc_dointvec,
54800+ },
54801+#endif
54802+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54803+ {
54804+ .ctl_name = CTL_UNNUMBERED,
54805+ .procname = "chroot_enforce_chdir",
54806+ .data = &grsec_enable_chroot_chdir,
54807+ .maxlen = sizeof(int),
54808+ .mode = 0600,
54809+ .proc_handler = &proc_dointvec,
54810+ },
54811+#endif
54812+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54813+ {
54814+ .ctl_name = CTL_UNNUMBERED,
54815+ .procname = "chroot_deny_chmod",
54816+ .data = &grsec_enable_chroot_chmod,
54817+ .maxlen = sizeof(int),
54818+ .mode = 0600,
54819+ .proc_handler = &proc_dointvec,
54820+ },
54821+#endif
54822+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54823+ {
54824+ .ctl_name = CTL_UNNUMBERED,
54825+ .procname = "chroot_deny_mknod",
54826+ .data = &grsec_enable_chroot_mknod,
54827+ .maxlen = sizeof(int),
54828+ .mode = 0600,
54829+ .proc_handler = &proc_dointvec,
54830+ },
54831+#endif
54832+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54833+ {
54834+ .ctl_name = CTL_UNNUMBERED,
54835+ .procname = "chroot_restrict_nice",
54836+ .data = &grsec_enable_chroot_nice,
54837+ .maxlen = sizeof(int),
54838+ .mode = 0600,
54839+ .proc_handler = &proc_dointvec,
54840+ },
54841+#endif
54842+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54843+ {
54844+ .ctl_name = CTL_UNNUMBERED,
54845+ .procname = "chroot_execlog",
54846+ .data = &grsec_enable_chroot_execlog,
54847+ .maxlen = sizeof(int),
54848+ .mode = 0600,
54849+ .proc_handler = &proc_dointvec,
54850+ },
54851+#endif
54852+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54853+ {
54854+ .ctl_name = CTL_UNNUMBERED,
54855+ .procname = "chroot_caps",
54856+ .data = &grsec_enable_chroot_caps,
54857+ .maxlen = sizeof(int),
54858+ .mode = 0600,
54859+ .proc_handler = &proc_dointvec,
54860+ },
54861+#endif
54862+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54863+ {
54864+ .ctl_name = CTL_UNNUMBERED,
54865+ .procname = "chroot_deny_sysctl",
54866+ .data = &grsec_enable_chroot_sysctl,
54867+ .maxlen = sizeof(int),
54868+ .mode = 0600,
54869+ .proc_handler = &proc_dointvec,
54870+ },
54871+#endif
54872+#ifdef CONFIG_GRKERNSEC_TPE
54873+ {
54874+ .ctl_name = CTL_UNNUMBERED,
54875+ .procname = "tpe",
54876+ .data = &grsec_enable_tpe,
54877+ .maxlen = sizeof(int),
54878+ .mode = 0600,
54879+ .proc_handler = &proc_dointvec,
54880+ },
54881+ {
54882+ .ctl_name = CTL_UNNUMBERED,
54883+ .procname = "tpe_gid",
54884+ .data = &grsec_tpe_gid,
54885+ .maxlen = sizeof(int),
54886+ .mode = 0600,
54887+ .proc_handler = &proc_dointvec,
54888+ },
54889+#endif
54890+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54891+ {
54892+ .ctl_name = CTL_UNNUMBERED,
54893+ .procname = "tpe_invert",
54894+ .data = &grsec_enable_tpe_invert,
54895+ .maxlen = sizeof(int),
54896+ .mode = 0600,
54897+ .proc_handler = &proc_dointvec,
54898+ },
54899+#endif
54900+#ifdef CONFIG_GRKERNSEC_TPE_ALL
54901+ {
54902+ .ctl_name = CTL_UNNUMBERED,
54903+ .procname = "tpe_restrict_all",
54904+ .data = &grsec_enable_tpe_all,
54905+ .maxlen = sizeof(int),
54906+ .mode = 0600,
54907+ .proc_handler = &proc_dointvec,
54908+ },
54909+#endif
54910+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54911+ {
54912+ .ctl_name = CTL_UNNUMBERED,
54913+ .procname = "socket_all",
54914+ .data = &grsec_enable_socket_all,
54915+ .maxlen = sizeof(int),
54916+ .mode = 0600,
54917+ .proc_handler = &proc_dointvec,
54918+ },
54919+ {
54920+ .ctl_name = CTL_UNNUMBERED,
54921+ .procname = "socket_all_gid",
54922+ .data = &grsec_socket_all_gid,
54923+ .maxlen = sizeof(int),
54924+ .mode = 0600,
54925+ .proc_handler = &proc_dointvec,
54926+ },
54927+#endif
54928+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54929+ {
54930+ .ctl_name = CTL_UNNUMBERED,
54931+ .procname = "socket_client",
54932+ .data = &grsec_enable_socket_client,
54933+ .maxlen = sizeof(int),
54934+ .mode = 0600,
54935+ .proc_handler = &proc_dointvec,
54936+ },
54937+ {
54938+ .ctl_name = CTL_UNNUMBERED,
54939+ .procname = "socket_client_gid",
54940+ .data = &grsec_socket_client_gid,
54941+ .maxlen = sizeof(int),
54942+ .mode = 0600,
54943+ .proc_handler = &proc_dointvec,
54944+ },
54945+#endif
54946+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54947+ {
54948+ .ctl_name = CTL_UNNUMBERED,
54949+ .procname = "socket_server",
54950+ .data = &grsec_enable_socket_server,
54951+ .maxlen = sizeof(int),
54952+ .mode = 0600,
54953+ .proc_handler = &proc_dointvec,
54954+ },
54955+ {
54956+ .ctl_name = CTL_UNNUMBERED,
54957+ .procname = "socket_server_gid",
54958+ .data = &grsec_socket_server_gid,
54959+ .maxlen = sizeof(int),
54960+ .mode = 0600,
54961+ .proc_handler = &proc_dointvec,
54962+ },
54963+#endif
54964+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
54965+ {
54966+ .ctl_name = CTL_UNNUMBERED,
54967+ .procname = "audit_group",
54968+ .data = &grsec_enable_group,
54969+ .maxlen = sizeof(int),
54970+ .mode = 0600,
54971+ .proc_handler = &proc_dointvec,
54972+ },
54973+ {
54974+ .ctl_name = CTL_UNNUMBERED,
54975+ .procname = "audit_gid",
54976+ .data = &grsec_audit_gid,
54977+ .maxlen = sizeof(int),
54978+ .mode = 0600,
54979+ .proc_handler = &proc_dointvec,
54980+ },
54981+#endif
54982+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54983+ {
54984+ .ctl_name = CTL_UNNUMBERED,
54985+ .procname = "audit_chdir",
54986+ .data = &grsec_enable_chdir,
54987+ .maxlen = sizeof(int),
54988+ .mode = 0600,
54989+ .proc_handler = &proc_dointvec,
54990+ },
54991+#endif
54992+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54993+ {
54994+ .ctl_name = CTL_UNNUMBERED,
54995+ .procname = "audit_mount",
54996+ .data = &grsec_enable_mount,
54997+ .maxlen = sizeof(int),
54998+ .mode = 0600,
54999+ .proc_handler = &proc_dointvec,
55000+ },
55001+#endif
55002+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55003+ {
55004+ .ctl_name = CTL_UNNUMBERED,
55005+ .procname = "audit_textrel",
55006+ .data = &grsec_enable_audit_textrel,
55007+ .maxlen = sizeof(int),
55008+ .mode = 0600,
55009+ .proc_handler = &proc_dointvec,
55010+ },
55011+#endif
55012+#ifdef CONFIG_GRKERNSEC_DMESG
55013+ {
55014+ .ctl_name = CTL_UNNUMBERED,
55015+ .procname = "dmesg",
55016+ .data = &grsec_enable_dmesg,
55017+ .maxlen = sizeof(int),
55018+ .mode = 0600,
55019+ .proc_handler = &proc_dointvec,
55020+ },
55021+#endif
55022+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55023+ {
55024+ .ctl_name = CTL_UNNUMBERED,
55025+ .procname = "chroot_findtask",
55026+ .data = &grsec_enable_chroot_findtask,
55027+ .maxlen = sizeof(int),
55028+ .mode = 0600,
55029+ .proc_handler = &proc_dointvec,
55030+ },
55031+#endif
55032+#ifdef CONFIG_GRKERNSEC_RESLOG
55033+ {
55034+ .ctl_name = CTL_UNNUMBERED,
55035+ .procname = "resource_logging",
55036+ .data = &grsec_resource_logging,
55037+ .maxlen = sizeof(int),
55038+ .mode = 0600,
55039+ .proc_handler = &proc_dointvec,
55040+ },
55041+#endif
55042+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55043+ {
55044+ .ctl_name = CTL_UNNUMBERED,
55045+ .procname = "audit_ptrace",
55046+ .data = &grsec_enable_audit_ptrace,
55047+ .maxlen = sizeof(int),
55048+ .mode = 0600,
55049+ .proc_handler = &proc_dointvec,
55050+ },
55051+#endif
55052+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55053+ {
55054+ .ctl_name = CTL_UNNUMBERED,
55055+ .procname = "harden_ptrace",
55056+ .data = &grsec_enable_harden_ptrace,
55057+ .maxlen = sizeof(int),
55058+ .mode = 0600,
55059+ .proc_handler = &proc_dointvec,
55060+ },
55061+#endif
55062+ {
55063+ .ctl_name = CTL_UNNUMBERED,
55064+ .procname = "grsec_lock",
55065+ .data = &grsec_lock,
55066+ .maxlen = sizeof(int),
55067+ .mode = 0600,
55068+ .proc_handler = &proc_dointvec,
55069+ },
55070+#endif
55071+#ifdef CONFIG_GRKERNSEC_ROFS
55072+ {
55073+ .ctl_name = CTL_UNNUMBERED,
55074+ .procname = "romount_protect",
55075+ .data = &grsec_enable_rofs,
55076+ .maxlen = sizeof(int),
55077+ .mode = 0600,
55078+ .proc_handler = &proc_dointvec_minmax,
55079+ .extra1 = &one,
55080+ .extra2 = &one,
55081+ },
55082+#endif
55083+ { .ctl_name = 0 }
55084+};
55085+#endif
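
The table above backs the runtime grsecurity sysctls (when CONFIG_GRKERNSEC_SYSCTL is enabled), and gr_handle_sysctl_mod() earlier in this file is what makes grsec_lock one-way: once it is set, writes anywhere under the "grsecurity" directory are refused with EACCES. A minimal userspace sketch of driving these toggles; it assumes the table is exposed under /proc/sys/kernel/grsecurity/ and must run as root since every entry is mode 0600, and set_grsec_sysctl is a helper invented for this sketch.

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Write a value to one of the grsecurity sysctls defined in the table above.
 * Path assumes the table is registered under /proc/sys/kernel/grsecurity/.
 * Returns 0 on success, -1 on failure (errno describes why). */
static int set_grsec_sysctl(const char *name, const char *value)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f) {
		fprintf(stderr, "open %s: %s\n", path, strerror(errno));
		return -1;
	}
	fprintf(f, "%s\n", value);
	if (fclose(f) != 0) {
		/* Once grsec_lock is 1, the write is rejected with EACCES by
		 * gr_handle_sysctl_mod(); depending on the kernel the error may
		 * also surface at open time. */
		fprintf(stderr, "write %s: %s\n", path, strerror(errno));
		return -1;
	}
	return 0;
}

int main(void)
{
	set_grsec_sysctl("deter_bruteforce", "1");  /* entry from the table above */
	set_grsec_sysctl("grsec_lock", "1");        /* lock the configuration */
	return set_grsec_sysctl("audit_mount", "1") ? 1 : 0;  /* expected to fail now */
}
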
55086diff -urNp linux-2.6.32.45/grsecurity/grsec_time.c linux-2.6.32.45/grsecurity/grsec_time.c
55087--- linux-2.6.32.45/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55088+++ linux-2.6.32.45/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
55089@@ -0,0 +1,16 @@
55090+#include <linux/kernel.h>
55091+#include <linux/sched.h>
55092+#include <linux/grinternal.h>
55093+#include <linux/module.h>
55094+
55095+void
55096+gr_log_timechange(void)
55097+{
55098+#ifdef CONFIG_GRKERNSEC_TIME
55099+ if (grsec_enable_time)
55100+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55101+#endif
55102+ return;
55103+}
55104+
55105+EXPORT_SYMBOL(gr_log_timechange);
55106diff -urNp linux-2.6.32.45/grsecurity/grsec_tpe.c linux-2.6.32.45/grsecurity/grsec_tpe.c
55107--- linux-2.6.32.45/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55108+++ linux-2.6.32.45/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
55109@@ -0,0 +1,39 @@
55110+#include <linux/kernel.h>
55111+#include <linux/sched.h>
55112+#include <linux/file.h>
55113+#include <linux/fs.h>
55114+#include <linux/grinternal.h>
55115+
55116+extern int gr_acl_tpe_check(void);
55117+
55118+int
55119+gr_tpe_allow(const struct file *file)
55120+{
55121+#ifdef CONFIG_GRKERNSEC
55122+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55123+ const struct cred *cred = current_cred();
55124+
55125+ if (cred->uid && ((grsec_enable_tpe &&
55126+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55127+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55128+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55129+#else
55130+ in_group_p(grsec_tpe_gid)
55131+#endif
55132+ ) || gr_acl_tpe_check()) &&
55133+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55134+ (inode->i_mode & S_IWOTH))))) {
55135+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55136+ return 0;
55137+ }
55138+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55139+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55140+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55141+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55142+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55143+ return 0;
55144+ }
55145+#endif
55146+#endif
55147+ return 1;
55148+}
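
Condensed, gr_tpe_allow() above refuses execution for a non-root user when TPE applies to that user (by group membership, inverted group membership, or the RBAC flag) and the executable's parent directory is either not owned by root or is group/world-writable; under TPE_ALL the directory must additionally be owned by root or by the user and not be group- or world-writable. A standalone restatement of that predicate with plain parameters in place of the cred and inode fields; tpe_allow here is a sketch, not the kernel function.

#include <stdio.h>
#include <sys/stat.h>

/* Standalone restatement of the gr_tpe_allow() checks above.  tpe_applies
 * stands for the gid / inverted-gid / RBAC test, tpe_all for
 * grsec_enable_tpe && grsec_enable_tpe_all; dir_uid/dir_mode describe the
 * executable's parent directory.  Returns 1 to allow, 0 to deny. */
static int tpe_allow(unsigned int uid, int tpe_applies, int tpe_all,
		     unsigned int dir_uid, mode_t dir_mode)
{
	int dir_untrusted = dir_uid != 0 || (dir_mode & (S_IWGRP | S_IWOTH));

	if (uid && tpe_applies && dir_untrusted)
		return 0;                /* denied: untrusted directory */

	if (uid && tpe_all &&
	    ((dir_uid && dir_uid != uid) || (dir_mode & (S_IWGRP | S_IWOTH))))
		return 0;                /* denied under TPE_ALL */

	return 1;                        /* execution allowed */
}

int main(void)
{
	/* uid 1000 executing from its own home directory (uid 1000, mode 0755) */
	printf("%d\n", tpe_allow(1000, 1, 0, 1000, 0755));  /* 0: not root-owned */
	/* uid 1000 executing from /usr/bin (root-owned, mode 0755) */
	printf("%d\n", tpe_allow(1000, 1, 0, 0, 0755));      /* 1: trusted */
	return 0;
}
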
55149diff -urNp linux-2.6.32.45/grsecurity/grsum.c linux-2.6.32.45/grsecurity/grsum.c
55150--- linux-2.6.32.45/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55151+++ linux-2.6.32.45/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
55152@@ -0,0 +1,61 @@
55153+#include <linux/err.h>
55154+#include <linux/kernel.h>
55155+#include <linux/sched.h>
55156+#include <linux/mm.h>
55157+#include <linux/scatterlist.h>
55158+#include <linux/crypto.h>
55159+#include <linux/gracl.h>
55160+
55161+
55162+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55163+#error "crypto and sha256 must be built into the kernel"
55164+#endif
55165+
55166+int
55167+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55168+{
55169+ char *p;
55170+ struct crypto_hash *tfm;
55171+ struct hash_desc desc;
55172+ struct scatterlist sg;
55173+ unsigned char temp_sum[GR_SHA_LEN];
55174+ volatile int retval = 0;
55175+ volatile int dummy = 0;
55176+ unsigned int i;
55177+
55178+ sg_init_table(&sg, 1);
55179+
55180+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55181+ if (IS_ERR(tfm)) {
55182+ /* should never happen, since sha256 should be built in */
55183+ return 1;
55184+ }
55185+
55186+ desc.tfm = tfm;
55187+ desc.flags = 0;
55188+
55189+ crypto_hash_init(&desc);
55190+
55191+ p = salt;
55192+ sg_set_buf(&sg, p, GR_SALT_LEN);
55193+ crypto_hash_update(&desc, &sg, sg.length);
55194+
55195+ p = entry->pw;
55196+ sg_set_buf(&sg, p, strlen(p));
55197+
55198+ crypto_hash_update(&desc, &sg, sg.length);
55199+
55200+ crypto_hash_final(&desc, temp_sum);
55201+
55202+ memset(entry->pw, 0, GR_PW_LEN);
55203+
55204+ for (i = 0; i < GR_SHA_LEN; i++)
55205+ if (sum[i] != temp_sum[i])
55206+ retval = 1;
55207+ else
55208+ dummy = 1; /* waste a cycle */
55209+
55210+ crypto_free_hash(tfm);
55211+
55212+ return retval;
55213+}
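
The loop at the end of chkpw() compares every byte of the digest and does equal work in both branches (the "waste a cycle" assignment), so the comparison time does not reveal where the first mismatching byte sits. The sketch below expresses the same constant-time idea with the more common XOR/OR accumulator; it is an illustration, not a copy of the kernel loop, and digest_mismatch and DIGEST_LEN are names invented here.

#include <stdio.h>
#include <string.h>

#define DIGEST_LEN 32   /* GR_SHA_LEN: size of a SHA-256 digest */

/* Constant-time digest comparison sketch: every byte is examined regardless of
 * earlier mismatches, so the running time does not depend on where the first
 * difference occurs.  Returns 0 on match, 1 on mismatch. */
static int digest_mismatch(const unsigned char *a, const unsigned char *b)
{
	unsigned char diff = 0;
	int i;

	for (i = 0; i < DIGEST_LEN; i++)
		diff |= a[i] ^ b[i];   /* accumulate differences, no early exit */

	return diff != 0;
}

int main(void)
{
	unsigned char x[DIGEST_LEN], y[DIGEST_LEN];

	memset(x, 0xab, sizeof(x));
	memset(y, 0xab, sizeof(y));
	printf("equal digests:   %d\n", digest_mismatch(x, y));  /* 0 */
	y[DIGEST_LEN - 1] ^= 1;
	printf("unequal digests: %d\n", digest_mismatch(x, y));  /* 1 */
	return 0;
}
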
55214diff -urNp linux-2.6.32.45/grsecurity/Kconfig linux-2.6.32.45/grsecurity/Kconfig
55215--- linux-2.6.32.45/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55216+++ linux-2.6.32.45/grsecurity/Kconfig 2011-08-17 19:04:25.000000000 -0400
55217@@ -0,0 +1,1037 @@
55218+#
55219+# grsecurity configuration
55220+#
55221+
55222+menu "Grsecurity"
55223+
55224+config GRKERNSEC
55225+ bool "Grsecurity"
55226+ select CRYPTO
55227+ select CRYPTO_SHA256
55228+ help
55229+ If you say Y here, you will be able to configure many features
55230+ that will enhance the security of your system. It is highly
55231+ recommended that you say Y here and read through the help
55232+ for each option so that you fully understand the features and
55233+ can evaluate their usefulness for your machine.
55234+
55235+choice
55236+ prompt "Security Level"
55237+ depends on GRKERNSEC
55238+ default GRKERNSEC_CUSTOM
55239+
55240+config GRKERNSEC_LOW
55241+ bool "Low"
55242+ select GRKERNSEC_LINK
55243+ select GRKERNSEC_FIFO
55244+ select GRKERNSEC_RANDNET
55245+ select GRKERNSEC_DMESG
55246+ select GRKERNSEC_CHROOT
55247+ select GRKERNSEC_CHROOT_CHDIR
55248+
55249+ help
55250+ If you choose this option, several of the grsecurity options will
55251+ be enabled that will give you greater protection against a number
55252+ of attacks, while assuring that none of your software will have any
55253+ conflicts with the additional security measures. If you run a lot
55254+ of unusual software, or you are having problems with the higher
55255+ security levels, you should say Y here. With this option, the
55256+ following features are enabled:
55257+
55258+ - Linking restrictions
55259+ - FIFO restrictions
55260+ - Restricted dmesg
55261+ - Enforced chdir("/") on chroot
55262+ - Runtime module disabling
55263+
55264+config GRKERNSEC_MEDIUM
55265+ bool "Medium"
55266+ select PAX
55267+ select PAX_EI_PAX
55268+ select PAX_PT_PAX_FLAGS
55269+ select PAX_HAVE_ACL_FLAGS
55270+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55271+ select GRKERNSEC_CHROOT
55272+ select GRKERNSEC_CHROOT_SYSCTL
55273+ select GRKERNSEC_LINK
55274+ select GRKERNSEC_FIFO
55275+ select GRKERNSEC_DMESG
55276+ select GRKERNSEC_RANDNET
55277+ select GRKERNSEC_FORKFAIL
55278+ select GRKERNSEC_TIME
55279+ select GRKERNSEC_SIGNAL
55280+ select GRKERNSEC_CHROOT
55281+ select GRKERNSEC_CHROOT_UNIX
55282+ select GRKERNSEC_CHROOT_MOUNT
55283+ select GRKERNSEC_CHROOT_PIVOT
55284+ select GRKERNSEC_CHROOT_DOUBLE
55285+ select GRKERNSEC_CHROOT_CHDIR
55286+ select GRKERNSEC_CHROOT_MKNOD
55287+ select GRKERNSEC_PROC
55288+ select GRKERNSEC_PROC_USERGROUP
55289+ select PAX_RANDUSTACK
55290+ select PAX_ASLR
55291+ select PAX_RANDMMAP
55292+ select PAX_REFCOUNT if (X86 || SPARC64)
55293+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55294+
55295+ help
55296+ If you say Y here, several features in addition to those included
55297+ in the low additional security level will be enabled. These
55298+ features provide even more security to your system, though in rare
55299+ cases they may be incompatible with very old or poorly written
55300+ software. If you enable this option, make sure that your auth
55301+ service (identd) is running as gid 1001. With this option,
55302+ the following features (in addition to those provided in the
55303+ low additional security level) will be enabled:
55304+
55305+ - Failed fork logging
55306+ - Time change logging
55307+ - Signal logging
55308+ - Deny mounts in chroot
55309+ - Deny double chrooting
55310+ - Deny sysctl writes in chroot
55311+ - Deny mknod in chroot
55312+ - Deny access to abstract AF_UNIX sockets out of chroot
55313+ - Deny pivot_root in chroot
55314+ - Deny writing to /dev/kmem, /dev/mem, and /dev/port
55315+ - /proc restrictions with special GID set to 10 (usually wheel)
55316+ - Address Space Layout Randomization (ASLR)
55317+ - Prevent exploitation of most refcount overflows
55318+ - Bounds checking of copying between the kernel and userland
55319+
55320+config GRKERNSEC_HIGH
55321+ bool "High"
55322+ select GRKERNSEC_LINK
55323+ select GRKERNSEC_FIFO
55324+ select GRKERNSEC_DMESG
55325+ select GRKERNSEC_FORKFAIL
55326+ select GRKERNSEC_TIME
55327+ select GRKERNSEC_SIGNAL
55328+ select GRKERNSEC_CHROOT
55329+ select GRKERNSEC_CHROOT_SHMAT
55330+ select GRKERNSEC_CHROOT_UNIX
55331+ select GRKERNSEC_CHROOT_MOUNT
55332+ select GRKERNSEC_CHROOT_FCHDIR
55333+ select GRKERNSEC_CHROOT_PIVOT
55334+ select GRKERNSEC_CHROOT_DOUBLE
55335+ select GRKERNSEC_CHROOT_CHDIR
55336+ select GRKERNSEC_CHROOT_MKNOD
55337+ select GRKERNSEC_CHROOT_CAPS
55338+ select GRKERNSEC_CHROOT_SYSCTL
55339+ select GRKERNSEC_CHROOT_FINDTASK
55340+ select GRKERNSEC_SYSFS_RESTRICT
55341+ select GRKERNSEC_PROC
55342+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55343+ select GRKERNSEC_HIDESYM
55344+ select GRKERNSEC_BRUTE
55345+ select GRKERNSEC_PROC_USERGROUP
55346+ select GRKERNSEC_KMEM
55347+ select GRKERNSEC_RESLOG
55348+ select GRKERNSEC_RANDNET
55349+ select GRKERNSEC_PROC_ADD
55350+ select GRKERNSEC_CHROOT_CHMOD
55351+ select GRKERNSEC_CHROOT_NICE
55352+ select GRKERNSEC_AUDIT_MOUNT
55353+ select GRKERNSEC_MODHARDEN if (MODULES)
55354+ select GRKERNSEC_HARDEN_PTRACE
55355+ select GRKERNSEC_VM86 if (X86_32)
55356+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55357+ select PAX
55358+ select PAX_RANDUSTACK
55359+ select PAX_ASLR
55360+ select PAX_RANDMMAP
55361+ select PAX_NOEXEC
55362+ select PAX_MPROTECT
55363+ select PAX_EI_PAX
55364+ select PAX_PT_PAX_FLAGS
55365+ select PAX_HAVE_ACL_FLAGS
55366+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55367+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
55368+ select PAX_RANDKSTACK if (X86_TSC && X86)
55369+ select PAX_SEGMEXEC if (X86_32)
55370+ select PAX_PAGEEXEC
55371+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55372+ select PAX_EMUTRAMP if (PARISC)
55373+ select PAX_EMUSIGRT if (PARISC)
55374+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55375+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55376+ select PAX_REFCOUNT if (X86 || SPARC64)
55377+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55378+ help
55379+ If you say Y here, many of the features of grsecurity will be
55380+ enabled, which will protect you against many kinds of attacks
55381+ against your system. The heightened security comes at a cost
55382+ of an increased chance of incompatibilities with rare software
55383+ on your machine. Since this security level enables PaX, you should
55384+ view <http://pax.grsecurity.net> and read about the PaX
55385+ project. While you are there, download chpax and run it on
55386+ binaries that cause problems with PaX. Also remember that
55387+ since the /proc restrictions are enabled, you must run your
55388+ identd as gid 1001. This security level enables the following
55389+ features in addition to those listed in the low and medium
55390+ security levels:
55391+
55392+ - Additional /proc restrictions
55393+ - Chmod restrictions in chroot
55394+ - No signals, ptrace, or viewing of processes outside of chroot
55395+ - Capability restrictions in chroot
55396+ - Deny fchdir out of chroot
55397+ - Priority restrictions in chroot
55398+ - Segmentation-based implementation of PaX
55399+ - Mprotect restrictions
55400+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55401+ - Kernel stack randomization
55402+ - Mount/unmount/remount logging
55403+ - Kernel symbol hiding
55404+ - Prevention of memory exhaustion-based exploits
55405+ - Hardening of module auto-loading
55406+ - Ptrace restrictions
55407+ - Restricted vm86 mode
55408+ - Restricted sysfs/debugfs
55409+ - Active kernel exploit response
55410+
55411+config GRKERNSEC_CUSTOM
55412+ bool "Custom"
55413+ help
55414+ If you say Y here, you will be able to configure every grsecurity
55415+ option, which allows you to enable many more features that aren't
55416+ covered in the basic security levels. These additional features
55417+ include TPE, socket restrictions, and the sysctl system for
55418+ grsecurity. It is advised that you read through the help for
55419+ each option to determine its usefulness in your situation.
55420+
55421+endchoice
55422+
55423+menu "Address Space Protection"
55424+depends on GRKERNSEC
55425+
55426+config GRKERNSEC_KMEM
55427+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
55428+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55429+ help
55430+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55431+ be written to via mmap or otherwise to modify the running kernel.
55432+ /dev/port will also not be allowed to be opened. If you have module
55433+ support disabled, enabling this will close up four ways that are
55434+ currently used to insert malicious code into the running kernel.
55435+ Even with all these features enabled, we still highly recommend that
55436+ you use the RBAC system, as it is still possible for an attacker to
55437+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55438+ If you are not using XFree86, you may be able to stop this additional
55439+ case by enabling the 'Disable privileged I/O' option. Though nothing
55440+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55441+ but only to video memory, which is the only writing we allow in this
55442+ case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
55443+ will not be allowed to gain PROT_WRITE later via mprotect.
55444+ It is highly recommended that you say Y here if you meet all the
55445+ conditions above.
55446+
55447+config GRKERNSEC_VM86
55448+ bool "Restrict VM86 mode"
55449+ depends on X86_32
55450+
55451+ help
55452+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55453+ make use of a special execution mode on 32-bit x86 processors called
55454+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55455+ video cards and will still work with this option enabled. The purpose
55456+ of the option is to prevent exploitation of emulation errors in
55457+ virtualization of vm86 mode like the one discovered in VMware in 2009.
55458+ Nearly all users should be able to enable this option.
55459+
55460+config GRKERNSEC_IO
55461+ bool "Disable privileged I/O"
55462+ depends on X86
55463+ select RTC_CLASS
55464+ select RTC_INTF_DEV
55465+ select RTC_DRV_CMOS
55466+
55467+ help
55468+ If you say Y here, all ioperm and iopl calls will return an error.
55469+ Ioperm and iopl can be used to modify the running kernel.
55470+ Unfortunately, some programs need this access to operate properly,
55471+ the most notable of which are XFree86 and hwclock. hwclock can be
55472+ remedied by having RTC support in the kernel, so real-time
55473+ clock support is enabled if this option is enabled, to ensure
55474+ that hwclock operates correctly. XFree86 still will not
55475+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55476+ IF YOU USE XFree86. If you use XFree86 and you still want to
55477+ protect your kernel against modification, use the RBAC system.
55478+
55479+config GRKERNSEC_PROC_MEMMAP
55480+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55481+ default y if (PAX_NOEXEC || PAX_ASLR)
55482+ depends on PAX_NOEXEC || PAX_ASLR
55483+ help
55484+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55485+ give no information about the addresses of a task's mappings if
55486+ PaX features that rely on random addresses are enabled for that task.
55487+ If you use PaX it is strongly recommended that you say Y here, as it
55488+ closes up a hole that makes the full ASLR useless for suid
55489+ binaries.
55490+
55491+config GRKERNSEC_BRUTE
55492+ bool "Deter exploit bruteforcing"
55493+ help
55494+ If you say Y here, attempts to bruteforce exploits against forking
55495+ daemons such as apache or sshd, as well as against suid/sgid binaries
55496+ will be deterred. When a child of a forking daemon is killed by PaX
55497+ or crashes due to an illegal instruction or other suspicious signal,
55498+ the parent process will be delayed 30 seconds upon every subsequent
55499+ fork until the administrator is able to assess the situation and
55500+ restart the daemon.
55501+ In the suid/sgid case, the attempt is logged, the user has all their
55502+ processes terminated, and they are prevented from executing any further
55503+ processes for 15 minutes.
55504+ It is recommended that you also enable signal logging in the auditing
55505+ section so that logs are generated when a process triggers a suspicious
55506+ signal.
55507+ If the sysctl option is enabled, a sysctl option with name
55508+ "deter_bruteforce" is created.
55509+
55510+config GRKERNSEC_MODHARDEN
55511+ bool "Harden module auto-loading"
55512+ depends on MODULES
55513+ help
55514+ If you say Y here, module auto-loading in response to use of some
55515+ feature implemented by an unloaded module will be restricted to
55516+ root users. Enabling this option helps defend against attacks
55517+ by unprivileged users who abuse the auto-loading behavior to
55518+ cause a vulnerable module to load that is then exploited.
55519+
55520+ If this option prevents a legitimate use of auto-loading for a
55521+ non-root user, the administrator can execute modprobe manually
55522+ with the exact name of the module mentioned in the alert log.
55523+ Alternatively, the administrator can add the module to the list
55524+ of modules loaded at boot by modifying init scripts.
55525+
55526+ Modification of init scripts will most likely be needed on
55527+ Ubuntu servers with encrypted home directory support enabled,
55528+ as the first non-root user logging in will cause the ecb(aes),
55529+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55530+
55531+config GRKERNSEC_HIDESYM
55532+ bool "Hide kernel symbols"
55533+ help
55534+ If you say Y here, getting information on loaded modules and
55535+ displaying all kernel symbols through a syscall will be restricted
55536+ to users with CAP_SYS_MODULE. For software compatibility reasons,
55537+ /proc/kallsyms will be restricted to the root user. The RBAC
55538+ system can hide that entry even from root.
55539+
55540+ This option also prevents leaking of kernel addresses through
55541+ several /proc entries.
55542+
55543+ Note that this option is only effective provided the following
55544+ conditions are met:
55545+ 1) The kernel using grsecurity is not precompiled by some distribution
55546+ 2) You have also enabled GRKERNSEC_DMESG
55547+ 3) You are using the RBAC system and hiding other files such as your
55548+ kernel image and System.map. Alternatively, enabling this option
55549+ causes the permissions on /boot, /lib/modules, and the kernel
55550+ source directory to change at compile time to prevent
55551+ reading by non-root users.
55552+ If the above conditions are met, this option will aid in providing a
55553+ useful protection against local kernel exploitation of overflows
55554+ and arbitrary read/write vulnerabilities.
55555+
55556+config GRKERNSEC_KERN_LOCKOUT
55557+ bool "Active kernel exploit response"
55558+ depends on X86 || ARM || PPC || SPARC
55559+ help
55560+ If you say Y here, when a PaX alert is triggered due to suspicious
55561+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55562+ or an OOPs occurs due to bad memory accesses, instead of just
55563+ terminating the offending process (and potentially allowing
55564+ a subsequent exploit from the same user), we will take one of two
55565+ actions:
55566+ If the user was root, we will panic the system
55567+ If the user was non-root, we will log the attempt, terminate
55568+ all processes owned by the user, then prevent them from creating
55569+ any new processes until the system is restarted
55570+ This deters repeated kernel exploitation/bruteforcing attempts
55571+ and is useful for later forensics.
55572+
55573+endmenu
55574+menu "Role Based Access Control Options"
55575+depends on GRKERNSEC
55576+
55577+config GRKERNSEC_RBAC_DEBUG
55578+ bool
55579+
55580+config GRKERNSEC_NO_RBAC
55581+ bool "Disable RBAC system"
55582+ help
55583+ If you say Y here, the /dev/grsec device will be removed from the kernel,
55584+ preventing the RBAC system from being enabled. You should only say Y
55585+ here if you have no intention of using the RBAC system, so as to prevent
55586+ an attacker with root access from misusing the RBAC system to hide files
55587+ and processes when loadable module support and /dev/[k]mem have been
55588+ locked down.
55589+
55590+config GRKERNSEC_ACL_HIDEKERN
55591+ bool "Hide kernel processes"
55592+ help
55593+ If you say Y here, all kernel threads will be hidden from all
55594+ processes but those whose subject has the "view hidden processes"
55595+ flag.
55596+
55597+config GRKERNSEC_ACL_MAXTRIES
55598+ int "Maximum tries before password lockout"
55599+ default 3
55600+ help
55601+ This option enforces the maximum number of times a user can attempt
55602+ to authorize themselves with the grsecurity RBAC system before being
55603+ denied the ability to attempt authorization again for a specified time.
55604+ The lower the number, the harder it will be to brute-force a password.
55605+
55606+config GRKERNSEC_ACL_TIMEOUT
55607+ int "Time to wait after max password tries, in seconds"
55608+ default 30
55609+ help
55610+ This option specifies the time the user must wait after attempting to
55611+ authorize to the RBAC system with the maximum number of invalid
55612+ passwords. The higher the number, the harder it will be to brute-force
55613+ a password.
55614+
55615+endmenu
55616+menu "Filesystem Protections"
55617+depends on GRKERNSEC
55618+
55619+config GRKERNSEC_PROC
55620+ bool "Proc restrictions"
55621+ help
55622+ If you say Y here, the permissions of the /proc filesystem
55623+ will be altered to enhance system security and privacy. You MUST
55624+ choose either a user only restriction or a user and group restriction.
55625+ Depending upon the option you choose, you can either restrict all
55626+ non-root users to seeing only the processes they themselves run, or
55627+ choose a group whose members can view all processes and files normally
55628+ restricted to root. NOTE: If you're running identd as
55629+ a non-root user, you will have to run it as the group you specify here.
55630+
55631+config GRKERNSEC_PROC_USER
55632+ bool "Restrict /proc to user only"
55633+ depends on GRKERNSEC_PROC
55634+ help
55635+ If you say Y here, non-root users will only be able to view their own
55636+ processes, and will be restricted from viewing network-related
55637+ information and kernel symbol and module information.
55638+
55639+config GRKERNSEC_PROC_USERGROUP
55640+ bool "Allow special group"
55641+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55642+ help
55643+ If you say Y here, you will be able to select a group that will be
55644+ able to view all processes and network-related information. If you've
55645+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55646+ remain hidden. This option is useful if you want to run identd as
55647+ a non-root user.
55648+
55649+config GRKERNSEC_PROC_GID
55650+ int "GID for special group"
55651+ depends on GRKERNSEC_PROC_USERGROUP
55652+ default 1001
55653+
55654+config GRKERNSEC_PROC_ADD
55655+ bool "Additional restrictions"
55656+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55657+ help
55658+ If you say Y here, additional restrictions will be placed on
55659+ /proc that keep normal users from viewing device information and
55660+ slabinfo information that could be useful for exploits.
55661+
55662+config GRKERNSEC_LINK
55663+ bool "Linking restrictions"
55664+ help
55665+ If you say Y here, /tmp race exploits will be prevented, since users
55666+ will no longer be able to follow symlinks owned by other users in
55667+ world-writable +t directories (e.g. /tmp), unless the owner of the
55668+ symlink is the owner of the directory. Users will also not be
55669+ able to hardlink to files they do not own. If the sysctl option is
55670+ enabled, a sysctl option with name "linking_restrictions" is created.
55671+
55672+config GRKERNSEC_FIFO
55673+ bool "FIFO restrictions"
55674+ help
55675+ If you say Y here, users will not be able to write to FIFOs they don't
55676+ own in world-writable +t directories (e.g. /tmp), unless the owner of
55677+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
55678+ option is enabled, a sysctl option with name "fifo_restrictions" is
55679+ created.
55680+
55681+config GRKERNSEC_SYSFS_RESTRICT
55682+ bool "Sysfs/debugfs restriction"
55683+ depends on SYSFS
55684+ help
55685+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55686+ any filesystem normally mounted under it (e.g. debugfs) will only
55687+ be accessible by root. These filesystems generally provide access
55688+ to hardware and debug information that isn't appropriate for unprivileged
55689+ users of the system. Sysfs and debugfs have also become a large source
55690+ of new vulnerabilities, ranging from infoleaks to local compromise.
55691+ There has been very little oversight with an eye toward security involved
55692+ in adding new exporters of information to these filesystems, so their
55693+ use is discouraged.
55694+ This option is equivalent to a chmod 0700 of the mount paths.
55695+
55696+config GRKERNSEC_ROFS
55697+ bool "Runtime read-only mount protection"
55698+ help
55699+ If you say Y here, a sysctl option with name "romount_protect" will
55700+ be created. By setting this option to 1 at runtime, filesystems
55701+ will be protected in the following ways:
55702+ * No new writable mounts will be allowed
55703+ * Existing read-only mounts won't be able to be remounted read/write
55704+ * Write operations will be denied on all block devices
55705+ This option acts independently of grsec_lock: once it is set to 1,
55706+ it cannot be turned off. Therefore, please be mindful of the resulting
55707+ behavior if this option is enabled in an init script on a read-only
55708+ filesystem. This feature is mainly intended for secure embedded systems.
55709+
55710+config GRKERNSEC_CHROOT
55711+ bool "Chroot jail restrictions"
55712+ help
55713+ If you say Y here, you will be able to choose several options that will
55714+ make breaking out of a chrooted jail much more difficult. If you
55715+ encounter no software incompatibilities with the following options, it
55716+ is recommended that you enable each one.
55717+
55718+config GRKERNSEC_CHROOT_MOUNT
55719+ bool "Deny mounts"
55720+ depends on GRKERNSEC_CHROOT
55721+ help
55722+ If you say Y here, processes inside a chroot will not be able to
55723+ mount or remount filesystems. If the sysctl option is enabled, a
55724+ sysctl option with name "chroot_deny_mount" is created.
55725+
55726+config GRKERNSEC_CHROOT_DOUBLE
55727+ bool "Deny double-chroots"
55728+ depends on GRKERNSEC_CHROOT
55729+ help
55730+ If you say Y here, processes inside a chroot will not be able to chroot
55731+ again outside the chroot. This is a widely used method of breaking
55732+ out of a chroot jail and should not be allowed. If the sysctl
55733+ option is enabled, a sysctl option with name
55734+ "chroot_deny_chroot" is created.
55735+
55736+config GRKERNSEC_CHROOT_PIVOT
55737+ bool "Deny pivot_root in chroot"
55738+ depends on GRKERNSEC_CHROOT
55739+ help
55740+ If you say Y here, processes inside a chroot will not be able to use
55741+ a function called pivot_root() that was introduced in Linux 2.3.41. It
55742+ works similarly to chroot in that it changes the root filesystem. This
55743+ function could be misused in a chrooted process to attempt to break out
55744+ of the chroot, and therefore should not be allowed. If the sysctl
55745+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
55746+ created.
55747+
55748+config GRKERNSEC_CHROOT_CHDIR
55749+ bool "Enforce chdir(\"/\") on all chroots"
55750+ depends on GRKERNSEC_CHROOT
55751+ help
55752+ If you say Y here, the current working directory of all newly-chrooted
55753+ applications will be set to the root directory of the chroot.
55754+ The man page on chroot(2) states:
55755+ Note that this call does not change the current working
55756+ directory, so that `.' can be outside the tree rooted at
55757+ `/'. In particular, the super-user can escape from a
55758+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55759+
55760+ It is recommended that you say Y here, since it's not known to break
55761+ any software. If the sysctl option is enabled, a sysctl option with
55762+ name "chroot_enforce_chdir" is created.
55763+
55764+config GRKERNSEC_CHROOT_CHMOD
55765+ bool "Deny (f)chmod +s"
55766+ depends on GRKERNSEC_CHROOT
55767+ help
55768+ If you say Y here, processes inside a chroot will not be able to chmod
55769+ or fchmod files to make them have suid or sgid bits. This protects
55770+ against another published method of breaking a chroot. If the sysctl
55771+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
55772+ created.
55773+
55774+config GRKERNSEC_CHROOT_FCHDIR
55775+ bool "Deny fchdir out of chroot"
55776+ depends on GRKERNSEC_CHROOT
55777+ help
55778+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
55779+ to a file descriptor of the chrooting process that points to a directory
55780+ outside the filesystem will be stopped. If the sysctl option
55781+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55782+
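 For reference, the well-known technique this option stops looks roughly
 like the sketch below: a process keeps (or is passed) a directory file
 descriptor that points outside the jail, fchdir()s to it after the
 chroot, walks upward with "..", and chroots again. Illustration only;
 the "jail" directory name is made up and the final chroot(".") needs
 CAP_SYS_CHROOT.

    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <sys/stat.h>

    int main(void)
    {
        int i, fd = open(".", O_RDONLY);   /* fd outside the new root */
        if (fd < 0) { perror("open"); return 1; }

        mkdir("jail", 0700);
        if (chroot("jail") < 0) { perror("chroot"); return 1; }

        /* This fchdir() is the step chroot_deny_fchdir refuses. */
        if (fchdir(fd) < 0) { perror("fchdir"); return 1; }

        for (i = 0; i < 64; i++)           /* climb above the old root */
            chdir("..");
        chroot(".");
        return execl("/bin/sh", "sh", (char *)NULL);
    }
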
55783+config GRKERNSEC_CHROOT_MKNOD
55784+ bool "Deny mknod"
55785+ depends on GRKERNSEC_CHROOT
55786+ help
55787+ If you say Y here, processes inside a chroot will not be allowed to
55788+ mknod. The problem with using mknod inside a chroot is that it
55789+ would allow an attacker to create a device entry that is the same
55790+ as one on the physical root of your system, which could be anything
55791+ from the console device to a device for your hard drive (which
55792+ they could then use to wipe the drive or steal data). It is recommended
55793+ that you say Y here, unless you run into software incompatibilities.
55794+ If the sysctl option is enabled, a sysctl option with name
55795+ "chroot_deny_mknod" is created.
55796+
55797+config GRKERNSEC_CHROOT_SHMAT
55798+ bool "Deny shmat() out of chroot"
55799+ depends on GRKERNSEC_CHROOT
55800+ help
55801+ If you say Y here, processes inside a chroot will not be able to attach
55802+ to shared memory segments that were created outside of the chroot jail.
55803+ It is recommended that you say Y here. If the sysctl option is enabled,
55804+ a sysctl option with name "chroot_deny_shmat" is created.
55805+
55806+config GRKERNSEC_CHROOT_UNIX
55807+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
55808+ depends on GRKERNSEC_CHROOT
55809+ help
55810+ If you say Y here, processes inside a chroot will not be able to
55811+ connect to abstract (meaning not belonging to a filesystem) Unix
55812+ domain sockets that were bound outside of a chroot. It is recommended
55813+ that you say Y here. If the sysctl option is enabled, a sysctl option
55814+ with name "chroot_deny_unix" is created.
55815+
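 "Abstract" here means an AF_UNIX address whose sun_path begins with a
 NUL byte, so the socket lives in a kernel namespace rather than on the
 filesystem and stays reachable regardless of what the jail can see. A
 minimal connect sketch (the name "demo" is made up) of the kind of call
 this option denies from inside a chroot:

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <sys/un.h>

    int main(void)
    {
        struct sockaddr_un sa;
        int fd = socket(AF_UNIX, SOCK_STREAM, 0);
        if (fd < 0) { perror("socket"); return 1; }

        memset(&sa, 0, sizeof(sa));
        sa.sun_family = AF_UNIX;
        sa.sun_path[0] = '\0';             /* abstract-namespace marker */
        memcpy(sa.sun_path + 1, "demo", 4);

        if (connect(fd, (struct sockaddr *)&sa,
                    offsetof(struct sockaddr_un, sun_path) + 1 + 4) < 0)
            perror("connect");             /* denied inside the jail */
        close(fd);
        return 0;
    }
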
55816+config GRKERNSEC_CHROOT_FINDTASK
55817+ bool "Protect outside processes"
55818+ depends on GRKERNSEC_CHROOT
55819+ help
55820+ If you say Y here, processes inside a chroot will not be able to
55821+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55822+ getsid, or view any process outside of the chroot. If the sysctl
55823+ option is enabled, a sysctl option with name "chroot_findtask" is
55824+ created.
55825+
55826+config GRKERNSEC_CHROOT_NICE
55827+ bool "Restrict priority changes"
55828+ depends on GRKERNSEC_CHROOT
55829+ help
55830+ If you say Y here, processes inside a chroot will not be able to raise
55831+ the priority of processes in the chroot, or alter the priority of
55832+ processes outside the chroot. This provides more security than simply
55833+ removing CAP_SYS_NICE from the process' capability set. If the
55834+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55835+ is created.
55836+
55837+config GRKERNSEC_CHROOT_SYSCTL
55838+ bool "Deny sysctl writes"
55839+ depends on GRKERNSEC_CHROOT
55840+ help
55841+ If you say Y here, an attacker in a chroot will not be able to
55842+ write to sysctl entries, either by sysctl(2) or through a /proc
55843+ interface. It is strongly recommended that you say Y here. If the
55844+ sysctl option is enabled, a sysctl option with name
55845+ "chroot_deny_sysctl" is created.
55846+
55847+config GRKERNSEC_CHROOT_CAPS
55848+ bool "Capability restrictions"
55849+ depends on GRKERNSEC_CHROOT
55850+ help
55851+ If you say Y here, the capabilities on all root processes within a
55852+ chroot jail will be lowered to stop module insertion, raw I/O,
55853+ system and net admin tasks, rebooting the system, modifying immutable
55854+ files, modifying IPC owned by another, and changing the system time.
55855+ This is left an option because it can break some apps. Disable this
55856+ if your chrooted apps are having problems performing those kinds of
55857+ tasks. If the sysctl option is enabled, a sysctl option with
55858+ name "chroot_caps" is created.
55859+
55860+endmenu
55861+menu "Kernel Auditing"
55862+depends on GRKERNSEC
55863+
55864+config GRKERNSEC_AUDIT_GROUP
55865+ bool "Single group for auditing"
55866+ help
55867+ If you say Y here, the exec, chdir, and (un)mount logging features
55868+ will only operate on a group you specify. This option is recommended
55869+ if you only want to watch certain users instead of having a large
55870+ amount of logs from the entire system. If the sysctl option is enabled,
55871+ a sysctl option with name "audit_group" is created.
55872+
55873+config GRKERNSEC_AUDIT_GID
55874+ int "GID for auditing"
55875+ depends on GRKERNSEC_AUDIT_GROUP
55876+ default 1007
55877+
55878+config GRKERNSEC_EXECLOG
55879+ bool "Exec logging"
55880+ help
55881+ If you say Y here, all execve() calls will be logged (since the
55882+ other exec*() calls are frontends to execve(), all execution
55883+ will be logged). Useful for shell-servers that like to keep track
55884+ of their users. If the sysctl option is enabled, a sysctl option with
55885+ name "exec_logging" is created.
55886+ WARNING: This option when enabled will produce a LOT of logs, especially
55887+ on an active system.
55888+
55889+config GRKERNSEC_RESLOG
55890+ bool "Resource logging"
55891+ help
55892+ If you say Y here, all attempts to overstep resource limits will
55893+ be logged with the resource name, the requested size, and the current
55894+ limit. It is highly recommended that you say Y here. If the sysctl
55895+ option is enabled, a sysctl option with name "resource_logging" is
55896+ created. If the RBAC system is enabled, the sysctl value is ignored.
55897+
55898+config GRKERNSEC_CHROOT_EXECLOG
55899+ bool "Log execs within chroot"
55900+ help
55901+ If you say Y here, all executions inside a chroot jail will be logged
55902+ to syslog. This can cause a large amount of logs if certain
55903+ applications (e.g. djb's daemontools) are installed on the system, and
55904+ is therefore left as an option. If the sysctl option is enabled, a
55905+ sysctl option with name "chroot_execlog" is created.
55906+
55907+config GRKERNSEC_AUDIT_PTRACE
55908+ bool "Ptrace logging"
55909+ help
55910+ If you say Y here, all attempts to attach to a process via ptrace
55911+ will be logged. If the sysctl option is enabled, a sysctl option
55912+ with name "audit_ptrace" is created.
55913+
55914+config GRKERNSEC_AUDIT_CHDIR
55915+ bool "Chdir logging"
55916+ help
55917+ If you say Y here, all chdir() calls will be logged. If the sysctl
55918+ option is enabled, a sysctl option with name "audit_chdir" is created.
55919+
55920+config GRKERNSEC_AUDIT_MOUNT
55921+ bool "(Un)Mount logging"
55922+ help
55923+ If you say Y here, all mounts and unmounts will be logged. If the
55924+ sysctl option is enabled, a sysctl option with name "audit_mount" is
55925+ created.
55926+
55927+config GRKERNSEC_SIGNAL
55928+ bool "Signal logging"
55929+ help
55930+ If you say Y here, certain important signals will be logged, such as
55931+ SIGSEGV, which will as a result inform you when an error in a program
55932+ has occurred, which in some cases could indicate a possible exploit attempt.
55933+ If the sysctl option is enabled, a sysctl option with name
55934+ "signal_logging" is created.
55935+
55936+config GRKERNSEC_FORKFAIL
55937+ bool "Fork failure logging"
55938+ help
55939+ If you say Y here, all failed fork() attempts will be logged.
55940+ This could suggest a fork bomb, or someone attempting to overstep
55941+ their process limit. If the sysctl option is enabled, a sysctl option
55942+ with name "forkfail_logging" is created.
55943+
55944+config GRKERNSEC_TIME
55945+ bool "Time change logging"
55946+ help
55947+ If you say Y here, any changes of the system clock will be logged.
55948+ If the sysctl option is enabled, a sysctl option with name
55949+ "timechange_logging" is created.
55950+
55951+config GRKERNSEC_PROC_IPADDR
55952+ bool "/proc/<pid>/ipaddr support"
55953+ help
55954+ If you say Y here, a new entry will be added to each /proc/<pid>
55955+ directory that contains the IP address of the person using the task.
55956+ The IP is carried across local TCP and AF_UNIX stream sockets.
55957+ This information can be useful for IDS/IPSes to perform remote response
55958+ to a local attack. The entry is readable by only the owner of the
55959+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
55960+ the RBAC system), and thus does not create privacy concerns.
55961+
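 A minimal reader for the new entry, assuming it appears as
 /proc/<pid>/ipaddr exactly as the option name suggests and, per the help
 text above, is readable only by the task owner (or root):

    #include <stdio.h>

    int main(int argc, char **argv)
    {
        char path[64], buf[64];

        /* Default to the calling task if no PID is given. */
        snprintf(path, sizeof(path), "/proc/%s/ipaddr",
                 argc > 1 ? argv[1] : "self");

        FILE *f = fopen(path, "r");
        if (!f) { perror(path); return 1; }
        if (fgets(buf, sizeof(buf), f))
            printf("%s\n", buf);
        fclose(f);
        return 0;
    }
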
55962+config GRKERNSEC_RWXMAP_LOG
55963+ bool 'Denied RWX mmap/mprotect logging'
55964+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
55965+ help
55966+ If you say Y here, calls to mmap() and mprotect() with explicit
55967+ usage of PROT_WRITE and PROT_EXEC together will be logged when
55968+ denied by the PAX_MPROTECT feature. If the sysctl option is
55969+ enabled, a sysctl option with name "rwxmap_logging" is created.
55970+
55971+config GRKERNSEC_AUDIT_TEXTREL
55972+ bool 'ELF text relocations logging (READ HELP)'
55973+ depends on PAX_MPROTECT
55974+ help
55975+ If you say Y here, text relocations will be logged with the filename
55976+ of the offending library or binary. The purpose of the feature is
55977+ to help Linux distribution developers get rid of libraries and
55978+ binaries that need text relocations which hinder the future progress
55979+ of PaX. Only Linux distribution developers should say Y here, and
55980+ never on a production machine, as this option creates an information
55981+ leak that could aid an attacker in defeating the randomization of
55982+ a single memory region. If the sysctl option is enabled, a sysctl
55983+ option with name "audit_textrel" is created.
55984+
55985+endmenu
55986+
55987+menu "Executable Protections"
55988+depends on GRKERNSEC
55989+
55990+config GRKERNSEC_DMESG
55991+ bool "Dmesg(8) restriction"
55992+ help
55993+ If you say Y here, non-root users will not be able to use dmesg(8)
55994+ to view up to the last 4kb of messages in the kernel's log buffer.
55995+ The kernel's log buffer often contains kernel addresses and other
55996+ identifying information useful to an attacker in fingerprinting a
55997+ system for a targeted exploit.
55998+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
55999+ created.
56000+
56001+config GRKERNSEC_HARDEN_PTRACE
56002+ bool "Deter ptrace-based process snooping"
56003+ help
56004+ If you say Y here, TTY sniffers and other malicious monitoring
56005+ programs implemented through ptrace will be defeated. If you
56006+ have been using the RBAC system, this option has already been
56007+ enabled for several years for all users, with the ability to make
56008+ fine-grained exceptions.
56009+
56010+ This option only affects the ability of non-root users to ptrace
56011+ processes that are not a descendant of the ptracing process.
56012+ This means that strace ./binary and gdb ./binary will still work,
56013+ but attaching to arbitrary processes will not. If the sysctl
56014+ option is enabled, a sysctl option with name "harden_ptrace" is
56015+ created.
56016+
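 A small probe of the restriction: run it as a non-root user against the
 PID of a process you own but did not spawn (for example your login
 shell). With "harden_ptrace" active the attach is expected to fail with
 EPERM, while strace/gdb on your own children keep working.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <sys/wait.h>

    int main(int argc, char **argv)
    {
        pid_t pid;

        if (argc < 2) {
            fprintf(stderr, "usage: %s <pid>\n", argv[0]);
            return 2;
        }
        pid = (pid_t)atoi(argv[1]);

        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0) {
            perror("PTRACE_ATTACH");       /* expected when restricted */
            return 1;
        }
        waitpid(pid, NULL, 0);
        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        puts("attach succeeded");
        return 0;
    }
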
56017+config GRKERNSEC_TPE
56018+ bool "Trusted Path Execution (TPE)"
56019+ help
56020+ If you say Y here, you will be able to choose a gid to add to the
56021+ supplementary groups of users you want to mark as "untrusted."
56022+ These users will not be able to execute any files that are not in
56023+ root-owned directories writable only by root. If the sysctl option
56024+ is enabled, a sysctl option with name "tpe" is created.
56025+
56026+config GRKERNSEC_TPE_ALL
56027+ bool "Partially restrict all non-root users"
56028+ depends on GRKERNSEC_TPE
56029+ help
56030+ If you say Y here, all non-root users will be covered under
56031+ a weaker TPE restriction. This is separate from, and in addition to,
56032+ the main TPE options that you have selected elsewhere. Thus, if a
56033+ "trusted" GID is chosen, this restriction applies to even that GID.
56034+ Under this restriction, all non-root users will only be allowed to
56035+ execute files in directories they own that are not group or
56036+ world-writable, or in directories owned by root and writable only by
56037+ root. If the sysctl option is enabled, a sysctl option with name
56038+ "tpe_restrict_all" is created.
56039+
56040+config GRKERNSEC_TPE_INVERT
56041+ bool "Invert GID option"
56042+ depends on GRKERNSEC_TPE
56043+ help
56044+ If you say Y here, the group you specify in the TPE configuration will
56045+ decide what group TPE restrictions will be *disabled* for. This
56046+ option is useful if you want TPE restrictions to be applied to most
56047+ users on the system. If the sysctl option is enabled, a sysctl option
56048+ with name "tpe_invert" is created. Unlike other sysctl options, this
56049+ entry will default to on for backward-compatibility.
56050+
56051+config GRKERNSEC_TPE_GID
56052+ int "GID for untrusted users"
56053+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56054+ default 1005
56055+ help
56056+ Setting this GID determines what group TPE restrictions will be
56057+ *enabled* for. If the sysctl option is enabled, a sysctl option
56058+ with name "tpe_gid" is created.
56059+
56060+config GRKERNSEC_TPE_GID
56061+ int "GID for trusted users"
56062+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56063+ default 1005
56064+ help
56065+ Setting this GID determines what group TPE restrictions will be
56066+ *disabled* for. If the sysctl option is enabled, a sysctl option
56067+ with name "tpe_gid" is created.
56068+
56069+endmenu
56070+menu "Network Protections"
56071+depends on GRKERNSEC
56072+
56073+config GRKERNSEC_RANDNET
56074+ bool "Larger entropy pools"
56075+ help
56076+ If you say Y here, the entropy pools used for many features of Linux
56077+ and grsecurity will be doubled in size. Since several grsecurity
56078+ features use additional randomness, it is recommended that you say Y
56079+ here. Saying Y here has a similar effect as modifying
56080+ /proc/sys/kernel/random/poolsize.
56081+
56082+config GRKERNSEC_BLACKHOLE
56083+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56084+ depends on NET
56085+ help
56086+ If you say Y here, neither TCP resets nor ICMP
56087+ destination-unreachable packets will be sent in response to packets
56088+ sent to ports for which no associated listening process exists.
56089+ This feature supports both IPv4 and IPv6 and exempts the
56090+ loopback interface from blackholing. Enabling this feature
56091+ makes a host more resilient to DoS attacks and reduces network
56092+ visibility against scanners.
56093+
56094+ The blackhole feature as-implemented is equivalent to the FreeBSD
56095+ blackhole feature, as it prevents RST responses to all packets, not
56096+ just SYNs. Under most application behavior this causes no
56097+ problems, but applications (like haproxy) may not close certain
56098+ connections in a way that cleanly terminates them on the remote
56099+ end, leaving the remote host in LAST_ACK state. Because of this
56100+ side-effect and to prevent intentional LAST_ACK DoSes, this
56101+ feature also adds automatic mitigation against such attacks.
56102+ The mitigation drastically reduces the amount of time a socket
56103+ can spend in LAST_ACK state. If you're using haproxy and not
56104+ all servers it connects to have this option enabled, consider
56105+ disabling this feature on the haproxy host.
56106+
56107+ If the sysctl option is enabled, two sysctl options with names
56108+ "ip_blackhole" and "lastack_retries" will be created.
56109+ While "ip_blackhole" takes the standard zero/non-zero on/off
56110+ toggle, "lastack_retries" uses the same kinds of values as
56111+ "tcp_retries1" and "tcp_retries2". The default value of 4
56112+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56113+ state.
56114+
56115+config GRKERNSEC_SOCKET
56116+ bool "Socket restrictions"
56117+ depends on NET
56118+ help
56119+ If you say Y here, you will be able to choose from several options.
56120+ If you assign a GID on your system and add it to the supplementary
56121+ groups of users you want to restrict socket access to, this patch
56122+ will perform up to three things, based on the option(s) you choose.
56123+
56124+config GRKERNSEC_SOCKET_ALL
56125+ bool "Deny any sockets to group"
56126+ depends on GRKERNSEC_SOCKET
56127+ help
56128+ If you say Y here, you will be able to choose a GID whose users will
56129+ be unable to connect to other hosts from your machine or run server
56130+ applications from your machine. If the sysctl option is enabled, a
56131+ sysctl option with name "socket_all" is created.
56132+
56133+config GRKERNSEC_SOCKET_ALL_GID
56134+ int "GID to deny all sockets for"
56135+ depends on GRKERNSEC_SOCKET_ALL
56136+ default 1004
56137+ help
56138+ Here you can choose the GID to disable socket access for. Remember to
56139+ add the users you want socket access disabled for to the GID
56140+ specified here. If the sysctl option is enabled, a sysctl option
56141+ with name "socket_all_gid" is created.
56142+
56143+config GRKERNSEC_SOCKET_CLIENT
56144+ bool "Deny client sockets to group"
56145+ depends on GRKERNSEC_SOCKET
56146+ help
56147+ If you say Y here, you will be able to choose a GID whose users will
56148+ be unable to connect to other hosts from your machine, but will be
56149+ able to run servers. If this option is enabled, all users in the group
56150+ you specify will have to use passive mode when initiating ftp transfers
56151+ from the shell on your machine. If the sysctl option is enabled, a
56152+ sysctl option with name "socket_client" is created.
56153+
56154+config GRKERNSEC_SOCKET_CLIENT_GID
56155+ int "GID to deny client sockets for"
56156+ depends on GRKERNSEC_SOCKET_CLIENT
56157+ default 1003
56158+ help
56159+ Here you can choose the GID to disable client socket access for.
56160+ Remember to add the users you want client socket access disabled for to
56161+ the GID specified here. If the sysctl option is enabled, a sysctl
56162+ option with name "socket_client_gid" is created.
56163+
56164+config GRKERNSEC_SOCKET_SERVER
56165+ bool "Deny server sockets to group"
56166+ depends on GRKERNSEC_SOCKET
56167+ help
56168+ If you say Y here, you will be able to choose a GID whose users will
56169+ be unable to run server applications from your machine. If the sysctl
56170+ option is enabled, a sysctl option with name "socket_server" is created.
56171+
56172+config GRKERNSEC_SOCKET_SERVER_GID
56173+ int "GID to deny server sockets for"
56174+ depends on GRKERNSEC_SOCKET_SERVER
56175+ default 1002
56176+ help
56177+ Here you can choose the GID to disable server socket access for.
56178+ Remember to add the users you want server socket access disabled for to
56179+ the GID specified here. If the sysctl option is enabled, a sysctl
56180+ option with name "socket_server_gid" is created.
56181+
56182+endmenu
56183+menu "Sysctl support"
56184+depends on GRKERNSEC && SYSCTL
56185+
56186+config GRKERNSEC_SYSCTL
56187+ bool "Sysctl support"
56188+ help
56189+ If you say Y here, you will be able to change the options that
56190+ grsecurity runs with at bootup, without having to recompile your
56191+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56192+ to enable (1) or disable (0) various features. All the sysctl entries
56193+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56194+ All features enabled in the kernel configuration are disabled at boot
56195+ if you do not say Y to the "Turn on features by default" option.
56196+ All options should be set at startup, and the grsec_lock entry should
56197+ be set to a non-zero value after all the options are set.
56198+ *THIS IS EXTREMELY IMPORTANT*
56199+
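 A sketch of the startup sequence the help text asks for, written as
 plain file writes to the /proc/sys/kernel/grsecurity directory named
 above; "dmesg" is just one example entry from this menu, and grsec_lock
 is written last so that the settings can no longer be changed:

    #include <stdio.h>

    static int write_val(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");
        if (!f) { perror(path); return -1; }
        fputs(val, f);
        return fclose(f);
    }

    int main(void)
    {
        /* Enable the desired features first... */
        write_val("/proc/sys/kernel/grsecurity/dmesg", "1");
        /* ...and only then set the lock, as stressed above. */
        write_val("/proc/sys/kernel/grsecurity/grsec_lock", "1");
        return 0;
    }
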
56200+config GRKERNSEC_SYSCTL_DISTRO
56201+ bool "Extra sysctl support for distro makers (READ HELP)"
56202+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56203+ help
56204+ If you say Y here, additional sysctl options will be created
56205+ for features that affect processes running as root. Therefore,
56206+ it is critical when using this option that the grsec_lock entry be
56207+ enabled after boot. Only distros with prebuilt kernel packages
56208+ with this option enabled that can ensure grsec_lock is enabled
56209+ after boot should use this option.
56210+ *Failure to set grsec_lock after boot makes all grsec features
56211+ this option covers useless*
56212+
56213+ Currently this option creates the following sysctl entries:
56214+ "Disable Privileged I/O": "disable_priv_io"
56215+
56216+config GRKERNSEC_SYSCTL_ON
56217+ bool "Turn on features by default"
56218+ depends on GRKERNSEC_SYSCTL
56219+ help
56220+ If you say Y here, instead of having all features enabled in the
56221+ kernel configuration disabled at boot time, the features will be
56222+ enabled at boot time. It is recommended you say Y here unless
56223+ there is some reason you would want all sysctl-tunable features to
56224+ be disabled by default. As mentioned elsewhere, it is important
56225+ to enable the grsec_lock entry once you have finished modifying
56226+ the sysctl entries.
56227+
56228+endmenu
56229+menu "Logging Options"
56230+depends on GRKERNSEC
56231+
56232+config GRKERNSEC_FLOODTIME
56233+ int "Seconds in between log messages (minimum)"
56234+ default 10
56235+ help
56236+ This option allows you to enforce the number of seconds between
56237+ grsecurity log messages. The default should be suitable for most
56238+ people; however, if you choose to change it, choose a value small enough
56239+ to allow informative logs to be produced, but large enough to
56240+ prevent flooding.
56241+
56242+config GRKERNSEC_FLOODBURST
56243+ int "Number of messages in a burst (maximum)"
56244+ default 4
56245+ help
56246+ This option allows you to choose the maximum number of messages allowed
56247+ within the flood time interval you chose in a separate option. The
56248+ default should be suitable for most people; however, if you find that
56249+ many of your logs are being interpreted as flooding, you may want to
56250+ raise this value.
56251+
56252+endmenu
56253+
56254+endmenu
56255diff -urNp linux-2.6.32.45/grsecurity/Makefile linux-2.6.32.45/grsecurity/Makefile
56256--- linux-2.6.32.45/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56257+++ linux-2.6.32.45/grsecurity/Makefile 2011-08-21 18:54:34.000000000 -0400
56258@@ -0,0 +1,34 @@
56259+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56260+# during 2001-2009 it has been completely redesigned by Brad Spengler
56261+# into an RBAC system
56262+#
56263+# All code in this directory and various hooks inserted throughout the kernel
56264+# are copyright Brad Spengler - Open Source Security, Inc., and released
56265+# under the GPL v2 or higher
56266+
56267+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56268+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56269+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56270+
56271+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56272+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56273+ gracl_learn.o grsec_log.o
56274+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56275+
56276+ifdef CONFIG_NET
56277+obj-y += grsec_sock.o
56278+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56279+endif
56280+
56281+ifndef CONFIG_GRKERNSEC
56282+obj-y += grsec_disabled.o
56283+endif
56284+
56285+ifdef CONFIG_GRKERNSEC_HIDESYM
56286+extra-y := grsec_hidesym.o
56287+$(obj)/grsec_hidesym.o:
56288+ @-chmod -f 500 /boot
56289+ @-chmod -f 500 /lib/modules
56290+ @-chmod -f 700 .
56291+ @echo ' grsec: protected kernel image paths'
56292+endif
56293diff -urNp linux-2.6.32.45/include/acpi/acpi_bus.h linux-2.6.32.45/include/acpi/acpi_bus.h
56294--- linux-2.6.32.45/include/acpi/acpi_bus.h 2011-03-27 14:31:47.000000000 -0400
56295+++ linux-2.6.32.45/include/acpi/acpi_bus.h 2011-08-05 20:33:55.000000000 -0400
56296@@ -107,7 +107,7 @@ struct acpi_device_ops {
56297 acpi_op_bind bind;
56298 acpi_op_unbind unbind;
56299 acpi_op_notify notify;
56300-};
56301+} __no_const;
56302
56303 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56304
56305diff -urNp linux-2.6.32.45/include/acpi/acpi_drivers.h linux-2.6.32.45/include/acpi/acpi_drivers.h
56306--- linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
56307+++ linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
56308@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
56309 Dock Station
56310 -------------------------------------------------------------------------- */
56311 struct acpi_dock_ops {
56312- acpi_notify_handler handler;
56313- acpi_notify_handler uevent;
56314+ const acpi_notify_handler handler;
56315+ const acpi_notify_handler uevent;
56316 };
56317
56318 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
56319@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
56320 extern int register_dock_notifier(struct notifier_block *nb);
56321 extern void unregister_dock_notifier(struct notifier_block *nb);
56322 extern int register_hotplug_dock_device(acpi_handle handle,
56323- struct acpi_dock_ops *ops,
56324+ const struct acpi_dock_ops *ops,
56325 void *context);
56326 extern void unregister_hotplug_dock_device(acpi_handle handle);
56327 #else
56328@@ -144,7 +144,7 @@ static inline void unregister_dock_notif
56329 {
56330 }
56331 static inline int register_hotplug_dock_device(acpi_handle handle,
56332- struct acpi_dock_ops *ops,
56333+ const struct acpi_dock_ops *ops,
56334 void *context)
56335 {
56336 return -ENODEV;
56337diff -urNp linux-2.6.32.45/include/asm-generic/atomic-long.h linux-2.6.32.45/include/asm-generic/atomic-long.h
56338--- linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
56339+++ linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
56340@@ -22,6 +22,12 @@
56341
56342 typedef atomic64_t atomic_long_t;
56343
56344+#ifdef CONFIG_PAX_REFCOUNT
56345+typedef atomic64_unchecked_t atomic_long_unchecked_t;
56346+#else
56347+typedef atomic64_t atomic_long_unchecked_t;
56348+#endif
56349+
56350 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56351
56352 static inline long atomic_long_read(atomic_long_t *l)
56353@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56354 return (long)atomic64_read(v);
56355 }
56356
56357+#ifdef CONFIG_PAX_REFCOUNT
56358+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56359+{
56360+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56361+
56362+ return (long)atomic64_read_unchecked(v);
56363+}
56364+#endif
56365+
56366 static inline void atomic_long_set(atomic_long_t *l, long i)
56367 {
56368 atomic64_t *v = (atomic64_t *)l;
56369@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56370 atomic64_set(v, i);
56371 }
56372
56373+#ifdef CONFIG_PAX_REFCOUNT
56374+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56375+{
56376+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56377+
56378+ atomic64_set_unchecked(v, i);
56379+}
56380+#endif
56381+
56382 static inline void atomic_long_inc(atomic_long_t *l)
56383 {
56384 atomic64_t *v = (atomic64_t *)l;
56385@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56386 atomic64_inc(v);
56387 }
56388
56389+#ifdef CONFIG_PAX_REFCOUNT
56390+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56391+{
56392+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56393+
56394+ atomic64_inc_unchecked(v);
56395+}
56396+#endif
56397+
56398 static inline void atomic_long_dec(atomic_long_t *l)
56399 {
56400 atomic64_t *v = (atomic64_t *)l;
56401@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56402 atomic64_dec(v);
56403 }
56404
56405+#ifdef CONFIG_PAX_REFCOUNT
56406+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56407+{
56408+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56409+
56410+ atomic64_dec_unchecked(v);
56411+}
56412+#endif
56413+
56414 static inline void atomic_long_add(long i, atomic_long_t *l)
56415 {
56416 atomic64_t *v = (atomic64_t *)l;
56417@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56418 atomic64_add(i, v);
56419 }
56420
56421+#ifdef CONFIG_PAX_REFCOUNT
56422+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56423+{
56424+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56425+
56426+ atomic64_add_unchecked(i, v);
56427+}
56428+#endif
56429+
56430 static inline void atomic_long_sub(long i, atomic_long_t *l)
56431 {
56432 atomic64_t *v = (atomic64_t *)l;
56433@@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
56434 return (long)atomic64_inc_return(v);
56435 }
56436
56437+#ifdef CONFIG_PAX_REFCOUNT
56438+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56439+{
56440+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56441+
56442+ return (long)atomic64_inc_return_unchecked(v);
56443+}
56444+#endif
56445+
56446 static inline long atomic_long_dec_return(atomic_long_t *l)
56447 {
56448 atomic64_t *v = (atomic64_t *)l;
56449@@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
56450
56451 typedef atomic_t atomic_long_t;
56452
56453+#ifdef CONFIG_PAX_REFCOUNT
56454+typedef atomic_unchecked_t atomic_long_unchecked_t;
56455+#else
56456+typedef atomic_t atomic_long_unchecked_t;
56457+#endif
56458+
56459 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56460 static inline long atomic_long_read(atomic_long_t *l)
56461 {
56462@@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
56463 return (long)atomic_read(v);
56464 }
56465
56466+#ifdef CONFIG_PAX_REFCOUNT
56467+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56468+{
56469+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56470+
56471+ return (long)atomic_read_unchecked(v);
56472+}
56473+#endif
56474+
56475 static inline void atomic_long_set(atomic_long_t *l, long i)
56476 {
56477 atomic_t *v = (atomic_t *)l;
56478@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
56479 atomic_set(v, i);
56480 }
56481
56482+#ifdef CONFIG_PAX_REFCOUNT
56483+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56484+{
56485+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56486+
56487+ atomic_set_unchecked(v, i);
56488+}
56489+#endif
56490+
56491 static inline void atomic_long_inc(atomic_long_t *l)
56492 {
56493 atomic_t *v = (atomic_t *)l;
56494@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
56495 atomic_inc(v);
56496 }
56497
56498+#ifdef CONFIG_PAX_REFCOUNT
56499+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56500+{
56501+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56502+
56503+ atomic_inc_unchecked(v);
56504+}
56505+#endif
56506+
56507 static inline void atomic_long_dec(atomic_long_t *l)
56508 {
56509 atomic_t *v = (atomic_t *)l;
56510@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
56511 atomic_dec(v);
56512 }
56513
56514+#ifdef CONFIG_PAX_REFCOUNT
56515+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56516+{
56517+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56518+
56519+ atomic_dec_unchecked(v);
56520+}
56521+#endif
56522+
56523 static inline void atomic_long_add(long i, atomic_long_t *l)
56524 {
56525 atomic_t *v = (atomic_t *)l;
56526@@ -176,6 +278,15 @@ static inline void atomic_long_add(long
56527 atomic_add(i, v);
56528 }
56529
56530+#ifdef CONFIG_PAX_REFCOUNT
56531+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56532+{
56533+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56534+
56535+ atomic_add_unchecked(i, v);
56536+}
56537+#endif
56538+
56539 static inline void atomic_long_sub(long i, atomic_long_t *l)
56540 {
56541 atomic_t *v = (atomic_t *)l;
56542@@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
56543 return (long)atomic_inc_return(v);
56544 }
56545
56546+#ifdef CONFIG_PAX_REFCOUNT
56547+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56548+{
56549+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56550+
56551+ return (long)atomic_inc_return_unchecked(v);
56552+}
56553+#endif
56554+
56555 static inline long atomic_long_dec_return(atomic_long_t *l)
56556 {
56557 atomic_t *v = (atomic_t *)l;
56558@@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
56559
56560 #endif /* BITS_PER_LONG == 64 */
56561
56562+#ifdef CONFIG_PAX_REFCOUNT
56563+static inline void pax_refcount_needs_these_functions(void)
56564+{
56565+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
56566+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56567+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56568+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56569+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56570+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56571+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56572+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56573+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56574+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56575+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56576+
56577+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56578+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56579+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56580+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56581+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56582+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56583+}
56584+#else
56585+#define atomic_read_unchecked(v) atomic_read(v)
56586+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56587+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56588+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56589+#define atomic_inc_unchecked(v) atomic_inc(v)
56590+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56591+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56592+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56593+#define atomic_dec_unchecked(v) atomic_dec(v)
56594+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56595+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56596+
56597+#define atomic_long_read_unchecked(v) atomic_long_read(v)
56598+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56599+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56600+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56601+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56602+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56603+#endif
56604+
56605 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
56606diff -urNp linux-2.6.32.45/include/asm-generic/bug.h linux-2.6.32.45/include/asm-generic/bug.h
56607--- linux-2.6.32.45/include/asm-generic/bug.h 2011-07-13 17:23:04.000000000 -0400
56608+++ linux-2.6.32.45/include/asm-generic/bug.h 2011-08-21 17:56:07.000000000 -0400
56609@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const cha
56610
56611 #else /* !CONFIG_BUG */
56612 #ifndef HAVE_ARCH_BUG
56613-#define BUG() do {} while(0)
56614+#define BUG() do { for (;;) ; } while(0)
56615 #endif
56616
56617 #ifndef HAVE_ARCH_BUG_ON
56618-#define BUG_ON(condition) do { if (condition) ; } while(0)
56619+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
56620 #endif
56621
56622 #ifndef HAVE_ARCH_WARN_ON
56623diff -urNp linux-2.6.32.45/include/asm-generic/cache.h linux-2.6.32.45/include/asm-generic/cache.h
56624--- linux-2.6.32.45/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
56625+++ linux-2.6.32.45/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
56626@@ -6,7 +6,7 @@
56627 * cache lines need to provide their own cache.h.
56628 */
56629
56630-#define L1_CACHE_SHIFT 5
56631-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
56632+#define L1_CACHE_SHIFT 5UL
56633+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
56634
56635 #endif /* __ASM_GENERIC_CACHE_H */
56636diff -urNp linux-2.6.32.45/include/asm-generic/dma-mapping-common.h linux-2.6.32.45/include/asm-generic/dma-mapping-common.h
56637--- linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
56638+++ linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
56639@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
56640 enum dma_data_direction dir,
56641 struct dma_attrs *attrs)
56642 {
56643- struct dma_map_ops *ops = get_dma_ops(dev);
56644+ const struct dma_map_ops *ops = get_dma_ops(dev);
56645 dma_addr_t addr;
56646
56647 kmemcheck_mark_initialized(ptr, size);
56648@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
56649 enum dma_data_direction dir,
56650 struct dma_attrs *attrs)
56651 {
56652- struct dma_map_ops *ops = get_dma_ops(dev);
56653+ const struct dma_map_ops *ops = get_dma_ops(dev);
56654
56655 BUG_ON(!valid_dma_direction(dir));
56656 if (ops->unmap_page)
56657@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
56658 int nents, enum dma_data_direction dir,
56659 struct dma_attrs *attrs)
56660 {
56661- struct dma_map_ops *ops = get_dma_ops(dev);
56662+ const struct dma_map_ops *ops = get_dma_ops(dev);
56663 int i, ents;
56664 struct scatterlist *s;
56665
56666@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
56667 int nents, enum dma_data_direction dir,
56668 struct dma_attrs *attrs)
56669 {
56670- struct dma_map_ops *ops = get_dma_ops(dev);
56671+ const struct dma_map_ops *ops = get_dma_ops(dev);
56672
56673 BUG_ON(!valid_dma_direction(dir));
56674 debug_dma_unmap_sg(dev, sg, nents, dir);
56675@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
56676 size_t offset, size_t size,
56677 enum dma_data_direction dir)
56678 {
56679- struct dma_map_ops *ops = get_dma_ops(dev);
56680+ const struct dma_map_ops *ops = get_dma_ops(dev);
56681 dma_addr_t addr;
56682
56683 kmemcheck_mark_initialized(page_address(page) + offset, size);
56684@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
56685 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
56686 size_t size, enum dma_data_direction dir)
56687 {
56688- struct dma_map_ops *ops = get_dma_ops(dev);
56689+ const struct dma_map_ops *ops = get_dma_ops(dev);
56690
56691 BUG_ON(!valid_dma_direction(dir));
56692 if (ops->unmap_page)
56693@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
56694 size_t size,
56695 enum dma_data_direction dir)
56696 {
56697- struct dma_map_ops *ops = get_dma_ops(dev);
56698+ const struct dma_map_ops *ops = get_dma_ops(dev);
56699
56700 BUG_ON(!valid_dma_direction(dir));
56701 if (ops->sync_single_for_cpu)
56702@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
56703 dma_addr_t addr, size_t size,
56704 enum dma_data_direction dir)
56705 {
56706- struct dma_map_ops *ops = get_dma_ops(dev);
56707+ const struct dma_map_ops *ops = get_dma_ops(dev);
56708
56709 BUG_ON(!valid_dma_direction(dir));
56710 if (ops->sync_single_for_device)
56711@@ -123,7 +123,7 @@ static inline void dma_sync_single_range
56712 size_t size,
56713 enum dma_data_direction dir)
56714 {
56715- struct dma_map_ops *ops = get_dma_ops(dev);
56716+ const struct dma_map_ops *ops = get_dma_ops(dev);
56717
56718 BUG_ON(!valid_dma_direction(dir));
56719 if (ops->sync_single_range_for_cpu) {
56720@@ -140,7 +140,7 @@ static inline void dma_sync_single_range
56721 size_t size,
56722 enum dma_data_direction dir)
56723 {
56724- struct dma_map_ops *ops = get_dma_ops(dev);
56725+ const struct dma_map_ops *ops = get_dma_ops(dev);
56726
56727 BUG_ON(!valid_dma_direction(dir));
56728 if (ops->sync_single_range_for_device) {
56729@@ -155,7 +155,7 @@ static inline void
56730 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
56731 int nelems, enum dma_data_direction dir)
56732 {
56733- struct dma_map_ops *ops = get_dma_ops(dev);
56734+ const struct dma_map_ops *ops = get_dma_ops(dev);
56735
56736 BUG_ON(!valid_dma_direction(dir));
56737 if (ops->sync_sg_for_cpu)
56738@@ -167,7 +167,7 @@ static inline void
56739 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
56740 int nelems, enum dma_data_direction dir)
56741 {
56742- struct dma_map_ops *ops = get_dma_ops(dev);
56743+ const struct dma_map_ops *ops = get_dma_ops(dev);
56744
56745 BUG_ON(!valid_dma_direction(dir));
56746 if (ops->sync_sg_for_device)
56747diff -urNp linux-2.6.32.45/include/asm-generic/emergency-restart.h linux-2.6.32.45/include/asm-generic/emergency-restart.h
56748--- linux-2.6.32.45/include/asm-generic/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
56749+++ linux-2.6.32.45/include/asm-generic/emergency-restart.h 2011-08-21 19:17:17.000000000 -0400
56750@@ -1,7 +1,7 @@
56751 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
56752 #define _ASM_GENERIC_EMERGENCY_RESTART_H
56753
56754-static inline void machine_emergency_restart(void)
56755+static inline __noreturn void machine_emergency_restart(void)
56756 {
56757 machine_restart(NULL);
56758 }
56759diff -urNp linux-2.6.32.45/include/asm-generic/futex.h linux-2.6.32.45/include/asm-generic/futex.h
56760--- linux-2.6.32.45/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
56761+++ linux-2.6.32.45/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
56762@@ -6,7 +6,7 @@
56763 #include <asm/errno.h>
56764
56765 static inline int
56766-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
56767+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
56768 {
56769 int op = (encoded_op >> 28) & 7;
56770 int cmp = (encoded_op >> 24) & 15;
56771@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
56772 }
56773
56774 static inline int
56775-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
56776+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
56777 {
56778 return -ENOSYS;
56779 }
56780diff -urNp linux-2.6.32.45/include/asm-generic/int-l64.h linux-2.6.32.45/include/asm-generic/int-l64.h
56781--- linux-2.6.32.45/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
56782+++ linux-2.6.32.45/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
56783@@ -46,6 +46,8 @@ typedef unsigned int u32;
56784 typedef signed long s64;
56785 typedef unsigned long u64;
56786
56787+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
56788+
56789 #define S8_C(x) x
56790 #define U8_C(x) x ## U
56791 #define S16_C(x) x
56792diff -urNp linux-2.6.32.45/include/asm-generic/int-ll64.h linux-2.6.32.45/include/asm-generic/int-ll64.h
56793--- linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
56794+++ linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
56795@@ -51,6 +51,8 @@ typedef unsigned int u32;
56796 typedef signed long long s64;
56797 typedef unsigned long long u64;
56798
56799+typedef unsigned long long intoverflow_t;
56800+
56801 #define S8_C(x) x
56802 #define U8_C(x) x ## U
56803 #define S16_C(x) x
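
The two `intoverflow_t` typedefs give callers a type wide enough to hold a product of two size-sized operands: a 128-bit TI-mode integer where `u64` is `unsigned long` (int-l64.h) and `unsigned long long` on 32-bit (int-ll64.h), so allocation-size arithmetic can be checked before being truncated back to `size_t`. A hedged standalone sketch of that check, using GCC's `__int128` as a stand-in for the TI-mode typedef (the helper name is mine):

#include <stddef.h>
#include <stdio.h>

/* stand-in for the kernel typedef; requires a 64-bit GCC/Clang target */
typedef unsigned __int128 intoverflow_t;

/* nonzero if n * size cannot be represented in a size_t */
static int mul_overflows(size_t n, size_t size)
{
        intoverflow_t prod = (intoverflow_t)n * size;

        return prod > (size_t)-1;
}

int main(void)
{
        printf("%d\n", mul_overflows(1024, 1024));      /* 0 */
        printf("%d\n", mul_overflows((size_t)-1, 2));   /* 1 */
        return 0;
}
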
56804diff -urNp linux-2.6.32.45/include/asm-generic/kmap_types.h linux-2.6.32.45/include/asm-generic/kmap_types.h
56805--- linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
56806+++ linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
56807@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
56808 KMAP_D(16) KM_IRQ_PTE,
56809 KMAP_D(17) KM_NMI,
56810 KMAP_D(18) KM_NMI_PTE,
56811-KMAP_D(19) KM_TYPE_NR
56812+KMAP_D(19) KM_CLEARPAGE,
56813+KMAP_D(20) KM_TYPE_NR
56814 };
56815
56816 #undef KMAP_D
56817diff -urNp linux-2.6.32.45/include/asm-generic/pgtable.h linux-2.6.32.45/include/asm-generic/pgtable.h
56818--- linux-2.6.32.45/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
56819+++ linux-2.6.32.45/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
56820@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
56821 unsigned long size);
56822 #endif
56823
56824+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
56825+static inline unsigned long pax_open_kernel(void) { return 0; }
56826+#endif
56827+
56828+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
56829+static inline unsigned long pax_close_kernel(void) { return 0; }
56830+#endif
56831+
56832 #endif /* !__ASSEMBLY__ */
56833
56834 #endif /* _ASM_GENERIC_PGTABLE_H */
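
The generic `pax_open_kernel()`/`pax_close_kernel()` stubs are no-op fallbacks: common code brackets writes to normally read-only kernel data with them, and only an architecture that defines `__HAVE_ARCH_PAX_OPEN_KERNEL` replaces the pair with something that really drops and restores write protection. A compile-and-run sketch of the calling convention (the data and caller are mine, not the patch's):

#include <stdio.h>

/* generic no-op fallbacks, mirroring the hunk above */
#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
static inline unsigned long pax_open_kernel(void)  { return 0; }
#endif

#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
static inline unsigned long pax_close_kernel(void) { return 0; }
#endif

static int protected_value;     /* stands in for normally read-only data */

static void update_protected(int val)
{
        pax_open_kernel();      /* arch hook: temporarily allow the write */
        protected_value = val;
        pax_close_kernel();     /* arch hook: restore protection */
}

int main(void)
{
        update_protected(42);
        printf("%d\n", protected_value);
        return 0;
}
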
56835diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h
56836--- linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
56837+++ linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
56838@@ -1,14 +1,19 @@
56839 #ifndef _PGTABLE_NOPMD_H
56840 #define _PGTABLE_NOPMD_H
56841
56842-#ifndef __ASSEMBLY__
56843-
56844 #include <asm-generic/pgtable-nopud.h>
56845
56846-struct mm_struct;
56847-
56848 #define __PAGETABLE_PMD_FOLDED
56849
56850+#define PMD_SHIFT PUD_SHIFT
56851+#define PTRS_PER_PMD 1
56852+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
56853+#define PMD_MASK (~(PMD_SIZE-1))
56854+
56855+#ifndef __ASSEMBLY__
56856+
56857+struct mm_struct;
56858+
56859 /*
56860 * Having the pmd type consist of a pud gets the size right, and allows
56861 * us to conceptually access the pud entry that this pmd is folded into
56862@@ -16,11 +21,6 @@ struct mm_struct;
56863 */
56864 typedef struct { pud_t pud; } pmd_t;
56865
56866-#define PMD_SHIFT PUD_SHIFT
56867-#define PTRS_PER_PMD 1
56868-#define PMD_SIZE (1UL << PMD_SHIFT)
56869-#define PMD_MASK (~(PMD_SIZE-1))
56870-
56871 /*
56872 * The "pud_xxx()" functions here are trivial for a folded two-level
56873 * setup: the pmd is never bad, and a pmd always exists (as it's folded
56874diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopud.h linux-2.6.32.45/include/asm-generic/pgtable-nopud.h
56875--- linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
56876+++ linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
56877@@ -1,10 +1,15 @@
56878 #ifndef _PGTABLE_NOPUD_H
56879 #define _PGTABLE_NOPUD_H
56880
56881-#ifndef __ASSEMBLY__
56882-
56883 #define __PAGETABLE_PUD_FOLDED
56884
56885+#define PUD_SHIFT PGDIR_SHIFT
56886+#define PTRS_PER_PUD 1
56887+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
56888+#define PUD_MASK (~(PUD_SIZE-1))
56889+
56890+#ifndef __ASSEMBLY__
56891+
56892 /*
56893 * Having the pud type consist of a pgd gets the size right, and allows
56894 * us to conceptually access the pgd entry that this pud is folded into
56895@@ -12,11 +17,6 @@
56896 */
56897 typedef struct { pgd_t pgd; } pud_t;
56898
56899-#define PUD_SHIFT PGDIR_SHIFT
56900-#define PTRS_PER_PUD 1
56901-#define PUD_SIZE (1UL << PUD_SHIFT)
56902-#define PUD_MASK (~(PUD_SIZE-1))
56903-
56904 /*
56905 * The "pgd_xxx()" functions here are trivial for a folded two-level
56906 * setup: the pud is never bad, and a pud always exists (as it's folded
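
Both hunks hoist the PMD_*/PUD_* constants out of the `#ifndef __ASSEMBLY__` block so assembly sources can use them too, which is also why `1UL` becomes `_AC(1,UL)`: that macro pastes the `UL` suffix only when compiling C and drops it for the assembler. A simplified, standalone copy of the `_AC()` idiom from include/linux/const.h:

#include <stdio.h>

#ifdef __ASSEMBLY__
# define _AC(X, Y)      X               /* assembler: bare constant */
#else
# define __AC(X, Y)     (X##Y)
# define _AC(X, Y)      __AC(X, Y)      /* C: constant with type suffix */
#endif

#define PMD_SHIFT       21
#define PMD_SIZE        (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE - 1))

int main(void)
{
        printf("PMD_SIZE = %lu\n", PMD_SIZE);
        printf("PMD_MASK = %#lx\n", PMD_MASK);
        return 0;
}
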
56907diff -urNp linux-2.6.32.45/include/asm-generic/vmlinux.lds.h linux-2.6.32.45/include/asm-generic/vmlinux.lds.h
56908--- linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
56909+++ linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
56910@@ -199,6 +199,7 @@
56911 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
56912 VMLINUX_SYMBOL(__start_rodata) = .; \
56913 *(.rodata) *(.rodata.*) \
56914+ *(.data.read_only) \
56915 *(__vermagic) /* Kernel version magic */ \
56916 *(__markers_strings) /* Markers: strings */ \
56917 *(__tracepoints_strings)/* Tracepoints: strings */ \
56918@@ -656,22 +657,24 @@
56919 * section in the linker script will go there too. @phdr should have
56920 * a leading colon.
56921 *
56922- * Note that this macros defines __per_cpu_load as an absolute symbol.
56923+ * Note that this macros defines per_cpu_load as an absolute symbol.
56924 * If there is no need to put the percpu section at a predetermined
56925 * address, use PERCPU().
56926 */
56927 #define PERCPU_VADDR(vaddr, phdr) \
56928- VMLINUX_SYMBOL(__per_cpu_load) = .; \
56929- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
56930+ per_cpu_load = .; \
56931+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
56932 - LOAD_OFFSET) { \
56933+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
56934 VMLINUX_SYMBOL(__per_cpu_start) = .; \
56935 *(.data.percpu.first) \
56936- *(.data.percpu.page_aligned) \
56937 *(.data.percpu) \
56938+ . = ALIGN(PAGE_SIZE); \
56939+ *(.data.percpu.page_aligned) \
56940 *(.data.percpu.shared_aligned) \
56941 VMLINUX_SYMBOL(__per_cpu_end) = .; \
56942 } phdr \
56943- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
56944+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
56945
56946 /**
56947 * PERCPU - define output section for percpu area, simple version
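
The linker-script change folds a new `.data.read_only` input section into the read-only `.rodata` output section; objects are routed there by an annotation (the `__read_only` fallback added to linux/cache.h further down). A hypothetical userspace illustration of the mechanism, using a plain GCC section attribute as a stand-in for the real annotation:

#include <stdio.h>

/* stand-in for the patch's __read_only: put the object into the input
 * section that the linker-script rule above maps into .rodata */
#define __read_only __attribute__((__section__(".data.read_only")))

static int boot_tunable __read_only = 100;      /* set at init, constant afterwards */

int main(void)
{
        printf("%d\n", boot_tunable);
        return 0;
}
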
56948diff -urNp linux-2.6.32.45/include/drm/drm_crtc_helper.h linux-2.6.32.45/include/drm/drm_crtc_helper.h
56949--- linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-03-27 14:31:47.000000000 -0400
56950+++ linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-08-05 20:33:55.000000000 -0400
56951@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
56952
56953 /* reload the current crtc LUT */
56954 void (*load_lut)(struct drm_crtc *crtc);
56955-};
56956+} __no_const;
56957
56958 struct drm_encoder_helper_funcs {
56959 void (*dpms)(struct drm_encoder *encoder, int mode);
56960@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
56961 struct drm_connector *connector);
56962 /* disable encoder when not in use - more explicit than dpms off */
56963 void (*disable)(struct drm_encoder *encoder);
56964-};
56965+} __no_const;
56966
56967 struct drm_connector_helper_funcs {
56968 int (*get_modes)(struct drm_connector *connector);
56969diff -urNp linux-2.6.32.45/include/drm/drmP.h linux-2.6.32.45/include/drm/drmP.h
56970--- linux-2.6.32.45/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
56971+++ linux-2.6.32.45/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
56972@@ -71,6 +71,7 @@
56973 #include <linux/workqueue.h>
56974 #include <linux/poll.h>
56975 #include <asm/pgalloc.h>
56976+#include <asm/local.h>
56977 #include "drm.h"
56978
56979 #include <linux/idr.h>
56980@@ -814,7 +815,7 @@ struct drm_driver {
56981 void (*vgaarb_irq)(struct drm_device *dev, bool state);
56982
56983 /* Driver private ops for this object */
56984- struct vm_operations_struct *gem_vm_ops;
56985+ const struct vm_operations_struct *gem_vm_ops;
56986
56987 int major;
56988 int minor;
56989@@ -917,7 +918,7 @@ struct drm_device {
56990
56991 /** \name Usage Counters */
56992 /*@{ */
56993- int open_count; /**< Outstanding files open */
56994+ local_t open_count; /**< Outstanding files open */
56995 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
56996 atomic_t vma_count; /**< Outstanding vma areas open */
56997 int buf_use; /**< Buffers in use -- cannot alloc */
56998@@ -928,7 +929,7 @@ struct drm_device {
56999 /*@{ */
57000 unsigned long counters;
57001 enum drm_stat_type types[15];
57002- atomic_t counts[15];
57003+ atomic_unchecked_t counts[15];
57004 /*@} */
57005
57006 struct list_head filelist;
57007@@ -1016,7 +1017,7 @@ struct drm_device {
57008 struct pci_controller *hose;
57009 #endif
57010 struct drm_sg_mem *sg; /**< Scatter gather memory */
57011- unsigned int num_crtcs; /**< Number of CRTCs on this device */
57012+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
57013 void *dev_private; /**< device private data */
57014 void *mm_private;
57015 struct address_space *dev_mapping;
57016@@ -1042,11 +1043,11 @@ struct drm_device {
57017 spinlock_t object_name_lock;
57018 struct idr object_name_idr;
57019 atomic_t object_count;
57020- atomic_t object_memory;
57021+ atomic_unchecked_t object_memory;
57022 atomic_t pin_count;
57023- atomic_t pin_memory;
57024+ atomic_unchecked_t pin_memory;
57025 atomic_t gtt_count;
57026- atomic_t gtt_memory;
57027+ atomic_unchecked_t gtt_memory;
57028 uint32_t gtt_total;
57029 uint32_t invalidate_domains; /* domains pending invalidation */
57030 uint32_t flush_domains; /* domains pending flush */
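
The `atomic_t` to `atomic_unchecked_t` swaps mark counters that are purely statistical (the DRM stats array and the object/pin/GTT memory totals above): under PaX's overflow checking, ordinary atomic ops trap on wraparound, while the `_unchecked` variants behave identically but stay exempt. A purely illustrative userspace sketch of that policy split (these are not the kernel's atomic primitives):

#include <stdio.h>
#include <limits.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

/* checked: refuse to wrap, since a refcount overflow is often exploitable */
static void atomic_add(int i, atomic_t *v)
{
        if (i > 0 && v->counter > INT_MAX - i) {
                fprintf(stderr, "atomic overflow caught\n");
                return;         /* the real check would kill the task */
        }
        v->counter += i;
}

/* unchecked: statistics may wrap harmlessly */
static void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
        v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
}

int main(void)
{
        atomic_t ref = { INT_MAX };
        atomic_unchecked_t stat = { INT_MAX };

        atomic_add(1, &ref);            /* rejected */
        atomic_add_unchecked(1, &stat); /* wraps */
        printf("ref=%d stat=%d\n", ref.counter, stat.counter);
        return 0;
}
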
57031diff -urNp linux-2.6.32.45/include/drm/ttm/ttm_memory.h linux-2.6.32.45/include/drm/ttm/ttm_memory.h
57032--- linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-03-27 14:31:47.000000000 -0400
57033+++ linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-08-05 20:33:55.000000000 -0400
57034@@ -47,7 +47,7 @@
57035
57036 struct ttm_mem_shrink {
57037 int (*do_shrink) (struct ttm_mem_shrink *);
57038-};
57039+} __no_const;
57040
57041 /**
57042 * struct ttm_mem_global - Global memory accounting structure.
57043diff -urNp linux-2.6.32.45/include/linux/a.out.h linux-2.6.32.45/include/linux/a.out.h
57044--- linux-2.6.32.45/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
57045+++ linux-2.6.32.45/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
57046@@ -39,6 +39,14 @@ enum machine_type {
57047 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57048 };
57049
57050+/* Constants for the N_FLAGS field */
57051+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57052+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57053+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57054+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57055+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57056+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57057+
57058 #if !defined (N_MAGIC)
57059 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57060 #endif
57061diff -urNp linux-2.6.32.45/include/linux/atmdev.h linux-2.6.32.45/include/linux/atmdev.h
57062--- linux-2.6.32.45/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
57063+++ linux-2.6.32.45/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
57064@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57065 #endif
57066
57067 struct k_atm_aal_stats {
57068-#define __HANDLE_ITEM(i) atomic_t i
57069+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57070 __AAL_STAT_ITEMS
57071 #undef __HANDLE_ITEM
57072 };
57073diff -urNp linux-2.6.32.45/include/linux/backlight.h linux-2.6.32.45/include/linux/backlight.h
57074--- linux-2.6.32.45/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
57075+++ linux-2.6.32.45/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
57076@@ -36,18 +36,18 @@ struct backlight_device;
57077 struct fb_info;
57078
57079 struct backlight_ops {
57080- unsigned int options;
57081+ const unsigned int options;
57082
57083 #define BL_CORE_SUSPENDRESUME (1 << 0)
57084
57085 /* Notify the backlight driver some property has changed */
57086- int (*update_status)(struct backlight_device *);
57087+ int (* const update_status)(struct backlight_device *);
57088 /* Return the current backlight brightness (accounting for power,
57089 fb_blank etc.) */
57090- int (*get_brightness)(struct backlight_device *);
57091+ int (* const get_brightness)(struct backlight_device *);
57092 /* Check if given framebuffer device is the one bound to this backlight;
57093 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
57094- int (*check_fb)(struct fb_info *);
57095+ int (* const check_fb)(struct fb_info *);
57096 };
57097
57098 /* This structure defines all the properties of a backlight */
57099@@ -86,7 +86,7 @@ struct backlight_device {
57100 registered this device has been unloaded, and if class_get_devdata()
57101 points to something in the body of that driver, it is also invalid. */
57102 struct mutex ops_lock;
57103- struct backlight_ops *ops;
57104+ const struct backlight_ops *ops;
57105
57106 /* The framebuffer notifier block */
57107 struct notifier_block fb_notif;
57108@@ -103,7 +103,7 @@ static inline void backlight_update_stat
57109 }
57110
57111 extern struct backlight_device *backlight_device_register(const char *name,
57112- struct device *dev, void *devdata, struct backlight_ops *ops);
57113+ struct device *dev, void *devdata, const struct backlight_ops *ops);
57114 extern void backlight_device_unregister(struct backlight_device *bd);
57115 extern void backlight_force_update(struct backlight_device *bd,
57116 enum backlight_update_reason reason);
57117diff -urNp linux-2.6.32.45/include/linux/binfmts.h linux-2.6.32.45/include/linux/binfmts.h
57118--- linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
57119+++ linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
57120@@ -83,6 +83,7 @@ struct linux_binfmt {
57121 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57122 int (*load_shlib)(struct file *);
57123 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
57124+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57125 unsigned long min_coredump; /* minimal dump size */
57126 int hasvdso;
57127 };
57128diff -urNp linux-2.6.32.45/include/linux/blkdev.h linux-2.6.32.45/include/linux/blkdev.h
57129--- linux-2.6.32.45/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
57130+++ linux-2.6.32.45/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
57131@@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
57132 #endif /* CONFIG_BLK_DEV_INTEGRITY */
57133
57134 struct block_device_operations {
57135- int (*open) (struct block_device *, fmode_t);
57136- int (*release) (struct gendisk *, fmode_t);
57137- int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57138- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57139- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57140- int (*direct_access) (struct block_device *, sector_t,
57141+ int (* const open) (struct block_device *, fmode_t);
57142+ int (* const release) (struct gendisk *, fmode_t);
57143+ int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57144+ int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57145+ int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57146+ int (* const direct_access) (struct block_device *, sector_t,
57147 void **, unsigned long *);
57148- int (*media_changed) (struct gendisk *);
57149- unsigned long long (*set_capacity) (struct gendisk *,
57150+ int (* const media_changed) (struct gendisk *);
57151+ unsigned long long (* const set_capacity) (struct gendisk *,
57152 unsigned long long);
57153- int (*revalidate_disk) (struct gendisk *);
57154- int (*getgeo)(struct block_device *, struct hd_geometry *);
57155- struct module *owner;
57156+ int (* const revalidate_disk) (struct gendisk *);
57157+ int (*const getgeo)(struct block_device *, struct hd_geometry *);
57158+ struct module * const owner;
57159 };
57160
57161 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57162diff -urNp linux-2.6.32.45/include/linux/blktrace_api.h linux-2.6.32.45/include/linux/blktrace_api.h
57163--- linux-2.6.32.45/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
57164+++ linux-2.6.32.45/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
57165@@ -160,7 +160,7 @@ struct blk_trace {
57166 struct dentry *dir;
57167 struct dentry *dropped_file;
57168 struct dentry *msg_file;
57169- atomic_t dropped;
57170+ atomic_unchecked_t dropped;
57171 };
57172
57173 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57174diff -urNp linux-2.6.32.45/include/linux/byteorder/little_endian.h linux-2.6.32.45/include/linux/byteorder/little_endian.h
57175--- linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
57176+++ linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
57177@@ -42,51 +42,51 @@
57178
57179 static inline __le64 __cpu_to_le64p(const __u64 *p)
57180 {
57181- return (__force __le64)*p;
57182+ return (__force const __le64)*p;
57183 }
57184 static inline __u64 __le64_to_cpup(const __le64 *p)
57185 {
57186- return (__force __u64)*p;
57187+ return (__force const __u64)*p;
57188 }
57189 static inline __le32 __cpu_to_le32p(const __u32 *p)
57190 {
57191- return (__force __le32)*p;
57192+ return (__force const __le32)*p;
57193 }
57194 static inline __u32 __le32_to_cpup(const __le32 *p)
57195 {
57196- return (__force __u32)*p;
57197+ return (__force const __u32)*p;
57198 }
57199 static inline __le16 __cpu_to_le16p(const __u16 *p)
57200 {
57201- return (__force __le16)*p;
57202+ return (__force const __le16)*p;
57203 }
57204 static inline __u16 __le16_to_cpup(const __le16 *p)
57205 {
57206- return (__force __u16)*p;
57207+ return (__force const __u16)*p;
57208 }
57209 static inline __be64 __cpu_to_be64p(const __u64 *p)
57210 {
57211- return (__force __be64)__swab64p(p);
57212+ return (__force const __be64)__swab64p(p);
57213 }
57214 static inline __u64 __be64_to_cpup(const __be64 *p)
57215 {
57216- return __swab64p((__u64 *)p);
57217+ return __swab64p((const __u64 *)p);
57218 }
57219 static inline __be32 __cpu_to_be32p(const __u32 *p)
57220 {
57221- return (__force __be32)__swab32p(p);
57222+ return (__force const __be32)__swab32p(p);
57223 }
57224 static inline __u32 __be32_to_cpup(const __be32 *p)
57225 {
57226- return __swab32p((__u32 *)p);
57227+ return __swab32p((const __u32 *)p);
57228 }
57229 static inline __be16 __cpu_to_be16p(const __u16 *p)
57230 {
57231- return (__force __be16)__swab16p(p);
57232+ return (__force const __be16)__swab16p(p);
57233 }
57234 static inline __u16 __be16_to_cpup(const __be16 *p)
57235 {
57236- return __swab16p((__u16 *)p);
57237+ return __swab16p((const __u16 *)p);
57238 }
57239 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57240 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57241diff -urNp linux-2.6.32.45/include/linux/cache.h linux-2.6.32.45/include/linux/cache.h
57242--- linux-2.6.32.45/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
57243+++ linux-2.6.32.45/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
57244@@ -16,6 +16,10 @@
57245 #define __read_mostly
57246 #endif
57247
57248+#ifndef __read_only
57249+#define __read_only __read_mostly
57250+#endif
57251+
57252 #ifndef ____cacheline_aligned
57253 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57254 #endif
57255diff -urNp linux-2.6.32.45/include/linux/capability.h linux-2.6.32.45/include/linux/capability.h
57256--- linux-2.6.32.45/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
57257+++ linux-2.6.32.45/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
57258@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
57259 (security_real_capable_noaudit((t), (cap)) == 0)
57260
57261 extern int capable(int cap);
57262+int capable_nolog(int cap);
57263
57264 /* audit system wants to get cap info from files as well */
57265 struct dentry;
57266diff -urNp linux-2.6.32.45/include/linux/compiler-gcc4.h linux-2.6.32.45/include/linux/compiler-gcc4.h
57267--- linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
57268+++ linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-08-05 20:33:55.000000000 -0400
57269@@ -36,4 +36,13 @@
57270 the kernel context */
57271 #define __cold __attribute__((__cold__))
57272
57273+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57274+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57275+#define __bos0(ptr) __bos((ptr), 0)
57276+#define __bos1(ptr) __bos((ptr), 1)
57277+
57278+#if __GNUC_MINOR__ >= 5
57279+#define __no_const __attribute__((no_const))
57280+#endif
57281+
57282 #endif
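
`__bos()` is a thin wrapper around GCC's `__builtin_object_size()`, and `__alloc_size()` maps to the real GCC `alloc_size` attribute that tells the compiler which allocator arguments determine an object's size; together they let later bounds checks know, at compile time, how large the buffer behind a pointer is. A small standalone example of the builtin (struct and names are mine, only illustrating what the macros expand to):

#include <stdio.h>

#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
#define __bos0(ptr)     __bos((ptr), 0)
#define __bos1(ptr)     __bos((ptr), 1)

struct packet {
        int  len;
        char payload[32];
};

int main(void)
{
        struct packet p;

        /* type 0: remaining size of the whole enclosing object,
         * type 1: size of the closest surrounding subobject;
         * sizes the compiler cannot determine come back as (size_t)-1 */
        printf("bos0(p.payload) = %zu\n", (size_t)__bos0(p.payload));
        printf("bos1(p.payload) = %zu\n", (size_t)__bos1(p.payload));
        printf("bos0(&p)        = %zu\n", (size_t)__bos0(&p));
        return 0;
}
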
57283diff -urNp linux-2.6.32.45/include/linux/compiler.h linux-2.6.32.45/include/linux/compiler.h
57284--- linux-2.6.32.45/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
57285+++ linux-2.6.32.45/include/linux/compiler.h 2011-08-05 20:33:55.000000000 -0400
57286@@ -247,6 +247,10 @@ void ftrace_likely_update(struct ftrace_
57287 # define __attribute_const__ /* unimplemented */
57288 #endif
57289
57290+#ifndef __no_const
57291+# define __no_const
57292+#endif
57293+
57294 /*
57295 * Tell gcc if a function is cold. The compiler will assume any path
57296 * directly leading to the call is unlikely.
57297@@ -256,6 +260,22 @@ void ftrace_likely_update(struct ftrace_
57298 #define __cold
57299 #endif
57300
57301+#ifndef __alloc_size
57302+#define __alloc_size(...)
57303+#endif
57304+
57305+#ifndef __bos
57306+#define __bos(ptr, arg)
57307+#endif
57308+
57309+#ifndef __bos0
57310+#define __bos0(ptr)
57311+#endif
57312+
57313+#ifndef __bos1
57314+#define __bos1(ptr)
57315+#endif
57316+
57317 /* Simple shorthand for a section definition */
57318 #ifndef __section
57319 # define __section(S) __attribute__ ((__section__(#S)))
57320@@ -278,6 +298,7 @@ void ftrace_likely_update(struct ftrace_
57321 * use is to mediate communication between process-level code and irq/NMI
57322 * handlers, all running on the same CPU.
57323 */
57324-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57325+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57326+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57327
57328 #endif /* __LINUX_COMPILER_H */
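
Constifying ACCESS_ONCE() turns it into a read-only accessor, so the few call sites that genuinely store through it have to switch to the new ACCESS_ONCE_RW(). A minimal userspace demonstration of the split (variable and control flow are mine):

#include <stdio.h>

#define ACCESS_ONCE(x)     (*(volatile const __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x)  (*(volatile __typeof__(x) *)&(x))

static int shared_flag;

int main(void)
{
        ACCESS_ONCE_RW(shared_flag) = 1;        /* write: needs the RW form */
        if (ACCESS_ONCE(shared_flag))           /* read: the const form suffices */
                puts("flag set");
        /* ACCESS_ONCE(shared_flag) = 2;  would not compile: assignment
         * through a pointer to const */
        return 0;
}
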
57329diff -urNp linux-2.6.32.45/include/linux/crypto.h linux-2.6.32.45/include/linux/crypto.h
57330--- linux-2.6.32.45/include/linux/crypto.h 2011-03-27 14:31:47.000000000 -0400
57331+++ linux-2.6.32.45/include/linux/crypto.h 2011-08-05 20:33:55.000000000 -0400
57332@@ -394,7 +394,7 @@ struct cipher_tfm {
57333 const u8 *key, unsigned int keylen);
57334 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57335 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57336-};
57337+} __no_const;
57338
57339 struct hash_tfm {
57340 int (*init)(struct hash_desc *desc);
57341@@ -415,13 +415,13 @@ struct compress_tfm {
57342 int (*cot_decompress)(struct crypto_tfm *tfm,
57343 const u8 *src, unsigned int slen,
57344 u8 *dst, unsigned int *dlen);
57345-};
57346+} __no_const;
57347
57348 struct rng_tfm {
57349 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57350 unsigned int dlen);
57351 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57352-};
57353+} __no_const;
57354
57355 #define crt_ablkcipher crt_u.ablkcipher
57356 #define crt_aead crt_u.aead
57357diff -urNp linux-2.6.32.45/include/linux/dcache.h linux-2.6.32.45/include/linux/dcache.h
57358--- linux-2.6.32.45/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
57359+++ linux-2.6.32.45/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
57360@@ -119,6 +119,8 @@ struct dentry {
57361 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
57362 };
57363
57364+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
57365+
57366 /*
57367 * dentry->d_lock spinlock nesting subclasses:
57368 *
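
The new DNAME_INLINE_LEN computes how many bytes of inline name storage a dentry really has: everything from the start of d_iname to the end of the structure, including tail padding, rather than the conservative DNAME_INLINE_LEN_MIN. The same sizeof/offsetof pattern on a toy structure (struct layout and sizes are made up):

#include <stdio.h>
#include <stddef.h>

struct toy_dentry {
        void *d_parent;
        unsigned int d_flags;
        unsigned char d_iname[36];      /* "small names", as in struct dentry */
};

/* bytes usable for the inline name = struct size minus offset of d_iname */
#define DNAME_INLINE_LEN \
        (sizeof(struct toy_dentry) - offsetof(struct toy_dentry, d_iname))

int main(void)
{
        printf("inline name bytes: %zu\n", (size_t)DNAME_INLINE_LEN);
        return 0;
}
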
57369diff -urNp linux-2.6.32.45/include/linux/decompress/mm.h linux-2.6.32.45/include/linux/decompress/mm.h
57370--- linux-2.6.32.45/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
57371+++ linux-2.6.32.45/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
57372@@ -78,7 +78,7 @@ static void free(void *where)
57373 * warnings when not needed (indeed large_malloc / large_free are not
57374 * needed by inflate */
57375
57376-#define malloc(a) kmalloc(a, GFP_KERNEL)
57377+#define malloc(a) kmalloc((a), GFP_KERNEL)
57378 #define free(a) kfree(a)
57379
57380 #define large_malloc(a) vmalloc(a)
57381diff -urNp linux-2.6.32.45/include/linux/dma-mapping.h linux-2.6.32.45/include/linux/dma-mapping.h
57382--- linux-2.6.32.45/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
57383+++ linux-2.6.32.45/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
57384@@ -16,50 +16,50 @@ enum dma_data_direction {
57385 };
57386
57387 struct dma_map_ops {
57388- void* (*alloc_coherent)(struct device *dev, size_t size,
57389+ void* (* const alloc_coherent)(struct device *dev, size_t size,
57390 dma_addr_t *dma_handle, gfp_t gfp);
57391- void (*free_coherent)(struct device *dev, size_t size,
57392+ void (* const free_coherent)(struct device *dev, size_t size,
57393 void *vaddr, dma_addr_t dma_handle);
57394- dma_addr_t (*map_page)(struct device *dev, struct page *page,
57395+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
57396 unsigned long offset, size_t size,
57397 enum dma_data_direction dir,
57398 struct dma_attrs *attrs);
57399- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
57400+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
57401 size_t size, enum dma_data_direction dir,
57402 struct dma_attrs *attrs);
57403- int (*map_sg)(struct device *dev, struct scatterlist *sg,
57404+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
57405 int nents, enum dma_data_direction dir,
57406 struct dma_attrs *attrs);
57407- void (*unmap_sg)(struct device *dev,
57408+ void (* const unmap_sg)(struct device *dev,
57409 struct scatterlist *sg, int nents,
57410 enum dma_data_direction dir,
57411 struct dma_attrs *attrs);
57412- void (*sync_single_for_cpu)(struct device *dev,
57413+ void (* const sync_single_for_cpu)(struct device *dev,
57414 dma_addr_t dma_handle, size_t size,
57415 enum dma_data_direction dir);
57416- void (*sync_single_for_device)(struct device *dev,
57417+ void (* const sync_single_for_device)(struct device *dev,
57418 dma_addr_t dma_handle, size_t size,
57419 enum dma_data_direction dir);
57420- void (*sync_single_range_for_cpu)(struct device *dev,
57421+ void (* const sync_single_range_for_cpu)(struct device *dev,
57422 dma_addr_t dma_handle,
57423 unsigned long offset,
57424 size_t size,
57425 enum dma_data_direction dir);
57426- void (*sync_single_range_for_device)(struct device *dev,
57427+ void (* const sync_single_range_for_device)(struct device *dev,
57428 dma_addr_t dma_handle,
57429 unsigned long offset,
57430 size_t size,
57431 enum dma_data_direction dir);
57432- void (*sync_sg_for_cpu)(struct device *dev,
57433+ void (* const sync_sg_for_cpu)(struct device *dev,
57434 struct scatterlist *sg, int nents,
57435 enum dma_data_direction dir);
57436- void (*sync_sg_for_device)(struct device *dev,
57437+ void (* const sync_sg_for_device)(struct device *dev,
57438 struct scatterlist *sg, int nents,
57439 enum dma_data_direction dir);
57440- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
57441- int (*dma_supported)(struct device *dev, u64 mask);
57442+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
57443+ int (* const dma_supported)(struct device *dev, u64 mask);
57444 int (*set_dma_mask)(struct device *dev, u64 mask);
57445- int is_phys;
57446+ const int is_phys;
57447 };
57448
57449 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57450diff -urNp linux-2.6.32.45/include/linux/dst.h linux-2.6.32.45/include/linux/dst.h
57451--- linux-2.6.32.45/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
57452+++ linux-2.6.32.45/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
57453@@ -380,7 +380,7 @@ struct dst_node
57454 struct thread_pool *pool;
57455
57456 /* Transaction IDs live here */
57457- atomic_long_t gen;
57458+ atomic_long_unchecked_t gen;
57459
57460 /*
57461 * How frequently and how many times transaction
57462diff -urNp linux-2.6.32.45/include/linux/elf.h linux-2.6.32.45/include/linux/elf.h
57463--- linux-2.6.32.45/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
57464+++ linux-2.6.32.45/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
57465@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57466 #define PT_GNU_EH_FRAME 0x6474e550
57467
57468 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57469+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57470+
57471+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57472+
57473+/* Constants for the e_flags field */
57474+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57475+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57476+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57477+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57478+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57479+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57480
57481 /* These constants define the different elf file types */
57482 #define ET_NONE 0
57483@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
57484 #define DT_DEBUG 21
57485 #define DT_TEXTREL 22
57486 #define DT_JMPREL 23
57487+#define DT_FLAGS 30
57488+ #define DF_TEXTREL 0x00000004
57489 #define DT_ENCODING 32
57490 #define OLD_DT_LOOS 0x60000000
57491 #define DT_LOOS 0x6000000d
57492@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
57493 #define PF_W 0x2
57494 #define PF_X 0x1
57495
57496+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57497+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57498+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57499+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57500+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57501+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57502+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57503+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57504+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57505+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57506+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57507+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57508+
57509 typedef struct elf32_phdr{
57510 Elf32_Word p_type;
57511 Elf32_Off p_offset;
57512@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
57513 #define EI_OSABI 7
57514 #define EI_PAD 8
57515
57516+#define EI_PAX 14
57517+
57518 #define ELFMAG0 0x7f /* EI_MAG */
57519 #define ELFMAG1 'E'
57520 #define ELFMAG2 'L'
57521@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
57522 #define elf_phdr elf32_phdr
57523 #define elf_note elf32_note
57524 #define elf_addr_t Elf32_Off
57525+#define elf_dyn Elf32_Dyn
57526
57527 #else
57528
57529@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
57530 #define elf_phdr elf64_phdr
57531 #define elf_note elf64_note
57532 #define elf_addr_t Elf64_Off
57533+#define elf_dyn Elf64_Dyn
57534
57535 #endif
57536
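
PT_PAX_FLAGS adds a dedicated program header whose p_flags word carries the PF_PAGEEXEC/PF_NOPAGEEXEC and related bits defined above, so per-binary PaX policy travels inside the ELF file itself. A hedged sketch of how a loader could scan the program header table for it; the function and sample data are mine, this is not the patch's binfmt_elf code:

#include <elf.h>
#include <stdio.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS    (PT_LOOS + 0x5041580)
#endif
#define PF_MPROTECT     (1U << 8)       /* Enable MPROTECT */

/* return the p_flags word of the PT_PAX_FLAGS header, or 0 if absent */
static unsigned long find_pax_flags(const Elf64_Phdr *phdr, unsigned int phnum)
{
        unsigned int i;

        for (i = 0; i < phnum; i++)
                if (phdr[i].p_type == PT_PAX_FLAGS)
                        return phdr[i].p_flags;
        return 0;
}

int main(void)
{
        Elf64_Phdr phdrs[2] = {
                { .p_type = PT_LOAD },
                { .p_type = PT_PAX_FLAGS, .p_flags = PF_MPROTECT },
        };
        unsigned long flags = find_pax_flags(phdrs, 2);

        printf("MPROTECT %s\n", (flags & PF_MPROTECT) ? "enabled" : "default");
        return 0;
}
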
57537diff -urNp linux-2.6.32.45/include/linux/fscache-cache.h linux-2.6.32.45/include/linux/fscache-cache.h
57538--- linux-2.6.32.45/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
57539+++ linux-2.6.32.45/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
57540@@ -116,7 +116,7 @@ struct fscache_operation {
57541 #endif
57542 };
57543
57544-extern atomic_t fscache_op_debug_id;
57545+extern atomic_unchecked_t fscache_op_debug_id;
57546 extern const struct slow_work_ops fscache_op_slow_work_ops;
57547
57548 extern void fscache_enqueue_operation(struct fscache_operation *);
57549@@ -134,7 +134,7 @@ static inline void fscache_operation_ini
57550 fscache_operation_release_t release)
57551 {
57552 atomic_set(&op->usage, 1);
57553- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57554+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57555 op->release = release;
57556 INIT_LIST_HEAD(&op->pend_link);
57557 fscache_set_op_state(op, "Init");
57558diff -urNp linux-2.6.32.45/include/linux/fs.h linux-2.6.32.45/include/linux/fs.h
57559--- linux-2.6.32.45/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
57560+++ linux-2.6.32.45/include/linux/fs.h 2011-08-23 21:22:32.000000000 -0400
57561@@ -90,6 +90,11 @@ struct inodes_stat_t {
57562 /* Expect random access pattern */
57563 #define FMODE_RANDOM ((__force fmode_t)4096)
57564
57565+/* Hack for grsec so as not to require read permission simply to execute
57566+ * a binary
57567+ */
57568+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
57569+
57570 /*
57571 * The below are the various read and write types that we support. Some of
57572 * them include behavioral modifiers that send information down to the
57573@@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
57574 unsigned long, unsigned long);
57575
57576 struct address_space_operations {
57577- int (*writepage)(struct page *page, struct writeback_control *wbc);
57578- int (*readpage)(struct file *, struct page *);
57579- void (*sync_page)(struct page *);
57580+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
57581+ int (* const readpage)(struct file *, struct page *);
57582+ void (* const sync_page)(struct page *);
57583
57584 /* Write back some dirty pages from this mapping. */
57585- int (*writepages)(struct address_space *, struct writeback_control *);
57586+ int (* const writepages)(struct address_space *, struct writeback_control *);
57587
57588 /* Set a page dirty. Return true if this dirtied it */
57589- int (*set_page_dirty)(struct page *page);
57590+ int (* const set_page_dirty)(struct page *page);
57591
57592- int (*readpages)(struct file *filp, struct address_space *mapping,
57593+ int (* const readpages)(struct file *filp, struct address_space *mapping,
57594 struct list_head *pages, unsigned nr_pages);
57595
57596- int (*write_begin)(struct file *, struct address_space *mapping,
57597+ int (* const write_begin)(struct file *, struct address_space *mapping,
57598 loff_t pos, unsigned len, unsigned flags,
57599 struct page **pagep, void **fsdata);
57600- int (*write_end)(struct file *, struct address_space *mapping,
57601+ int (* const write_end)(struct file *, struct address_space *mapping,
57602 loff_t pos, unsigned len, unsigned copied,
57603 struct page *page, void *fsdata);
57604
57605 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
57606- sector_t (*bmap)(struct address_space *, sector_t);
57607- void (*invalidatepage) (struct page *, unsigned long);
57608- int (*releasepage) (struct page *, gfp_t);
57609- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
57610+ sector_t (* const bmap)(struct address_space *, sector_t);
57611+ void (* const invalidatepage) (struct page *, unsigned long);
57612+ int (* const releasepage) (struct page *, gfp_t);
57613+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
57614 loff_t offset, unsigned long nr_segs);
57615- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
57616+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
57617 void **, unsigned long *);
57618 /* migrate the contents of a page to the specified target */
57619- int (*migratepage) (struct address_space *,
57620+ int (* const migratepage) (struct address_space *,
57621 struct page *, struct page *);
57622- int (*launder_page) (struct page *);
57623- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
57624+ int (* const launder_page) (struct page *);
57625+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
57626 unsigned long);
57627- int (*error_remove_page)(struct address_space *, struct page *);
57628+ int (* const error_remove_page)(struct address_space *, struct page *);
57629 };
57630
57631 /*
57632@@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
57633 typedef struct files_struct *fl_owner_t;
57634
57635 struct file_lock_operations {
57636- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57637- void (*fl_release_private)(struct file_lock *);
57638+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57639+ void (* const fl_release_private)(struct file_lock *);
57640 };
57641
57642 struct lock_manager_operations {
57643- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
57644- void (*fl_notify)(struct file_lock *); /* unblock callback */
57645- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
57646- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57647- void (*fl_release_private)(struct file_lock *);
57648- void (*fl_break)(struct file_lock *);
57649- int (*fl_mylease)(struct file_lock *, struct file_lock *);
57650- int (*fl_change)(struct file_lock **, int);
57651+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
57652+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
57653+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
57654+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57655+ void (* const fl_release_private)(struct file_lock *);
57656+ void (* const fl_break)(struct file_lock *);
57657+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
57658+ int (* const fl_change)(struct file_lock **, int);
57659 };
57660
57661 struct lock_manager {
57662@@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
57663 unsigned int fi_flags; /* Flags as passed from user */
57664 unsigned int fi_extents_mapped; /* Number of mapped extents */
57665 unsigned int fi_extents_max; /* Size of fiemap_extent array */
57666- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
57667+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
57668 * array */
57669 };
57670 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
57671@@ -1486,7 +1491,7 @@ struct block_device_operations;
57672 * can be called without the big kernel lock held in all filesystems.
57673 */
57674 struct file_operations {
57675- struct module *owner;
57676+ struct module * const owner;
57677 loff_t (*llseek) (struct file *, loff_t, int);
57678 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
57679 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
57680@@ -1513,6 +1518,7 @@ struct file_operations {
57681 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
57682 int (*setlease)(struct file *, long, struct file_lock **);
57683 };
57684+typedef struct file_operations __no_const file_operations_no_const;
57685
57686 struct inode_operations {
57687 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
57688@@ -1559,30 +1565,30 @@ extern ssize_t vfs_writev(struct file *,
57689 unsigned long, loff_t *);
57690
57691 struct super_operations {
57692- struct inode *(*alloc_inode)(struct super_block *sb);
57693- void (*destroy_inode)(struct inode *);
57694+ struct inode *(* const alloc_inode)(struct super_block *sb);
57695+ void (* const destroy_inode)(struct inode *);
57696
57697- void (*dirty_inode) (struct inode *);
57698- int (*write_inode) (struct inode *, int);
57699- void (*drop_inode) (struct inode *);
57700- void (*delete_inode) (struct inode *);
57701- void (*put_super) (struct super_block *);
57702- void (*write_super) (struct super_block *);
57703- int (*sync_fs)(struct super_block *sb, int wait);
57704- int (*freeze_fs) (struct super_block *);
57705- int (*unfreeze_fs) (struct super_block *);
57706- int (*statfs) (struct dentry *, struct kstatfs *);
57707- int (*remount_fs) (struct super_block *, int *, char *);
57708- void (*clear_inode) (struct inode *);
57709- void (*umount_begin) (struct super_block *);
57710+ void (* const dirty_inode) (struct inode *);
57711+ int (* const write_inode) (struct inode *, int);
57712+ void (* const drop_inode) (struct inode *);
57713+ void (* const delete_inode) (struct inode *);
57714+ void (* const put_super) (struct super_block *);
57715+ void (* const write_super) (struct super_block *);
57716+ int (* const sync_fs)(struct super_block *sb, int wait);
57717+ int (* const freeze_fs) (struct super_block *);
57718+ int (* const unfreeze_fs) (struct super_block *);
57719+ int (* const statfs) (struct dentry *, struct kstatfs *);
57720+ int (* const remount_fs) (struct super_block *, int *, char *);
57721+ void (* const clear_inode) (struct inode *);
57722+ void (* const umount_begin) (struct super_block *);
57723
57724- int (*show_options)(struct seq_file *, struct vfsmount *);
57725- int (*show_stats)(struct seq_file *, struct vfsmount *);
57726+ int (* const show_options)(struct seq_file *, struct vfsmount *);
57727+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
57728 #ifdef CONFIG_QUOTA
57729- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
57730- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
57731+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
57732+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
57733 #endif
57734- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
57735+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
57736 };
57737
57738 /*
57739diff -urNp linux-2.6.32.45/include/linux/fs_struct.h linux-2.6.32.45/include/linux/fs_struct.h
57740--- linux-2.6.32.45/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
57741+++ linux-2.6.32.45/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
57742@@ -4,7 +4,7 @@
57743 #include <linux/path.h>
57744
57745 struct fs_struct {
57746- int users;
57747+ atomic_t users;
57748 rwlock_t lock;
57749 int umask;
57750 int in_exec;
57751diff -urNp linux-2.6.32.45/include/linux/ftrace_event.h linux-2.6.32.45/include/linux/ftrace_event.h
57752--- linux-2.6.32.45/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
57753+++ linux-2.6.32.45/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
57754@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
57755 int filter_type);
57756 extern int trace_define_common_fields(struct ftrace_event_call *call);
57757
57758-#define is_signed_type(type) (((type)(-1)) < 0)
57759+#define is_signed_type(type) (((type)(-1)) < (type)1)
57760
57761 int trace_set_clr_event(const char *system, const char *event, int set);
57762
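
The `is_signed_type()` change compares against `(type)1` instead of the literal `0`; for the integer types the tracer applies it to, the result is unchanged, and the rewritten form also avoids the "comparison of unsigned expression < 0 is always false" warning that some gcc versions emit. A quick userspace check of the rewritten macro (example mine):

#include <stdio.h>
#include <stdbool.h>

#define is_signed_type(type)  (((type)(-1)) < (type)1)

int main(void)
{
        printf("int:          %d\n", is_signed_type(int));           /* 1 */
        printf("unsigned int: %d\n", is_signed_type(unsigned int));  /* 0 */
        printf("char:         %d\n", is_signed_type(char));          /* implementation-defined */
        printf("bool:         %d\n", is_signed_type(bool));          /* 0 */
        return 0;
}
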
57763diff -urNp linux-2.6.32.45/include/linux/genhd.h linux-2.6.32.45/include/linux/genhd.h
57764--- linux-2.6.32.45/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
57765+++ linux-2.6.32.45/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
57766@@ -161,7 +161,7 @@ struct gendisk {
57767
57768 struct timer_rand_state *random;
57769
57770- atomic_t sync_io; /* RAID */
57771+ atomic_unchecked_t sync_io; /* RAID */
57772 struct work_struct async_notify;
57773 #ifdef CONFIG_BLK_DEV_INTEGRITY
57774 struct blk_integrity *integrity;
57775diff -urNp linux-2.6.32.45/include/linux/gracl.h linux-2.6.32.45/include/linux/gracl.h
57776--- linux-2.6.32.45/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
57777+++ linux-2.6.32.45/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
57778@@ -0,0 +1,317 @@
57779+#ifndef GR_ACL_H
57780+#define GR_ACL_H
57781+
57782+#include <linux/grdefs.h>
57783+#include <linux/resource.h>
57784+#include <linux/capability.h>
57785+#include <linux/dcache.h>
57786+#include <asm/resource.h>
57787+
57788+/* Major status information */
57789+
57790+#define GR_VERSION "grsecurity 2.2.2"
57791+#define GRSECURITY_VERSION 0x2202
57792+
57793+enum {
57794+ GR_SHUTDOWN = 0,
57795+ GR_ENABLE = 1,
57796+ GR_SPROLE = 2,
57797+ GR_RELOAD = 3,
57798+ GR_SEGVMOD = 4,
57799+ GR_STATUS = 5,
57800+ GR_UNSPROLE = 6,
57801+ GR_PASSSET = 7,
57802+ GR_SPROLEPAM = 8,
57803+};
57804+
57805+/* Password setup definitions
57806+ * kernel/grhash.c */
57807+enum {
57808+ GR_PW_LEN = 128,
57809+ GR_SALT_LEN = 16,
57810+ GR_SHA_LEN = 32,
57811+};
57812+
57813+enum {
57814+ GR_SPROLE_LEN = 64,
57815+};
57816+
57817+enum {
57818+ GR_NO_GLOB = 0,
57819+ GR_REG_GLOB,
57820+ GR_CREATE_GLOB
57821+};
57822+
57823+#define GR_NLIMITS 32
57824+
57825+/* Begin Data Structures */
57826+
57827+struct sprole_pw {
57828+ unsigned char *rolename;
57829+ unsigned char salt[GR_SALT_LEN];
57830+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
57831+};
57832+
57833+struct name_entry {
57834+ __u32 key;
57835+ ino_t inode;
57836+ dev_t device;
57837+ char *name;
57838+ __u16 len;
57839+ __u8 deleted;
57840+ struct name_entry *prev;
57841+ struct name_entry *next;
57842+};
57843+
57844+struct inodev_entry {
57845+ struct name_entry *nentry;
57846+ struct inodev_entry *prev;
57847+ struct inodev_entry *next;
57848+};
57849+
57850+struct acl_role_db {
57851+ struct acl_role_label **r_hash;
57852+ __u32 r_size;
57853+};
57854+
57855+struct inodev_db {
57856+ struct inodev_entry **i_hash;
57857+ __u32 i_size;
57858+};
57859+
57860+struct name_db {
57861+ struct name_entry **n_hash;
57862+ __u32 n_size;
57863+};
57864+
57865+struct crash_uid {
57866+ uid_t uid;
57867+ unsigned long expires;
57868+};
57869+
57870+struct gr_hash_struct {
57871+ void **table;
57872+ void **nametable;
57873+ void *first;
57874+ __u32 table_size;
57875+ __u32 used_size;
57876+ int type;
57877+};
57878+
57879+/* Userspace Grsecurity ACL data structures */
57880+
57881+struct acl_subject_label {
57882+ char *filename;
57883+ ino_t inode;
57884+ dev_t device;
57885+ __u32 mode;
57886+ kernel_cap_t cap_mask;
57887+ kernel_cap_t cap_lower;
57888+ kernel_cap_t cap_invert_audit;
57889+
57890+ struct rlimit res[GR_NLIMITS];
57891+ __u32 resmask;
57892+
57893+ __u8 user_trans_type;
57894+ __u8 group_trans_type;
57895+ uid_t *user_transitions;
57896+ gid_t *group_transitions;
57897+ __u16 user_trans_num;
57898+ __u16 group_trans_num;
57899+
57900+ __u32 sock_families[2];
57901+ __u32 ip_proto[8];
57902+ __u32 ip_type;
57903+ struct acl_ip_label **ips;
57904+ __u32 ip_num;
57905+ __u32 inaddr_any_override;
57906+
57907+ __u32 crashes;
57908+ unsigned long expires;
57909+
57910+ struct acl_subject_label *parent_subject;
57911+ struct gr_hash_struct *hash;
57912+ struct acl_subject_label *prev;
57913+ struct acl_subject_label *next;
57914+
57915+ struct acl_object_label **obj_hash;
57916+ __u32 obj_hash_size;
57917+ __u16 pax_flags;
57918+};
57919+
57920+struct role_allowed_ip {
57921+ __u32 addr;
57922+ __u32 netmask;
57923+
57924+ struct role_allowed_ip *prev;
57925+ struct role_allowed_ip *next;
57926+};
57927+
57928+struct role_transition {
57929+ char *rolename;
57930+
57931+ struct role_transition *prev;
57932+ struct role_transition *next;
57933+};
57934+
57935+struct acl_role_label {
57936+ char *rolename;
57937+ uid_t uidgid;
57938+ __u16 roletype;
57939+
57940+ __u16 auth_attempts;
57941+ unsigned long expires;
57942+
57943+ struct acl_subject_label *root_label;
57944+ struct gr_hash_struct *hash;
57945+
57946+ struct acl_role_label *prev;
57947+ struct acl_role_label *next;
57948+
57949+ struct role_transition *transitions;
57950+ struct role_allowed_ip *allowed_ips;
57951+ uid_t *domain_children;
57952+ __u16 domain_child_num;
57953+
57954+ struct acl_subject_label **subj_hash;
57955+ __u32 subj_hash_size;
57956+};
57957+
57958+struct user_acl_role_db {
57959+ struct acl_role_label **r_table;
57960+ __u32 num_pointers; /* Number of allocations to track */
57961+ __u32 num_roles; /* Number of roles */
57962+ __u32 num_domain_children; /* Number of domain children */
57963+ __u32 num_subjects; /* Number of subjects */
57964+ __u32 num_objects; /* Number of objects */
57965+};
57966+
57967+struct acl_object_label {
57968+ char *filename;
57969+ ino_t inode;
57970+ dev_t device;
57971+ __u32 mode;
57972+
57973+ struct acl_subject_label *nested;
57974+ struct acl_object_label *globbed;
57975+
57976+ /* next two structures not used */
57977+
57978+ struct acl_object_label *prev;
57979+ struct acl_object_label *next;
57980+};
57981+
57982+struct acl_ip_label {
57983+ char *iface;
57984+ __u32 addr;
57985+ __u32 netmask;
57986+ __u16 low, high;
57987+ __u8 mode;
57988+ __u32 type;
57989+ __u32 proto[8];
57990+
57991+ /* next two structures not used */
57992+
57993+ struct acl_ip_label *prev;
57994+ struct acl_ip_label *next;
57995+};
57996+
57997+struct gr_arg {
57998+ struct user_acl_role_db role_db;
57999+ unsigned char pw[GR_PW_LEN];
58000+ unsigned char salt[GR_SALT_LEN];
58001+ unsigned char sum[GR_SHA_LEN];
58002+ unsigned char sp_role[GR_SPROLE_LEN];
58003+ struct sprole_pw *sprole_pws;
58004+ dev_t segv_device;
58005+ ino_t segv_inode;
58006+ uid_t segv_uid;
58007+ __u16 num_sprole_pws;
58008+ __u16 mode;
58009+};
58010+
58011+struct gr_arg_wrapper {
58012+ struct gr_arg *arg;
58013+ __u32 version;
58014+ __u32 size;
58015+};
58016+
58017+struct subject_map {
58018+ struct acl_subject_label *user;
58019+ struct acl_subject_label *kernel;
58020+ struct subject_map *prev;
58021+ struct subject_map *next;
58022+};
58023+
58024+struct acl_subj_map_db {
58025+ struct subject_map **s_hash;
58026+ __u32 s_size;
58027+};
58028+
58029+/* End Data Structures Section */
58030+
58031+/* Hash functions generated by empirical testing by Brad Spengler
58032+ Makes good use of the low bits of the inode. Generally 0-1 times
58033+ in loop for successful match. 0-3 for unsuccessful match.
58034+ Shift/add algorithm with modulus of table size and an XOR*/
58035+
58036+static __inline__ unsigned int
58037+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58038+{
58039+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58040+}
58041+
58042+ static __inline__ unsigned int
58043+shash(const struct acl_subject_label *userp, const unsigned int sz)
58044+{
58045+ return ((const unsigned long)userp % sz);
58046+}
58047+
58048+static __inline__ unsigned int
58049+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58050+{
58051+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58052+}
58053+
58054+static __inline__ unsigned int
58055+nhash(const char *name, const __u16 len, const unsigned int sz)
58056+{
58057+ return full_name_hash((const unsigned char *)name, len) % sz;
58058+}
58059+
58060+#define FOR_EACH_ROLE_START(role) \
58061+ role = role_list; \
58062+ while (role) {
58063+
58064+#define FOR_EACH_ROLE_END(role) \
58065+ role = role->prev; \
58066+ }
58067+
58068+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58069+ subj = NULL; \
58070+ iter = 0; \
58071+ while (iter < role->subj_hash_size) { \
58072+ if (subj == NULL) \
58073+ subj = role->subj_hash[iter]; \
58074+ if (subj == NULL) { \
58075+ iter++; \
58076+ continue; \
58077+ }
58078+
58079+#define FOR_EACH_SUBJECT_END(subj,iter) \
58080+ subj = subj->next; \
58081+ if (subj == NULL) \
58082+ iter++; \
58083+ }
58084+
58085+
58086+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58087+ subj = role->hash->first; \
58088+ while (subj != NULL) {
58089+
58090+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58091+ subj = subj->next; \
58092+ }
58093+
58094+#endif
58095+
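
The inline hash helpers at the end of gracl.h drive the ACL lookups: fhash() turns an (inode, device) pair into a bucket index, and the FOR_EACH_* macros then walk the short chain in that bucket. A standalone copy of just the hash, exercised from userspace (the table size and sample values are mine):

#include <stdio.h>
#include <sys/types.h>

/* same shift/add/xor-then-modulus scheme as fhash() in gracl.h above */
static unsigned int
fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
{
        return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
        const unsigned int table_size = 256;    /* stands in for obj_hash_size */

        /* print the bucket index for two example (inode, device) pairs */
        printf("%u\n", fhash(4242, 0x801, table_size));
        printf("%u\n", fhash(4243, 0x801, table_size));
        return 0;
}
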
58096diff -urNp linux-2.6.32.45/include/linux/gralloc.h linux-2.6.32.45/include/linux/gralloc.h
58097--- linux-2.6.32.45/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58098+++ linux-2.6.32.45/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
58099@@ -0,0 +1,9 @@
58100+#ifndef __GRALLOC_H
58101+#define __GRALLOC_H
58102+
58103+void acl_free_all(void);
58104+int acl_alloc_stack_init(unsigned long size);
58105+void *acl_alloc(unsigned long len);
58106+void *acl_alloc_num(unsigned long num, unsigned long len);
58107+
58108+#endif
58109diff -urNp linux-2.6.32.45/include/linux/grdefs.h linux-2.6.32.45/include/linux/grdefs.h
58110--- linux-2.6.32.45/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58111+++ linux-2.6.32.45/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
58112@@ -0,0 +1,140 @@
58113+#ifndef GRDEFS_H
58114+#define GRDEFS_H
58115+
58116+/* Begin grsecurity status declarations */
58117+
58118+enum {
58119+ GR_READY = 0x01,
58120+ GR_STATUS_INIT = 0x00 // disabled state
58121+};
58122+
58123+/* Begin ACL declarations */
58124+
58125+/* Role flags */
58126+
58127+enum {
58128+ GR_ROLE_USER = 0x0001,
58129+ GR_ROLE_GROUP = 0x0002,
58130+ GR_ROLE_DEFAULT = 0x0004,
58131+ GR_ROLE_SPECIAL = 0x0008,
58132+ GR_ROLE_AUTH = 0x0010,
58133+ GR_ROLE_NOPW = 0x0020,
58134+ GR_ROLE_GOD = 0x0040,
58135+ GR_ROLE_LEARN = 0x0080,
58136+ GR_ROLE_TPE = 0x0100,
58137+ GR_ROLE_DOMAIN = 0x0200,
58138+ GR_ROLE_PAM = 0x0400,
58139+ GR_ROLE_PERSIST = 0x800
58140+};
58141+
58142+/* ACL Subject and Object mode flags */
58143+enum {
58144+ GR_DELETED = 0x80000000
58145+};
58146+
58147+/* ACL Object-only mode flags */
58148+enum {
58149+ GR_READ = 0x00000001,
58150+ GR_APPEND = 0x00000002,
58151+ GR_WRITE = 0x00000004,
58152+ GR_EXEC = 0x00000008,
58153+ GR_FIND = 0x00000010,
58154+ GR_INHERIT = 0x00000020,
58155+ GR_SETID = 0x00000040,
58156+ GR_CREATE = 0x00000080,
58157+ GR_DELETE = 0x00000100,
58158+ GR_LINK = 0x00000200,
58159+ GR_AUDIT_READ = 0x00000400,
58160+ GR_AUDIT_APPEND = 0x00000800,
58161+ GR_AUDIT_WRITE = 0x00001000,
58162+ GR_AUDIT_EXEC = 0x00002000,
58163+ GR_AUDIT_FIND = 0x00004000,
58164+ GR_AUDIT_INHERIT= 0x00008000,
58165+ GR_AUDIT_SETID = 0x00010000,
58166+ GR_AUDIT_CREATE = 0x00020000,
58167+ GR_AUDIT_DELETE = 0x00040000,
58168+ GR_AUDIT_LINK = 0x00080000,
58169+ GR_PTRACERD = 0x00100000,
58170+ GR_NOPTRACE = 0x00200000,
58171+ GR_SUPPRESS = 0x00400000,
58172+ GR_NOLEARN = 0x00800000,
58173+ GR_INIT_TRANSFER= 0x01000000
58174+};
58175+
58176+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58177+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58178+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58179+
58180+/* ACL subject-only mode flags */
58181+enum {
58182+ GR_KILL = 0x00000001,
58183+ GR_VIEW = 0x00000002,
58184+ GR_PROTECTED = 0x00000004,
58185+ GR_LEARN = 0x00000008,
58186+ GR_OVERRIDE = 0x00000010,
58187+ /* just a placeholder, this mode is only used in userspace */
58188+ GR_DUMMY = 0x00000020,
58189+ GR_PROTSHM = 0x00000040,
58190+ GR_KILLPROC = 0x00000080,
58191+ GR_KILLIPPROC = 0x00000100,
58192+ /* just a placeholder, this mode is only used in userspace */
58193+ GR_NOTROJAN = 0x00000200,
58194+ GR_PROTPROCFD = 0x00000400,
58195+ GR_PROCACCT = 0x00000800,
58196+ GR_RELAXPTRACE = 0x00001000,
58197+ GR_NESTED = 0x00002000,
58198+ GR_INHERITLEARN = 0x00004000,
58199+ GR_PROCFIND = 0x00008000,
58200+ GR_POVERRIDE = 0x00010000,
58201+ GR_KERNELAUTH = 0x00020000,
58202+ GR_ATSECURE = 0x00040000,
58203+ GR_SHMEXEC = 0x00080000
58204+};
58205+
58206+enum {
58207+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58208+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58209+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58210+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58211+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58212+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58213+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58214+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58215+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58216+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58217+};
58218+
58219+enum {
58220+ GR_ID_USER = 0x01,
58221+ GR_ID_GROUP = 0x02,
58222+};
58223+
58224+enum {
58225+ GR_ID_ALLOW = 0x01,
58226+ GR_ID_DENY = 0x02,
58227+};
58228+
58229+#define GR_CRASH_RES 31
58230+#define GR_UIDTABLE_MAX 500
58231+
58232+/* begin resource learning section */
58233+enum {
58234+ GR_RLIM_CPU_BUMP = 60,
58235+ GR_RLIM_FSIZE_BUMP = 50000,
58236+ GR_RLIM_DATA_BUMP = 10000,
58237+ GR_RLIM_STACK_BUMP = 1000,
58238+ GR_RLIM_CORE_BUMP = 10000,
58239+ GR_RLIM_RSS_BUMP = 500000,
58240+ GR_RLIM_NPROC_BUMP = 1,
58241+ GR_RLIM_NOFILE_BUMP = 5,
58242+ GR_RLIM_MEMLOCK_BUMP = 50000,
58243+ GR_RLIM_AS_BUMP = 500000,
58244+ GR_RLIM_LOCKS_BUMP = 2,
58245+ GR_RLIM_SIGPENDING_BUMP = 5,
58246+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58247+ GR_RLIM_NICE_BUMP = 1,
58248+ GR_RLIM_RTPRIO_BUMP = 1,
58249+ GR_RLIM_RTTIME_BUMP = 1000000
58250+};
58251+
58252+#endif
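
The object-mode and audit-mode values in the grdefs.h hunk above line up: each GR_AUDIT_* bit sits exactly 10 bits above its base flag (GR_READ 0x1 vs GR_AUDIT_READ 0x400, GR_LINK 0x200 vs GR_AUDIT_LINK 0x80000), and GR_AUDITS covers bits 10-19. The standalone sketch below is built only on that observation; to_gr_audit_sketch() is an editorial guess at the mapping, not the to_gr_audit() that the patch itself declares later in grinternal.h.

#include <assert.h>
#include <stdint.h>

enum { GR_READ = 0x1, GR_LINK = 0x200 };                  /* object modes, copied from the hunk above */
enum { GR_AUDIT_READ = 0x400, GR_AUDIT_LINK = 0x80000 };  /* their audit counterparts */
#define GR_AUDITS 0x000ffc00U                             /* OR of the ten GR_AUDIT_* bits */

/* Guessed mapping: shift a requested-access mask up by 10 bits and keep only
 * the audit range.  Editorial sketch, not the patch's implementation. */
static uint32_t to_gr_audit_sketch(uint32_t reqmode)
{
	return (reqmode << 10) & GR_AUDITS;
}

int main(void)
{
	assert(to_gr_audit_sketch(GR_READ) == GR_AUDIT_READ);
	assert(to_gr_audit_sketch(GR_LINK) == GR_AUDIT_LINK);
	return 0;
}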
58253diff -urNp linux-2.6.32.45/include/linux/grinternal.h linux-2.6.32.45/include/linux/grinternal.h
58254--- linux-2.6.32.45/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58255+++ linux-2.6.32.45/include/linux/grinternal.h 2011-08-11 19:58:37.000000000 -0400
58256@@ -0,0 +1,217 @@
58257+#ifndef __GRINTERNAL_H
58258+#define __GRINTERNAL_H
58259+
58260+#ifdef CONFIG_GRKERNSEC
58261+
58262+#include <linux/fs.h>
58263+#include <linux/mnt_namespace.h>
58264+#include <linux/nsproxy.h>
58265+#include <linux/gracl.h>
58266+#include <linux/grdefs.h>
58267+#include <linux/grmsg.h>
58268+
58269+void gr_add_learn_entry(const char *fmt, ...)
58270+ __attribute__ ((format (printf, 1, 2)));
58271+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58272+ const struct vfsmount *mnt);
58273+__u32 gr_check_create(const struct dentry *new_dentry,
58274+ const struct dentry *parent,
58275+ const struct vfsmount *mnt, const __u32 mode);
58276+int gr_check_protected_task(const struct task_struct *task);
58277+__u32 to_gr_audit(const __u32 reqmode);
58278+int gr_set_acls(const int type);
58279+int gr_apply_subject_to_task(struct task_struct *task);
58280+int gr_acl_is_enabled(void);
58281+char gr_roletype_to_char(void);
58282+
58283+void gr_handle_alertkill(struct task_struct *task);
58284+char *gr_to_filename(const struct dentry *dentry,
58285+ const struct vfsmount *mnt);
58286+char *gr_to_filename1(const struct dentry *dentry,
58287+ const struct vfsmount *mnt);
58288+char *gr_to_filename2(const struct dentry *dentry,
58289+ const struct vfsmount *mnt);
58290+char *gr_to_filename3(const struct dentry *dentry,
58291+ const struct vfsmount *mnt);
58292+
58293+extern int grsec_enable_harden_ptrace;
58294+extern int grsec_enable_link;
58295+extern int grsec_enable_fifo;
58296+extern int grsec_enable_shm;
58297+extern int grsec_enable_execlog;
58298+extern int grsec_enable_signal;
58299+extern int grsec_enable_audit_ptrace;
58300+extern int grsec_enable_forkfail;
58301+extern int grsec_enable_time;
58302+extern int grsec_enable_rofs;
58303+extern int grsec_enable_chroot_shmat;
58304+extern int grsec_enable_chroot_mount;
58305+extern int grsec_enable_chroot_double;
58306+extern int grsec_enable_chroot_pivot;
58307+extern int grsec_enable_chroot_chdir;
58308+extern int grsec_enable_chroot_chmod;
58309+extern int grsec_enable_chroot_mknod;
58310+extern int grsec_enable_chroot_fchdir;
58311+extern int grsec_enable_chroot_nice;
58312+extern int grsec_enable_chroot_execlog;
58313+extern int grsec_enable_chroot_caps;
58314+extern int grsec_enable_chroot_sysctl;
58315+extern int grsec_enable_chroot_unix;
58316+extern int grsec_enable_tpe;
58317+extern int grsec_tpe_gid;
58318+extern int grsec_enable_tpe_all;
58319+extern int grsec_enable_tpe_invert;
58320+extern int grsec_enable_socket_all;
58321+extern int grsec_socket_all_gid;
58322+extern int grsec_enable_socket_client;
58323+extern int grsec_socket_client_gid;
58324+extern int grsec_enable_socket_server;
58325+extern int grsec_socket_server_gid;
58326+extern int grsec_audit_gid;
58327+extern int grsec_enable_group;
58328+extern int grsec_enable_audit_textrel;
58329+extern int grsec_enable_log_rwxmaps;
58330+extern int grsec_enable_mount;
58331+extern int grsec_enable_chdir;
58332+extern int grsec_resource_logging;
58333+extern int grsec_enable_blackhole;
58334+extern int grsec_lastack_retries;
58335+extern int grsec_enable_brute;
58336+extern int grsec_lock;
58337+
58338+extern spinlock_t grsec_alert_lock;
58339+extern unsigned long grsec_alert_wtime;
58340+extern unsigned long grsec_alert_fyet;
58341+
58342+extern spinlock_t grsec_audit_lock;
58343+
58344+extern rwlock_t grsec_exec_file_lock;
58345+
58346+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58347+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58348+ (tsk)->exec_file->f_vfsmnt) : "/")
58349+
58350+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58351+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58352+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58353+
58354+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58355+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58356+ (tsk)->exec_file->f_vfsmnt) : "/")
58357+
58358+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58359+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58360+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58361+
58362+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58363+
58364+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58365+
58366+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58367+ (task)->pid, (cred)->uid, \
58368+ (cred)->euid, (cred)->gid, (cred)->egid, \
58369+ gr_parent_task_fullpath(task), \
58370+ (task)->real_parent->comm, (task)->real_parent->pid, \
58371+ (pcred)->uid, (pcred)->euid, \
58372+ (pcred)->gid, (pcred)->egid
58373+
58374+#define GR_CHROOT_CAPS {{ \
58375+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58376+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58377+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58378+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58379+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58380+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
58381+
58382+#define security_learn(normal_msg,args...) \
58383+({ \
58384+ read_lock(&grsec_exec_file_lock); \
58385+ gr_add_learn_entry(normal_msg "\n", ## args); \
58386+ read_unlock(&grsec_exec_file_lock); \
58387+})
58388+
58389+enum {
58390+ GR_DO_AUDIT,
58391+ GR_DONT_AUDIT,
58392+ GR_DONT_AUDIT_GOOD
58393+};
58394+
58395+enum {
58396+ GR_TTYSNIFF,
58397+ GR_RBAC,
58398+ GR_RBAC_STR,
58399+ GR_STR_RBAC,
58400+ GR_RBAC_MODE2,
58401+ GR_RBAC_MODE3,
58402+ GR_FILENAME,
58403+ GR_SYSCTL_HIDDEN,
58404+ GR_NOARGS,
58405+ GR_ONE_INT,
58406+ GR_ONE_INT_TWO_STR,
58407+ GR_ONE_STR,
58408+ GR_STR_INT,
58409+ GR_TWO_STR_INT,
58410+ GR_TWO_INT,
58411+ GR_TWO_U64,
58412+ GR_THREE_INT,
58413+ GR_FIVE_INT_TWO_STR,
58414+ GR_TWO_STR,
58415+ GR_THREE_STR,
58416+ GR_FOUR_STR,
58417+ GR_STR_FILENAME,
58418+ GR_FILENAME_STR,
58419+ GR_FILENAME_TWO_INT,
58420+ GR_FILENAME_TWO_INT_STR,
58421+ GR_TEXTREL,
58422+ GR_PTRACE,
58423+ GR_RESOURCE,
58424+ GR_CAP,
58425+ GR_SIG,
58426+ GR_SIG2,
58427+ GR_CRASH1,
58428+ GR_CRASH2,
58429+ GR_PSACCT,
58430+ GR_RWXMAP
58431+};
58432+
58433+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58434+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58435+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58436+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58437+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58438+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58439+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58440+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58441+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58442+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58443+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58444+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58445+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58446+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58447+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58448+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58449+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58450+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58451+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58452+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58453+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58454+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58455+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58456+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58457+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58458+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58459+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58460+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58461+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58462+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58463+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58464+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58465+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58466+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58467+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58468+
58469+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58470+
58471+#endif
58472+
58473+#endif
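
The gr_log_* wrapper macros in the grinternal.h hunk above all funnel into gr_log_varargs(), passing an argument-layout tag from the enum (GR_ONE_INT, GR_TWO_STR, and so on). The standalone program below illustrates that dispatch pattern in plain C with stdarg; every ex_* name is invented for the example, and the real gr_log_varargs() in this patch is considerably more elaborate.

#include <stdarg.h>
#include <stdio.h>

enum { EX_NOARGS, EX_ONE_INT, EX_TWO_STR };   /* stand-ins for GR_NOARGS, GR_ONE_INT, GR_TWO_STR */

static void ex_log_varargs(int audit, const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	printf("%s: ", audit ? "audit" : "alert");
	switch (argtypes) {
	case EX_ONE_INT:
		printf(msg, va_arg(ap, int));
		break;
	case EX_TWO_STR: {
		const char *s1 = va_arg(ap, const char *);
		const char *s2 = va_arg(ap, const char *);
		printf(msg, s1, s2);
		break;
	}
	default:
		fputs(msg, stdout);
	}
	va_end(ap);
	putchar('\n');
}

/* Wrapper macros hide the type tag, in the spirit of gr_log_int()/gr_log_str_str(). */
#define ex_log_int(audit, msg, num)          ex_log_varargs(audit, msg, EX_ONE_INT, num)
#define ex_log_str_str(audit, msg, s1, s2)   ex_log_varargs(audit, msg, EX_TWO_STR, s1, s2)

int main(void)
{
	ex_log_int(0, "invalid mode %d used", 42);
	ex_log_str_str(1, "mount of %s to %s", "/dev/sda1", "/mnt");
	return 0;
}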
58474diff -urNp linux-2.6.32.45/include/linux/grmsg.h linux-2.6.32.45/include/linux/grmsg.h
58475--- linux-2.6.32.45/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58476+++ linux-2.6.32.45/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
58477@@ -0,0 +1,108 @@
58478+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58479+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58480+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58481+#define GR_STOPMOD_MSG "denied modification of module state by "
58482+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58483+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58484+#define GR_IOPERM_MSG "denied use of ioperm() by "
58485+#define GR_IOPL_MSG "denied use of iopl() by "
58486+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58487+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58488+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58489+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58490+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58491+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58492+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58493+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58494+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58495+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58496+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58497+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58498+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58499+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58500+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58501+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58502+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58503+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58504+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58505+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58506+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58507+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58508+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58509+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58510+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58511+#define GR_NPROC_MSG "denied overstep of process limit by "
58512+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58513+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58514+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58515+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58516+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58517+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58518+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58519+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58520+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58521+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58522+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58523+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58524+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58525+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58526+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58527+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58528+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58529+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58530+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58531+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58532+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58533+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58534+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58535+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58536+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58537+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58538+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58539+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58540+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58541+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58542+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58543+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58544+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58545+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58546+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58547+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58548+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58549+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58550+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58551+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58552+#define GR_NICE_CHROOT_MSG "denied priority change by "
58553+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58554+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58555+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58556+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58557+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58558+#define GR_TIME_MSG "time set by "
58559+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58560+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58561+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58562+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58563+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58564+#define GR_BIND_MSG "denied bind() by "
58565+#define GR_CONNECT_MSG "denied connect() by "
58566+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58567+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58568+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58569+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58570+#define GR_CAP_ACL_MSG "use of %s denied for "
58571+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58572+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58573+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58574+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58575+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58576+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58577+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58578+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58579+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58580+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58581+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58582+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58583+#define GR_VM86_MSG "denied use of vm86 by "
58584+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58585+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
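
Nearly every format string in the grmsg.h hunk above ends in "by ", which lets the logging layer append a DEFAULTSECMSG credential block (the values come from the DEFAULTSECARGS macro in grinternal.h) through compile-time string concatenation. A standalone demonstration follows: DEFAULTSECMSG and GR_TIME_MSG are copied verbatim from the hunk, while the argument values are made up for the demo.

#include <stdio.h>

#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
#define GR_TIME_MSG "time set by "

int main(void)
{
	/* The two literals concatenate into a single format string at compile time. */
	printf(GR_TIME_MSG DEFAULTSECMSG "\n",
	       "/usr/bin/date", "date", 1234, 0u, 0u, 0u, 0u,
	       "/bin/bash", "bash", 1000, 0u, 0u, 0u, 0u);
	return 0;
}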
58586diff -urNp linux-2.6.32.45/include/linux/grsecurity.h linux-2.6.32.45/include/linux/grsecurity.h
58587--- linux-2.6.32.45/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58588+++ linux-2.6.32.45/include/linux/grsecurity.h 2011-08-11 19:58:57.000000000 -0400
58589@@ -0,0 +1,217 @@
58590+#ifndef GR_SECURITY_H
58591+#define GR_SECURITY_H
58592+#include <linux/fs.h>
58593+#include <linux/fs_struct.h>
58594+#include <linux/binfmts.h>
58595+#include <linux/gracl.h>
58596+#include <linux/compat.h>
58597+
58598+/* notify of brain-dead configs */
58599+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58600+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58601+#endif
58602+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58603+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58604+#endif
58605+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58606+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58607+#endif
58608+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58609+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58610+#endif
58611+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58612+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58613+#endif
58614+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58615+#error "CONFIG_PAX enabled, but no PaX options are enabled."
58616+#endif
58617+
58618+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58619+void gr_handle_brute_check(void);
58620+void gr_handle_kernel_exploit(void);
58621+int gr_process_user_ban(void);
58622+
58623+char gr_roletype_to_char(void);
58624+
58625+int gr_acl_enable_at_secure(void);
58626+
58627+int gr_check_user_change(int real, int effective, int fs);
58628+int gr_check_group_change(int real, int effective, int fs);
58629+
58630+void gr_del_task_from_ip_table(struct task_struct *p);
58631+
58632+int gr_pid_is_chrooted(struct task_struct *p);
58633+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58634+int gr_handle_chroot_nice(void);
58635+int gr_handle_chroot_sysctl(const int op);
58636+int gr_handle_chroot_setpriority(struct task_struct *p,
58637+ const int niceval);
58638+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58639+int gr_handle_chroot_chroot(const struct dentry *dentry,
58640+ const struct vfsmount *mnt);
58641+int gr_handle_chroot_caps(struct path *path);
58642+void gr_handle_chroot_chdir(struct path *path);
58643+int gr_handle_chroot_chmod(const struct dentry *dentry,
58644+ const struct vfsmount *mnt, const int mode);
58645+int gr_handle_chroot_mknod(const struct dentry *dentry,
58646+ const struct vfsmount *mnt, const int mode);
58647+int gr_handle_chroot_mount(const struct dentry *dentry,
58648+ const struct vfsmount *mnt,
58649+ const char *dev_name);
58650+int gr_handle_chroot_pivot(void);
58651+int gr_handle_chroot_unix(const pid_t pid);
58652+
58653+int gr_handle_rawio(const struct inode *inode);
58654+
58655+void gr_handle_ioperm(void);
58656+void gr_handle_iopl(void);
58657+
58658+int gr_tpe_allow(const struct file *file);
58659+
58660+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58661+void gr_clear_chroot_entries(struct task_struct *task);
58662+
58663+void gr_log_forkfail(const int retval);
58664+void gr_log_timechange(void);
58665+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58666+void gr_log_chdir(const struct dentry *dentry,
58667+ const struct vfsmount *mnt);
58668+void gr_log_chroot_exec(const struct dentry *dentry,
58669+ const struct vfsmount *mnt);
58670+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
58671+#ifdef CONFIG_COMPAT
58672+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
58673+#endif
58674+void gr_log_remount(const char *devname, const int retval);
58675+void gr_log_unmount(const char *devname, const int retval);
58676+void gr_log_mount(const char *from, const char *to, const int retval);
58677+void gr_log_textrel(struct vm_area_struct *vma);
58678+void gr_log_rwxmmap(struct file *file);
58679+void gr_log_rwxmprotect(struct file *file);
58680+
58681+int gr_handle_follow_link(const struct inode *parent,
58682+ const struct inode *inode,
58683+ const struct dentry *dentry,
58684+ const struct vfsmount *mnt);
58685+int gr_handle_fifo(const struct dentry *dentry,
58686+ const struct vfsmount *mnt,
58687+ const struct dentry *dir, const int flag,
58688+ const int acc_mode);
58689+int gr_handle_hardlink(const struct dentry *dentry,
58690+ const struct vfsmount *mnt,
58691+ struct inode *inode,
58692+ const int mode, const char *to);
58693+
58694+int gr_is_capable(const int cap);
58695+int gr_is_capable_nolog(const int cap);
58696+void gr_learn_resource(const struct task_struct *task, const int limit,
58697+ const unsigned long wanted, const int gt);
58698+void gr_copy_label(struct task_struct *tsk);
58699+void gr_handle_crash(struct task_struct *task, const int sig);
58700+int gr_handle_signal(const struct task_struct *p, const int sig);
58701+int gr_check_crash_uid(const uid_t uid);
58702+int gr_check_protected_task(const struct task_struct *task);
58703+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58704+int gr_acl_handle_mmap(const struct file *file,
58705+ const unsigned long prot);
58706+int gr_acl_handle_mprotect(const struct file *file,
58707+ const unsigned long prot);
58708+int gr_check_hidden_task(const struct task_struct *tsk);
58709+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
58710+ const struct vfsmount *mnt);
58711+__u32 gr_acl_handle_utime(const struct dentry *dentry,
58712+ const struct vfsmount *mnt);
58713+__u32 gr_acl_handle_access(const struct dentry *dentry,
58714+ const struct vfsmount *mnt, const int fmode);
58715+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
58716+ const struct vfsmount *mnt, mode_t mode);
58717+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
58718+ const struct vfsmount *mnt, mode_t mode);
58719+__u32 gr_acl_handle_chown(const struct dentry *dentry,
58720+ const struct vfsmount *mnt);
58721+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
58722+ const struct vfsmount *mnt);
58723+int gr_handle_ptrace(struct task_struct *task, const long request);
58724+int gr_handle_proc_ptrace(struct task_struct *task);
58725+__u32 gr_acl_handle_execve(const struct dentry *dentry,
58726+ const struct vfsmount *mnt);
58727+int gr_check_crash_exec(const struct file *filp);
58728+int gr_acl_is_enabled(void);
58729+void gr_set_kernel_label(struct task_struct *task);
58730+void gr_set_role_label(struct task_struct *task, const uid_t uid,
58731+ const gid_t gid);
58732+int gr_set_proc_label(const struct dentry *dentry,
58733+ const struct vfsmount *mnt,
58734+ const int unsafe_share);
58735+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
58736+ const struct vfsmount *mnt);
58737+__u32 gr_acl_handle_open(const struct dentry *dentry,
58738+ const struct vfsmount *mnt, const int fmode);
58739+__u32 gr_acl_handle_creat(const struct dentry *dentry,
58740+ const struct dentry *p_dentry,
58741+ const struct vfsmount *p_mnt, const int fmode,
58742+ const int imode);
58743+void gr_handle_create(const struct dentry *dentry,
58744+ const struct vfsmount *mnt);
58745+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
58746+ const struct dentry *parent_dentry,
58747+ const struct vfsmount *parent_mnt,
58748+ const int mode);
58749+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
58750+ const struct dentry *parent_dentry,
58751+ const struct vfsmount *parent_mnt);
58752+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
58753+ const struct vfsmount *mnt);
58754+void gr_handle_delete(const ino_t ino, const dev_t dev);
58755+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
58756+ const struct vfsmount *mnt);
58757+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
58758+ const struct dentry *parent_dentry,
58759+ const struct vfsmount *parent_mnt,
58760+ const char *from);
58761+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
58762+ const struct dentry *parent_dentry,
58763+ const struct vfsmount *parent_mnt,
58764+ const struct dentry *old_dentry,
58765+ const struct vfsmount *old_mnt, const char *to);
58766+int gr_acl_handle_rename(struct dentry *new_dentry,
58767+ struct dentry *parent_dentry,
58768+ const struct vfsmount *parent_mnt,
58769+ struct dentry *old_dentry,
58770+ struct inode *old_parent_inode,
58771+ struct vfsmount *old_mnt, const char *newname);
58772+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58773+ struct dentry *old_dentry,
58774+ struct dentry *new_dentry,
58775+ struct vfsmount *mnt, const __u8 replace);
58776+__u32 gr_check_link(const struct dentry *new_dentry,
58777+ const struct dentry *parent_dentry,
58778+ const struct vfsmount *parent_mnt,
58779+ const struct dentry *old_dentry,
58780+ const struct vfsmount *old_mnt);
58781+int gr_acl_handle_filldir(const struct file *file, const char *name,
58782+ const unsigned int namelen, const ino_t ino);
58783+
58784+__u32 gr_acl_handle_unix(const struct dentry *dentry,
58785+ const struct vfsmount *mnt);
58786+void gr_acl_handle_exit(void);
58787+void gr_acl_handle_psacct(struct task_struct *task, const long code);
58788+int gr_acl_handle_procpidmem(const struct task_struct *task);
58789+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
58790+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
58791+void gr_audit_ptrace(struct task_struct *task);
58792+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
58793+
58794+#ifdef CONFIG_GRKERNSEC
58795+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
58796+void gr_handle_vm86(void);
58797+void gr_handle_mem_readwrite(u64 from, u64 to);
58798+
58799+extern int grsec_enable_dmesg;
58800+extern int grsec_disable_privio;
58801+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58802+extern int grsec_enable_chroot_findtask;
58803+#endif
58804+#endif
58805+
58806+#endif
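
The #error block near the top of grsecurity.h above rejects inconsistent option combinations before anything is built. A minimal standalone illustration of the same preprocessor technique, using hypothetical option names rather than the CONFIG_PAX_* symbols:

#define EXAMPLE_FEATURE_A 1
#define EXAMPLE_FEATURE_A_BACKEND_X 1   /* comment this out to watch the #error fire */

#if defined(EXAMPLE_FEATURE_A) && !defined(EXAMPLE_FEATURE_A_BACKEND_X) && !defined(EXAMPLE_FEATURE_A_BACKEND_Y)
#error "EXAMPLE_FEATURE_A enabled, but no backend is selected."
#endif

int main(void) { return 0; }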
58807diff -urNp linux-2.6.32.45/include/linux/hdpu_features.h linux-2.6.32.45/include/linux/hdpu_features.h
58808--- linux-2.6.32.45/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
58809+++ linux-2.6.32.45/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
58810@@ -3,7 +3,7 @@
58811 struct cpustate_t {
58812 spinlock_t lock;
58813 int excl;
58814- int open_count;
58815+ atomic_t open_count;
58816 unsigned char cached_val;
58817 int inited;
58818 unsigned long *set_addr;
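
Turning open_count into an atomic_t closes the classic lost-update race on a plain int counter. The standalone program below shows the difference, using C11 atomics as a user-space stand-in for the kernel's atomic_t; the threading scenario is invented for the demo (build with cc -pthread).

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int        plain_count;     /* the old 'int open_count' */
static atomic_int atomic_count;    /* analogue of the new atomic_t open_count */

static void *opener(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		plain_count++;                      /* racy read-modify-write */
		atomic_fetch_add(&atomic_count, 1); /* cf. the kernel's atomic_inc() */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, opener, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	printf("plain=%d (lost updates likely)  atomic=%d (always 400000)\n",
	       plain_count, atomic_load(&atomic_count));
	return 0;
}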
58819diff -urNp linux-2.6.32.45/include/linux/highmem.h linux-2.6.32.45/include/linux/highmem.h
58820--- linux-2.6.32.45/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
58821+++ linux-2.6.32.45/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
58822@@ -137,6 +137,18 @@ static inline void clear_highpage(struct
58823 kunmap_atomic(kaddr, KM_USER0);
58824 }
58825
58826+static inline void sanitize_highpage(struct page *page)
58827+{
58828+ void *kaddr;
58829+ unsigned long flags;
58830+
58831+ local_irq_save(flags);
58832+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
58833+ clear_page(kaddr);
58834+ kunmap_atomic(kaddr, KM_CLEARPAGE);
58835+ local_irq_restore(flags);
58836+}
58837+
58838 static inline void zero_user_segments(struct page *page,
58839 unsigned start1, unsigned end1,
58840 unsigned start2, unsigned end2)
58841diff -urNp linux-2.6.32.45/include/linux/i2c.h linux-2.6.32.45/include/linux/i2c.h
58842--- linux-2.6.32.45/include/linux/i2c.h 2011-03-27 14:31:47.000000000 -0400
58843+++ linux-2.6.32.45/include/linux/i2c.h 2011-08-23 21:22:38.000000000 -0400
58844@@ -325,6 +325,7 @@ struct i2c_algorithm {
58845 /* To determine what the adapter supports */
58846 u32 (*functionality) (struct i2c_adapter *);
58847 };
58848+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
58849
58850 /*
58851 * i2c_adapter is the structure used to identify a physical i2c bus along
58852diff -urNp linux-2.6.32.45/include/linux/i2o.h linux-2.6.32.45/include/linux/i2o.h
58853--- linux-2.6.32.45/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
58854+++ linux-2.6.32.45/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
58855@@ -564,7 +564,7 @@ struct i2o_controller {
58856 struct i2o_device *exec; /* Executive */
58857 #if BITS_PER_LONG == 64
58858 spinlock_t context_list_lock; /* lock for context_list */
58859- atomic_t context_list_counter; /* needed for unique contexts */
58860+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
58861 struct list_head context_list; /* list of context id's
58862 and pointers */
58863 #endif
58864diff -urNp linux-2.6.32.45/include/linux/init_task.h linux-2.6.32.45/include/linux/init_task.h
58865--- linux-2.6.32.45/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
58866+++ linux-2.6.32.45/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
58867@@ -83,6 +83,12 @@ extern struct group_info init_groups;
58868 #define INIT_IDS
58869 #endif
58870
58871+#ifdef CONFIG_X86
58872+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
58873+#else
58874+#define INIT_TASK_THREAD_INFO
58875+#endif
58876+
58877 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
58878 /*
58879 * Because of the reduced scope of CAP_SETPCAP when filesystem
58880@@ -156,6 +162,7 @@ extern struct cred init_cred;
58881 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
58882 .comm = "swapper", \
58883 .thread = INIT_THREAD, \
58884+ INIT_TASK_THREAD_INFO \
58885 .fs = &init_fs, \
58886 .files = &init_files, \
58887 .signal = &init_signals, \
58888diff -urNp linux-2.6.32.45/include/linux/intel-iommu.h linux-2.6.32.45/include/linux/intel-iommu.h
58889--- linux-2.6.32.45/include/linux/intel-iommu.h 2011-03-27 14:31:47.000000000 -0400
58890+++ linux-2.6.32.45/include/linux/intel-iommu.h 2011-08-05 20:33:55.000000000 -0400
58891@@ -296,7 +296,7 @@ struct iommu_flush {
58892 u8 fm, u64 type);
58893 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
58894 unsigned int size_order, u64 type);
58895-};
58896+} __no_const;
58897
58898 enum {
58899 SR_DMAR_FECTL_REG,
58900diff -urNp linux-2.6.32.45/include/linux/interrupt.h linux-2.6.32.45/include/linux/interrupt.h
58901--- linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
58902+++ linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
58903@@ -363,7 +363,7 @@ enum
58904 /* map softirq index to softirq name. update 'softirq_to_name' in
58905 * kernel/softirq.c when adding a new softirq.
58906 */
58907-extern char *softirq_to_name[NR_SOFTIRQS];
58908+extern const char * const softirq_to_name[NR_SOFTIRQS];
58909
58910 /* softirq mask and active fields moved to irq_cpustat_t in
58911 * asm/hardirq.h to get better cache usage. KAO
58912@@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
58913
58914 struct softirq_action
58915 {
58916- void (*action)(struct softirq_action *);
58917+ void (*action)(void);
58918 };
58919
58920 asmlinkage void do_softirq(void);
58921 asmlinkage void __do_softirq(void);
58922-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
58923+extern void open_softirq(int nr, void (*action)(void));
58924 extern void softirq_init(void);
58925 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
58926 extern void raise_softirq_irqoff(unsigned int nr);
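
The interrupt.h hunk narrows the softirq handler type from void (*)(struct softirq_action *) to void (*)(void), so handlers no longer receive a pointer back into the vector. A standalone sketch of a dispatch table built around the narrowed signature; all ex_* names are illustrative and none of this is kernel code.

#include <stdio.h>

#define EX_NR_SOFTIRQS 4

struct ex_softirq_action {
	void (*action)(void);            /* was: void (*action)(struct ex_softirq_action *) */
};

static struct ex_softirq_action ex_vec[EX_NR_SOFTIRQS];

static void ex_open_softirq(int nr, void (*action)(void))
{
	ex_vec[nr].action = action;
}

static void ex_do_softirq(unsigned int pending)
{
	for (int nr = 0; nr < EX_NR_SOFTIRQS; nr++)
		if ((pending & (1u << nr)) && ex_vec[nr].action)
			ex_vec[nr].action();
}

static void ex_timer_softirq(void) { puts("timer softirq ran"); }

int main(void)
{
	ex_open_softirq(1, ex_timer_softirq);
	ex_do_softirq(1u << 1);
	return 0;
}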
58927diff -urNp linux-2.6.32.45/include/linux/irq.h linux-2.6.32.45/include/linux/irq.h
58928--- linux-2.6.32.45/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
58929+++ linux-2.6.32.45/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
58930@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
58931 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
58932 bool boot)
58933 {
58934+#ifdef CONFIG_CPUMASK_OFFSTACK
58935 gfp_t gfp = GFP_ATOMIC;
58936
58937 if (boot)
58938 gfp = GFP_NOWAIT;
58939
58940-#ifdef CONFIG_CPUMASK_OFFSTACK
58941 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
58942 return false;
58943
58944diff -urNp linux-2.6.32.45/include/linux/kallsyms.h linux-2.6.32.45/include/linux/kallsyms.h
58945--- linux-2.6.32.45/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
58946+++ linux-2.6.32.45/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
58947@@ -15,7 +15,8 @@
58948
58949 struct module;
58950
58951-#ifdef CONFIG_KALLSYMS
58952+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
58953+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
58954 /* Lookup the address for a symbol. Returns 0 if not found. */
58955 unsigned long kallsyms_lookup_name(const char *name);
58956
58957@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
58958 /* Stupid that this does nothing, but I didn't create this mess. */
58959 #define __print_symbol(fmt, addr)
58960 #endif /*CONFIG_KALLSYMS*/
58961+#else /* when included by kallsyms.c, vsnprintf.c, or
58962+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
58963+extern void __print_symbol(const char *fmt, unsigned long address);
58964+extern int sprint_symbol(char *buffer, unsigned long address);
58965+const char *kallsyms_lookup(unsigned long addr,
58966+ unsigned long *symbolsize,
58967+ unsigned long *offset,
58968+ char **modname, char *namebuf);
58969+#endif
58970
58971 /* This macro allows us to keep printk typechecking */
58972 static void __check_printsym_format(const char *fmt, ...)
58973diff -urNp linux-2.6.32.45/include/linux/kgdb.h linux-2.6.32.45/include/linux/kgdb.h
58974--- linux-2.6.32.45/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
58975+++ linux-2.6.32.45/include/linux/kgdb.h 2011-08-05 20:33:55.000000000 -0400
58976@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
58977
58978 extern int kgdb_connected;
58979
58980-extern atomic_t kgdb_setting_breakpoint;
58981-extern atomic_t kgdb_cpu_doing_single_step;
58982+extern atomic_unchecked_t kgdb_setting_breakpoint;
58983+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
58984
58985 extern struct task_struct *kgdb_usethread;
58986 extern struct task_struct *kgdb_contthread;
58987@@ -226,8 +226,8 @@ extern int kgdb_arch_remove_breakpoint(u
58988 * hardware debug registers.
58989 */
58990 struct kgdb_arch {
58991- unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
58992- unsigned long flags;
58993+ const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
58994+ const unsigned long flags;
58995
58996 int (*set_breakpoint)(unsigned long, char *);
58997 int (*remove_breakpoint)(unsigned long, char *);
58998@@ -251,20 +251,20 @@ struct kgdb_arch {
58999 */
59000 struct kgdb_io {
59001 const char *name;
59002- int (*read_char) (void);
59003- void (*write_char) (u8);
59004- void (*flush) (void);
59005- int (*init) (void);
59006- void (*pre_exception) (void);
59007- void (*post_exception) (void);
59008+ int (* const read_char) (void);
59009+ void (* const write_char) (u8);
59010+ void (* const flush) (void);
59011+ int (* const init) (void);
59012+ void (* const pre_exception) (void);
59013+ void (* const post_exception) (void);
59014 };
59015
59016-extern struct kgdb_arch arch_kgdb_ops;
59017+extern const struct kgdb_arch arch_kgdb_ops;
59018
59019 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
59020
59021-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
59022-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
59023+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
59024+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
59025
59026 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
59027 extern int kgdb_mem2hex(char *mem, char *buf, int count);
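
The kgdb changes follow the constification pattern applied throughout this patch (kobj_type, kset_uevent_ops, nlmsvc_binding, ata_port_operations, and others): ops structures and their function-pointer members become const, so the tables typically land in read-only data and cannot be retargeted at run time. A standalone sketch of the idea, with invented ex_* names:

#include <stdio.h>

struct ex_io_ops {
	int  (* const read_char)(void);     /* const members, as in the patched kgdb_io */
	void (* const write_char)(char c);
};

static int  ex_read(void)    { return getchar(); }
static void ex_write(char c) { putchar(c); }

/* File-scope const object: usually placed in .rodata, so an attacker who
 * overwrites it faults instead of silently redirecting the function pointers. */
static const struct ex_io_ops ex_console_ops = {
	.read_char  = ex_read,
	.write_char = ex_write,
};

int main(void)
{
	ex_console_ops.write_char('!');
	ex_console_ops.write_char('\n');
	/* ex_console_ops.write_char = some_other_fn;   -- rejected at compile time */
	return 0;
}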
59028diff -urNp linux-2.6.32.45/include/linux/kmod.h linux-2.6.32.45/include/linux/kmod.h
59029--- linux-2.6.32.45/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
59030+++ linux-2.6.32.45/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
59031@@ -31,6 +31,8 @@
59032 * usually useless though. */
59033 extern int __request_module(bool wait, const char *name, ...) \
59034 __attribute__((format(printf, 2, 3)));
59035+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59036+ __attribute__((format(printf, 3, 4)));
59037 #define request_module(mod...) __request_module(true, mod)
59038 #define request_module_nowait(mod...) __request_module(false, mod)
59039 #define try_then_request_module(x, mod...) \
59040diff -urNp linux-2.6.32.45/include/linux/kobject.h linux-2.6.32.45/include/linux/kobject.h
59041--- linux-2.6.32.45/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
59042+++ linux-2.6.32.45/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
59043@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
59044
59045 struct kobj_type {
59046 void (*release)(struct kobject *kobj);
59047- struct sysfs_ops *sysfs_ops;
59048+ const struct sysfs_ops *sysfs_ops;
59049 struct attribute **default_attrs;
59050 };
59051
59052@@ -118,9 +118,9 @@ struct kobj_uevent_env {
59053 };
59054
59055 struct kset_uevent_ops {
59056- int (*filter)(struct kset *kset, struct kobject *kobj);
59057- const char *(*name)(struct kset *kset, struct kobject *kobj);
59058- int (*uevent)(struct kset *kset, struct kobject *kobj,
59059+ int (* const filter)(struct kset *kset, struct kobject *kobj);
59060+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
59061+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
59062 struct kobj_uevent_env *env);
59063 };
59064
59065@@ -132,7 +132,7 @@ struct kobj_attribute {
59066 const char *buf, size_t count);
59067 };
59068
59069-extern struct sysfs_ops kobj_sysfs_ops;
59070+extern const struct sysfs_ops kobj_sysfs_ops;
59071
59072 /**
59073 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
59074@@ -155,14 +155,14 @@ struct kset {
59075 struct list_head list;
59076 spinlock_t list_lock;
59077 struct kobject kobj;
59078- struct kset_uevent_ops *uevent_ops;
59079+ const struct kset_uevent_ops *uevent_ops;
59080 };
59081
59082 extern void kset_init(struct kset *kset);
59083 extern int __must_check kset_register(struct kset *kset);
59084 extern void kset_unregister(struct kset *kset);
59085 extern struct kset * __must_check kset_create_and_add(const char *name,
59086- struct kset_uevent_ops *u,
59087+ const struct kset_uevent_ops *u,
59088 struct kobject *parent_kobj);
59089
59090 static inline struct kset *to_kset(struct kobject *kobj)
59091diff -urNp linux-2.6.32.45/include/linux/kvm_host.h linux-2.6.32.45/include/linux/kvm_host.h
59092--- linux-2.6.32.45/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
59093+++ linux-2.6.32.45/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
59094@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59095 void vcpu_load(struct kvm_vcpu *vcpu);
59096 void vcpu_put(struct kvm_vcpu *vcpu);
59097
59098-int kvm_init(void *opaque, unsigned int vcpu_size,
59099+int kvm_init(const void *opaque, unsigned int vcpu_size,
59100 struct module *module);
59101 void kvm_exit(void);
59102
59103@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59104 struct kvm_guest_debug *dbg);
59105 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59106
59107-int kvm_arch_init(void *opaque);
59108+int kvm_arch_init(const void *opaque);
59109 void kvm_arch_exit(void);
59110
59111 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59112diff -urNp linux-2.6.32.45/include/linux/libata.h linux-2.6.32.45/include/linux/libata.h
59113--- linux-2.6.32.45/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
59114+++ linux-2.6.32.45/include/linux/libata.h 2011-08-05 20:33:55.000000000 -0400
59115@@ -525,11 +525,11 @@ struct ata_ioports {
59116
59117 struct ata_host {
59118 spinlock_t lock;
59119- struct device *dev;
59120+ struct device *dev;
59121 void __iomem * const *iomap;
59122 unsigned int n_ports;
59123 void *private_data;
59124- struct ata_port_operations *ops;
59125+ const struct ata_port_operations *ops;
59126 unsigned long flags;
59127 #ifdef CONFIG_ATA_ACPI
59128 acpi_handle acpi_handle;
59129@@ -710,7 +710,7 @@ struct ata_link {
59130
59131 struct ata_port {
59132 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
59133- struct ata_port_operations *ops;
59134+ const struct ata_port_operations *ops;
59135 spinlock_t *lock;
59136 /* Flags owned by the EH context. Only EH should touch these once the
59137 port is active */
59138@@ -883,7 +883,7 @@ struct ata_port_operations {
59139 * ->inherits must be the last field and all the preceding
59140 * fields must be pointers.
59141 */
59142- const struct ata_port_operations *inherits;
59143+ const struct ata_port_operations * const inherits;
59144 };
59145
59146 struct ata_port_info {
59147@@ -892,7 +892,7 @@ struct ata_port_info {
59148 unsigned long pio_mask;
59149 unsigned long mwdma_mask;
59150 unsigned long udma_mask;
59151- struct ata_port_operations *port_ops;
59152+ const struct ata_port_operations *port_ops;
59153 void *private_data;
59154 };
59155
59156@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
59157 extern const unsigned long sata_deb_timing_hotplug[];
59158 extern const unsigned long sata_deb_timing_long[];
59159
59160-extern struct ata_port_operations ata_dummy_port_ops;
59161+extern const struct ata_port_operations ata_dummy_port_ops;
59162 extern const struct ata_port_info ata_dummy_port_info;
59163
59164 static inline const unsigned long *
59165@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
59166 struct scsi_host_template *sht);
59167 extern void ata_host_detach(struct ata_host *host);
59168 extern void ata_host_init(struct ata_host *, struct device *,
59169- unsigned long, struct ata_port_operations *);
59170+ unsigned long, const struct ata_port_operations *);
59171 extern int ata_scsi_detect(struct scsi_host_template *sht);
59172 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
59173 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
59174diff -urNp linux-2.6.32.45/include/linux/lockd/bind.h linux-2.6.32.45/include/linux/lockd/bind.h
59175--- linux-2.6.32.45/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
59176+++ linux-2.6.32.45/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
59177@@ -23,13 +23,13 @@ struct svc_rqst;
59178 * This is the set of functions for lockd->nfsd communication
59179 */
59180 struct nlmsvc_binding {
59181- __be32 (*fopen)(struct svc_rqst *,
59182+ __be32 (* const fopen)(struct svc_rqst *,
59183 struct nfs_fh *,
59184 struct file **);
59185- void (*fclose)(struct file *);
59186+ void (* const fclose)(struct file *);
59187 };
59188
59189-extern struct nlmsvc_binding * nlmsvc_ops;
59190+extern const struct nlmsvc_binding * nlmsvc_ops;
59191
59192 /*
59193 * Similar to nfs_client_initdata, but without the NFS-specific
59194diff -urNp linux-2.6.32.45/include/linux/mca.h linux-2.6.32.45/include/linux/mca.h
59195--- linux-2.6.32.45/include/linux/mca.h 2011-03-27 14:31:47.000000000 -0400
59196+++ linux-2.6.32.45/include/linux/mca.h 2011-08-05 20:33:55.000000000 -0400
59197@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59198 int region);
59199 void * (*mca_transform_memory)(struct mca_device *,
59200 void *memory);
59201-};
59202+} __no_const;
59203
59204 struct mca_bus {
59205 u64 default_dma_mask;
59206diff -urNp linux-2.6.32.45/include/linux/memory.h linux-2.6.32.45/include/linux/memory.h
59207--- linux-2.6.32.45/include/linux/memory.h 2011-03-27 14:31:47.000000000 -0400
59208+++ linux-2.6.32.45/include/linux/memory.h 2011-08-05 20:33:55.000000000 -0400
59209@@ -108,7 +108,7 @@ struct memory_accessor {
59210 size_t count);
59211 ssize_t (*write)(struct memory_accessor *, const char *buf,
59212 off_t offset, size_t count);
59213-};
59214+} __no_const;
59215
59216 /*
59217 * Kernel text modification mutex, used for code patching. Users of this lock
59218diff -urNp linux-2.6.32.45/include/linux/mm.h linux-2.6.32.45/include/linux/mm.h
59219--- linux-2.6.32.45/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
59220+++ linux-2.6.32.45/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
59221@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
59222
59223 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59224 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59225+
59226+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59227+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59228+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59229+#else
59230 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59231+#endif
59232+
59233 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59234 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59235
59236@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
59237 int set_page_dirty_lock(struct page *page);
59238 int clear_page_dirty_for_io(struct page *page);
59239
59240-/* Is the vma a continuation of the stack vma above it? */
59241-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
59242-{
59243- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59244-}
59245-
59246 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59247 unsigned long old_addr, struct vm_area_struct *new_vma,
59248 unsigned long new_addr, unsigned long len);
59249@@ -890,6 +891,8 @@ struct shrinker {
59250 extern void register_shrinker(struct shrinker *);
59251 extern void unregister_shrinker(struct shrinker *);
59252
59253+pgprot_t vm_get_page_prot(unsigned long vm_flags);
59254+
59255 int vma_wants_writenotify(struct vm_area_struct *vma);
59256
59257 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
59258@@ -1162,6 +1165,7 @@ out:
59259 }
59260
59261 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59262+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59263
59264 extern unsigned long do_brk(unsigned long, unsigned long);
59265
59266@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
59267 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59268 struct vm_area_struct **pprev);
59269
59270+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59271+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59272+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59273+
59274 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59275 NULL if none. Assume start_addr < end_addr. */
59276 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59277@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
59278 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59279 }
59280
59281-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59282 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59283 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59284 unsigned long pfn, unsigned long size, pgprot_t);
59285@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
59286 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
59287 extern int sysctl_memory_failure_early_kill;
59288 extern int sysctl_memory_failure_recovery;
59289-extern atomic_long_t mce_bad_pages;
59290+extern atomic_long_unchecked_t mce_bad_pages;
59291+
59292+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59293+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59294+#else
59295+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59296+#endif
59297
59298 #endif /* __KERNEL__ */
59299 #endif /* _LINUX_MM_H */
59300diff -urNp linux-2.6.32.45/include/linux/mm_types.h linux-2.6.32.45/include/linux/mm_types.h
59301--- linux-2.6.32.45/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
59302+++ linux-2.6.32.45/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
59303@@ -186,6 +186,8 @@ struct vm_area_struct {
59304 #ifdef CONFIG_NUMA
59305 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59306 #endif
59307+
59308+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59309 };
59310
59311 struct core_thread {
59312@@ -287,6 +289,24 @@ struct mm_struct {
59313 #ifdef CONFIG_MMU_NOTIFIER
59314 struct mmu_notifier_mm *mmu_notifier_mm;
59315 #endif
59316+
59317+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59318+ unsigned long pax_flags;
59319+#endif
59320+
59321+#ifdef CONFIG_PAX_DLRESOLVE
59322+ unsigned long call_dl_resolve;
59323+#endif
59324+
59325+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59326+ unsigned long call_syscall;
59327+#endif
59328+
59329+#ifdef CONFIG_PAX_ASLR
59330+ unsigned long delta_mmap; /* randomized offset */
59331+ unsigned long delta_stack; /* randomized offset */
59332+#endif
59333+
59334 };
59335
59336 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
59337diff -urNp linux-2.6.32.45/include/linux/mmu_notifier.h linux-2.6.32.45/include/linux/mmu_notifier.h
59338--- linux-2.6.32.45/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
59339+++ linux-2.6.32.45/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
59340@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
59341 */
59342 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59343 ({ \
59344- pte_t __pte; \
59345+ pte_t ___pte; \
59346 struct vm_area_struct *___vma = __vma; \
59347 unsigned long ___address = __address; \
59348- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59349+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59350 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59351- __pte; \
59352+ ___pte; \
59353 })
59354
59355 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
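
The only change in this hunk renames the macro's internal temporary from __pte to ___pte, matching the existing ___vma and ___address temporaries; the likely motive, not stated in the patch, is to avoid an identifier clash at some expansion site. The standalone GNU C program below (statement expressions, as the kernel uses) shows the generic capture hazard such renames guard against.

#include <assert.h>

/* A statement-expression macro whose temporary collides with a caller name. */
#define BAD_DOUBLE(x) ({ int __tmp = (x); __tmp * 2; })

int main(void)
{
	int __tmp = 21;

	/* Expands to ({ int __tmp = (__tmp); __tmp * 2; }): the initializer now
	 * reads the macro's own uninitialized __tmp instead of the caller's. */
	int broken = BAD_DOUBLE(__tmp);
	(void)broken;                       /* value is indeterminate; that is the bug */

	/* Renaming the internal temporary (cf. __pte -> ___pte) removes the clash. */
	assert(({ int ___tmp = (__tmp); ___tmp * 2; }) == 42);
	return 0;
}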
59356diff -urNp linux-2.6.32.45/include/linux/mmzone.h linux-2.6.32.45/include/linux/mmzone.h
59357--- linux-2.6.32.45/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
59358+++ linux-2.6.32.45/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
59359@@ -350,7 +350,7 @@ struct zone {
59360 unsigned long flags; /* zone flags, see below */
59361
59362 /* Zone statistics */
59363- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59364+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59365
59366 /*
59367 * prev_priority holds the scanning priority for this zone. It is
59368diff -urNp linux-2.6.32.45/include/linux/mod_devicetable.h linux-2.6.32.45/include/linux/mod_devicetable.h
59369--- linux-2.6.32.45/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
59370+++ linux-2.6.32.45/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
59371@@ -12,7 +12,7 @@
59372 typedef unsigned long kernel_ulong_t;
59373 #endif
59374
59375-#define PCI_ANY_ID (~0)
59376+#define PCI_ANY_ID ((__u16)~0)
59377
59378 struct pci_device_id {
59379 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59380@@ -131,7 +131,7 @@ struct usb_device_id {
59381 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59382 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59383
59384-#define HID_ANY_ID (~0)
59385+#define HID_ANY_ID (~0U)
59386
59387 struct hid_device_id {
59388 __u16 bus;
59389diff -urNp linux-2.6.32.45/include/linux/module.h linux-2.6.32.45/include/linux/module.h
59390--- linux-2.6.32.45/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
59391+++ linux-2.6.32.45/include/linux/module.h 2011-08-05 20:33:55.000000000 -0400
59392@@ -16,6 +16,7 @@
59393 #include <linux/kobject.h>
59394 #include <linux/moduleparam.h>
59395 #include <linux/tracepoint.h>
59396+#include <linux/fs.h>
59397
59398 #include <asm/local.h>
59399 #include <asm/module.h>
59400@@ -287,16 +288,16 @@ struct module
59401 int (*init)(void);
59402
59403 /* If this is non-NULL, vfree after init() returns */
59404- void *module_init;
59405+ void *module_init_rx, *module_init_rw;
59406
59407 /* Here is the actual code + data, vfree'd on unload. */
59408- void *module_core;
59409+ void *module_core_rx, *module_core_rw;
59410
59411 /* Here are the sizes of the init and core sections */
59412- unsigned int init_size, core_size;
59413+ unsigned int init_size_rw, core_size_rw;
59414
59415 /* The size of the executable code in each section. */
59416- unsigned int init_text_size, core_text_size;
59417+ unsigned int init_size_rx, core_size_rx;
59418
59419 /* Arch-specific module values */
59420 struct mod_arch_specific arch;
59421@@ -345,6 +346,10 @@ struct module
59422 #ifdef CONFIG_EVENT_TRACING
59423 struct ftrace_event_call *trace_events;
59424 unsigned int num_trace_events;
59425+ struct file_operations trace_id;
59426+ struct file_operations trace_enable;
59427+ struct file_operations trace_format;
59428+ struct file_operations trace_filter;
59429 #endif
59430 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59431 unsigned long *ftrace_callsites;
59432@@ -393,16 +398,46 @@ struct module *__module_address(unsigned
59433 bool is_module_address(unsigned long addr);
59434 bool is_module_text_address(unsigned long addr);
59435
59436+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59437+{
59438+
59439+#ifdef CONFIG_PAX_KERNEXEC
59440+ if (ktla_ktva(addr) >= (unsigned long)start &&
59441+ ktla_ktva(addr) < (unsigned long)start + size)
59442+ return 1;
59443+#endif
59444+
59445+ return ((void *)addr >= start && (void *)addr < start + size);
59446+}
59447+
59448+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59449+{
59450+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59451+}
59452+
59453+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59454+{
59455+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59456+}
59457+
59458+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59459+{
59460+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59461+}
59462+
59463+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59464+{
59465+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59466+}
59467+
59468 static inline int within_module_core(unsigned long addr, struct module *mod)
59469 {
59470- return (unsigned long)mod->module_core <= addr &&
59471- addr < (unsigned long)mod->module_core + mod->core_size;
59472+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59473 }
59474
59475 static inline int within_module_init(unsigned long addr, struct module *mod)
59476 {
59477- return (unsigned long)mod->module_init <= addr &&
59478- addr < (unsigned long)mod->module_init + mod->init_size;
59479+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59480 }
59481
59482 /* Search for module by name: must hold module_mutex. */
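The helpers added above split the old single core/init range into separate RX (code) and RW (data) ranges and recompose within_module_core()/within_module_init() as an or of two containment tests. A minimal stand-alone sketch of that composition; the KERNEXEC ktla_ktva() remapping is deliberately left out, and the struct below is a toy model, not struct module.

/* Toy model of a module with distinct code (RX) and data (RW) regions. */
struct mod_layout {
	void *core_rx;
	unsigned long core_size_rx;
	void *core_rw;
	unsigned long core_size_rw;
};

static int within_range(unsigned long addr, void *start, unsigned long size)
{
	return (void *)addr >= start &&
	       (void *)addr < (void *)((char *)start + size);
}

/* An address is "in the module core" if it falls in either region,
 * the same or-of-two-ranges shape as the patched within_module_core(). */
static int within_core(unsigned long addr, const struct mod_layout *m)
{
	return within_range(addr, m->core_rx, m->core_size_rx) ||
	       within_range(addr, m->core_rw, m->core_size_rw);
}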
59483diff -urNp linux-2.6.32.45/include/linux/moduleloader.h linux-2.6.32.45/include/linux/moduleloader.h
59484--- linux-2.6.32.45/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
59485+++ linux-2.6.32.45/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
59486@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59487 sections. Returns NULL on failure. */
59488 void *module_alloc(unsigned long size);
59489
59490+#ifdef CONFIG_PAX_KERNEXEC
59491+void *module_alloc_exec(unsigned long size);
59492+#else
59493+#define module_alloc_exec(x) module_alloc(x)
59494+#endif
59495+
59496 /* Free memory returned from module_alloc. */
59497 void module_free(struct module *mod, void *module_region);
59498
59499+#ifdef CONFIG_PAX_KERNEXEC
59500+void module_free_exec(struct module *mod, void *module_region);
59501+#else
59502+#define module_free_exec(x, y) module_free((x), (y))
59503+#endif
59504+
59505 /* Apply the given relocation to the (simplified) ELF. Return -error
59506 or 0. */
59507 int apply_relocate(Elf_Shdr *sechdrs,
59508diff -urNp linux-2.6.32.45/include/linux/moduleparam.h linux-2.6.32.45/include/linux/moduleparam.h
59509--- linux-2.6.32.45/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
59510+++ linux-2.6.32.45/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
59511@@ -132,7 +132,7 @@ struct kparam_array
59512
59513 /* Actually copy string: maxlen param is usually sizeof(string). */
59514 #define module_param_string(name, string, len, perm) \
59515- static const struct kparam_string __param_string_##name \
59516+ static const struct kparam_string __param_string_##name __used \
59517 = { len, string }; \
59518 __module_param_call(MODULE_PARAM_PREFIX, name, \
59519 param_set_copystring, param_get_string, \
59520@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
59521
59522 /* Comma-separated array: *nump is set to number they actually specified. */
59523 #define module_param_array_named(name, array, type, nump, perm) \
59524- static const struct kparam_array __param_arr_##name \
59525+ static const struct kparam_array __param_arr_##name __used \
59526 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
59527 sizeof(array[0]), array }; \
59528 __module_param_call(MODULE_PARAM_PREFIX, name, \
59529diff -urNp linux-2.6.32.45/include/linux/mutex.h linux-2.6.32.45/include/linux/mutex.h
59530--- linux-2.6.32.45/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
59531+++ linux-2.6.32.45/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
59532@@ -51,7 +51,7 @@ struct mutex {
59533 spinlock_t wait_lock;
59534 struct list_head wait_list;
59535 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
59536- struct thread_info *owner;
59537+ struct task_struct *owner;
59538 #endif
59539 #ifdef CONFIG_DEBUG_MUTEXES
59540 const char *name;
59541diff -urNp linux-2.6.32.45/include/linux/namei.h linux-2.6.32.45/include/linux/namei.h
59542--- linux-2.6.32.45/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
59543+++ linux-2.6.32.45/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
59544@@ -22,7 +22,7 @@ struct nameidata {
59545 unsigned int flags;
59546 int last_type;
59547 unsigned depth;
59548- char *saved_names[MAX_NESTED_LINKS + 1];
59549+ const char *saved_names[MAX_NESTED_LINKS + 1];
59550
59551 /* Intent data */
59552 union {
59553@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
59554 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59555 extern void unlock_rename(struct dentry *, struct dentry *);
59556
59557-static inline void nd_set_link(struct nameidata *nd, char *path)
59558+static inline void nd_set_link(struct nameidata *nd, const char *path)
59559 {
59560 nd->saved_names[nd->depth] = path;
59561 }
59562
59563-static inline char *nd_get_link(struct nameidata *nd)
59564+static inline const char *nd_get_link(const struct nameidata *nd)
59565 {
59566 return nd->saved_names[nd->depth];
59567 }
59568diff -urNp linux-2.6.32.45/include/linux/netdevice.h linux-2.6.32.45/include/linux/netdevice.h
59569--- linux-2.6.32.45/include/linux/netdevice.h 2011-08-09 18:35:30.000000000 -0400
59570+++ linux-2.6.32.45/include/linux/netdevice.h 2011-08-23 21:22:38.000000000 -0400
59571@@ -637,6 +637,7 @@ struct net_device_ops {
59572 u16 xid);
59573 #endif
59574 };
59575+typedef struct net_device_ops __no_const net_device_ops_no_const;
59576
59577 /*
59578 * The DEVICE structure.
59579diff -urNp linux-2.6.32.45/include/linux/netfilter/xt_gradm.h linux-2.6.32.45/include/linux/netfilter/xt_gradm.h
59580--- linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59581+++ linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
59582@@ -0,0 +1,9 @@
59583+#ifndef _LINUX_NETFILTER_XT_GRADM_H
59584+#define _LINUX_NETFILTER_XT_GRADM_H 1
59585+
59586+struct xt_gradm_mtinfo {
59587+ __u16 flags;
59588+ __u16 invflags;
59589+};
59590+
59591+#endif
59592diff -urNp linux-2.6.32.45/include/linux/nodemask.h linux-2.6.32.45/include/linux/nodemask.h
59593--- linux-2.6.32.45/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
59594+++ linux-2.6.32.45/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
59595@@ -464,11 +464,11 @@ static inline int num_node_state(enum no
59596
59597 #define any_online_node(mask) \
59598 ({ \
59599- int node; \
59600- for_each_node_mask(node, (mask)) \
59601- if (node_online(node)) \
59602+ int __node; \
59603+ for_each_node_mask(__node, (mask)) \
59604+ if (node_online(__node)) \
59605 break; \
59606- node; \
59607+ __node; \
59608 })
59609
59610 #define num_online_nodes() num_node_state(N_ONLINE)
59611diff -urNp linux-2.6.32.45/include/linux/oprofile.h linux-2.6.32.45/include/linux/oprofile.h
59612--- linux-2.6.32.45/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
59613+++ linux-2.6.32.45/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
59614@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
59615 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59616 char const * name, ulong * val);
59617
59618-/** Create a file for read-only access to an atomic_t. */
59619+/** Create a file for read-only access to an atomic_unchecked_t. */
59620 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59621- char const * name, atomic_t * val);
59622+ char const * name, atomic_unchecked_t * val);
59623
59624 /** create a directory */
59625 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59626diff -urNp linux-2.6.32.45/include/linux/pagemap.h linux-2.6.32.45/include/linux/pagemap.h
59627--- linux-2.6.32.45/include/linux/pagemap.h 2011-03-27 14:31:47.000000000 -0400
59628+++ linux-2.6.32.45/include/linux/pagemap.h 2011-08-17 19:36:28.000000000 -0400
59629@@ -425,6 +425,7 @@ static inline int fault_in_pages_readabl
59630 if (((unsigned long)uaddr & PAGE_MASK) !=
59631 ((unsigned long)end & PAGE_MASK))
59632 ret = __get_user(c, end);
59633+ (void)c;
59634 }
59635 return ret;
59636 }
59637diff -urNp linux-2.6.32.45/include/linux/perf_event.h linux-2.6.32.45/include/linux/perf_event.h
59638--- linux-2.6.32.45/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
59639+++ linux-2.6.32.45/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
59640@@ -476,7 +476,7 @@ struct hw_perf_event {
59641 struct hrtimer hrtimer;
59642 };
59643 };
59644- atomic64_t prev_count;
59645+ atomic64_unchecked_t prev_count;
59646 u64 sample_period;
59647 u64 last_period;
59648 atomic64_t period_left;
59649@@ -557,7 +557,7 @@ struct perf_event {
59650 const struct pmu *pmu;
59651
59652 enum perf_event_active_state state;
59653- atomic64_t count;
59654+ atomic64_unchecked_t count;
59655
59656 /*
59657 * These are the total time in nanoseconds that the event
59658@@ -595,8 +595,8 @@ struct perf_event {
59659 * These accumulate total time (in nanoseconds) that children
59660 * events have been enabled and running, respectively.
59661 */
59662- atomic64_t child_total_time_enabled;
59663- atomic64_t child_total_time_running;
59664+ atomic64_unchecked_t child_total_time_enabled;
59665+ atomic64_unchecked_t child_total_time_running;
59666
59667 /*
59668 * Protect attach/detach and child_list:
59669diff -urNp linux-2.6.32.45/include/linux/pipe_fs_i.h linux-2.6.32.45/include/linux/pipe_fs_i.h
59670--- linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
59671+++ linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
59672@@ -46,9 +46,9 @@ struct pipe_inode_info {
59673 wait_queue_head_t wait;
59674 unsigned int nrbufs, curbuf;
59675 struct page *tmp_page;
59676- unsigned int readers;
59677- unsigned int writers;
59678- unsigned int waiting_writers;
59679+ atomic_t readers;
59680+ atomic_t writers;
59681+ atomic_t waiting_writers;
59682 unsigned int r_counter;
59683 unsigned int w_counter;
59684 struct fasync_struct *fasync_readers;
59685diff -urNp linux-2.6.32.45/include/linux/poison.h linux-2.6.32.45/include/linux/poison.h
59686--- linux-2.6.32.45/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
59687+++ linux-2.6.32.45/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
59688@@ -19,8 +19,8 @@
59689 * under normal circumstances, used to verify that nobody uses
59690 * non-initialized list entries.
59691 */
59692-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59693-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59694+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59695+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59696
59697 /********** include/linux/timer.h **********/
59698 /*
59699diff -urNp linux-2.6.32.45/include/linux/posix-timers.h linux-2.6.32.45/include/linux/posix-timers.h
59700--- linux-2.6.32.45/include/linux/posix-timers.h 2011-03-27 14:31:47.000000000 -0400
59701+++ linux-2.6.32.45/include/linux/posix-timers.h 2011-08-05 20:33:55.000000000 -0400
59702@@ -67,7 +67,7 @@ struct k_itimer {
59703 };
59704
59705 struct k_clock {
59706- int res; /* in nanoseconds */
59707+ const int res; /* in nanoseconds */
59708 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
59709 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
59710 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
59711diff -urNp linux-2.6.32.45/include/linux/preempt.h linux-2.6.32.45/include/linux/preempt.h
59712--- linux-2.6.32.45/include/linux/preempt.h 2011-03-27 14:31:47.000000000 -0400
59713+++ linux-2.6.32.45/include/linux/preempt.h 2011-08-05 20:33:55.000000000 -0400
59714@@ -110,7 +110,7 @@ struct preempt_ops {
59715 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59716 void (*sched_out)(struct preempt_notifier *notifier,
59717 struct task_struct *next);
59718-};
59719+} __no_const;
59720
59721 /**
59722 * preempt_notifier - key for installing preemption notifiers
59723diff -urNp linux-2.6.32.45/include/linux/proc_fs.h linux-2.6.32.45/include/linux/proc_fs.h
59724--- linux-2.6.32.45/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
59725+++ linux-2.6.32.45/include/linux/proc_fs.h 2011-08-05 20:33:55.000000000 -0400
59726@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
59727 return proc_create_data(name, mode, parent, proc_fops, NULL);
59728 }
59729
59730+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
59731+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
59732+{
59733+#ifdef CONFIG_GRKERNSEC_PROC_USER
59734+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
59735+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59736+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
59737+#else
59738+ return proc_create_data(name, mode, parent, proc_fops, NULL);
59739+#endif
59740+}
59741+
59742+
59743 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
59744 mode_t mode, struct proc_dir_entry *base,
59745 read_proc_t *read_proc, void * data)
59746@@ -256,7 +269,7 @@ union proc_op {
59747 int (*proc_show)(struct seq_file *m,
59748 struct pid_namespace *ns, struct pid *pid,
59749 struct task_struct *task);
59750-};
59751+} __no_const;
59752
59753 struct ctl_table_header;
59754 struct ctl_table;
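proc_create_grsec() above is a drop-in for proc_create_data() that clamps the entry's mode when GRKERNSEC_PROC_USER or GRKERNSEC_PROC_USERGROUP is enabled. A hypothetical call site might look like the following sketch; the entry name and foo_fops are invented for illustration.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>

static const struct file_operations foo_fops;	/* assumed defined elsewhere */

static int __init foo_proc_init(void)
{
	/* Requests 0444, but the helper tightens this to 0400 (PROC_USER)
	 * or 0440 (PROC_USERGROUP) when those options are configured. */
	if (!proc_create_grsec("foo_stats", 0444, NULL, &foo_fops))
		return -ENOMEM;
	return 0;
}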
59755diff -urNp linux-2.6.32.45/include/linux/ptrace.h linux-2.6.32.45/include/linux/ptrace.h
59756--- linux-2.6.32.45/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
59757+++ linux-2.6.32.45/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
59758@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
59759 extern void exit_ptrace(struct task_struct *tracer);
59760 #define PTRACE_MODE_READ 1
59761 #define PTRACE_MODE_ATTACH 2
59762-/* Returns 0 on success, -errno on denial. */
59763-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
59764 /* Returns true on success, false on denial. */
59765 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
59766+/* Returns true on success, false on denial. */
59767+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
59768
59769 static inline int ptrace_reparented(struct task_struct *child)
59770 {
59771diff -urNp linux-2.6.32.45/include/linux/random.h linux-2.6.32.45/include/linux/random.h
59772--- linux-2.6.32.45/include/linux/random.h 2011-08-16 20:37:25.000000000 -0400
59773+++ linux-2.6.32.45/include/linux/random.h 2011-08-07 19:48:09.000000000 -0400
59774@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned l
59775 u32 random32(void);
59776 void srandom32(u32 seed);
59777
59778+static inline unsigned long pax_get_random_long(void)
59779+{
59780+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
59781+}
59782+
59783 #endif /* __KERNEL___ */
59784
59785 #endif /* _LINUX_RANDOM_H */
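pax_get_random_long() above builds a long-sized random value out of random32(): one draw supplies the low 32 bits and, only when long is wider than 4 bytes, a second draw fills the high bits. A user-space sketch of the same construction, with rand32() standing in for the kernel's random32():

#include <stdint.h>
#include <stdlib.h>

/* Stand-in for the kernel's random32(). */
static uint32_t rand32(void)
{
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

static unsigned long get_random_long(void)
{
	/* The second draw and the shift only take effect when long is
	 * 64-bit, mirroring the sizeof(long) > 4 test in the patch. */
	return rand32() + (sizeof(long) > 4 ? (unsigned long)rand32() << 32 : 0);
}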
59786diff -urNp linux-2.6.32.45/include/linux/reboot.h linux-2.6.32.45/include/linux/reboot.h
59787--- linux-2.6.32.45/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
59788+++ linux-2.6.32.45/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
59789@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
59790 * Architecture-specific implementations of sys_reboot commands.
59791 */
59792
59793-extern void machine_restart(char *cmd);
59794-extern void machine_halt(void);
59795-extern void machine_power_off(void);
59796+extern void machine_restart(char *cmd) __noreturn;
59797+extern void machine_halt(void) __noreturn;
59798+extern void machine_power_off(void) __noreturn;
59799
59800 extern void machine_shutdown(void);
59801 struct pt_regs;
59802@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
59803 */
59804
59805 extern void kernel_restart_prepare(char *cmd);
59806-extern void kernel_restart(char *cmd);
59807-extern void kernel_halt(void);
59808-extern void kernel_power_off(void);
59809+extern void kernel_restart(char *cmd) __noreturn;
59810+extern void kernel_halt(void) __noreturn;
59811+extern void kernel_power_off(void) __noreturn;
59812
59813 void ctrl_alt_del(void);
59814
59815@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
59816 * Emergency restart, callable from an interrupt handler.
59817 */
59818
59819-extern void emergency_restart(void);
59820+extern void emergency_restart(void) __noreturn;
59821 #include <asm/emergency-restart.h>
59822
59823 #endif
59824diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs.h linux-2.6.32.45/include/linux/reiserfs_fs.h
59825--- linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
59826+++ linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
59827@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
59828 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
59829
59830 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
59831-#define get_generation(s) atomic_read (&fs_generation(s))
59832+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
59833 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
59834 #define __fs_changed(gen,s) (gen != get_generation (s))
59835 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
59836@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
59837 */
59838
59839 struct item_operations {
59840- int (*bytes_number) (struct item_head * ih, int block_size);
59841- void (*decrement_key) (struct cpu_key *);
59842- int (*is_left_mergeable) (struct reiserfs_key * ih,
59843+ int (* const bytes_number) (struct item_head * ih, int block_size);
59844+ void (* const decrement_key) (struct cpu_key *);
59845+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
59846 unsigned long bsize);
59847- void (*print_item) (struct item_head *, char *item);
59848- void (*check_item) (struct item_head *, char *item);
59849+ void (* const print_item) (struct item_head *, char *item);
59850+ void (* const check_item) (struct item_head *, char *item);
59851
59852- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
59853+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
59854 int is_affected, int insert_size);
59855- int (*check_left) (struct virtual_item * vi, int free,
59856+ int (* const check_left) (struct virtual_item * vi, int free,
59857 int start_skip, int end_skip);
59858- int (*check_right) (struct virtual_item * vi, int free);
59859- int (*part_size) (struct virtual_item * vi, int from, int to);
59860- int (*unit_num) (struct virtual_item * vi);
59861- void (*print_vi) (struct virtual_item * vi);
59862+ int (* const check_right) (struct virtual_item * vi, int free);
59863+ int (* const part_size) (struct virtual_item * vi, int from, int to);
59864+ int (* const unit_num) (struct virtual_item * vi);
59865+ void (* const print_vi) (struct virtual_item * vi);
59866 };
59867
59868-extern struct item_operations *item_ops[TYPE_ANY + 1];
59869+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
59870
59871 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
59872 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
59873diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs_sb.h linux-2.6.32.45/include/linux/reiserfs_fs_sb.h
59874--- linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
59875+++ linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
59876@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
59877 /* Comment? -Hans */
59878 wait_queue_head_t s_wait;
59879 /* To be obsoleted soon by per buffer seals.. -Hans */
59880- atomic_t s_generation_counter; // increased by one every time the
59881+ atomic_unchecked_t s_generation_counter; // increased by one every time the
59882 // tree gets re-balanced
59883 unsigned long s_properties; /* File system properties. Currently holds
59884 on-disk FS format */
59885diff -urNp linux-2.6.32.45/include/linux/relay.h linux-2.6.32.45/include/linux/relay.h
59886--- linux-2.6.32.45/include/linux/relay.h 2011-03-27 14:31:47.000000000 -0400
59887+++ linux-2.6.32.45/include/linux/relay.h 2011-08-05 20:33:55.000000000 -0400
59888@@ -159,7 +159,7 @@ struct rchan_callbacks
59889 * The callback should return 0 if successful, negative if not.
59890 */
59891 int (*remove_buf_file)(struct dentry *dentry);
59892-};
59893+} __no_const;
59894
59895 /*
59896 * CONFIG_RELAY kernel API, kernel/relay.c
59897diff -urNp linux-2.6.32.45/include/linux/rfkill.h linux-2.6.32.45/include/linux/rfkill.h
59898--- linux-2.6.32.45/include/linux/rfkill.h 2011-03-27 14:31:47.000000000 -0400
59899+++ linux-2.6.32.45/include/linux/rfkill.h 2011-08-23 21:22:38.000000000 -0400
59900@@ -144,6 +144,7 @@ struct rfkill_ops {
59901 void (*query)(struct rfkill *rfkill, void *data);
59902 int (*set_block)(void *data, bool blocked);
59903 };
59904+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
59905
59906 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
59907 /**
59908diff -urNp linux-2.6.32.45/include/linux/sched.h linux-2.6.32.45/include/linux/sched.h
59909--- linux-2.6.32.45/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
59910+++ linux-2.6.32.45/include/linux/sched.h 2011-08-11 19:48:55.000000000 -0400
59911@@ -101,6 +101,7 @@ struct bio;
59912 struct fs_struct;
59913 struct bts_context;
59914 struct perf_event_context;
59915+struct linux_binprm;
59916
59917 /*
59918 * List of flags we want to share for kernel threads,
59919@@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
59920 extern signed long schedule_timeout_uninterruptible(signed long timeout);
59921 asmlinkage void __schedule(void);
59922 asmlinkage void schedule(void);
59923-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
59924+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
59925
59926 struct nsproxy;
59927 struct user_namespace;
59928@@ -371,9 +372,12 @@ struct user_namespace;
59929 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
59930
59931 extern int sysctl_max_map_count;
59932+extern unsigned long sysctl_heap_stack_gap;
59933
59934 #include <linux/aio.h>
59935
59936+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
59937+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
59938 extern unsigned long
59939 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
59940 unsigned long, unsigned long);
59941@@ -666,6 +670,16 @@ struct signal_struct {
59942 struct tty_audit_buf *tty_audit_buf;
59943 #endif
59944
59945+#ifdef CONFIG_GRKERNSEC
59946+ u32 curr_ip;
59947+ u32 saved_ip;
59948+ u32 gr_saddr;
59949+ u32 gr_daddr;
59950+ u16 gr_sport;
59951+ u16 gr_dport;
59952+ u8 used_accept:1;
59953+#endif
59954+
59955 int oom_adj; /* OOM kill score adjustment (bit shift) */
59956 };
59957
59958@@ -723,6 +737,11 @@ struct user_struct {
59959 struct key *session_keyring; /* UID's default session keyring */
59960 #endif
59961
59962+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59963+ unsigned int banned;
59964+ unsigned long ban_expires;
59965+#endif
59966+
59967 /* Hash table maintenance information */
59968 struct hlist_node uidhash_node;
59969 uid_t uid;
59970@@ -1328,8 +1347,8 @@ struct task_struct {
59971 struct list_head thread_group;
59972
59973 struct completion *vfork_done; /* for vfork() */
59974- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
59975- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
59976+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
59977+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
59978
59979 cputime_t utime, stime, utimescaled, stimescaled;
59980 cputime_t gtime;
59981@@ -1343,16 +1362,6 @@ struct task_struct {
59982 struct task_cputime cputime_expires;
59983 struct list_head cpu_timers[3];
59984
59985-/* process credentials */
59986- const struct cred *real_cred; /* objective and real subjective task
59987- * credentials (COW) */
59988- const struct cred *cred; /* effective (overridable) subjective task
59989- * credentials (COW) */
59990- struct mutex cred_guard_mutex; /* guard against foreign influences on
59991- * credential calculations
59992- * (notably. ptrace) */
59993- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
59994-
59995 char comm[TASK_COMM_LEN]; /* executable name excluding path
59996 - access with [gs]et_task_comm (which lock
59997 it with task_lock())
59998@@ -1369,6 +1378,10 @@ struct task_struct {
59999 #endif
60000 /* CPU-specific state of this task */
60001 struct thread_struct thread;
60002+/* thread_info moved to task_struct */
60003+#ifdef CONFIG_X86
60004+ struct thread_info tinfo;
60005+#endif
60006 /* filesystem information */
60007 struct fs_struct *fs;
60008 /* open file information */
60009@@ -1436,6 +1449,15 @@ struct task_struct {
60010 int hardirq_context;
60011 int softirq_context;
60012 #endif
60013+
60014+/* process credentials */
60015+ const struct cred *real_cred; /* objective and real subjective task
60016+ * credentials (COW) */
60017+ struct mutex cred_guard_mutex; /* guard against foreign influences on
60018+ * credential calculations
60019+ * (notably. ptrace) */
60020+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60021+
60022 #ifdef CONFIG_LOCKDEP
60023 # define MAX_LOCK_DEPTH 48UL
60024 u64 curr_chain_key;
60025@@ -1456,6 +1478,9 @@ struct task_struct {
60026
60027 struct backing_dev_info *backing_dev_info;
60028
60029+ const struct cred *cred; /* effective (overridable) subjective task
60030+ * credentials (COW) */
60031+
60032 struct io_context *io_context;
60033
60034 unsigned long ptrace_message;
60035@@ -1519,6 +1544,21 @@ struct task_struct {
60036 unsigned long default_timer_slack_ns;
60037
60038 struct list_head *scm_work_list;
60039+
60040+#ifdef CONFIG_GRKERNSEC
60041+ /* grsecurity */
60042+ struct dentry *gr_chroot_dentry;
60043+ struct acl_subject_label *acl;
60044+ struct acl_role_label *role;
60045+ struct file *exec_file;
60046+ u16 acl_role_id;
60047+ /* is this the task that authenticated to the special role */
60048+ u8 acl_sp_role;
60049+ u8 is_writable;
60050+ u8 brute;
60051+ u8 gr_is_chrooted;
60052+#endif
60053+
60054 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60055 /* Index of current stored adress in ret_stack */
60056 int curr_ret_stack;
60057@@ -1542,6 +1582,57 @@ struct task_struct {
60058 #endif /* CONFIG_TRACING */
60059 };
60060
60061+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60062+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60063+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60064+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60065+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60066+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60067+
60068+#ifdef CONFIG_PAX_SOFTMODE
60069+extern int pax_softmode;
60070+#endif
60071+
60072+extern int pax_check_flags(unsigned long *);
60073+
60074+/* if tsk != current then task_lock must be held on it */
60075+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60076+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60077+{
60078+ if (likely(tsk->mm))
60079+ return tsk->mm->pax_flags;
60080+ else
60081+ return 0UL;
60082+}
60083+
60084+/* if tsk != current then task_lock must be held on it */
60085+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60086+{
60087+ if (likely(tsk->mm)) {
60088+ tsk->mm->pax_flags = flags;
60089+ return 0;
60090+ }
60091+ return -EINVAL;
60092+}
60093+#endif
60094+
60095+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60096+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60097+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60098+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60099+#endif
60100+
60101+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60102+extern void pax_report_insns(void *pc, void *sp);
60103+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60104+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60105+
60106+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60107+extern void pax_track_stack(void);
60108+#else
60109+static inline void pax_track_stack(void) {}
60110+#endif
60111+
60112 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60113 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
60114
60115@@ -1740,7 +1831,7 @@ extern void thread_group_times(struct ta
60116 #define PF_DUMPCORE 0x00000200 /* dumped core */
60117 #define PF_SIGNALED 0x00000400 /* killed by a signal */
60118 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
60119-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
60120+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
60121 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
60122 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
60123 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
60124@@ -1978,7 +2069,9 @@ void yield(void);
60125 extern struct exec_domain default_exec_domain;
60126
60127 union thread_union {
60128+#ifndef CONFIG_X86
60129 struct thread_info thread_info;
60130+#endif
60131 unsigned long stack[THREAD_SIZE/sizeof(long)];
60132 };
60133
60134@@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
60135 */
60136
60137 extern struct task_struct *find_task_by_vpid(pid_t nr);
60138+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60139 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60140 struct pid_namespace *ns);
60141
60142@@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
60143 extern void exit_itimers(struct signal_struct *);
60144 extern void flush_itimer_signals(void);
60145
60146-extern NORET_TYPE void do_group_exit(int);
60147+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60148
60149 extern void daemonize(const char *, ...);
60150 extern int allow_signal(int);
60151@@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
60152
60153 #endif
60154
60155-static inline int object_is_on_stack(void *obj)
60156+static inline int object_starts_on_stack(void *obj)
60157 {
60158- void *stack = task_stack_page(current);
60159+ const void *stack = task_stack_page(current);
60160
60161 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60162 }
60163
60164+#ifdef CONFIG_PAX_USERCOPY
60165+extern int object_is_on_stack(const void *obj, unsigned long len);
60166+#endif
60167+
60168 extern void thread_info_cache_init(void);
60169
60170 #ifdef CONFIG_DEBUG_STACK_USAGE
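The MF_PAX_* bits above live in mm->pax_flags and are read and written through pax_get_flags()/pax_set_flags(). The fragment below only shows how such a flag word is tested; the values are copied from the definitions above, but the check itself is a toy, not the real PaX MPROTECT policy.

#define MF_PAX_PAGEEXEC	0x01000000UL	/* values as defined above */
#define MF_PAX_MPROTECT	0x04000000UL

/* Toy policy: with MPROTECT set, refuse to turn a mapping executable. */
static int may_add_exec(unsigned long pax_flags)
{
	return !(pax_flags & MF_PAX_MPROTECT);
}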
60171diff -urNp linux-2.6.32.45/include/linux/screen_info.h linux-2.6.32.45/include/linux/screen_info.h
60172--- linux-2.6.32.45/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
60173+++ linux-2.6.32.45/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
60174@@ -42,7 +42,8 @@ struct screen_info {
60175 __u16 pages; /* 0x32 */
60176 __u16 vesa_attributes; /* 0x34 */
60177 __u32 capabilities; /* 0x36 */
60178- __u8 _reserved[6]; /* 0x3a */
60179+ __u16 vesapm_size; /* 0x3a */
60180+ __u8 _reserved[4]; /* 0x3c */
60181 } __attribute__((packed));
60182
60183 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60184diff -urNp linux-2.6.32.45/include/linux/security.h linux-2.6.32.45/include/linux/security.h
60185--- linux-2.6.32.45/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
60186+++ linux-2.6.32.45/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
60187@@ -34,6 +34,7 @@
60188 #include <linux/key.h>
60189 #include <linux/xfrm.h>
60190 #include <linux/gfp.h>
60191+#include <linux/grsecurity.h>
60192 #include <net/flow.h>
60193
60194 /* Maximum number of letters for an LSM name string */
60195diff -urNp linux-2.6.32.45/include/linux/seq_file.h linux-2.6.32.45/include/linux/seq_file.h
60196--- linux-2.6.32.45/include/linux/seq_file.h 2011-03-27 14:31:47.000000000 -0400
60197+++ linux-2.6.32.45/include/linux/seq_file.h 2011-08-23 21:22:38.000000000 -0400
60198@@ -32,6 +32,7 @@ struct seq_operations {
60199 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60200 int (*show) (struct seq_file *m, void *v);
60201 };
60202+typedef struct seq_operations __no_const seq_operations_no_const;
60203
60204 #define SEQ_SKIP 1
60205
60206diff -urNp linux-2.6.32.45/include/linux/shm.h linux-2.6.32.45/include/linux/shm.h
60207--- linux-2.6.32.45/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
60208+++ linux-2.6.32.45/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
60209@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60210 pid_t shm_cprid;
60211 pid_t shm_lprid;
60212 struct user_struct *mlock_user;
60213+#ifdef CONFIG_GRKERNSEC
60214+ time_t shm_createtime;
60215+ pid_t shm_lapid;
60216+#endif
60217 };
60218
60219 /* shm_mode upper byte flags */
60220diff -urNp linux-2.6.32.45/include/linux/skbuff.h linux-2.6.32.45/include/linux/skbuff.h
60221--- linux-2.6.32.45/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
60222+++ linux-2.6.32.45/include/linux/skbuff.h 2011-08-21 15:27:56.000000000 -0400
60223@@ -14,6 +14,7 @@
60224 #ifndef _LINUX_SKBUFF_H
60225 #define _LINUX_SKBUFF_H
60226
60227+#include <linux/const.h>
60228 #include <linux/kernel.h>
60229 #include <linux/kmemcheck.h>
60230 #include <linux/compiler.h>
60231@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_t
60232 */
60233 static inline int skb_queue_empty(const struct sk_buff_head *list)
60234 {
60235- return list->next == (struct sk_buff *)list;
60236+ return list->next == (const struct sk_buff *)list;
60237 }
60238
60239 /**
60240@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const
60241 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60242 const struct sk_buff *skb)
60243 {
60244- return (skb->next == (struct sk_buff *) list);
60245+ return (skb->next == (const struct sk_buff *) list);
60246 }
60247
60248 /**
60249@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(con
60250 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60251 const struct sk_buff *skb)
60252 {
60253- return (skb->prev == (struct sk_buff *) list);
60254+ return (skb->prev == (const struct sk_buff *) list);
60255 }
60256
60257 /**
60258@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(con
60259 * headroom, you should not reduce this.
60260 */
60261 #ifndef NET_SKB_PAD
60262-#define NET_SKB_PAD 32
60263+#define NET_SKB_PAD (_AC(32,UL))
60264 #endif
60265
60266 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60267diff -urNp linux-2.6.32.45/include/linux/slab_def.h linux-2.6.32.45/include/linux/slab_def.h
60268--- linux-2.6.32.45/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
60269+++ linux-2.6.32.45/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
60270@@ -69,10 +69,10 @@ struct kmem_cache {
60271 unsigned long node_allocs;
60272 unsigned long node_frees;
60273 unsigned long node_overflow;
60274- atomic_t allochit;
60275- atomic_t allocmiss;
60276- atomic_t freehit;
60277- atomic_t freemiss;
60278+ atomic_unchecked_t allochit;
60279+ atomic_unchecked_t allocmiss;
60280+ atomic_unchecked_t freehit;
60281+ atomic_unchecked_t freemiss;
60282
60283 /*
60284 * If debugging is enabled, then the allocator can add additional
60285diff -urNp linux-2.6.32.45/include/linux/slab.h linux-2.6.32.45/include/linux/slab.h
60286--- linux-2.6.32.45/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
60287+++ linux-2.6.32.45/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
60288@@ -11,12 +11,20 @@
60289
60290 #include <linux/gfp.h>
60291 #include <linux/types.h>
60292+#include <linux/err.h>
60293
60294 /*
60295 * Flags to pass to kmem_cache_create().
60296 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60297 */
60298 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60299+
60300+#ifdef CONFIG_PAX_USERCOPY
60301+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60302+#else
60303+#define SLAB_USERCOPY 0x00000000UL
60304+#endif
60305+
60306 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60307 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60308 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60309@@ -82,10 +90,13 @@
60310 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60311 * Both make kfree a no-op.
60312 */
60313-#define ZERO_SIZE_PTR ((void *)16)
60314+#define ZERO_SIZE_PTR \
60315+({ \
60316+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60317+ (void *)(-MAX_ERRNO-1L); \
60318+})
60319
60320-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60321- (unsigned long)ZERO_SIZE_PTR)
60322+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60323
60324 /*
60325 * struct kmem_cache related prototypes
60326@@ -138,6 +149,7 @@ void * __must_check krealloc(const void
60327 void kfree(const void *);
60328 void kzfree(const void *);
60329 size_t ksize(const void *);
60330+void check_object_size(const void *ptr, unsigned long n, bool to);
60331
60332 /*
60333 * Allocator specific definitions. These are mainly used to establish optimized
60334@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
60335
60336 void __init kmem_cache_init_late(void);
60337
60338+#define kmalloc(x, y) \
60339+({ \
60340+ void *___retval; \
60341+ intoverflow_t ___x = (intoverflow_t)x; \
60342+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
60343+ ___retval = NULL; \
60344+ else \
60345+ ___retval = kmalloc((size_t)___x, (y)); \
60346+ ___retval; \
60347+})
60348+
60349+#define kmalloc_node(x, y, z) \
60350+({ \
60351+ void *___retval; \
60352+ intoverflow_t ___x = (intoverflow_t)x; \
60353+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60354+ ___retval = NULL; \
60355+ else \
60356+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60357+ ___retval; \
60358+})
60359+
60360+#define kzalloc(x, y) \
60361+({ \
60362+ void *___retval; \
60363+ intoverflow_t ___x = (intoverflow_t)x; \
60364+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
60365+ ___retval = NULL; \
60366+ else \
60367+ ___retval = kzalloc((size_t)___x, (y)); \
60368+ ___retval; \
60369+})
60370+
60371 #endif /* _LINUX_SLAB_H */
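The kmalloc/kmalloc_node/kzalloc wrappers above share one pattern: evaluate the requested size in the wider intoverflow_t type, WARN and return NULL if the value no longer fits in an unsigned long, and otherwise fall through to the real allocator. A stand-alone user-space sketch of that pattern, using unsigned long long where the kernel uses intoverflow_t (so the check only has teeth where unsigned long is the narrower type, e.g. 32-bit builds):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Overflow-checked allocation in the style of the patched kmalloc() macro:
 * rejects any request whose value does not fit in unsigned long, such as
 * a 64-bit length or a negative value widened during conversion on a
 * 32-bit build, instead of letting it be silently truncated. */
static void *checked_alloc(unsigned long long size)
{
	if (size > ULONG_MAX) {
		fprintf(stderr, "alloc size overflow\n");
		return NULL;
	}
	return malloc((size_t)size);
}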
60372diff -urNp linux-2.6.32.45/include/linux/slub_def.h linux-2.6.32.45/include/linux/slub_def.h
60373--- linux-2.6.32.45/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
60374+++ linux-2.6.32.45/include/linux/slub_def.h 2011-08-05 20:33:55.000000000 -0400
60375@@ -86,7 +86,7 @@ struct kmem_cache {
60376 struct kmem_cache_order_objects max;
60377 struct kmem_cache_order_objects min;
60378 gfp_t allocflags; /* gfp flags to use on each alloc */
60379- int refcount; /* Refcount for slab cache destroy */
60380+ atomic_t refcount; /* Refcount for slab cache destroy */
60381 void (*ctor)(void *);
60382 int inuse; /* Offset to metadata */
60383 int align; /* Alignment */
60384@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache
60385 #endif
60386
60387 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60388-void *__kmalloc(size_t size, gfp_t flags);
60389+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60390
60391 #ifdef CONFIG_KMEMTRACE
60392 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
60393diff -urNp linux-2.6.32.45/include/linux/sonet.h linux-2.6.32.45/include/linux/sonet.h
60394--- linux-2.6.32.45/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
60395+++ linux-2.6.32.45/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
60396@@ -61,7 +61,7 @@ struct sonet_stats {
60397 #include <asm/atomic.h>
60398
60399 struct k_sonet_stats {
60400-#define __HANDLE_ITEM(i) atomic_t i
60401+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60402 __SONET_ITEMS
60403 #undef __HANDLE_ITEM
60404 };
60405diff -urNp linux-2.6.32.45/include/linux/sunrpc/cache.h linux-2.6.32.45/include/linux/sunrpc/cache.h
60406--- linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-03-27 14:31:47.000000000 -0400
60407+++ linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-08-05 20:33:55.000000000 -0400
60408@@ -125,7 +125,7 @@ struct cache_detail {
60409 */
60410 struct cache_req {
60411 struct cache_deferred_req *(*defer)(struct cache_req *req);
60412-};
60413+} __no_const;
60414 /* this must be embedded in a deferred_request that is being
60415 * delayed awaiting cache-fill
60416 */
60417diff -urNp linux-2.6.32.45/include/linux/sunrpc/clnt.h linux-2.6.32.45/include/linux/sunrpc/clnt.h
60418--- linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
60419+++ linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
60420@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
60421 {
60422 switch (sap->sa_family) {
60423 case AF_INET:
60424- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60425+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60426 case AF_INET6:
60427- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60428+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60429 }
60430 return 0;
60431 }
60432@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
60433 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60434 const struct sockaddr *src)
60435 {
60436- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60437+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60438 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60439
60440 dsin->sin_family = ssin->sin_family;
60441@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
60442 if (sa->sa_family != AF_INET6)
60443 return 0;
60444
60445- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60446+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60447 }
60448
60449 #endif /* __KERNEL__ */
60450diff -urNp linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h
60451--- linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
60452+++ linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
60453@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60454 extern unsigned int svcrdma_max_requests;
60455 extern unsigned int svcrdma_max_req_size;
60456
60457-extern atomic_t rdma_stat_recv;
60458-extern atomic_t rdma_stat_read;
60459-extern atomic_t rdma_stat_write;
60460-extern atomic_t rdma_stat_sq_starve;
60461-extern atomic_t rdma_stat_rq_starve;
60462-extern atomic_t rdma_stat_rq_poll;
60463-extern atomic_t rdma_stat_rq_prod;
60464-extern atomic_t rdma_stat_sq_poll;
60465-extern atomic_t rdma_stat_sq_prod;
60466+extern atomic_unchecked_t rdma_stat_recv;
60467+extern atomic_unchecked_t rdma_stat_read;
60468+extern atomic_unchecked_t rdma_stat_write;
60469+extern atomic_unchecked_t rdma_stat_sq_starve;
60470+extern atomic_unchecked_t rdma_stat_rq_starve;
60471+extern atomic_unchecked_t rdma_stat_rq_poll;
60472+extern atomic_unchecked_t rdma_stat_rq_prod;
60473+extern atomic_unchecked_t rdma_stat_sq_poll;
60474+extern atomic_unchecked_t rdma_stat_sq_prod;
60475
60476 #define RPCRDMA_VERSION 1
60477
60478diff -urNp linux-2.6.32.45/include/linux/suspend.h linux-2.6.32.45/include/linux/suspend.h
60479--- linux-2.6.32.45/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
60480+++ linux-2.6.32.45/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
60481@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
60482 * which require special recovery actions in that situation.
60483 */
60484 struct platform_suspend_ops {
60485- int (*valid)(suspend_state_t state);
60486- int (*begin)(suspend_state_t state);
60487- int (*prepare)(void);
60488- int (*prepare_late)(void);
60489- int (*enter)(suspend_state_t state);
60490- void (*wake)(void);
60491- void (*finish)(void);
60492- void (*end)(void);
60493- void (*recover)(void);
60494+ int (* const valid)(suspend_state_t state);
60495+ int (* const begin)(suspend_state_t state);
60496+ int (* const prepare)(void);
60497+ int (* const prepare_late)(void);
60498+ int (* const enter)(suspend_state_t state);
60499+ void (* const wake)(void);
60500+ void (* const finish)(void);
60501+ void (* const end)(void);
60502+ void (* const recover)(void);
60503 };
60504
60505 #ifdef CONFIG_SUSPEND
60506@@ -120,7 +120,7 @@ struct platform_suspend_ops {
60507 * suspend_set_ops - set platform dependent suspend operations
60508 * @ops: The new suspend operations to set.
60509 */
60510-extern void suspend_set_ops(struct platform_suspend_ops *ops);
60511+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
60512 extern int suspend_valid_only_mem(suspend_state_t state);
60513
60514 /**
60515@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
60516 #else /* !CONFIG_SUSPEND */
60517 #define suspend_valid_only_mem NULL
60518
60519-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
60520+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
60521 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
60522 #endif /* !CONFIG_SUSPEND */
60523
60524@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
60525 * platforms which require special recovery actions in that situation.
60526 */
60527 struct platform_hibernation_ops {
60528- int (*begin)(void);
60529- void (*end)(void);
60530- int (*pre_snapshot)(void);
60531- void (*finish)(void);
60532- int (*prepare)(void);
60533- int (*enter)(void);
60534- void (*leave)(void);
60535- int (*pre_restore)(void);
60536- void (*restore_cleanup)(void);
60537- void (*recover)(void);
60538+ int (* const begin)(void);
60539+ void (* const end)(void);
60540+ int (* const pre_snapshot)(void);
60541+ void (* const finish)(void);
60542+ int (* const prepare)(void);
60543+ int (* const enter)(void);
60544+ void (* const leave)(void);
60545+ int (* const pre_restore)(void);
60546+ void (* const restore_cleanup)(void);
60547+ void (* const recover)(void);
60548 };
60549
60550 #ifdef CONFIG_HIBERNATION
60551@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
60552 extern void swsusp_unset_page_free(struct page *);
60553 extern unsigned long get_safe_page(gfp_t gfp_mask);
60554
60555-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
60556+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
60557 extern int hibernate(void);
60558 extern bool system_entering_hibernation(void);
60559 #else /* CONFIG_HIBERNATION */
60560@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
60561 static inline void swsusp_set_page_free(struct page *p) {}
60562 static inline void swsusp_unset_page_free(struct page *p) {}
60563
60564-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
60565+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
60566 static inline int hibernate(void) { return -ENOSYS; }
60567 static inline bool system_entering_hibernation(void) { return false; }
60568 #endif /* CONFIG_HIBERNATION */
60569diff -urNp linux-2.6.32.45/include/linux/sysctl.h linux-2.6.32.45/include/linux/sysctl.h
60570--- linux-2.6.32.45/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
60571+++ linux-2.6.32.45/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
60572@@ -164,7 +164,11 @@ enum
60573 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60574 };
60575
60576-
60577+#ifdef CONFIG_PAX_SOFTMODE
60578+enum {
60579+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60580+};
60581+#endif
60582
60583 /* CTL_VM names: */
60584 enum
60585@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
60586
60587 extern int proc_dostring(struct ctl_table *, int,
60588 void __user *, size_t *, loff_t *);
60589+extern int proc_dostring_modpriv(struct ctl_table *, int,
60590+ void __user *, size_t *, loff_t *);
60591 extern int proc_dointvec(struct ctl_table *, int,
60592 void __user *, size_t *, loff_t *);
60593 extern int proc_dointvec_minmax(struct ctl_table *, int,
60594@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
60595
60596 extern ctl_handler sysctl_data;
60597 extern ctl_handler sysctl_string;
60598+extern ctl_handler sysctl_string_modpriv;
60599 extern ctl_handler sysctl_intvec;
60600 extern ctl_handler sysctl_jiffies;
60601 extern ctl_handler sysctl_ms_jiffies;
60602diff -urNp linux-2.6.32.45/include/linux/sysfs.h linux-2.6.32.45/include/linux/sysfs.h
60603--- linux-2.6.32.45/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
60604+++ linux-2.6.32.45/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
60605@@ -75,8 +75,8 @@ struct bin_attribute {
60606 };
60607
60608 struct sysfs_ops {
60609- ssize_t (*show)(struct kobject *, struct attribute *,char *);
60610- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
60611+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
60612+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
60613 };
60614
60615 struct sysfs_dirent;
60616diff -urNp linux-2.6.32.45/include/linux/thread_info.h linux-2.6.32.45/include/linux/thread_info.h
60617--- linux-2.6.32.45/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
60618+++ linux-2.6.32.45/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
60619@@ -23,7 +23,7 @@ struct restart_block {
60620 };
60621 /* For futex_wait and futex_wait_requeue_pi */
60622 struct {
60623- u32 *uaddr;
60624+ u32 __user *uaddr;
60625 u32 val;
60626 u32 flags;
60627 u32 bitset;
60628diff -urNp linux-2.6.32.45/include/linux/tty.h linux-2.6.32.45/include/linux/tty.h
60629--- linux-2.6.32.45/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
60630+++ linux-2.6.32.45/include/linux/tty.h 2011-08-05 20:33:55.000000000 -0400
60631@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
60632 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
60633 extern void tty_ldisc_enable(struct tty_struct *tty);
60634
60635-
60636 /* n_tty.c */
60637 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
60638
60639diff -urNp linux-2.6.32.45/include/linux/tty_ldisc.h linux-2.6.32.45/include/linux/tty_ldisc.h
60640--- linux-2.6.32.45/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
60641+++ linux-2.6.32.45/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
60642@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
60643
60644 struct module *owner;
60645
60646- int refcount;
60647+ atomic_t refcount;
60648 };
60649
60650 struct tty_ldisc {
60651diff -urNp linux-2.6.32.45/include/linux/types.h linux-2.6.32.45/include/linux/types.h
60652--- linux-2.6.32.45/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
60653+++ linux-2.6.32.45/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
60654@@ -191,10 +191,26 @@ typedef struct {
60655 volatile int counter;
60656 } atomic_t;
60657
60658+#ifdef CONFIG_PAX_REFCOUNT
60659+typedef struct {
60660+ volatile int counter;
60661+} atomic_unchecked_t;
60662+#else
60663+typedef atomic_t atomic_unchecked_t;
60664+#endif
60665+
60666 #ifdef CONFIG_64BIT
60667 typedef struct {
60668 volatile long counter;
60669 } atomic64_t;
60670+
60671+#ifdef CONFIG_PAX_REFCOUNT
60672+typedef struct {
60673+ volatile long counter;
60674+} atomic64_unchecked_t;
60675+#else
60676+typedef atomic64_t atomic64_unchecked_t;
60677+#endif
60678 #endif
60679
60680 struct ustat {
60681diff -urNp linux-2.6.32.45/include/linux/uaccess.h linux-2.6.32.45/include/linux/uaccess.h
60682--- linux-2.6.32.45/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
60683+++ linux-2.6.32.45/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
60684@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60685 long ret; \
60686 mm_segment_t old_fs = get_fs(); \
60687 \
60688- set_fs(KERNEL_DS); \
60689 pagefault_disable(); \
60690+ set_fs(KERNEL_DS); \
60691 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60692- pagefault_enable(); \
60693 set_fs(old_fs); \
60694+ pagefault_enable(); \
60695 ret; \
60696 })
60697
60698@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
60699 * Safely read from address @src to the buffer at @dst. If a kernel fault
60700 * happens, handle that and return -EFAULT.
60701 */
60702-extern long probe_kernel_read(void *dst, void *src, size_t size);
60703+extern long probe_kernel_read(void *dst, const void *src, size_t size);
60704
60705 /*
60706 * probe_kernel_write(): safely attempt to write to a location
60707@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
60708 * Safely write to address @dst from the buffer at @src. If a kernel fault
60709 * happens, handle that and return -EFAULT.
60710 */
60711-extern long probe_kernel_write(void *dst, void *src, size_t size);
60712+extern long probe_kernel_write(void *dst, const void *src, size_t size);
60713
60714 #endif /* __LINUX_UACCESS_H__ */
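Beyond reordering set_fs() against pagefault_disable(), the hunk above constifies the source argument of probe_kernel_read()/probe_kernel_write(), so callers can pass const pointers without casts. For context, a typical caller looks like this hedged, illustrative sketch:

#include <linux/types.h>
#include <linux/uaccess.h>

/* Read one unsigned long from a kernel address that may not be mapped;
 * probe_kernel_read() returns -EFAULT instead of oopsing on a bad address. */
static bool peek_kernel_ulong(const void *addr, unsigned long *out)
{
	unsigned long val;

	if (probe_kernel_read(&val, addr, sizeof(val)))
		return false;

	*out = val;
	return true;
}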
60715diff -urNp linux-2.6.32.45/include/linux/unaligned/access_ok.h linux-2.6.32.45/include/linux/unaligned/access_ok.h
60716--- linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
60717+++ linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
60718@@ -6,32 +6,32 @@
60719
60720 static inline u16 get_unaligned_le16(const void *p)
60721 {
60722- return le16_to_cpup((__le16 *)p);
60723+ return le16_to_cpup((const __le16 *)p);
60724 }
60725
60726 static inline u32 get_unaligned_le32(const void *p)
60727 {
60728- return le32_to_cpup((__le32 *)p);
60729+ return le32_to_cpup((const __le32 *)p);
60730 }
60731
60732 static inline u64 get_unaligned_le64(const void *p)
60733 {
60734- return le64_to_cpup((__le64 *)p);
60735+ return le64_to_cpup((const __le64 *)p);
60736 }
60737
60738 static inline u16 get_unaligned_be16(const void *p)
60739 {
60740- return be16_to_cpup((__be16 *)p);
60741+ return be16_to_cpup((const __be16 *)p);
60742 }
60743
60744 static inline u32 get_unaligned_be32(const void *p)
60745 {
60746- return be32_to_cpup((__be32 *)p);
60747+ return be32_to_cpup((const __be32 *)p);
60748 }
60749
60750 static inline u64 get_unaligned_be64(const void *p)
60751 {
60752- return be64_to_cpup((__be64 *)p);
60753+ return be64_to_cpup((const __be64 *)p);
60754 }
60755
60756 static inline void put_unaligned_le16(u16 val, void *p)
60757diff -urNp linux-2.6.32.45/include/linux/vmalloc.h linux-2.6.32.45/include/linux/vmalloc.h
60758--- linux-2.6.32.45/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
60759+++ linux-2.6.32.45/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
60760@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
60761 #define VM_MAP 0x00000004 /* vmap()ed pages */
60762 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60763 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60764+
60765+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60766+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
60767+#endif
60768+
60769 /* bits [20..32] reserved for arch specific ioremap internals */
60770
60771 /*
60772@@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
60773
60774 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
60775
60776+#define vmalloc(x) \
60777+({ \
60778+ void *___retval; \
60779+ intoverflow_t ___x = (intoverflow_t)x; \
60780+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60781+ ___retval = NULL; \
60782+ else \
60783+ ___retval = vmalloc((unsigned long)___x); \
60784+ ___retval; \
60785+})
60786+
60787+#define __vmalloc(x, y, z) \
60788+({ \
60789+ void *___retval; \
60790+ intoverflow_t ___x = (intoverflow_t)x; \
60791+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60792+ ___retval = NULL; \
60793+ else \
60794+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60795+ ___retval; \
60796+})
60797+
60798+#define vmalloc_user(x) \
60799+({ \
60800+ void *___retval; \
60801+ intoverflow_t ___x = (intoverflow_t)x; \
60802+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60803+ ___retval = NULL; \
60804+ else \
60805+ ___retval = vmalloc_user((unsigned long)___x); \
60806+ ___retval; \
60807+})
60808+
60809+#define vmalloc_exec(x) \
60810+({ \
60811+ void *___retval; \
60812+ intoverflow_t ___x = (intoverflow_t)x; \
60813+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60814+ ___retval = NULL; \
60815+ else \
60816+ ___retval = vmalloc_exec((unsigned long)___x); \
60817+ ___retval; \
60818+})
60819+
60820+#define vmalloc_node(x, y) \
60821+({ \
60822+ void *___retval; \
60823+ intoverflow_t ___x = (intoverflow_t)x; \
60824+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
60825+ ___retval = NULL; \
60826+ else \
60827+ ___retval = vmalloc_node((unsigned long)___x, (y));\
60828+ ___retval; \
60829+})
60830+
60831+#define vmalloc_32(x) \
60832+({ \
60833+ void *___retval; \
60834+ intoverflow_t ___x = (intoverflow_t)x; \
60835+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
60836+ ___retval = NULL; \
60837+ else \
60838+ ___retval = vmalloc_32((unsigned long)___x); \
60839+ ___retval; \
60840+})
60841+
60842+#define vmalloc_32_user(x) \
60843+({ \
60844+ void *___retval; \
60845+ intoverflow_t ___x = (intoverflow_t)x; \
60846+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
60847+ ___retval = NULL; \
60848+ else \
60849+ ___retval = vmalloc_32_user((unsigned long)___x);\
60850+ ___retval; \
60851+})
60852+
60853 #endif /* _LINUX_VMALLOC_H */
60854diff -urNp linux-2.6.32.45/include/linux/vmstat.h linux-2.6.32.45/include/linux/vmstat.h
60855--- linux-2.6.32.45/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
60856+++ linux-2.6.32.45/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
60857@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
60858 /*
60859 * Zone based page accounting with per cpu differentials.
60860 */
60861-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60862+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60863
60864 static inline void zone_page_state_add(long x, struct zone *zone,
60865 enum zone_stat_item item)
60866 {
60867- atomic_long_add(x, &zone->vm_stat[item]);
60868- atomic_long_add(x, &vm_stat[item]);
60869+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
60870+ atomic_long_add_unchecked(x, &vm_stat[item]);
60871 }
60872
60873 static inline unsigned long global_page_state(enum zone_stat_item item)
60874 {
60875- long x = atomic_long_read(&vm_stat[item]);
60876+ long x = atomic_long_read_unchecked(&vm_stat[item]);
60877 #ifdef CONFIG_SMP
60878 if (x < 0)
60879 x = 0;
60880@@ -158,7 +158,7 @@ static inline unsigned long global_page_
60881 static inline unsigned long zone_page_state(struct zone *zone,
60882 enum zone_stat_item item)
60883 {
60884- long x = atomic_long_read(&zone->vm_stat[item]);
60885+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60886 #ifdef CONFIG_SMP
60887 if (x < 0)
60888 x = 0;
60889@@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
60890 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
60891 enum zone_stat_item item)
60892 {
60893- long x = atomic_long_read(&zone->vm_stat[item]);
60894+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60895
60896 #ifdef CONFIG_SMP
60897 int cpu;
60898@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
60899
60900 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
60901 {
60902- atomic_long_inc(&zone->vm_stat[item]);
60903- atomic_long_inc(&vm_stat[item]);
60904+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
60905+ atomic_long_inc_unchecked(&vm_stat[item]);
60906 }
60907
60908 static inline void __inc_zone_page_state(struct page *page,
60909@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
60910
60911 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
60912 {
60913- atomic_long_dec(&zone->vm_stat[item]);
60914- atomic_long_dec(&vm_stat[item]);
60915+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
60916+ atomic_long_dec_unchecked(&vm_stat[item]);
60917 }
60918
60919 static inline void __dec_zone_page_state(struct page *page,
60920diff -urNp linux-2.6.32.45/include/media/saa7146_vv.h linux-2.6.32.45/include/media/saa7146_vv.h
60921--- linux-2.6.32.45/include/media/saa7146_vv.h 2011-03-27 14:31:47.000000000 -0400
60922+++ linux-2.6.32.45/include/media/saa7146_vv.h 2011-08-23 21:22:38.000000000 -0400
60923@@ -167,7 +167,7 @@ struct saa7146_ext_vv
60924 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
60925
60926 /* the extension can override this */
60927- struct v4l2_ioctl_ops ops;
60928+ v4l2_ioctl_ops_no_const ops;
60929 /* pointer to the saa7146 core ops */
60930 const struct v4l2_ioctl_ops *core_ops;
60931
60932diff -urNp linux-2.6.32.45/include/media/v4l2-dev.h linux-2.6.32.45/include/media/v4l2-dev.h
60933--- linux-2.6.32.45/include/media/v4l2-dev.h 2011-03-27 14:31:47.000000000 -0400
60934+++ linux-2.6.32.45/include/media/v4l2-dev.h 2011-08-05 20:33:55.000000000 -0400
60935@@ -34,7 +34,7 @@ struct v4l2_device;
60936 #define V4L2_FL_UNREGISTERED (0)
60937
60938 struct v4l2_file_operations {
60939- struct module *owner;
60940+ struct module * const owner;
60941 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
60942 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
60943 unsigned int (*poll) (struct file *, struct poll_table_struct *);
60944diff -urNp linux-2.6.32.45/include/media/v4l2-device.h linux-2.6.32.45/include/media/v4l2-device.h
60945--- linux-2.6.32.45/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
60946+++ linux-2.6.32.45/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
60947@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
60948 this function returns 0. If the name ends with a digit (e.g. cx18),
60949 then the name will be set to cx18-0 since cx180 looks really odd. */
60950 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
60951- atomic_t *instance);
60952+ atomic_unchecked_t *instance);
60953
60954 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
60955 Since the parent disappears this ensures that v4l2_dev doesn't have an
60956diff -urNp linux-2.6.32.45/include/media/v4l2-ioctl.h linux-2.6.32.45/include/media/v4l2-ioctl.h
60957--- linux-2.6.32.45/include/media/v4l2-ioctl.h 2011-03-27 14:31:47.000000000 -0400
60958+++ linux-2.6.32.45/include/media/v4l2-ioctl.h 2011-08-23 21:22:38.000000000 -0400
60959@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
60960 long (*vidioc_default) (struct file *file, void *fh,
60961 int cmd, void *arg);
60962 };
60963+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
60964
60965
60966 /* v4l debugging and diagnostics */
60967diff -urNp linux-2.6.32.45/include/net/flow.h linux-2.6.32.45/include/net/flow.h
60968--- linux-2.6.32.45/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
60969+++ linux-2.6.32.45/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
60970@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
60971 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
60972 u8 dir, flow_resolve_t resolver);
60973 extern void flow_cache_flush(void);
60974-extern atomic_t flow_cache_genid;
60975+extern atomic_unchecked_t flow_cache_genid;
60976
60977 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
60978 {
60979diff -urNp linux-2.6.32.45/include/net/inetpeer.h linux-2.6.32.45/include/net/inetpeer.h
60980--- linux-2.6.32.45/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
60981+++ linux-2.6.32.45/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
60982@@ -24,7 +24,7 @@ struct inet_peer
60983 __u32 dtime; /* the time of last use of not
60984 * referenced entries */
60985 atomic_t refcnt;
60986- atomic_t rid; /* Frag reception counter */
60987+ atomic_unchecked_t rid; /* Frag reception counter */
60988 __u32 tcp_ts;
60989 unsigned long tcp_ts_stamp;
60990 };
60991diff -urNp linux-2.6.32.45/include/net/ip_vs.h linux-2.6.32.45/include/net/ip_vs.h
60992--- linux-2.6.32.45/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
60993+++ linux-2.6.32.45/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
60994@@ -365,7 +365,7 @@ struct ip_vs_conn {
60995 struct ip_vs_conn *control; /* Master control connection */
60996 atomic_t n_control; /* Number of controlled ones */
60997 struct ip_vs_dest *dest; /* real server */
60998- atomic_t in_pkts; /* incoming packet counter */
60999+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61000
61001 /* packet transmitter for different forwarding methods. If it
61002 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61003@@ -466,7 +466,7 @@ struct ip_vs_dest {
61004 union nf_inet_addr addr; /* IP address of the server */
61005 __be16 port; /* port number of the server */
61006 volatile unsigned flags; /* dest status flags */
61007- atomic_t conn_flags; /* flags to copy to conn */
61008+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61009 atomic_t weight; /* server weight */
61010
61011 atomic_t refcnt; /* reference counter */
61012diff -urNp linux-2.6.32.45/include/net/irda/ircomm_core.h linux-2.6.32.45/include/net/irda/ircomm_core.h
61013--- linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-03-27 14:31:47.000000000 -0400
61014+++ linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-08-05 20:33:55.000000000 -0400
61015@@ -51,7 +51,7 @@ typedef struct {
61016 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61017 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61018 struct ircomm_info *);
61019-} call_t;
61020+} __no_const call_t;
61021
61022 struct ircomm_cb {
61023 irda_queue_t queue;
61024diff -urNp linux-2.6.32.45/include/net/irda/ircomm_tty.h linux-2.6.32.45/include/net/irda/ircomm_tty.h
61025--- linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
61026+++ linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
61027@@ -35,6 +35,7 @@
61028 #include <linux/termios.h>
61029 #include <linux/timer.h>
61030 #include <linux/tty.h> /* struct tty_struct */
61031+#include <asm/local.h>
61032
61033 #include <net/irda/irias_object.h>
61034 #include <net/irda/ircomm_core.h>
61035@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61036 unsigned short close_delay;
61037 unsigned short closing_wait; /* time to wait before closing */
61038
61039- int open_count;
61040- int blocked_open; /* # of blocked opens */
61041+ local_t open_count;
61042+ local_t blocked_open; /* # of blocked opens */
61043
61044 /* Protect concurent access to :
61045 * o self->open_count
61046diff -urNp linux-2.6.32.45/include/net/iucv/af_iucv.h linux-2.6.32.45/include/net/iucv/af_iucv.h
61047--- linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
61048+++ linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
61049@@ -87,7 +87,7 @@ struct iucv_sock {
61050 struct iucv_sock_list {
61051 struct hlist_head head;
61052 rwlock_t lock;
61053- atomic_t autobind_name;
61054+ atomic_unchecked_t autobind_name;
61055 };
61056
61057 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61058diff -urNp linux-2.6.32.45/include/net/lapb.h linux-2.6.32.45/include/net/lapb.h
61059--- linux-2.6.32.45/include/net/lapb.h 2011-03-27 14:31:47.000000000 -0400
61060+++ linux-2.6.32.45/include/net/lapb.h 2011-08-05 20:33:55.000000000 -0400
61061@@ -95,7 +95,7 @@ struct lapb_cb {
61062 struct sk_buff_head write_queue;
61063 struct sk_buff_head ack_queue;
61064 unsigned char window;
61065- struct lapb_register_struct callbacks;
61066+ struct lapb_register_struct *callbacks;
61067
61068 /* FRMR control information */
61069 struct lapb_frame frmr_data;
61070diff -urNp linux-2.6.32.45/include/net/neighbour.h linux-2.6.32.45/include/net/neighbour.h
61071--- linux-2.6.32.45/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
61072+++ linux-2.6.32.45/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
61073@@ -125,12 +125,12 @@ struct neighbour
61074 struct neigh_ops
61075 {
61076 int family;
61077- void (*solicit)(struct neighbour *, struct sk_buff*);
61078- void (*error_report)(struct neighbour *, struct sk_buff*);
61079- int (*output)(struct sk_buff*);
61080- int (*connected_output)(struct sk_buff*);
61081- int (*hh_output)(struct sk_buff*);
61082- int (*queue_xmit)(struct sk_buff*);
61083+ void (* const solicit)(struct neighbour *, struct sk_buff*);
61084+ void (* const error_report)(struct neighbour *, struct sk_buff*);
61085+ int (* const output)(struct sk_buff*);
61086+ int (* const connected_output)(struct sk_buff*);
61087+ int (* const hh_output)(struct sk_buff*);
61088+ int (* const queue_xmit)(struct sk_buff*);
61089 };
61090
61091 struct pneigh_entry
61092diff -urNp linux-2.6.32.45/include/net/netlink.h linux-2.6.32.45/include/net/netlink.h
61093--- linux-2.6.32.45/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
61094+++ linux-2.6.32.45/include/net/netlink.h 2011-08-21 18:08:11.000000000 -0400
61095@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct
61096 {
61097 return (remaining >= (int) sizeof(struct nlmsghdr) &&
61098 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
61099- nlh->nlmsg_len <= remaining);
61100+ nlh->nlmsg_len <= (unsigned int)remaining);
61101 }
61102
61103 /**
61104@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
61105 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61106 {
61107 if (mark)
61108- skb_trim(skb, (unsigned char *) mark - skb->data);
61109+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61110 }
61111
61112 /**
61113diff -urNp linux-2.6.32.45/include/net/netns/ipv4.h linux-2.6.32.45/include/net/netns/ipv4.h
61114--- linux-2.6.32.45/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
61115+++ linux-2.6.32.45/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
61116@@ -54,7 +54,7 @@ struct netns_ipv4 {
61117 int current_rt_cache_rebuild_count;
61118
61119 struct timer_list rt_secret_timer;
61120- atomic_t rt_genid;
61121+ atomic_unchecked_t rt_genid;
61122
61123 #ifdef CONFIG_IP_MROUTE
61124 struct sock *mroute_sk;
61125diff -urNp linux-2.6.32.45/include/net/sctp/sctp.h linux-2.6.32.45/include/net/sctp/sctp.h
61126--- linux-2.6.32.45/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
61127+++ linux-2.6.32.45/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
61128@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
61129
61130 #else /* SCTP_DEBUG */
61131
61132-#define SCTP_DEBUG_PRINTK(whatever...)
61133-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61134+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61135+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61136 #define SCTP_ENABLE_DEBUG
61137 #define SCTP_DISABLE_DEBUG
61138 #define SCTP_ASSERT(expr, str, func)
61139diff -urNp linux-2.6.32.45/include/net/secure_seq.h linux-2.6.32.45/include/net/secure_seq.h
61140--- linux-2.6.32.45/include/net/secure_seq.h 2011-08-16 20:37:25.000000000 -0400
61141+++ linux-2.6.32.45/include/net/secure_seq.h 2011-08-07 19:48:09.000000000 -0400
61142@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
61143 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
61144 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
61145 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
61146- __be16 dport);
61147+ __be16 dport);
61148 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
61149 __be16 sport, __be16 dport);
61150 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61151- __be16 sport, __be16 dport);
61152+ __be16 sport, __be16 dport);
61153 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
61154- __be16 sport, __be16 dport);
61155+ __be16 sport, __be16 dport);
61156 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61157- __be16 sport, __be16 dport);
61158+ __be16 sport, __be16 dport);
61159
61160 #endif /* _NET_SECURE_SEQ */
61161diff -urNp linux-2.6.32.45/include/net/sock.h linux-2.6.32.45/include/net/sock.h
61162--- linux-2.6.32.45/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
61163+++ linux-2.6.32.45/include/net/sock.h 2011-08-21 17:24:37.000000000 -0400
61164@@ -272,7 +272,7 @@ struct sock {
61165 rwlock_t sk_callback_lock;
61166 int sk_err,
61167 sk_err_soft;
61168- atomic_t sk_drops;
61169+ atomic_unchecked_t sk_drops;
61170 unsigned short sk_ack_backlog;
61171 unsigned short sk_max_ack_backlog;
61172 __u32 sk_priority;
61173@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_relea
61174 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
61175 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
61176 #else
61177-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
61178+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
61179 int inc)
61180 {
61181 }
61182diff -urNp linux-2.6.32.45/include/net/tcp.h linux-2.6.32.45/include/net/tcp.h
61183--- linux-2.6.32.45/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
61184+++ linux-2.6.32.45/include/net/tcp.h 2011-08-23 21:29:10.000000000 -0400
61185@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
61186 struct tcp_seq_afinfo {
61187 char *name;
61188 sa_family_t family;
61189- struct file_operations seq_fops;
61190- struct seq_operations seq_ops;
61191+ file_operations_no_const seq_fops;
61192+ seq_operations_no_const seq_ops;
61193 };
61194
61195 struct tcp_iter_state {
61196diff -urNp linux-2.6.32.45/include/net/udp.h linux-2.6.32.45/include/net/udp.h
61197--- linux-2.6.32.45/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
61198+++ linux-2.6.32.45/include/net/udp.h 2011-08-23 21:29:34.000000000 -0400
61199@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
61200 char *name;
61201 sa_family_t family;
61202 struct udp_table *udp_table;
61203- struct file_operations seq_fops;
61204- struct seq_operations seq_ops;
61205+ file_operations_no_const seq_fops;
61206+ seq_operations_no_const seq_ops;
61207 };
61208
61209 struct udp_iter_state {
61210diff -urNp linux-2.6.32.45/include/rdma/iw_cm.h linux-2.6.32.45/include/rdma/iw_cm.h
61211--- linux-2.6.32.45/include/rdma/iw_cm.h 2011-03-27 14:31:47.000000000 -0400
61212+++ linux-2.6.32.45/include/rdma/iw_cm.h 2011-08-05 20:33:55.000000000 -0400
61213@@ -129,7 +129,7 @@ struct iw_cm_verbs {
61214 int backlog);
61215
61216 int (*destroy_listen)(struct iw_cm_id *cm_id);
61217-};
61218+} __no_const;
61219
61220 /**
61221 * iw_create_cm_id - Create an IW CM identifier.
61222diff -urNp linux-2.6.32.45/include/scsi/libfc.h linux-2.6.32.45/include/scsi/libfc.h
61223--- linux-2.6.32.45/include/scsi/libfc.h 2011-03-27 14:31:47.000000000 -0400
61224+++ linux-2.6.32.45/include/scsi/libfc.h 2011-08-23 21:22:38.000000000 -0400
61225@@ -675,6 +675,7 @@ struct libfc_function_template {
61226 */
61227 void (*disc_stop_final) (struct fc_lport *);
61228 };
61229+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61230
61231 /* information used by the discovery layer */
61232 struct fc_disc {
61233@@ -707,7 +708,7 @@ struct fc_lport {
61234 struct fc_disc disc;
61235
61236 /* Operational Information */
61237- struct libfc_function_template tt;
61238+ libfc_function_template_no_const tt;
61239 u8 link_up;
61240 u8 qfull;
61241 enum fc_lport_state state;
61242diff -urNp linux-2.6.32.45/include/scsi/scsi_device.h linux-2.6.32.45/include/scsi/scsi_device.h
61243--- linux-2.6.32.45/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
61244+++ linux-2.6.32.45/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
61245@@ -156,9 +156,9 @@ struct scsi_device {
61246 unsigned int max_device_blocked; /* what device_blocked counts down from */
61247 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61248
61249- atomic_t iorequest_cnt;
61250- atomic_t iodone_cnt;
61251- atomic_t ioerr_cnt;
61252+ atomic_unchecked_t iorequest_cnt;
61253+ atomic_unchecked_t iodone_cnt;
61254+ atomic_unchecked_t ioerr_cnt;
61255
61256 struct device sdev_gendev,
61257 sdev_dev;
61258diff -urNp linux-2.6.32.45/include/scsi/scsi_transport_fc.h linux-2.6.32.45/include/scsi/scsi_transport_fc.h
61259--- linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-03-27 14:31:47.000000000 -0400
61260+++ linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-08-05 20:33:55.000000000 -0400
61261@@ -663,9 +663,9 @@ struct fc_function_template {
61262 int (*bsg_timeout)(struct fc_bsg_job *);
61263
61264 /* allocation lengths for host-specific data */
61265- u32 dd_fcrport_size;
61266- u32 dd_fcvport_size;
61267- u32 dd_bsg_size;
61268+ const u32 dd_fcrport_size;
61269+ const u32 dd_fcvport_size;
61270+ const u32 dd_bsg_size;
61271
61272 /*
61273 * The driver sets these to tell the transport class it
61274@@ -675,39 +675,39 @@ struct fc_function_template {
61275 */
61276
61277 /* remote port fixed attributes */
61278- unsigned long show_rport_maxframe_size:1;
61279- unsigned long show_rport_supported_classes:1;
61280- unsigned long show_rport_dev_loss_tmo:1;
61281+ const unsigned long show_rport_maxframe_size:1;
61282+ const unsigned long show_rport_supported_classes:1;
61283+ const unsigned long show_rport_dev_loss_tmo:1;
61284
61285 /*
61286 * target dynamic attributes
61287 * These should all be "1" if the driver uses the remote port
61288 * add/delete functions (so attributes reflect rport values).
61289 */
61290- unsigned long show_starget_node_name:1;
61291- unsigned long show_starget_port_name:1;
61292- unsigned long show_starget_port_id:1;
61293+ const unsigned long show_starget_node_name:1;
61294+ const unsigned long show_starget_port_name:1;
61295+ const unsigned long show_starget_port_id:1;
61296
61297 /* host fixed attributes */
61298- unsigned long show_host_node_name:1;
61299- unsigned long show_host_port_name:1;
61300- unsigned long show_host_permanent_port_name:1;
61301- unsigned long show_host_supported_classes:1;
61302- unsigned long show_host_supported_fc4s:1;
61303- unsigned long show_host_supported_speeds:1;
61304- unsigned long show_host_maxframe_size:1;
61305- unsigned long show_host_serial_number:1;
61306+ const unsigned long show_host_node_name:1;
61307+ const unsigned long show_host_port_name:1;
61308+ const unsigned long show_host_permanent_port_name:1;
61309+ const unsigned long show_host_supported_classes:1;
61310+ const unsigned long show_host_supported_fc4s:1;
61311+ const unsigned long show_host_supported_speeds:1;
61312+ const unsigned long show_host_maxframe_size:1;
61313+ const unsigned long show_host_serial_number:1;
61314 /* host dynamic attributes */
61315- unsigned long show_host_port_id:1;
61316- unsigned long show_host_port_type:1;
61317- unsigned long show_host_port_state:1;
61318- unsigned long show_host_active_fc4s:1;
61319- unsigned long show_host_speed:1;
61320- unsigned long show_host_fabric_name:1;
61321- unsigned long show_host_symbolic_name:1;
61322- unsigned long show_host_system_hostname:1;
61323+ const unsigned long show_host_port_id:1;
61324+ const unsigned long show_host_port_type:1;
61325+ const unsigned long show_host_port_state:1;
61326+ const unsigned long show_host_active_fc4s:1;
61327+ const unsigned long show_host_speed:1;
61328+ const unsigned long show_host_fabric_name:1;
61329+ const unsigned long show_host_symbolic_name:1;
61330+ const unsigned long show_host_system_hostname:1;
61331
61332- unsigned long disable_target_scan:1;
61333+ const unsigned long disable_target_scan:1;
61334 };
61335
61336
61337diff -urNp linux-2.6.32.45/include/sound/ac97_codec.h linux-2.6.32.45/include/sound/ac97_codec.h
61338--- linux-2.6.32.45/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
61339+++ linux-2.6.32.45/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
61340@@ -419,15 +419,15 @@
61341 struct snd_ac97;
61342
61343 struct snd_ac97_build_ops {
61344- int (*build_3d) (struct snd_ac97 *ac97);
61345- int (*build_specific) (struct snd_ac97 *ac97);
61346- int (*build_spdif) (struct snd_ac97 *ac97);
61347- int (*build_post_spdif) (struct snd_ac97 *ac97);
61348+ int (* const build_3d) (struct snd_ac97 *ac97);
61349+ int (* const build_specific) (struct snd_ac97 *ac97);
61350+ int (* const build_spdif) (struct snd_ac97 *ac97);
61351+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
61352 #ifdef CONFIG_PM
61353- void (*suspend) (struct snd_ac97 *ac97);
61354- void (*resume) (struct snd_ac97 *ac97);
61355+ void (* const suspend) (struct snd_ac97 *ac97);
61356+ void (* const resume) (struct snd_ac97 *ac97);
61357 #endif
61358- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61359+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61360 };
61361
61362 struct snd_ac97_bus_ops {
61363@@ -477,7 +477,7 @@ struct snd_ac97_template {
61364
61365 struct snd_ac97 {
61366 /* -- lowlevel (hardware) driver specific -- */
61367- struct snd_ac97_build_ops * build_ops;
61368+ const struct snd_ac97_build_ops * build_ops;
61369 void *private_data;
61370 void (*private_free) (struct snd_ac97 *ac97);
61371 /* --- */
61372diff -urNp linux-2.6.32.45/include/sound/ak4xxx-adda.h linux-2.6.32.45/include/sound/ak4xxx-adda.h
61373--- linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-03-27 14:31:47.000000000 -0400
61374+++ linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-08-05 20:33:55.000000000 -0400
61375@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61376 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61377 unsigned char val);
61378 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61379-};
61380+} __no_const;
61381
61382 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61383
61384diff -urNp linux-2.6.32.45/include/sound/hwdep.h linux-2.6.32.45/include/sound/hwdep.h
61385--- linux-2.6.32.45/include/sound/hwdep.h 2011-03-27 14:31:47.000000000 -0400
61386+++ linux-2.6.32.45/include/sound/hwdep.h 2011-08-05 20:33:55.000000000 -0400
61387@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61388 struct snd_hwdep_dsp_status *status);
61389 int (*dsp_load)(struct snd_hwdep *hw,
61390 struct snd_hwdep_dsp_image *image);
61391-};
61392+} __no_const;
61393
61394 struct snd_hwdep {
61395 struct snd_card *card;
61396diff -urNp linux-2.6.32.45/include/sound/info.h linux-2.6.32.45/include/sound/info.h
61397--- linux-2.6.32.45/include/sound/info.h 2011-03-27 14:31:47.000000000 -0400
61398+++ linux-2.6.32.45/include/sound/info.h 2011-08-05 20:33:55.000000000 -0400
61399@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61400 struct snd_info_buffer *buffer);
61401 void (*write)(struct snd_info_entry *entry,
61402 struct snd_info_buffer *buffer);
61403-};
61404+} __no_const;
61405
61406 struct snd_info_entry_ops {
61407 int (*open)(struct snd_info_entry *entry,
61408diff -urNp linux-2.6.32.45/include/sound/pcm.h linux-2.6.32.45/include/sound/pcm.h
61409--- linux-2.6.32.45/include/sound/pcm.h 2011-03-27 14:31:47.000000000 -0400
61410+++ linux-2.6.32.45/include/sound/pcm.h 2011-08-23 21:22:38.000000000 -0400
61411@@ -80,6 +80,7 @@ struct snd_pcm_ops {
61412 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61413 int (*ack)(struct snd_pcm_substream *substream);
61414 };
61415+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61416
61417 /*
61418 *
61419diff -urNp linux-2.6.32.45/include/sound/sb16_csp.h linux-2.6.32.45/include/sound/sb16_csp.h
61420--- linux-2.6.32.45/include/sound/sb16_csp.h 2011-03-27 14:31:47.000000000 -0400
61421+++ linux-2.6.32.45/include/sound/sb16_csp.h 2011-08-05 20:33:55.000000000 -0400
61422@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
61423 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61424 int (*csp_stop) (struct snd_sb_csp * p);
61425 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61426-};
61427+} __no_const;
61428
61429 /*
61430 * CSP private data
61431diff -urNp linux-2.6.32.45/include/sound/ymfpci.h linux-2.6.32.45/include/sound/ymfpci.h
61432--- linux-2.6.32.45/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
61433+++ linux-2.6.32.45/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
61434@@ -358,7 +358,7 @@ struct snd_ymfpci {
61435 spinlock_t reg_lock;
61436 spinlock_t voice_lock;
61437 wait_queue_head_t interrupt_sleep;
61438- atomic_t interrupt_sleep_count;
61439+ atomic_unchecked_t interrupt_sleep_count;
61440 struct snd_info_entry *proc_entry;
61441 const struct firmware *dsp_microcode;
61442 const struct firmware *controller_microcode;
61443diff -urNp linux-2.6.32.45/include/trace/events/irq.h linux-2.6.32.45/include/trace/events/irq.h
61444--- linux-2.6.32.45/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
61445+++ linux-2.6.32.45/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
61446@@ -34,7 +34,7 @@
61447 */
61448 TRACE_EVENT(irq_handler_entry,
61449
61450- TP_PROTO(int irq, struct irqaction *action),
61451+ TP_PROTO(int irq, const struct irqaction *action),
61452
61453 TP_ARGS(irq, action),
61454
61455@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
61456 */
61457 TRACE_EVENT(irq_handler_exit,
61458
61459- TP_PROTO(int irq, struct irqaction *action, int ret),
61460+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61461
61462 TP_ARGS(irq, action, ret),
61463
61464@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
61465 */
61466 TRACE_EVENT(softirq_entry,
61467
61468- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61469+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61470
61471 TP_ARGS(h, vec),
61472
61473@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
61474 */
61475 TRACE_EVENT(softirq_exit,
61476
61477- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61478+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61479
61480 TP_ARGS(h, vec),
61481
61482diff -urNp linux-2.6.32.45/include/video/uvesafb.h linux-2.6.32.45/include/video/uvesafb.h
61483--- linux-2.6.32.45/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
61484+++ linux-2.6.32.45/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
61485@@ -177,6 +177,7 @@ struct uvesafb_par {
61486 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61487 u8 pmi_setpal; /* PMI for palette changes */
61488 u16 *pmi_base; /* protected mode interface location */
61489+ u8 *pmi_code; /* protected mode code location */
61490 void *pmi_start;
61491 void *pmi_pal;
61492 u8 *vbe_state_orig; /*
61493diff -urNp linux-2.6.32.45/init/do_mounts.c linux-2.6.32.45/init/do_mounts.c
61494--- linux-2.6.32.45/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
61495+++ linux-2.6.32.45/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
61496@@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
61497
61498 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61499 {
61500- int err = sys_mount(name, "/root", fs, flags, data);
61501+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
61502 if (err)
61503 return err;
61504
61505- sys_chdir("/root");
61506+ sys_chdir((__force const char __user *)"/root");
61507 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61508 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61509 current->fs->pwd.mnt->mnt_sb->s_type->name,
61510@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
61511 va_start(args, fmt);
61512 vsprintf(buf, fmt, args);
61513 va_end(args);
61514- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61515+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61516 if (fd >= 0) {
61517 sys_ioctl(fd, FDEJECT, 0);
61518 sys_close(fd);
61519 }
61520 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61521- fd = sys_open("/dev/console", O_RDWR, 0);
61522+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
61523 if (fd >= 0) {
61524 sys_ioctl(fd, TCGETS, (long)&termios);
61525 termios.c_lflag &= ~ICANON;
61526 sys_ioctl(fd, TCSETSF, (long)&termios);
61527- sys_read(fd, &c, 1);
61528+ sys_read(fd, (char __user *)&c, 1);
61529 termios.c_lflag |= ICANON;
61530 sys_ioctl(fd, TCSETSF, (long)&termios);
61531 sys_close(fd);
61532@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
61533 mount_root();
61534 out:
61535 devtmpfs_mount("dev");
61536- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61537- sys_chroot(".");
61538+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61539+ sys_chroot((__force char __user *)".");
61540 }
61541diff -urNp linux-2.6.32.45/init/do_mounts.h linux-2.6.32.45/init/do_mounts.h
61542--- linux-2.6.32.45/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
61543+++ linux-2.6.32.45/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
61544@@ -15,15 +15,15 @@ extern int root_mountflags;
61545
61546 static inline int create_dev(char *name, dev_t dev)
61547 {
61548- sys_unlink(name);
61549- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61550+ sys_unlink((__force char __user *)name);
61551+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
61552 }
61553
61554 #if BITS_PER_LONG == 32
61555 static inline u32 bstat(char *name)
61556 {
61557 struct stat64 stat;
61558- if (sys_stat64(name, &stat) != 0)
61559+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
61560 return 0;
61561 if (!S_ISBLK(stat.st_mode))
61562 return 0;
61563diff -urNp linux-2.6.32.45/init/do_mounts_initrd.c linux-2.6.32.45/init/do_mounts_initrd.c
61564--- linux-2.6.32.45/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
61565+++ linux-2.6.32.45/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
61566@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
61567 sys_close(old_fd);sys_close(root_fd);
61568 sys_close(0);sys_close(1);sys_close(2);
61569 sys_setsid();
61570- (void) sys_open("/dev/console",O_RDWR,0);
61571+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
61572 (void) sys_dup(0);
61573 (void) sys_dup(0);
61574 return kernel_execve(shell, argv, envp_init);
61575@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
61576 create_dev("/dev/root.old", Root_RAM0);
61577 /* mount initrd on rootfs' /root */
61578 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61579- sys_mkdir("/old", 0700);
61580- root_fd = sys_open("/", 0, 0);
61581- old_fd = sys_open("/old", 0, 0);
61582+ sys_mkdir((__force const char __user *)"/old", 0700);
61583+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
61584+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
61585 /* move initrd over / and chdir/chroot in initrd root */
61586- sys_chdir("/root");
61587- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61588- sys_chroot(".");
61589+ sys_chdir((__force const char __user *)"/root");
61590+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61591+ sys_chroot((__force const char __user *)".");
61592
61593 /*
61594 * In case that a resume from disk is carried out by linuxrc or one of
61595@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
61596
61597 /* move initrd to rootfs' /old */
61598 sys_fchdir(old_fd);
61599- sys_mount("/", ".", NULL, MS_MOVE, NULL);
61600+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
61601 /* switch root and cwd back to / of rootfs */
61602 sys_fchdir(root_fd);
61603- sys_chroot(".");
61604+ sys_chroot((__force const char __user *)".");
61605 sys_close(old_fd);
61606 sys_close(root_fd);
61607
61608 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61609- sys_chdir("/old");
61610+ sys_chdir((__force const char __user *)"/old");
61611 return;
61612 }
61613
61614@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
61615 mount_root();
61616
61617 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61618- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61619+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
61620 if (!error)
61621 printk("okay\n");
61622 else {
61623- int fd = sys_open("/dev/root.old", O_RDWR, 0);
61624+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
61625 if (error == -ENOENT)
61626 printk("/initrd does not exist. Ignored.\n");
61627 else
61628 printk("failed\n");
61629 printk(KERN_NOTICE "Unmounting old root\n");
61630- sys_umount("/old", MNT_DETACH);
61631+ sys_umount((__force char __user *)"/old", MNT_DETACH);
61632 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61633 if (fd < 0) {
61634 error = fd;
61635@@ -119,11 +119,11 @@ int __init initrd_load(void)
61636 * mounted in the normal path.
61637 */
61638 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61639- sys_unlink("/initrd.image");
61640+ sys_unlink((__force const char __user *)"/initrd.image");
61641 handle_initrd();
61642 return 1;
61643 }
61644 }
61645- sys_unlink("/initrd.image");
61646+ sys_unlink((__force const char __user *)"/initrd.image");
61647 return 0;
61648 }
61649diff -urNp linux-2.6.32.45/init/do_mounts_md.c linux-2.6.32.45/init/do_mounts_md.c
61650--- linux-2.6.32.45/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
61651+++ linux-2.6.32.45/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
61652@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61653 partitioned ? "_d" : "", minor,
61654 md_setup_args[ent].device_names);
61655
61656- fd = sys_open(name, 0, 0);
61657+ fd = sys_open((__force char __user *)name, 0, 0);
61658 if (fd < 0) {
61659 printk(KERN_ERR "md: open failed - cannot start "
61660 "array %s\n", name);
61661@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61662 * array without it
61663 */
61664 sys_close(fd);
61665- fd = sys_open(name, 0, 0);
61666+ fd = sys_open((__force char __user *)name, 0, 0);
61667 sys_ioctl(fd, BLKRRPART, 0);
61668 }
61669 sys_close(fd);
61670@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61671
61672 wait_for_device_probe();
61673
61674- fd = sys_open("/dev/md0", 0, 0);
61675+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
61676 if (fd >= 0) {
61677 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61678 sys_close(fd);
61679diff -urNp linux-2.6.32.45/init/initramfs.c linux-2.6.32.45/init/initramfs.c
61680--- linux-2.6.32.45/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
61681+++ linux-2.6.32.45/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
61682@@ -74,7 +74,7 @@ static void __init free_hash(void)
61683 }
61684 }
61685
61686-static long __init do_utime(char __user *filename, time_t mtime)
61687+static long __init do_utime(__force char __user *filename, time_t mtime)
61688 {
61689 struct timespec t[2];
61690
61691@@ -109,7 +109,7 @@ static void __init dir_utime(void)
61692 struct dir_entry *de, *tmp;
61693 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61694 list_del(&de->list);
61695- do_utime(de->name, de->mtime);
61696+ do_utime((__force char __user *)de->name, de->mtime);
61697 kfree(de->name);
61698 kfree(de);
61699 }
61700@@ -271,7 +271,7 @@ static int __init maybe_link(void)
61701 if (nlink >= 2) {
61702 char *old = find_link(major, minor, ino, mode, collected);
61703 if (old)
61704- return (sys_link(old, collected) < 0) ? -1 : 1;
61705+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
61706 }
61707 return 0;
61708 }
61709@@ -280,11 +280,11 @@ static void __init clean_path(char *path
61710 {
61711 struct stat st;
61712
61713- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61714+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
61715 if (S_ISDIR(st.st_mode))
61716- sys_rmdir(path);
61717+ sys_rmdir((__force char __user *)path);
61718 else
61719- sys_unlink(path);
61720+ sys_unlink((__force char __user *)path);
61721 }
61722 }
61723
61724@@ -305,7 +305,7 @@ static int __init do_name(void)
61725 int openflags = O_WRONLY|O_CREAT;
61726 if (ml != 1)
61727 openflags |= O_TRUNC;
61728- wfd = sys_open(collected, openflags, mode);
61729+ wfd = sys_open((__force char __user *)collected, openflags, mode);
61730
61731 if (wfd >= 0) {
61732 sys_fchown(wfd, uid, gid);
61733@@ -317,17 +317,17 @@ static int __init do_name(void)
61734 }
61735 }
61736 } else if (S_ISDIR(mode)) {
61737- sys_mkdir(collected, mode);
61738- sys_chown(collected, uid, gid);
61739- sys_chmod(collected, mode);
61740+ sys_mkdir((__force char __user *)collected, mode);
61741+ sys_chown((__force char __user *)collected, uid, gid);
61742+ sys_chmod((__force char __user *)collected, mode);
61743 dir_add(collected, mtime);
61744 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61745 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61746 if (maybe_link() == 0) {
61747- sys_mknod(collected, mode, rdev);
61748- sys_chown(collected, uid, gid);
61749- sys_chmod(collected, mode);
61750- do_utime(collected, mtime);
61751+ sys_mknod((__force char __user *)collected, mode, rdev);
61752+ sys_chown((__force char __user *)collected, uid, gid);
61753+ sys_chmod((__force char __user *)collected, mode);
61754+ do_utime((__force char __user *)collected, mtime);
61755 }
61756 }
61757 return 0;
61758@@ -336,15 +336,15 @@ static int __init do_name(void)
61759 static int __init do_copy(void)
61760 {
61761 if (count >= body_len) {
61762- sys_write(wfd, victim, body_len);
61763+ sys_write(wfd, (__force char __user *)victim, body_len);
61764 sys_close(wfd);
61765- do_utime(vcollected, mtime);
61766+ do_utime((__force char __user *)vcollected, mtime);
61767 kfree(vcollected);
61768 eat(body_len);
61769 state = SkipIt;
61770 return 0;
61771 } else {
61772- sys_write(wfd, victim, count);
61773+ sys_write(wfd, (__force char __user *)victim, count);
61774 body_len -= count;
61775 eat(count);
61776 return 1;
61777@@ -355,9 +355,9 @@ static int __init do_symlink(void)
61778 {
61779 collected[N_ALIGN(name_len) + body_len] = '\0';
61780 clean_path(collected, 0);
61781- sys_symlink(collected + N_ALIGN(name_len), collected);
61782- sys_lchown(collected, uid, gid);
61783- do_utime(collected, mtime);
61784+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
61785+ sys_lchown((__force char __user *)collected, uid, gid);
61786+ do_utime((__force char __user *)collected, mtime);
61787 state = SkipIt;
61788 next_state = Reset;
61789 return 0;
61790diff -urNp linux-2.6.32.45/init/Kconfig linux-2.6.32.45/init/Kconfig
61791--- linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
61792+++ linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
61793@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
61794
61795 config COMPAT_BRK
61796 bool "Disable heap randomization"
61797- default y
61798+ default n
61799 help
61800 Randomizing heap placement makes heap exploits harder, but it
61801 also breaks ancient binaries (including anything libc5 based).
61802diff -urNp linux-2.6.32.45/init/main.c linux-2.6.32.45/init/main.c
61803--- linux-2.6.32.45/init/main.c 2011-05-10 22:12:01.000000000 -0400
61804+++ linux-2.6.32.45/init/main.c 2011-08-05 20:33:55.000000000 -0400
61805@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
61806 #ifdef CONFIG_TC
61807 extern void tc_init(void);
61808 #endif
61809+extern void grsecurity_init(void);
61810
61811 enum system_states system_state __read_mostly;
61812 EXPORT_SYMBOL(system_state);
61813@@ -183,6 +184,49 @@ static int __init set_reset_devices(char
61814
61815 __setup("reset_devices", set_reset_devices);
61816
61817+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61818+extern char pax_enter_kernel_user[];
61819+extern char pax_exit_kernel_user[];
61820+extern pgdval_t clone_pgd_mask;
61821+#endif
61822+
61823+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61824+static int __init setup_pax_nouderef(char *str)
61825+{
61826+#ifdef CONFIG_X86_32
61827+ unsigned int cpu;
61828+ struct desc_struct *gdt;
61829+
61830+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
61831+ gdt = get_cpu_gdt_table(cpu);
61832+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61833+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61834+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61835+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61836+ }
61837+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61838+#else
61839+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61840+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61841+ clone_pgd_mask = ~(pgdval_t)0UL;
61842+#endif
61843+
61844+ return 0;
61845+}
61846+early_param("pax_nouderef", setup_pax_nouderef);
61847+#endif
61848+
61849+#ifdef CONFIG_PAX_SOFTMODE
61850+int pax_softmode;
61851+
61852+static int __init setup_pax_softmode(char *str)
61853+{
61854+ get_option(&str, &pax_softmode);
61855+ return 1;
61856+}
61857+__setup("pax_softmode=", setup_pax_softmode);
61858+#endif
61859+
61860 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61861 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
61862 static const char *panic_later, *panic_param;
61863@@ -705,52 +749,53 @@ int initcall_debug;
61864 core_param(initcall_debug, initcall_debug, bool, 0644);
61865
61866 static char msgbuf[64];
61867-static struct boot_trace_call call;
61868-static struct boot_trace_ret ret;
61869+static struct boot_trace_call trace_call;
61870+static struct boot_trace_ret trace_ret;
61871
61872 int do_one_initcall(initcall_t fn)
61873 {
61874 int count = preempt_count();
61875 ktime_t calltime, delta, rettime;
61876+ const char *msg1 = "", *msg2 = "";
61877
61878 if (initcall_debug) {
61879- call.caller = task_pid_nr(current);
61880- printk("calling %pF @ %i\n", fn, call.caller);
61881+ trace_call.caller = task_pid_nr(current);
61882+ printk("calling %pF @ %i\n", fn, trace_call.caller);
61883 calltime = ktime_get();
61884- trace_boot_call(&call, fn);
61885+ trace_boot_call(&trace_call, fn);
61886 enable_boot_trace();
61887 }
61888
61889- ret.result = fn();
61890+ trace_ret.result = fn();
61891
61892 if (initcall_debug) {
61893 disable_boot_trace();
61894 rettime = ktime_get();
61895 delta = ktime_sub(rettime, calltime);
61896- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
61897- trace_boot_ret(&ret, fn);
61898+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
61899+ trace_boot_ret(&trace_ret, fn);
61900 printk("initcall %pF returned %d after %Ld usecs\n", fn,
61901- ret.result, ret.duration);
61902+ trace_ret.result, trace_ret.duration);
61903 }
61904
61905 msgbuf[0] = 0;
61906
61907- if (ret.result && ret.result != -ENODEV && initcall_debug)
61908- sprintf(msgbuf, "error code %d ", ret.result);
61909+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
61910+ sprintf(msgbuf, "error code %d ", trace_ret.result);
61911
61912 if (preempt_count() != count) {
61913- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
61914+ msg1 = " preemption imbalance";
61915 preempt_count() = count;
61916 }
61917 if (irqs_disabled()) {
61918- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
61919+ msg2 = " disabled interrupts";
61920 local_irq_enable();
61921 }
61922- if (msgbuf[0]) {
61923- printk("initcall %pF returned with %s\n", fn, msgbuf);
61924+ if (msgbuf[0] || *msg1 || *msg2) {
61925+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
61926 }
61927
61928- return ret.result;
61929+ return trace_ret.result;
61930 }
61931
61932
61933@@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
61934 if (!ramdisk_execute_command)
61935 ramdisk_execute_command = "/init";
61936
61937- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
61938+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
61939 ramdisk_execute_command = NULL;
61940 prepare_namespace();
61941 }
61942
61943+ grsecurity_init();
61944+
61945 /*
61946 * Ok, we have completed the initial bootup, and
61947 * we're essentially up and running. Get rid of the
61948diff -urNp linux-2.6.32.45/init/noinitramfs.c linux-2.6.32.45/init/noinitramfs.c
61949--- linux-2.6.32.45/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
61950+++ linux-2.6.32.45/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
61951@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
61952 {
61953 int err;
61954
61955- err = sys_mkdir("/dev", 0755);
61956+ err = sys_mkdir((const char __user *)"/dev", 0755);
61957 if (err < 0)
61958 goto out;
61959
61960@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
61961 if (err < 0)
61962 goto out;
61963
61964- err = sys_mkdir("/root", 0700);
61965+ err = sys_mkdir((const char __user *)"/root", 0700);
61966 if (err < 0)
61967 goto out;
61968
61969diff -urNp linux-2.6.32.45/ipc/mqueue.c linux-2.6.32.45/ipc/mqueue.c
61970--- linux-2.6.32.45/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
61971+++ linux-2.6.32.45/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
61972@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
61973 mq_bytes = (mq_msg_tblsz +
61974 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
61975
61976+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
61977 spin_lock(&mq_lock);
61978 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
61979 u->mq_bytes + mq_bytes >
61980diff -urNp linux-2.6.32.45/ipc/msg.c linux-2.6.32.45/ipc/msg.c
61981--- linux-2.6.32.45/ipc/msg.c 2011-03-27 14:31:47.000000000 -0400
61982+++ linux-2.6.32.45/ipc/msg.c 2011-08-05 20:33:55.000000000 -0400
61983@@ -310,18 +310,19 @@ static inline int msg_security(struct ke
61984 return security_msg_queue_associate(msq, msgflg);
61985 }
61986
61987+static struct ipc_ops msg_ops = {
61988+ .getnew = newque,
61989+ .associate = msg_security,
61990+ .more_checks = NULL
61991+};
61992+
61993 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
61994 {
61995 struct ipc_namespace *ns;
61996- struct ipc_ops msg_ops;
61997 struct ipc_params msg_params;
61998
61999 ns = current->nsproxy->ipc_ns;
62000
62001- msg_ops.getnew = newque;
62002- msg_ops.associate = msg_security;
62003- msg_ops.more_checks = NULL;
62004-
62005 msg_params.key = key;
62006 msg_params.flg = msgflg;
62007
62008diff -urNp linux-2.6.32.45/ipc/sem.c linux-2.6.32.45/ipc/sem.c
62009--- linux-2.6.32.45/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
62010+++ linux-2.6.32.45/ipc/sem.c 2011-08-05 20:33:55.000000000 -0400
62011@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct
62012 return 0;
62013 }
62014
62015+static struct ipc_ops sem_ops = {
62016+ .getnew = newary,
62017+ .associate = sem_security,
62018+ .more_checks = sem_more_checks
62019+};
62020+
62021 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62022 {
62023 struct ipc_namespace *ns;
62024- struct ipc_ops sem_ops;
62025 struct ipc_params sem_params;
62026
62027 ns = current->nsproxy->ipc_ns;
62028@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62029 if (nsems < 0 || nsems > ns->sc_semmsl)
62030 return -EINVAL;
62031
62032- sem_ops.getnew = newary;
62033- sem_ops.associate = sem_security;
62034- sem_ops.more_checks = sem_more_checks;
62035-
62036 sem_params.key = key;
62037 sem_params.flg = semflg;
62038 sem_params.u.nsems = nsems;
62039@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namesp
62040 ushort* sem_io = fast_sem_io;
62041 int nsems;
62042
62043+ pax_track_stack();
62044+
62045 sma = sem_lock_check(ns, semid);
62046 if (IS_ERR(sma))
62047 return PTR_ERR(sma);
62048@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62049 unsigned long jiffies_left = 0;
62050 struct ipc_namespace *ns;
62051
62052+ pax_track_stack();
62053+
62054 ns = current->nsproxy->ipc_ns;
62055
62056 if (nsops < 1 || semid < 0)
62057diff -urNp linux-2.6.32.45/ipc/shm.c linux-2.6.32.45/ipc/shm.c
62058--- linux-2.6.32.45/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
62059+++ linux-2.6.32.45/ipc/shm.c 2011-08-05 20:33:55.000000000 -0400
62060@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
62061 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62062 #endif
62063
62064+#ifdef CONFIG_GRKERNSEC
62065+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62066+ const time_t shm_createtime, const uid_t cuid,
62067+ const int shmid);
62068+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62069+ const time_t shm_createtime);
62070+#endif
62071+
62072 void shm_init_ns(struct ipc_namespace *ns)
62073 {
62074 ns->shm_ctlmax = SHMMAX;
62075@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
62076 shp->shm_lprid = 0;
62077 shp->shm_atim = shp->shm_dtim = 0;
62078 shp->shm_ctim = get_seconds();
62079+#ifdef CONFIG_GRKERNSEC
62080+ {
62081+ struct timespec timeval;
62082+ do_posix_clock_monotonic_gettime(&timeval);
62083+
62084+ shp->shm_createtime = timeval.tv_sec;
62085+ }
62086+#endif
62087 shp->shm_segsz = size;
62088 shp->shm_nattch = 0;
62089 shp->shm_file = file;
62090@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct
62091 return 0;
62092 }
62093
62094+static struct ipc_ops shm_ops = {
62095+ .getnew = newseg,
62096+ .associate = shm_security,
62097+ .more_checks = shm_more_checks
62098+};
62099+
62100 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62101 {
62102 struct ipc_namespace *ns;
62103- struct ipc_ops shm_ops;
62104 struct ipc_params shm_params;
62105
62106 ns = current->nsproxy->ipc_ns;
62107
62108- shm_ops.getnew = newseg;
62109- shm_ops.associate = shm_security;
62110- shm_ops.more_checks = shm_more_checks;
62111-
62112 shm_params.key = key;
62113 shm_params.flg = shmflg;
62114 shm_params.u.size = size;
62115@@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *sh
62116 if (err)
62117 goto out_unlock;
62118
62119+#ifdef CONFIG_GRKERNSEC
62120+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62121+ shp->shm_perm.cuid, shmid) ||
62122+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62123+ err = -EACCES;
62124+ goto out_unlock;
62125+ }
62126+#endif
62127+
62128 path.dentry = dget(shp->shm_file->f_path.dentry);
62129 path.mnt = shp->shm_file->f_path.mnt;
62130 shp->shm_nattch++;
62131+#ifdef CONFIG_GRKERNSEC
62132+ shp->shm_lapid = current->pid;
62133+#endif
62134 size = i_size_read(path.dentry->d_inode);
62135 shm_unlock(shp);
62136
62137diff -urNp linux-2.6.32.45/kernel/acct.c linux-2.6.32.45/kernel/acct.c
62138--- linux-2.6.32.45/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
62139+++ linux-2.6.32.45/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
62140@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
62141 */
62142 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62143 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62144- file->f_op->write(file, (char *)&ac,
62145+ file->f_op->write(file, (__force char __user *)&ac,
62146 sizeof(acct_t), &file->f_pos);
62147 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62148 set_fs(fs);
62149diff -urNp linux-2.6.32.45/kernel/audit.c linux-2.6.32.45/kernel/audit.c
62150--- linux-2.6.32.45/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
62151+++ linux-2.6.32.45/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
62152@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
62153 3) suppressed due to audit_rate_limit
62154 4) suppressed due to audit_backlog_limit
62155 */
62156-static atomic_t audit_lost = ATOMIC_INIT(0);
62157+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62158
62159 /* The netlink socket. */
62160 static struct sock *audit_sock;
62161@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
62162 unsigned long now;
62163 int print;
62164
62165- atomic_inc(&audit_lost);
62166+ atomic_inc_unchecked(&audit_lost);
62167
62168 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62169
62170@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
62171 printk(KERN_WARNING
62172 "audit: audit_lost=%d audit_rate_limit=%d "
62173 "audit_backlog_limit=%d\n",
62174- atomic_read(&audit_lost),
62175+ atomic_read_unchecked(&audit_lost),
62176 audit_rate_limit,
62177 audit_backlog_limit);
62178 audit_panic(message);
62179@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
62180 status_set.pid = audit_pid;
62181 status_set.rate_limit = audit_rate_limit;
62182 status_set.backlog_limit = audit_backlog_limit;
62183- status_set.lost = atomic_read(&audit_lost);
62184+ status_set.lost = atomic_read_unchecked(&audit_lost);
62185 status_set.backlog = skb_queue_len(&audit_skb_queue);
62186 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62187 &status_set, sizeof(status_set));
62188@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
62189 spin_unlock_irq(&tsk->sighand->siglock);
62190 }
62191 read_unlock(&tasklist_lock);
62192- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
62193- &s, sizeof(s));
62194+
62195+ if (!err)
62196+ audit_send_reply(NETLINK_CB(skb).pid, seq,
62197+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
62198 break;
62199 }
62200 case AUDIT_TTY_SET: {
62201diff -urNp linux-2.6.32.45/kernel/auditsc.c linux-2.6.32.45/kernel/auditsc.c
62202--- linux-2.6.32.45/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
62203+++ linux-2.6.32.45/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
62204@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
62205 }
62206
62207 /* global counter which is incremented every time something logs in */
62208-static atomic_t session_id = ATOMIC_INIT(0);
62209+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62210
62211 /**
62212 * audit_set_loginuid - set a task's audit_context loginuid
62213@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
62214 */
62215 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62216 {
62217- unsigned int sessionid = atomic_inc_return(&session_id);
62218+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62219 struct audit_context *context = task->audit_context;
62220
62221 if (context && context->in_syscall) {
62222diff -urNp linux-2.6.32.45/kernel/capability.c linux-2.6.32.45/kernel/capability.c
62223--- linux-2.6.32.45/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
62224+++ linux-2.6.32.45/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
62225@@ -305,10 +305,26 @@ int capable(int cap)
62226 BUG();
62227 }
62228
62229- if (security_capable(cap) == 0) {
62230+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
62231 current->flags |= PF_SUPERPRIV;
62232 return 1;
62233 }
62234 return 0;
62235 }
62236+
62237+int capable_nolog(int cap)
62238+{
62239+ if (unlikely(!cap_valid(cap))) {
62240+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62241+ BUG();
62242+ }
62243+
62244+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
62245+ current->flags |= PF_SUPERPRIV;
62246+ return 1;
62247+ }
62248+ return 0;
62249+}
62250+
62251 EXPORT_SYMBOL(capable);
62252+EXPORT_SYMBOL(capable_nolog);
62253diff -urNp linux-2.6.32.45/kernel/cgroup.c linux-2.6.32.45/kernel/cgroup.c
62254--- linux-2.6.32.45/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
62255+++ linux-2.6.32.45/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
62256@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
62257 struct hlist_head *hhead;
62258 struct cg_cgroup_link *link;
62259
62260+ pax_track_stack();
62261+
62262 /* First see if we already have a cgroup group that matches
62263 * the desired set */
62264 read_lock(&css_set_lock);
62265diff -urNp linux-2.6.32.45/kernel/configs.c linux-2.6.32.45/kernel/configs.c
62266--- linux-2.6.32.45/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
62267+++ linux-2.6.32.45/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
62268@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
62269 struct proc_dir_entry *entry;
62270
62271 /* create the current config file */
62272+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62273+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62274+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62275+ &ikconfig_file_ops);
62276+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62277+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62278+ &ikconfig_file_ops);
62279+#endif
62280+#else
62281 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62282 &ikconfig_file_ops);
62283+#endif
62284+
62285 if (!entry)
62286 return -ENOMEM;
62287
62288diff -urNp linux-2.6.32.45/kernel/cpu.c linux-2.6.32.45/kernel/cpu.c
62289--- linux-2.6.32.45/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
62290+++ linux-2.6.32.45/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
62291@@ -19,7 +19,7 @@
62292 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
62293 static DEFINE_MUTEX(cpu_add_remove_lock);
62294
62295-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
62296+static RAW_NOTIFIER_HEAD(cpu_chain);
62297
62298 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
62299 * Should always be manipulated under cpu_add_remove_lock
62300diff -urNp linux-2.6.32.45/kernel/cred.c linux-2.6.32.45/kernel/cred.c
62301--- linux-2.6.32.45/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
62302+++ linux-2.6.32.45/kernel/cred.c 2011-08-11 19:49:38.000000000 -0400
62303@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
62304 */
62305 void __put_cred(struct cred *cred)
62306 {
62307+ pax_track_stack();
62308+
62309 kdebug("__put_cred(%p{%d,%d})", cred,
62310 atomic_read(&cred->usage),
62311 read_cred_subscribers(cred));
62312@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
62313 {
62314 struct cred *cred;
62315
62316+ pax_track_stack();
62317+
62318 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62319 atomic_read(&tsk->cred->usage),
62320 read_cred_subscribers(tsk->cred));
62321@@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
62322 {
62323 const struct cred *cred;
62324
62325+ pax_track_stack();
62326+
62327 rcu_read_lock();
62328
62329 do {
62330@@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
62331 {
62332 struct cred *new;
62333
62334+ pax_track_stack();
62335+
62336 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62337 if (!new)
62338 return NULL;
62339@@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
62340 const struct cred *old;
62341 struct cred *new;
62342
62343+ pax_track_stack();
62344+
62345 validate_process_creds();
62346
62347 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62348@@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
62349 struct thread_group_cred *tgcred = NULL;
62350 struct cred *new;
62351
62352+ pax_track_stack();
62353+
62354 #ifdef CONFIG_KEYS
62355 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62356 if (!tgcred)
62357@@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
62358 struct cred *new;
62359 int ret;
62360
62361+ pax_track_stack();
62362+
62363 mutex_init(&p->cred_guard_mutex);
62364
62365 if (
62366@@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
62367 struct task_struct *task = current;
62368 const struct cred *old = task->real_cred;
62369
62370+ pax_track_stack();
62371+
62372 kdebug("commit_creds(%p{%d,%d})", new,
62373 atomic_read(&new->usage),
62374 read_cred_subscribers(new));
62375@@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
62376
62377 get_cred(new); /* we will require a ref for the subj creds too */
62378
62379+ gr_set_role_label(task, new->uid, new->gid);
62380+
62381 /* dumpability changes */
62382 if (old->euid != new->euid ||
62383 old->egid != new->egid ||
62384@@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
62385 key_fsgid_changed(task);
62386
62387 /* do it
62388- * - What if a process setreuid()'s and this brings the
62389- * new uid over his NPROC rlimit? We can check this now
62390- * cheaply with the new uid cache, so if it matters
62391- * we should be checking for it. -DaveM
62392+ * RLIMIT_NPROC limits on user->processes have already been checked
62393+ * in set_user().
62394 */
62395 alter_cred_subscribers(new, 2);
62396 if (new->user != old->user)
62397@@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
62398 */
62399 void abort_creds(struct cred *new)
62400 {
62401+ pax_track_stack();
62402+
62403 kdebug("abort_creds(%p{%d,%d})", new,
62404 atomic_read(&new->usage),
62405 read_cred_subscribers(new));
62406@@ -629,6 +647,8 @@ const struct cred *override_creds(const
62407 {
62408 const struct cred *old = current->cred;
62409
62410+ pax_track_stack();
62411+
62412 kdebug("override_creds(%p{%d,%d})", new,
62413 atomic_read(&new->usage),
62414 read_cred_subscribers(new));
62415@@ -658,6 +678,8 @@ void revert_creds(const struct cred *old
62416 {
62417 const struct cred *override = current->cred;
62418
62419+ pax_track_stack();
62420+
62421 kdebug("revert_creds(%p{%d,%d})", old,
62422 atomic_read(&old->usage),
62423 read_cred_subscribers(old));
62424@@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct
62425 const struct cred *old;
62426 struct cred *new;
62427
62428+ pax_track_stack();
62429+
62430 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62431 if (!new)
62432 return NULL;
62433@@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62434 */
62435 int set_security_override(struct cred *new, u32 secid)
62436 {
62437+ pax_track_stack();
62438+
62439 return security_kernel_act_as(new, secid);
62440 }
62441 EXPORT_SYMBOL(set_security_override);
62442@@ -777,6 +803,8 @@ int set_security_override_from_ctx(struc
62443 u32 secid;
62444 int ret;
62445
62446+ pax_track_stack();
62447+
62448 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62449 if (ret < 0)
62450 return ret;
62451diff -urNp linux-2.6.32.45/kernel/exit.c linux-2.6.32.45/kernel/exit.c
62452--- linux-2.6.32.45/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
62453+++ linux-2.6.32.45/kernel/exit.c 2011-08-17 19:19:50.000000000 -0400
62454@@ -55,6 +55,10 @@
62455 #include <asm/pgtable.h>
62456 #include <asm/mmu_context.h>
62457
62458+#ifdef CONFIG_GRKERNSEC
62459+extern rwlock_t grsec_exec_file_lock;
62460+#endif
62461+
62462 static void exit_mm(struct task_struct * tsk);
62463
62464 static void __unhash_process(struct task_struct *p)
62465@@ -174,6 +178,10 @@ void release_task(struct task_struct * p
62466 struct task_struct *leader;
62467 int zap_leader;
62468 repeat:
62469+#ifdef CONFIG_NET
62470+ gr_del_task_from_ip_table(p);
62471+#endif
62472+
62473 tracehook_prepare_release_task(p);
62474 /* don't need to get the RCU readlock here - the process is dead and
62475 * can't be modifying its own credentials */
62476@@ -341,11 +349,22 @@ static void reparent_to_kthreadd(void)
62477 {
62478 write_lock_irq(&tasklist_lock);
62479
62480+#ifdef CONFIG_GRKERNSEC
62481+ write_lock(&grsec_exec_file_lock);
62482+ if (current->exec_file) {
62483+ fput(current->exec_file);
62484+ current->exec_file = NULL;
62485+ }
62486+ write_unlock(&grsec_exec_file_lock);
62487+#endif
62488+
62489 ptrace_unlink(current);
62490 /* Reparent to init */
62491 current->real_parent = current->parent = kthreadd_task;
62492 list_move_tail(&current->sibling, &current->real_parent->children);
62493
62494+ gr_set_kernel_label(current);
62495+
62496 /* Set the exit signal to SIGCHLD so we signal init on exit */
62497 current->exit_signal = SIGCHLD;
62498
62499@@ -397,7 +416,7 @@ int allow_signal(int sig)
62500 * know it'll be handled, so that they don't get converted to
62501 * SIGKILL or just silently dropped.
62502 */
62503- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62504+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62505 recalc_sigpending();
62506 spin_unlock_irq(&current->sighand->siglock);
62507 return 0;
62508@@ -433,6 +452,17 @@ void daemonize(const char *name, ...)
62509 vsnprintf(current->comm, sizeof(current->comm), name, args);
62510 va_end(args);
62511
62512+#ifdef CONFIG_GRKERNSEC
62513+ write_lock(&grsec_exec_file_lock);
62514+ if (current->exec_file) {
62515+ fput(current->exec_file);
62516+ current->exec_file = NULL;
62517+ }
62518+ write_unlock(&grsec_exec_file_lock);
62519+#endif
62520+
62521+ gr_set_kernel_label(current);
62522+
62523 /*
62524 * If we were started as result of loading a module, close all of the
62525 * user space pages. We don't need them, and if we didn't close them
62526@@ -897,17 +927,17 @@ NORET_TYPE void do_exit(long code)
62527 struct task_struct *tsk = current;
62528 int group_dead;
62529
62530- profile_task_exit(tsk);
62531-
62532- WARN_ON(atomic_read(&tsk->fs_excl));
62533-
62534+ /*
62535+ * Check this first since set_fs() below depends on
62536+ * current_thread_info(), which we better not access when we're in
62537+ * interrupt context. Other than that, we want to do the set_fs()
62538+ * as early as possible.
62539+ */
62540 if (unlikely(in_interrupt()))
62541 panic("Aiee, killing interrupt handler!");
62542- if (unlikely(!tsk->pid))
62543- panic("Attempted to kill the idle task!");
62544
62545 /*
62546- * If do_exit is called because this processes oopsed, it's possible
62547+ * If do_exit is called because this processes Oops'ed, it's possible
62548 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
62549 * continuing. Amongst other possible reasons, this is to prevent
62550 * mm_release()->clear_child_tid() from writing to a user-controlled
62551@@ -915,6 +945,13 @@ NORET_TYPE void do_exit(long code)
62552 */
62553 set_fs(USER_DS);
62554
62555+ profile_task_exit(tsk);
62556+
62557+ WARN_ON(atomic_read(&tsk->fs_excl));
62558+
62559+ if (unlikely(!tsk->pid))
62560+ panic("Attempted to kill the idle task!");
62561+
62562 tracehook_report_exit(&code);
62563
62564 validate_creds_for_do_exit(tsk);
62565@@ -973,6 +1010,9 @@ NORET_TYPE void do_exit(long code)
62566 tsk->exit_code = code;
62567 taskstats_exit(tsk, group_dead);
62568
62569+ gr_acl_handle_psacct(tsk, code);
62570+ gr_acl_handle_exit();
62571+
62572 exit_mm(tsk);
62573
62574 if (group_dead)
62575@@ -1188,7 +1228,7 @@ static int wait_task_zombie(struct wait_
62576
62577 if (unlikely(wo->wo_flags & WNOWAIT)) {
62578 int exit_code = p->exit_code;
62579- int why, status;
62580+ int why;
62581
62582 get_task_struct(p);
62583 read_unlock(&tasklist_lock);
62584diff -urNp linux-2.6.32.45/kernel/fork.c linux-2.6.32.45/kernel/fork.c
62585--- linux-2.6.32.45/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
62586+++ linux-2.6.32.45/kernel/fork.c 2011-08-11 19:50:07.000000000 -0400
62587@@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
62588 *stackend = STACK_END_MAGIC; /* for overflow detection */
62589
62590 #ifdef CONFIG_CC_STACKPROTECTOR
62591- tsk->stack_canary = get_random_int();
62592+ tsk->stack_canary = pax_get_random_long();
62593 #endif
62594
62595 /* One for us, one for whoever does the "release_task()" (usually parent) */
62596@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
62597 mm->locked_vm = 0;
62598 mm->mmap = NULL;
62599 mm->mmap_cache = NULL;
62600- mm->free_area_cache = oldmm->mmap_base;
62601- mm->cached_hole_size = ~0UL;
62602+ mm->free_area_cache = oldmm->free_area_cache;
62603+ mm->cached_hole_size = oldmm->cached_hole_size;
62604 mm->map_count = 0;
62605 cpumask_clear(mm_cpumask(mm));
62606 mm->mm_rb = RB_ROOT;
62607@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
62608 tmp->vm_flags &= ~VM_LOCKED;
62609 tmp->vm_mm = mm;
62610 tmp->vm_next = tmp->vm_prev = NULL;
62611+ tmp->vm_mirror = NULL;
62612 anon_vma_link(tmp);
62613 file = tmp->vm_file;
62614 if (file) {
62615@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
62616 if (retval)
62617 goto out;
62618 }
62619+
62620+#ifdef CONFIG_PAX_SEGMEXEC
62621+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
62622+ struct vm_area_struct *mpnt_m;
62623+
62624+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
62625+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
62626+
62627+ if (!mpnt->vm_mirror)
62628+ continue;
62629+
62630+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
62631+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
62632+ mpnt->vm_mirror = mpnt_m;
62633+ } else {
62634+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
62635+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
62636+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
62637+ mpnt->vm_mirror->vm_mirror = mpnt;
62638+ }
62639+ }
62640+ BUG_ON(mpnt_m);
62641+ }
62642+#endif
62643+
62644 /* a new mm has just been created */
62645 arch_dup_mmap(oldmm, mm);
62646 retval = 0;
62647@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
62648 write_unlock(&fs->lock);
62649 return -EAGAIN;
62650 }
62651- fs->users++;
62652+ atomic_inc(&fs->users);
62653 write_unlock(&fs->lock);
62654 return 0;
62655 }
62656 tsk->fs = copy_fs_struct(fs);
62657 if (!tsk->fs)
62658 return -ENOMEM;
62659+ gr_set_chroot_entries(tsk, &tsk->fs->root);
62660 return 0;
62661 }
62662
62663@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(
62664 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
62665 #endif
62666 retval = -EAGAIN;
62667+
62668+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
62669+
62670 if (atomic_read(&p->real_cred->user->processes) >=
62671 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
62672- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
62673- p->real_cred->user != INIT_USER)
62674+ if (p->real_cred->user != INIT_USER &&
62675+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
62676 goto bad_fork_free;
62677 }
62678+ current->flags &= ~PF_NPROC_EXCEEDED;
62679
62680 retval = copy_creds(p, clone_flags);
62681 if (retval < 0)
62682@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(
62683 goto bad_fork_free_pid;
62684 }
62685
62686+ gr_copy_label(p);
62687+
62688 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
62689 /*
62690 * Clear TID on mm_release()?
62691@@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
62692 bad_fork_free:
62693 free_task(p);
62694 fork_out:
62695+ gr_log_forkfail(retval);
62696+
62697 return ERR_PTR(retval);
62698 }
62699
62700@@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
62701 if (clone_flags & CLONE_PARENT_SETTID)
62702 put_user(nr, parent_tidptr);
62703
62704+ gr_handle_brute_check();
62705+
62706 if (clone_flags & CLONE_VFORK) {
62707 p->vfork_done = &vfork;
62708 init_completion(&vfork);
62709@@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unsh
62710 return 0;
62711
62712 /* don't need lock here; in the worst case we'll do useless copy */
62713- if (fs->users == 1)
62714+ if (atomic_read(&fs->users) == 1)
62715 return 0;
62716
62717 *new_fsp = copy_fs_struct(fs);
62718@@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
62719 fs = current->fs;
62720 write_lock(&fs->lock);
62721 current->fs = new_fs;
62722- if (--fs->users)
62723+ gr_set_chroot_entries(current, &current->fs->root);
62724+ if (atomic_dec_return(&fs->users))
62725 new_fs = NULL;
62726 else
62727 new_fs = fs;
62728diff -urNp linux-2.6.32.45/kernel/futex.c linux-2.6.32.45/kernel/futex.c
62729--- linux-2.6.32.45/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
62730+++ linux-2.6.32.45/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
62731@@ -54,6 +54,7 @@
62732 #include <linux/mount.h>
62733 #include <linux/pagemap.h>
62734 #include <linux/syscalls.h>
62735+#include <linux/ptrace.h>
62736 #include <linux/signal.h>
62737 #include <linux/module.h>
62738 #include <linux/magic.h>
62739@@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
62740 struct page *page;
62741 int err;
62742
62743+#ifdef CONFIG_PAX_SEGMEXEC
62744+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
62745+ return -EFAULT;
62746+#endif
62747+
62748 /*
62749 * The futex address must be "naturally" aligned.
62750 */
62751@@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
62752 struct futex_q q;
62753 int ret;
62754
62755+ pax_track_stack();
62756+
62757 if (!bitset)
62758 return -EINVAL;
62759
62760@@ -1841,7 +1849,7 @@ retry:
62761
62762 restart = &current_thread_info()->restart_block;
62763 restart->fn = futex_wait_restart;
62764- restart->futex.uaddr = (u32 *)uaddr;
62765+ restart->futex.uaddr = uaddr;
62766 restart->futex.val = val;
62767 restart->futex.time = abs_time->tv64;
62768 restart->futex.bitset = bitset;
62769@@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
62770 struct futex_q q;
62771 int res, ret;
62772
62773+ pax_track_stack();
62774+
62775 if (!bitset)
62776 return -EINVAL;
62777
62778@@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62779 {
62780 struct robust_list_head __user *head;
62781 unsigned long ret;
62782+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62783 const struct cred *cred = current_cred(), *pcred;
62784+#endif
62785
62786 if (!futex_cmpxchg_enabled)
62787 return -ENOSYS;
62788@@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62789 if (!p)
62790 goto err_unlock;
62791 ret = -EPERM;
62792+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62793+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
62794+ goto err_unlock;
62795+#else
62796 pcred = __task_cred(p);
62797 if (cred->euid != pcred->euid &&
62798 cred->euid != pcred->uid &&
62799 !capable(CAP_SYS_PTRACE))
62800 goto err_unlock;
62801+#endif
62802 head = p->robust_list;
62803 rcu_read_unlock();
62804 }
62805@@ -2459,7 +2476,7 @@ retry:
62806 */
62807 static inline int fetch_robust_entry(struct robust_list __user **entry,
62808 struct robust_list __user * __user *head,
62809- int *pi)
62810+ unsigned int *pi)
62811 {
62812 unsigned long uentry;
62813
62814@@ -2640,6 +2657,7 @@ static int __init futex_init(void)
62815 {
62816 u32 curval;
62817 int i;
62818+ mm_segment_t oldfs;
62819
62820 /*
62821 * This will fail and we want it. Some arch implementations do
62822@@ -2651,7 +2669,10 @@ static int __init futex_init(void)
62823 * implementation, the non functional ones will return
62824 * -ENOSYS.
62825 */
62826+ oldfs = get_fs();
62827+ set_fs(USER_DS);
62828 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
62829+ set_fs(oldfs);
62830 if (curval == -EFAULT)
62831 futex_cmpxchg_enabled = 1;
62832
62833diff -urNp linux-2.6.32.45/kernel/futex_compat.c linux-2.6.32.45/kernel/futex_compat.c
62834--- linux-2.6.32.45/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
62835+++ linux-2.6.32.45/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
62836@@ -10,6 +10,7 @@
62837 #include <linux/compat.h>
62838 #include <linux/nsproxy.h>
62839 #include <linux/futex.h>
62840+#include <linux/ptrace.h>
62841
62842 #include <asm/uaccess.h>
62843
62844@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
62845 {
62846 struct compat_robust_list_head __user *head;
62847 unsigned long ret;
62848- const struct cred *cred = current_cred(), *pcred;
62849+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62850+ const struct cred *cred = current_cred();
62851+ const struct cred *pcred;
62852+#endif
62853
62854 if (!futex_cmpxchg_enabled)
62855 return -ENOSYS;
62856@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
62857 if (!p)
62858 goto err_unlock;
62859 ret = -EPERM;
62860+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62861+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
62862+ goto err_unlock;
62863+#else
62864 pcred = __task_cred(p);
62865 if (cred->euid != pcred->euid &&
62866 cred->euid != pcred->uid &&
62867 !capable(CAP_SYS_PTRACE))
62868 goto err_unlock;
62869+#endif
62870 head = p->compat_robust_list;
62871 read_unlock(&tasklist_lock);
62872 }
62873diff -urNp linux-2.6.32.45/kernel/gcov/base.c linux-2.6.32.45/kernel/gcov/base.c
62874--- linux-2.6.32.45/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
62875+++ linux-2.6.32.45/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
62876@@ -102,11 +102,6 @@ void gcov_enable_events(void)
62877 }
62878
62879 #ifdef CONFIG_MODULES
62880-static inline int within(void *addr, void *start, unsigned long size)
62881-{
62882- return ((addr >= start) && (addr < start + size));
62883-}
62884-
62885 /* Update list and generate events when modules are unloaded. */
62886 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
62887 void *data)
62888@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
62889 prev = NULL;
62890 /* Remove entries located in module from linked list. */
62891 for (info = gcov_info_head; info; info = info->next) {
62892- if (within(info, mod->module_core, mod->core_size)) {
62893+ if (within_module_core_rw((unsigned long)info, mod)) {
62894 if (prev)
62895 prev->next = info->next;
62896 else
62897diff -urNp linux-2.6.32.45/kernel/hrtimer.c linux-2.6.32.45/kernel/hrtimer.c
62898--- linux-2.6.32.45/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
62899+++ linux-2.6.32.45/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
62900@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
62901 local_irq_restore(flags);
62902 }
62903
62904-static void run_hrtimer_softirq(struct softirq_action *h)
62905+static void run_hrtimer_softirq(void)
62906 {
62907 hrtimer_peek_ahead_timers();
62908 }
62909diff -urNp linux-2.6.32.45/kernel/kallsyms.c linux-2.6.32.45/kernel/kallsyms.c
62910--- linux-2.6.32.45/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
62911+++ linux-2.6.32.45/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
62912@@ -11,6 +11,9 @@
62913 * Changed the compression method from stem compression to "table lookup"
62914 * compression (see scripts/kallsyms.c for a more complete description)
62915 */
62916+#ifdef CONFIG_GRKERNSEC_HIDESYM
62917+#define __INCLUDED_BY_HIDESYM 1
62918+#endif
62919 #include <linux/kallsyms.h>
62920 #include <linux/module.h>
62921 #include <linux/init.h>
62922@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
62923
62924 static inline int is_kernel_inittext(unsigned long addr)
62925 {
62926+ if (system_state != SYSTEM_BOOTING)
62927+ return 0;
62928+
62929 if (addr >= (unsigned long)_sinittext
62930 && addr <= (unsigned long)_einittext)
62931 return 1;
62932 return 0;
62933 }
62934
62935+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62936+#ifdef CONFIG_MODULES
62937+static inline int is_module_text(unsigned long addr)
62938+{
62939+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
62940+ return 1;
62941+
62942+ addr = ktla_ktva(addr);
62943+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
62944+}
62945+#else
62946+static inline int is_module_text(unsigned long addr)
62947+{
62948+ return 0;
62949+}
62950+#endif
62951+#endif
62952+
62953 static inline int is_kernel_text(unsigned long addr)
62954 {
62955 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
62956@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
62957
62958 static inline int is_kernel(unsigned long addr)
62959 {
62960+
62961+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62962+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
62963+ return 1;
62964+
62965+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
62966+#else
62967 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
62968+#endif
62969+
62970 return 1;
62971 return in_gate_area_no_task(addr);
62972 }
62973
62974 static int is_ksym_addr(unsigned long addr)
62975 {
62976+
62977+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62978+ if (is_module_text(addr))
62979+ return 0;
62980+#endif
62981+
62982 if (all_var)
62983 return is_kernel(addr);
62984
62985@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
62986
62987 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
62988 {
62989- iter->name[0] = '\0';
62990 iter->nameoff = get_symbol_offset(new_pos);
62991 iter->pos = new_pos;
62992 }
62993@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
62994 {
62995 struct kallsym_iter *iter = m->private;
62996
62997+#ifdef CONFIG_GRKERNSEC_HIDESYM
62998+ if (current_uid())
62999+ return 0;
63000+#endif
63001+
63002 /* Some debugging symbols have no name. Ignore them. */
63003 if (!iter->name[0])
63004 return 0;
63005@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
63006 struct kallsym_iter *iter;
63007 int ret;
63008
63009- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63010+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63011 if (!iter)
63012 return -ENOMEM;
63013 reset_iter(iter, 0);
63014diff -urNp linux-2.6.32.45/kernel/kgdb.c linux-2.6.32.45/kernel/kgdb.c
63015--- linux-2.6.32.45/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
63016+++ linux-2.6.32.45/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
63017@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
63018 /* Guard for recursive entry */
63019 static int exception_level;
63020
63021-static struct kgdb_io *kgdb_io_ops;
63022+static const struct kgdb_io *kgdb_io_ops;
63023 static DEFINE_SPINLOCK(kgdb_registration_lock);
63024
63025 /* kgdb console driver is loaded */
63026@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
63027 */
63028 static atomic_t passive_cpu_wait[NR_CPUS];
63029 static atomic_t cpu_in_kgdb[NR_CPUS];
63030-atomic_t kgdb_setting_breakpoint;
63031+atomic_unchecked_t kgdb_setting_breakpoint;
63032
63033 struct task_struct *kgdb_usethread;
63034 struct task_struct *kgdb_contthread;
63035@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
63036 sizeof(unsigned long)];
63037
63038 /* to keep track of the CPU which is doing the single stepping*/
63039-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63040+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63041
63042 /*
63043 * If you are debugging a problem where roundup (the collection of
63044@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
63045 return 0;
63046 if (kgdb_connected)
63047 return 1;
63048- if (atomic_read(&kgdb_setting_breakpoint))
63049+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
63050 return 1;
63051 if (print_wait)
63052 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
63053@@ -1426,8 +1426,8 @@ acquirelock:
63054 * instance of the exception handler wanted to come into the
63055 * debugger on a different CPU via a single step
63056 */
63057- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63058- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
63059+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63060+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
63061
63062 atomic_set(&kgdb_active, -1);
63063 touch_softlockup_watchdog();
63064@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
63065 *
63066 * Register it with the KGDB core.
63067 */
63068-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
63069+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
63070 {
63071 int err;
63072
63073@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
63074 *
63075 * Unregister it with the KGDB core.
63076 */
63077-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
63078+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
63079 {
63080 BUG_ON(kgdb_connected);
63081
63082@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
63083 */
63084 void kgdb_breakpoint(void)
63085 {
63086- atomic_set(&kgdb_setting_breakpoint, 1);
63087+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
63088 wmb(); /* Sync point before breakpoint */
63089 arch_kgdb_breakpoint();
63090 wmb(); /* Sync point after breakpoint */
63091- atomic_set(&kgdb_setting_breakpoint, 0);
63092+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
63093 }
63094 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
63095
63096diff -urNp linux-2.6.32.45/kernel/kmod.c linux-2.6.32.45/kernel/kmod.c
63097--- linux-2.6.32.45/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
63098+++ linux-2.6.32.45/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
63099@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63100 * If module auto-loading support is disabled then this function
63101 * becomes a no-operation.
63102 */
63103-int __request_module(bool wait, const char *fmt, ...)
63104+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63105 {
63106- va_list args;
63107 char module_name[MODULE_NAME_LEN];
63108 unsigned int max_modprobes;
63109 int ret;
63110- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63111+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63112 static char *envp[] = { "HOME=/",
63113 "TERM=linux",
63114 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63115@@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
63116 if (ret)
63117 return ret;
63118
63119- va_start(args, fmt);
63120- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63121- va_end(args);
63122+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63123 if (ret >= MODULE_NAME_LEN)
63124 return -ENAMETOOLONG;
63125
63126+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63127+ if (!current_uid()) {
63128+ /* hack to workaround consolekit/udisks stupidity */
63129+ read_lock(&tasklist_lock);
63130+ if (!strcmp(current->comm, "mount") &&
63131+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63132+ read_unlock(&tasklist_lock);
63133+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63134+ return -EPERM;
63135+ }
63136+ read_unlock(&tasklist_lock);
63137+ }
63138+#endif
63139+
63140 /* If modprobe needs a service that is in a module, we get a recursive
63141 * loop. Limit the number of running kmod threads to max_threads/2 or
63142 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63143@@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
63144 atomic_dec(&kmod_concurrent);
63145 return ret;
63146 }
63147+
63148+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63149+{
63150+ va_list args;
63151+ int ret;
63152+
63153+ va_start(args, fmt);
63154+ ret = ____request_module(wait, module_param, fmt, args);
63155+ va_end(args);
63156+
63157+ return ret;
63158+}
63159+
63160+int __request_module(bool wait, const char *fmt, ...)
63161+{
63162+ va_list args;
63163+ int ret;
63164+
63165+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63166+ if (current_uid()) {
63167+ char module_param[MODULE_NAME_LEN];
63168+
63169+ memset(module_param, 0, sizeof(module_param));
63170+
63171+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63172+
63173+ va_start(args, fmt);
63174+ ret = ____request_module(wait, module_param, fmt, args);
63175+ va_end(args);
63176+
63177+ return ret;
63178+ }
63179+#endif
63180+
63181+ va_start(args, fmt);
63182+ ret = ____request_module(wait, NULL, fmt, args);
63183+ va_end(args);
63184+
63185+ return ret;
63186+}
63187+
63188+
63189 EXPORT_SYMBOL(__request_module);
63190 #endif /* CONFIG_MODULES */
63191
63192diff -urNp linux-2.6.32.45/kernel/kprobes.c linux-2.6.32.45/kernel/kprobes.c
63193--- linux-2.6.32.45/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
63194+++ linux-2.6.32.45/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
63195@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
63196 * kernel image and loaded module images reside. This is required
63197 * so x86_64 can correctly handle the %rip-relative fixups.
63198 */
63199- kip->insns = module_alloc(PAGE_SIZE);
63200+ kip->insns = module_alloc_exec(PAGE_SIZE);
63201 if (!kip->insns) {
63202 kfree(kip);
63203 return NULL;
63204@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
63205 */
63206 if (!list_is_singular(&kprobe_insn_pages)) {
63207 list_del(&kip->list);
63208- module_free(NULL, kip->insns);
63209+ module_free_exec(NULL, kip->insns);
63210 kfree(kip);
63211 }
63212 return 1;
63213@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
63214 {
63215 int i, err = 0;
63216 unsigned long offset = 0, size = 0;
63217- char *modname, namebuf[128];
63218+ char *modname, namebuf[KSYM_NAME_LEN];
63219 const char *symbol_name;
63220 void *addr;
63221 struct kprobe_blackpoint *kb;
63222@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
63223 const char *sym = NULL;
63224 unsigned int i = *(loff_t *) v;
63225 unsigned long offset = 0;
63226- char *modname, namebuf[128];
63227+ char *modname, namebuf[KSYM_NAME_LEN];
63228
63229 head = &kprobe_table[i];
63230 preempt_disable();
63231diff -urNp linux-2.6.32.45/kernel/lockdep.c linux-2.6.32.45/kernel/lockdep.c
63232--- linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
63233+++ linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
63234@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
63235 /*
63236 * Various lockdep statistics:
63237 */
63238-atomic_t chain_lookup_hits;
63239-atomic_t chain_lookup_misses;
63240-atomic_t hardirqs_on_events;
63241-atomic_t hardirqs_off_events;
63242-atomic_t redundant_hardirqs_on;
63243-atomic_t redundant_hardirqs_off;
63244-atomic_t softirqs_on_events;
63245-atomic_t softirqs_off_events;
63246-atomic_t redundant_softirqs_on;
63247-atomic_t redundant_softirqs_off;
63248-atomic_t nr_unused_locks;
63249-atomic_t nr_cyclic_checks;
63250-atomic_t nr_find_usage_forwards_checks;
63251-atomic_t nr_find_usage_backwards_checks;
63252+atomic_unchecked_t chain_lookup_hits;
63253+atomic_unchecked_t chain_lookup_misses;
63254+atomic_unchecked_t hardirqs_on_events;
63255+atomic_unchecked_t hardirqs_off_events;
63256+atomic_unchecked_t redundant_hardirqs_on;
63257+atomic_unchecked_t redundant_hardirqs_off;
63258+atomic_unchecked_t softirqs_on_events;
63259+atomic_unchecked_t softirqs_off_events;
63260+atomic_unchecked_t redundant_softirqs_on;
63261+atomic_unchecked_t redundant_softirqs_off;
63262+atomic_unchecked_t nr_unused_locks;
63263+atomic_unchecked_t nr_cyclic_checks;
63264+atomic_unchecked_t nr_find_usage_forwards_checks;
63265+atomic_unchecked_t nr_find_usage_backwards_checks;
63266 #endif
63267
63268 /*
63269@@ -577,6 +577,10 @@ static int static_obj(void *obj)
63270 int i;
63271 #endif
63272
63273+#ifdef CONFIG_PAX_KERNEXEC
63274+ start = ktla_ktva(start);
63275+#endif
63276+
63277 /*
63278 * static variable?
63279 */
63280@@ -592,8 +596,7 @@ static int static_obj(void *obj)
63281 */
63282 for_each_possible_cpu(i) {
63283 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
63284- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
63285- + per_cpu_offset(i);
63286+ end = start + PERCPU_ENOUGH_ROOM;
63287
63288 if ((addr >= start) && (addr < end))
63289 return 1;
63290@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
63291 if (!static_obj(lock->key)) {
63292 debug_locks_off();
63293 printk("INFO: trying to register non-static key.\n");
63294+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63295 printk("the code is fine but needs lockdep annotation.\n");
63296 printk("turning off the locking correctness validator.\n");
63297 dump_stack();
63298@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
63299 if (!class)
63300 return 0;
63301 }
63302- debug_atomic_inc((atomic_t *)&class->ops);
63303+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
63304 if (very_verbose(class)) {
63305 printk("\nacquire class [%p] %s", class->key, class->name);
63306 if (class->name_version > 1)
63307diff -urNp linux-2.6.32.45/kernel/lockdep_internals.h linux-2.6.32.45/kernel/lockdep_internals.h
63308--- linux-2.6.32.45/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
63309+++ linux-2.6.32.45/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
63310@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
63311 /*
63312 * Various lockdep statistics:
63313 */
63314-extern atomic_t chain_lookup_hits;
63315-extern atomic_t chain_lookup_misses;
63316-extern atomic_t hardirqs_on_events;
63317-extern atomic_t hardirqs_off_events;
63318-extern atomic_t redundant_hardirqs_on;
63319-extern atomic_t redundant_hardirqs_off;
63320-extern atomic_t softirqs_on_events;
63321-extern atomic_t softirqs_off_events;
63322-extern atomic_t redundant_softirqs_on;
63323-extern atomic_t redundant_softirqs_off;
63324-extern atomic_t nr_unused_locks;
63325-extern atomic_t nr_cyclic_checks;
63326-extern atomic_t nr_cyclic_check_recursions;
63327-extern atomic_t nr_find_usage_forwards_checks;
63328-extern atomic_t nr_find_usage_forwards_recursions;
63329-extern atomic_t nr_find_usage_backwards_checks;
63330-extern atomic_t nr_find_usage_backwards_recursions;
63331-# define debug_atomic_inc(ptr) atomic_inc(ptr)
63332-# define debug_atomic_dec(ptr) atomic_dec(ptr)
63333-# define debug_atomic_read(ptr) atomic_read(ptr)
63334+extern atomic_unchecked_t chain_lookup_hits;
63335+extern atomic_unchecked_t chain_lookup_misses;
63336+extern atomic_unchecked_t hardirqs_on_events;
63337+extern atomic_unchecked_t hardirqs_off_events;
63338+extern atomic_unchecked_t redundant_hardirqs_on;
63339+extern atomic_unchecked_t redundant_hardirqs_off;
63340+extern atomic_unchecked_t softirqs_on_events;
63341+extern atomic_unchecked_t softirqs_off_events;
63342+extern atomic_unchecked_t redundant_softirqs_on;
63343+extern atomic_unchecked_t redundant_softirqs_off;
63344+extern atomic_unchecked_t nr_unused_locks;
63345+extern atomic_unchecked_t nr_cyclic_checks;
63346+extern atomic_unchecked_t nr_cyclic_check_recursions;
63347+extern atomic_unchecked_t nr_find_usage_forwards_checks;
63348+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
63349+extern atomic_unchecked_t nr_find_usage_backwards_checks;
63350+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
63351+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
63352+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
63353+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
63354 #else
63355 # define debug_atomic_inc(ptr) do { } while (0)
63356 # define debug_atomic_dec(ptr) do { } while (0)
63357diff -urNp linux-2.6.32.45/kernel/lockdep_proc.c linux-2.6.32.45/kernel/lockdep_proc.c
63358--- linux-2.6.32.45/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
63359+++ linux-2.6.32.45/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
63360@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63361
63362 static void print_name(struct seq_file *m, struct lock_class *class)
63363 {
63364- char str[128];
63365+ char str[KSYM_NAME_LEN];
63366 const char *name = class->name;
63367
63368 if (!name) {
63369diff -urNp linux-2.6.32.45/kernel/module.c linux-2.6.32.45/kernel/module.c
63370--- linux-2.6.32.45/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
63371+++ linux-2.6.32.45/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
63372@@ -55,6 +55,7 @@
63373 #include <linux/async.h>
63374 #include <linux/percpu.h>
63375 #include <linux/kmemleak.h>
63376+#include <linux/grsecurity.h>
63377
63378 #define CREATE_TRACE_POINTS
63379 #include <trace/events/module.h>
63380@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
63381 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
63382
63383 /* Bounds of module allocation, for speeding __module_address */
63384-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63385+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63386+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63387
63388 int register_module_notifier(struct notifier_block * nb)
63389 {
63390@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
63391 return true;
63392
63393 list_for_each_entry_rcu(mod, &modules, list) {
63394- struct symsearch arr[] = {
63395+ struct symsearch modarr[] = {
63396 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63397 NOT_GPL_ONLY, false },
63398 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63399@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
63400 #endif
63401 };
63402
63403- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63404+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63405 return true;
63406 }
63407 return false;
63408@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
63409 void *ptr;
63410 int cpu;
63411
63412- if (align > PAGE_SIZE) {
63413+ if (align-1 >= PAGE_SIZE) {
63414 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63415 name, align, PAGE_SIZE);
63416 align = PAGE_SIZE;
63417@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
63418 * /sys/module/foo/sections stuff
63419 * J. Corbet <corbet@lwn.net>
63420 */
63421-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
63422+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63423
63424 static inline bool sect_empty(const Elf_Shdr *sect)
63425 {
63426@@ -1545,7 +1547,8 @@ static void free_module(struct module *m
63427 destroy_params(mod->kp, mod->num_kp);
63428
63429 /* This may be NULL, but that's OK */
63430- module_free(mod, mod->module_init);
63431+ module_free(mod, mod->module_init_rw);
63432+ module_free_exec(mod, mod->module_init_rx);
63433 kfree(mod->args);
63434 if (mod->percpu)
63435 percpu_modfree(mod->percpu);
63436@@ -1554,10 +1557,12 @@ static void free_module(struct module *m
63437 percpu_modfree(mod->refptr);
63438 #endif
63439 /* Free lock-classes: */
63440- lockdep_free_key_range(mod->module_core, mod->core_size);
63441+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63442+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63443
63444 /* Finally, free the core (containing the module structure) */
63445- module_free(mod, mod->module_core);
63446+ module_free_exec(mod, mod->module_core_rx);
63447+ module_free(mod, mod->module_core_rw);
63448
63449 #ifdef CONFIG_MPU
63450 update_protections(current->mm);
63451@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
63452 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63453 int ret = 0;
63454 const struct kernel_symbol *ksym;
63455+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63456+ int is_fs_load = 0;
63457+ int register_filesystem_found = 0;
63458+ char *p;
63459+
63460+ p = strstr(mod->args, "grsec_modharden_fs");
63461+
63462+ if (p) {
63463+ char *endptr = p + strlen("grsec_modharden_fs");
63464+ /* copy \0 as well */
63465+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63466+ is_fs_load = 1;
63467+ }
63468+#endif
63469+
63470
63471 for (i = 1; i < n; i++) {
63472+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63473+ const char *name = strtab + sym[i].st_name;
63474+
63475+ /* it's a real shame this will never get ripped and copied
63476+ upstream! ;(
63477+ */
63478+ if (is_fs_load && !strcmp(name, "register_filesystem"))
63479+ register_filesystem_found = 1;
63480+#endif
63481 switch (sym[i].st_shndx) {
63482 case SHN_COMMON:
63483 /* We compiled with -fno-common. These are not
63484@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
63485 strtab + sym[i].st_name, mod);
63486 /* Ok if resolved. */
63487 if (ksym) {
63488+ pax_open_kernel();
63489 sym[i].st_value = ksym->value;
63490+ pax_close_kernel();
63491 break;
63492 }
63493
63494@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
63495 secbase = (unsigned long)mod->percpu;
63496 else
63497 secbase = sechdrs[sym[i].st_shndx].sh_addr;
63498+ pax_open_kernel();
63499 sym[i].st_value += secbase;
63500+ pax_close_kernel();
63501 break;
63502 }
63503 }
63504
63505+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63506+ if (is_fs_load && !register_filesystem_found) {
63507+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63508+ ret = -EPERM;
63509+ }
63510+#endif
63511+
63512 return ret;
63513 }
63514
63515@@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
63516 || s->sh_entsize != ~0UL
63517 || strstarts(secstrings + s->sh_name, ".init"))
63518 continue;
63519- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63520+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63521+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63522+ else
63523+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63524 DEBUGP("\t%s\n", secstrings + s->sh_name);
63525 }
63526- if (m == 0)
63527- mod->core_text_size = mod->core_size;
63528 }
63529
63530 DEBUGP("Init section allocation order:\n");
63531@@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
63532 || s->sh_entsize != ~0UL
63533 || !strstarts(secstrings + s->sh_name, ".init"))
63534 continue;
63535- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63536- | INIT_OFFSET_MASK);
63537+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63538+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63539+ else
63540+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63541+ s->sh_entsize |= INIT_OFFSET_MASK;
63542 DEBUGP("\t%s\n", secstrings + s->sh_name);
63543 }
63544- if (m == 0)
63545- mod->init_text_size = mod->init_size;
63546 }
63547 }
63548
63549@@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
63550
63551 /* As per nm */
63552 static char elf_type(const Elf_Sym *sym,
63553- Elf_Shdr *sechdrs,
63554- const char *secstrings,
63555- struct module *mod)
63556+ const Elf_Shdr *sechdrs,
63557+ const char *secstrings)
63558 {
63559 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
63560 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
63561@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
63562
63563 /* Put symbol section at end of init part of module. */
63564 symsect->sh_flags |= SHF_ALLOC;
63565- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63566+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63567 symindex) | INIT_OFFSET_MASK;
63568 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
63569
63570@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
63571 }
63572
63573 /* Append room for core symbols at end of core part. */
63574- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63575- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
63576+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63577+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
63578
63579 /* Put string table section at end of init part of module. */
63580 strsect->sh_flags |= SHF_ALLOC;
63581- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63582+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63583 strindex) | INIT_OFFSET_MASK;
63584 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
63585
63586 /* Append room for core symbols' strings at end of core part. */
63587- *pstroffs = mod->core_size;
63588+ *pstroffs = mod->core_size_rx;
63589 __set_bit(0, strmap);
63590- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
63591+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
63592
63593 return symoffs;
63594 }
63595@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
63596 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63597 mod->strtab = (void *)sechdrs[strindex].sh_addr;
63598
63599+ pax_open_kernel();
63600+
63601 /* Set types up while we still have access to sections. */
63602 for (i = 0; i < mod->num_symtab; i++)
63603 mod->symtab[i].st_info
63604- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
63605+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
63606
63607- mod->core_symtab = dst = mod->module_core + symoffs;
63608+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
63609 src = mod->symtab;
63610 *dst = *src;
63611 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63612@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
63613 }
63614 mod->core_num_syms = ndst;
63615
63616- mod->core_strtab = s = mod->module_core + stroffs;
63617+ mod->core_strtab = s = mod->module_core_rx + stroffs;
63618 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
63619 if (test_bit(i, strmap))
63620 *++s = mod->strtab[i];
63621+
63622+ pax_close_kernel();
63623 }
63624 #else
63625 static inline unsigned long layout_symtab(struct module *mod,
63626@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
63627 #endif
63628 }
63629
63630-static void *module_alloc_update_bounds(unsigned long size)
63631+static void *module_alloc_update_bounds_rw(unsigned long size)
63632 {
63633 void *ret = module_alloc(size);
63634
63635 if (ret) {
63636 /* Update module bounds. */
63637- if ((unsigned long)ret < module_addr_min)
63638- module_addr_min = (unsigned long)ret;
63639- if ((unsigned long)ret + size > module_addr_max)
63640- module_addr_max = (unsigned long)ret + size;
63641+ if ((unsigned long)ret < module_addr_min_rw)
63642+ module_addr_min_rw = (unsigned long)ret;
63643+ if ((unsigned long)ret + size > module_addr_max_rw)
63644+ module_addr_max_rw = (unsigned long)ret + size;
63645+ }
63646+ return ret;
63647+}
63648+
63649+static void *module_alloc_update_bounds_rx(unsigned long size)
63650+{
63651+ void *ret = module_alloc_exec(size);
63652+
63653+ if (ret) {
63654+ /* Update module bounds. */
63655+ if ((unsigned long)ret < module_addr_min_rx)
63656+ module_addr_min_rx = (unsigned long)ret;
63657+ if ((unsigned long)ret + size > module_addr_max_rx)
63658+ module_addr_max_rx = (unsigned long)ret + size;
63659 }
63660 return ret;
63661 }
63662@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
63663 unsigned int i;
63664
63665 /* only scan the sections containing data */
63666- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
63667- (unsigned long)mod->module_core,
63668+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
63669+ (unsigned long)mod->module_core_rw,
63670 sizeof(struct module), GFP_KERNEL);
63671
63672 for (i = 1; i < hdr->e_shnum; i++) {
63673@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
63674 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
63675 continue;
63676
63677- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
63678- (unsigned long)mod->module_core,
63679+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
63680+ (unsigned long)mod->module_core_rw,
63681 sechdrs[i].sh_size, GFP_KERNEL);
63682 }
63683 }
63684@@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
63685 secstrings, &stroffs, strmap);
63686
63687 /* Do the allocs. */
63688- ptr = module_alloc_update_bounds(mod->core_size);
63689+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
63690 /*
63691 * The pointer to this block is stored in the module structure
63692 * which is inside the block. Just mark it as not being a
63693@@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
63694 err = -ENOMEM;
63695 goto free_percpu;
63696 }
63697- memset(ptr, 0, mod->core_size);
63698- mod->module_core = ptr;
63699+ memset(ptr, 0, mod->core_size_rw);
63700+ mod->module_core_rw = ptr;
63701
63702- ptr = module_alloc_update_bounds(mod->init_size);
63703+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
63704 /*
63705 * The pointer to this block is stored in the module structure
63706 * which is inside the block. This block doesn't need to be
63707 * scanned as it contains data and code that will be freed
63708 * after the module is initialized.
63709 */
63710- kmemleak_ignore(ptr);
63711- if (!ptr && mod->init_size) {
63712+ kmemleak_not_leak(ptr);
63713+ if (!ptr && mod->init_size_rw) {
63714+ err = -ENOMEM;
63715+ goto free_core_rw;
63716+ }
63717+ memset(ptr, 0, mod->init_size_rw);
63718+ mod->module_init_rw = ptr;
63719+
63720+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
63721+ kmemleak_not_leak(ptr);
63722+ if (!ptr) {
63723 err = -ENOMEM;
63724- goto free_core;
63725+ goto free_init_rw;
63726 }
63727- memset(ptr, 0, mod->init_size);
63728- mod->module_init = ptr;
63729+
63730+ pax_open_kernel();
63731+ memset(ptr, 0, mod->core_size_rx);
63732+ pax_close_kernel();
63733+ mod->module_core_rx = ptr;
63734+
63735+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
63736+ kmemleak_not_leak(ptr);
63737+ if (!ptr && mod->init_size_rx) {
63738+ err = -ENOMEM;
63739+ goto free_core_rx;
63740+ }
63741+
63742+ pax_open_kernel();
63743+ memset(ptr, 0, mod->init_size_rx);
63744+ pax_close_kernel();
63745+ mod->module_init_rx = ptr;
63746
63747 /* Transfer each section which specifies SHF_ALLOC */
63748 DEBUGP("final section addresses:\n");
63749@@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
63750 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
63751 continue;
63752
63753- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
63754- dest = mod->module_init
63755- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63756- else
63757- dest = mod->module_core + sechdrs[i].sh_entsize;
63758+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
63759+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63760+ dest = mod->module_init_rw
63761+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63762+ else
63763+ dest = mod->module_init_rx
63764+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63765+ } else {
63766+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63767+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
63768+ else
63769+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
63770+ }
63771+
63772+ if (sechdrs[i].sh_type != SHT_NOBITS) {
63773
63774- if (sechdrs[i].sh_type != SHT_NOBITS)
63775- memcpy(dest, (void *)sechdrs[i].sh_addr,
63776- sechdrs[i].sh_size);
63777+#ifdef CONFIG_PAX_KERNEXEC
63778+#ifdef CONFIG_X86_64
63779+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
63780+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
63781+#endif
63782+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
63783+ pax_open_kernel();
63784+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63785+ pax_close_kernel();
63786+ } else
63787+#endif
63788+
63789+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63790+ }
63791 /* Update sh_addr to point to copy in image. */
63792- sechdrs[i].sh_addr = (unsigned long)dest;
63793+
63794+#ifdef CONFIG_PAX_KERNEXEC
63795+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
63796+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
63797+ else
63798+#endif
63799+
63800+ sechdrs[i].sh_addr = (unsigned long)dest;
63801 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
63802 }
63803 /* Module has been moved. */
63804@@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
63805 mod->name);
63806 if (!mod->refptr) {
63807 err = -ENOMEM;
63808- goto free_init;
63809+ goto free_init_rx;
63810 }
63811 #endif
63812 /* Now we've moved module, initialize linked lists, etc. */
63813@@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
63814 /* Set up MODINFO_ATTR fields */
63815 setup_modinfo(mod, sechdrs, infoindex);
63816
63817+ mod->args = args;
63818+
63819+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63820+ {
63821+ char *p, *p2;
63822+
63823+ if (strstr(mod->args, "grsec_modharden_netdev")) {
63824+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
63825+ err = -EPERM;
63826+ goto cleanup;
63827+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
63828+ p += strlen("grsec_modharden_normal");
63829+ p2 = strstr(p, "_");
63830+ if (p2) {
63831+ *p2 = '\0';
63832+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
63833+ *p2 = '_';
63834+ }
63835+ err = -EPERM;
63836+ goto cleanup;
63837+ }
63838+ }
63839+#endif
63840+
63841+
63842 /* Fix up syms, so that st_value is a pointer to location. */
63843 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
63844 mod);
63845@@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
63846
63847 /* Now do relocations. */
63848 for (i = 1; i < hdr->e_shnum; i++) {
63849- const char *strtab = (char *)sechdrs[strindex].sh_addr;
63850 unsigned int info = sechdrs[i].sh_info;
63851+ strtab = (char *)sechdrs[strindex].sh_addr;
63852
63853 /* Not a valid relocation section? */
63854 if (info >= hdr->e_shnum)
63855@@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
63856 * Do it before processing of module parameters, so the module
63857 * can provide parameter accessor functions of its own.
63858 */
63859- if (mod->module_init)
63860- flush_icache_range((unsigned long)mod->module_init,
63861- (unsigned long)mod->module_init
63862- + mod->init_size);
63863- flush_icache_range((unsigned long)mod->module_core,
63864- (unsigned long)mod->module_core + mod->core_size);
63865+ if (mod->module_init_rx)
63866+ flush_icache_range((unsigned long)mod->module_init_rx,
63867+ (unsigned long)mod->module_init_rx
63868+ + mod->init_size_rx);
63869+ flush_icache_range((unsigned long)mod->module_core_rx,
63870+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
63871
63872 set_fs(old_fs);
63873
63874- mod->args = args;
63875 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
63876 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
63877 mod->name);
63878@@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
63879 free_unload:
63880 module_unload_free(mod);
63881 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
63882+ free_init_rx:
63883 percpu_modfree(mod->refptr);
63884- free_init:
63885 #endif
63886- module_free(mod, mod->module_init);
63887- free_core:
63888- module_free(mod, mod->module_core);
63889+ module_free_exec(mod, mod->module_init_rx);
63890+ free_core_rx:
63891+ module_free_exec(mod, mod->module_core_rx);
63892+ free_init_rw:
63893+ module_free(mod, mod->module_init_rw);
63894+ free_core_rw:
63895+ module_free(mod, mod->module_core_rw);
63896 /* mod will be freed with core. Don't access it beyond this line! */
63897 free_percpu:
63898 if (percpu)
63899@@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
63900 mod->symtab = mod->core_symtab;
63901 mod->strtab = mod->core_strtab;
63902 #endif
63903- module_free(mod, mod->module_init);
63904- mod->module_init = NULL;
63905- mod->init_size = 0;
63906- mod->init_text_size = 0;
63907+ module_free(mod, mod->module_init_rw);
63908+ module_free_exec(mod, mod->module_init_rx);
63909+ mod->module_init_rw = NULL;
63910+ mod->module_init_rx = NULL;
63911+ mod->init_size_rw = 0;
63912+ mod->init_size_rx = 0;
63913 mutex_unlock(&module_mutex);
63914
63915 return 0;
63916@@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
63917 unsigned long nextval;
63918
63919 /* At worse, next value is at end of module */
63920- if (within_module_init(addr, mod))
63921- nextval = (unsigned long)mod->module_init+mod->init_text_size;
63922+ if (within_module_init_rx(addr, mod))
63923+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
63924+ else if (within_module_init_rw(addr, mod))
63925+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
63926+ else if (within_module_core_rx(addr, mod))
63927+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
63928+ else if (within_module_core_rw(addr, mod))
63929+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
63930 else
63931- nextval = (unsigned long)mod->module_core+mod->core_text_size;
63932+ return NULL;
63933
63934 /* Scan for closest preceeding symbol, and next symbol. (ELF
63935 starts real symbols at 1). */
63936@@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
63937 char buf[8];
63938
63939 seq_printf(m, "%s %u",
63940- mod->name, mod->init_size + mod->core_size);
63941+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
63942 print_unload_info(m, mod);
63943
63944 /* Informative for users. */
63945@@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
63946 mod->state == MODULE_STATE_COMING ? "Loading":
63947 "Live");
63948 /* Used by oprofile and other similar tools. */
63949- seq_printf(m, " 0x%p", mod->module_core);
63950+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63951
63952 /* Taints info */
63953 if (mod->taints)
63954@@ -2981,7 +3128,17 @@ static const struct file_operations proc
63955
63956 static int __init proc_modules_init(void)
63957 {
63958+#ifndef CONFIG_GRKERNSEC_HIDESYM
63959+#ifdef CONFIG_GRKERNSEC_PROC_USER
63960+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
63961+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63962+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
63963+#else
63964 proc_create("modules", 0, NULL, &proc_modules_operations);
63965+#endif
63966+#else
63967+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
63968+#endif
63969 return 0;
63970 }
63971 module_init(proc_modules_init);
63972@@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
63973 {
63974 struct module *mod;
63975
63976- if (addr < module_addr_min || addr > module_addr_max)
63977+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
63978+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
63979 return NULL;
63980
63981 list_for_each_entry_rcu(mod, &modules, list)
63982- if (within_module_core(addr, mod)
63983- || within_module_init(addr, mod))
63984+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
63985 return mod;
63986 return NULL;
63987 }
63988@@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
63989 */
63990 struct module *__module_text_address(unsigned long addr)
63991 {
63992- struct module *mod = __module_address(addr);
63993+ struct module *mod;
63994+
63995+#ifdef CONFIG_X86_32
63996+ addr = ktla_ktva(addr);
63997+#endif
63998+
63999+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64000+ return NULL;
64001+
64002+ mod = __module_address(addr);
64003+
64004 if (mod) {
64005 /* Make sure it's within the text section. */
64006- if (!within(addr, mod->module_init, mod->init_text_size)
64007- && !within(addr, mod->module_core, mod->core_text_size))
64008+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64009 mod = NULL;
64010 }
64011 return mod;
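
Note on the kernel/module.c hunks above: they split module memory into separate writable (RW) and executable (RX) allocations and keep per-kind address bounds (module_addr_{min,max}_{rw,rx}) so later address lookups can reject pointers outside either window. A minimal user-space sketch of that bounds bookkeeping follows; the allocator and variable names are stand-ins, not the kernel implementation.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the two module allocators in the patch. */
static uintptr_t min_rw = UINTPTR_MAX, max_rw;
static uintptr_t min_rx = UINTPTR_MAX, max_rx;

static void *alloc_update_bounds(size_t size, uintptr_t *min, uintptr_t *max)
{
	void *p = malloc(size);	/* module_alloc()/module_alloc_exec() analogue */

	if (p) {
		uintptr_t a = (uintptr_t)p;

		if (a < *min)
			*min = a;
		if (a + size > *max)
			*max = a + size;
	}
	return p;
}

int main(void)
{
	void *core_rw = alloc_update_bounds(4096, &min_rw, &max_rw);
	void *core_rx = alloc_update_bounds(4096, &min_rx, &max_rx);

	/* An address-to-module lookup only needs to walk the module list when
	 * the address falls inside one of the two tracked windows. */
	printf("rw window: %#lx-%#lx\n", (unsigned long)min_rw, (unsigned long)max_rw);
	printf("rx window: %#lx-%#lx\n", (unsigned long)min_rx, (unsigned long)max_rx);
	free(core_rw);
	free(core_rx);
	return 0;
}
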
64012diff -urNp linux-2.6.32.45/kernel/mutex.c linux-2.6.32.45/kernel/mutex.c
64013--- linux-2.6.32.45/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
64014+++ linux-2.6.32.45/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
64015@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
64016 */
64017
64018 for (;;) {
64019- struct thread_info *owner;
64020+ struct task_struct *owner;
64021
64022 /*
64023 * If we own the BKL, then don't spin. The owner of
64024@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
64025 spin_lock_mutex(&lock->wait_lock, flags);
64026
64027 debug_mutex_lock_common(lock, &waiter);
64028- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64029+ debug_mutex_add_waiter(lock, &waiter, task);
64030
64031 /* add waiting tasks to the end of the waitqueue (FIFO): */
64032 list_add_tail(&waiter.list, &lock->wait_list);
64033@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
64034 * TASK_UNINTERRUPTIBLE case.)
64035 */
64036 if (unlikely(signal_pending_state(state, task))) {
64037- mutex_remove_waiter(lock, &waiter,
64038- task_thread_info(task));
64039+ mutex_remove_waiter(lock, &waiter, task);
64040 mutex_release(&lock->dep_map, 1, ip);
64041 spin_unlock_mutex(&lock->wait_lock, flags);
64042
64043@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
64044 done:
64045 lock_acquired(&lock->dep_map, ip);
64046 /* got the lock - rejoice! */
64047- mutex_remove_waiter(lock, &waiter, current_thread_info());
64048+ mutex_remove_waiter(lock, &waiter, task);
64049 mutex_set_owner(lock);
64050
64051 /* set it to 0 if there are no waiters left: */
64052diff -urNp linux-2.6.32.45/kernel/mutex-debug.c linux-2.6.32.45/kernel/mutex-debug.c
64053--- linux-2.6.32.45/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
64054+++ linux-2.6.32.45/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
64055@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64056 }
64057
64058 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64059- struct thread_info *ti)
64060+ struct task_struct *task)
64061 {
64062 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64063
64064 /* Mark the current thread as blocked on the lock: */
64065- ti->task->blocked_on = waiter;
64066+ task->blocked_on = waiter;
64067 }
64068
64069 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64070- struct thread_info *ti)
64071+ struct task_struct *task)
64072 {
64073 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64074- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64075- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64076- ti->task->blocked_on = NULL;
64077+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64078+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64079+ task->blocked_on = NULL;
64080
64081 list_del_init(&waiter->list);
64082 waiter->task = NULL;
64083@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
64084 return;
64085
64086 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
64087- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
64088+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
64089 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
64090 mutex_clear_owner(lock);
64091 }
64092diff -urNp linux-2.6.32.45/kernel/mutex-debug.h linux-2.6.32.45/kernel/mutex-debug.h
64093--- linux-2.6.32.45/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
64094+++ linux-2.6.32.45/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
64095@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
64096 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64097 extern void debug_mutex_add_waiter(struct mutex *lock,
64098 struct mutex_waiter *waiter,
64099- struct thread_info *ti);
64100+ struct task_struct *task);
64101 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64102- struct thread_info *ti);
64103+ struct task_struct *task);
64104 extern void debug_mutex_unlock(struct mutex *lock);
64105 extern void debug_mutex_init(struct mutex *lock, const char *name,
64106 struct lock_class_key *key);
64107
64108 static inline void mutex_set_owner(struct mutex *lock)
64109 {
64110- lock->owner = current_thread_info();
64111+ lock->owner = current;
64112 }
64113
64114 static inline void mutex_clear_owner(struct mutex *lock)
64115diff -urNp linux-2.6.32.45/kernel/mutex.h linux-2.6.32.45/kernel/mutex.h
64116--- linux-2.6.32.45/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
64117+++ linux-2.6.32.45/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
64118@@ -19,7 +19,7 @@
64119 #ifdef CONFIG_SMP
64120 static inline void mutex_set_owner(struct mutex *lock)
64121 {
64122- lock->owner = current_thread_info();
64123+ lock->owner = current;
64124 }
64125
64126 static inline void mutex_clear_owner(struct mutex *lock)
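
Note on the mutex hunks above (mutex.c, mutex-debug.c, mutex-debug.h, mutex.h): they switch owner and waiter tracking from struct thread_info * to struct task_struct *, so the lock records the task directly. A rough user-space sketch of the same bookkeeping, with invented types, is below.

#include <assert.h>
#include <stddef.h>

struct task { struct waiter *blocked_on; };
struct waiter { struct task *task; };
struct lock { struct task *owner; };

/* Record which task is blocked on the lock (debug_mutex_add_waiter analogue). */
static void add_waiter(struct lock *l, struct waiter *w, struct task *t)
{
	(void)l;
	t->blocked_on = w;
}

/* Clear the record once the task got the lock (mutex_remove_waiter analogue). */
static void remove_waiter(struct lock *l, struct waiter *w, struct task *t)
{
	(void)l;
	assert(w->task == t);
	assert(t->blocked_on == w);
	t->blocked_on = NULL;
}

int main(void)
{
	struct task t = { 0 };
	struct waiter w = { .task = &t };
	struct lock l = { 0 };

	add_waiter(&l, &w, &t);
	remove_waiter(&l, &w, &t);
	l.owner = &t;	/* mutex_set_owner() now stores the task pointer directly */
	return 0;
}
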
64127diff -urNp linux-2.6.32.45/kernel/panic.c linux-2.6.32.45/kernel/panic.c
64128--- linux-2.6.32.45/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
64129+++ linux-2.6.32.45/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
64130@@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
64131 const char *board;
64132
64133 printk(KERN_WARNING "------------[ cut here ]------------\n");
64134- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64135+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64136 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64137 if (board)
64138 printk(KERN_WARNING "Hardware name: %s\n", board);
64139@@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64140 */
64141 void __stack_chk_fail(void)
64142 {
64143- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64144+ dump_stack();
64145+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64146 __builtin_return_address(0));
64147 }
64148 EXPORT_SYMBOL(__stack_chk_fail);
64149diff -urNp linux-2.6.32.45/kernel/params.c linux-2.6.32.45/kernel/params.c
64150--- linux-2.6.32.45/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
64151+++ linux-2.6.32.45/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
64152@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
64153 return ret;
64154 }
64155
64156-static struct sysfs_ops module_sysfs_ops = {
64157+static const struct sysfs_ops module_sysfs_ops = {
64158 .show = module_attr_show,
64159 .store = module_attr_store,
64160 };
64161@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
64162 return 0;
64163 }
64164
64165-static struct kset_uevent_ops module_uevent_ops = {
64166+static const struct kset_uevent_ops module_uevent_ops = {
64167 .filter = uevent_filter,
64168 };
64169
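
Note on the params.c hunk above: it constifies the sysfs and kset uevent operation tables. A small sketch of why a static const function-pointer table is preferable (it can be placed in read-only memory); the names here are illustrative only.

#include <stdio.h>

struct attr_ops {
	long (*show)(char *buf, size_t len);
	long (*store)(const char *buf, size_t len);
};

static long value_show(char *buf, size_t len)
{
	return snprintf(buf, len, "value\n");
}

static long value_store(const char *buf, size_t len)
{
	(void)buf;
	return (long)len;	/* pretend the whole buffer was consumed */
}

/* static const: the function-pointer table lands in read-only memory,
 * so its entries cannot be retargeted at run time. */
static const struct attr_ops module_attr_ops = {
	.show  = value_show,
	.store = value_store,
};

int main(void)
{
	char buf[32];

	module_attr_ops.show(buf, sizeof(buf));
	fputs(buf, stdout);
	return module_attr_ops.store("x", 1) == 1 ? 0 : 1;
}
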
64170diff -urNp linux-2.6.32.45/kernel/perf_event.c linux-2.6.32.45/kernel/perf_event.c
64171--- linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:35:30.000000000 -0400
64172+++ linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:34:01.000000000 -0400
64173@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
64174 */
64175 int sysctl_perf_event_sample_rate __read_mostly = 100000;
64176
64177-static atomic64_t perf_event_id;
64178+static atomic64_unchecked_t perf_event_id;
64179
64180 /*
64181 * Lock for (sysadmin-configurable) event reservations:
64182@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
64183 * In order to keep per-task stats reliable we need to flip the event
64184 * values when we flip the contexts.
64185 */
64186- value = atomic64_read(&next_event->count);
64187- value = atomic64_xchg(&event->count, value);
64188- atomic64_set(&next_event->count, value);
64189+ value = atomic64_read_unchecked(&next_event->count);
64190+ value = atomic64_xchg_unchecked(&event->count, value);
64191+ atomic64_set_unchecked(&next_event->count, value);
64192
64193 swap(event->total_time_enabled, next_event->total_time_enabled);
64194 swap(event->total_time_running, next_event->total_time_running);
64195@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
64196 update_event_times(event);
64197 }
64198
64199- return atomic64_read(&event->count);
64200+ return atomic64_read_unchecked(&event->count);
64201 }
64202
64203 /*
64204@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
64205 values[n++] = 1 + leader->nr_siblings;
64206 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64207 values[n++] = leader->total_time_enabled +
64208- atomic64_read(&leader->child_total_time_enabled);
64209+ atomic64_read_unchecked(&leader->child_total_time_enabled);
64210 }
64211 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64212 values[n++] = leader->total_time_running +
64213- atomic64_read(&leader->child_total_time_running);
64214+ atomic64_read_unchecked(&leader->child_total_time_running);
64215 }
64216
64217 size = n * sizeof(u64);
64218@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
64219 values[n++] = perf_event_read_value(event);
64220 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64221 values[n++] = event->total_time_enabled +
64222- atomic64_read(&event->child_total_time_enabled);
64223+ atomic64_read_unchecked(&event->child_total_time_enabled);
64224 }
64225 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64226 values[n++] = event->total_time_running +
64227- atomic64_read(&event->child_total_time_running);
64228+ atomic64_read_unchecked(&event->child_total_time_running);
64229 }
64230 if (read_format & PERF_FORMAT_ID)
64231 values[n++] = primary_event_id(event);
64232@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
64233 static void perf_event_reset(struct perf_event *event)
64234 {
64235 (void)perf_event_read(event);
64236- atomic64_set(&event->count, 0);
64237+ atomic64_set_unchecked(&event->count, 0);
64238 perf_event_update_userpage(event);
64239 }
64240
64241@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
64242 ++userpg->lock;
64243 barrier();
64244 userpg->index = perf_event_index(event);
64245- userpg->offset = atomic64_read(&event->count);
64246+ userpg->offset = atomic64_read_unchecked(&event->count);
64247 if (event->state == PERF_EVENT_STATE_ACTIVE)
64248- userpg->offset -= atomic64_read(&event->hw.prev_count);
64249+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
64250
64251 userpg->time_enabled = event->total_time_enabled +
64252- atomic64_read(&event->child_total_time_enabled);
64253+ atomic64_read_unchecked(&event->child_total_time_enabled);
64254
64255 userpg->time_running = event->total_time_running +
64256- atomic64_read(&event->child_total_time_running);
64257+ atomic64_read_unchecked(&event->child_total_time_running);
64258
64259 barrier();
64260 ++userpg->lock;
64261@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
64262 u64 values[4];
64263 int n = 0;
64264
64265- values[n++] = atomic64_read(&event->count);
64266+ values[n++] = atomic64_read_unchecked(&event->count);
64267 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64268 values[n++] = event->total_time_enabled +
64269- atomic64_read(&event->child_total_time_enabled);
64270+ atomic64_read_unchecked(&event->child_total_time_enabled);
64271 }
64272 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64273 values[n++] = event->total_time_running +
64274- atomic64_read(&event->child_total_time_running);
64275+ atomic64_read_unchecked(&event->child_total_time_running);
64276 }
64277 if (read_format & PERF_FORMAT_ID)
64278 values[n++] = primary_event_id(event);
64279@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
64280 if (leader != event)
64281 leader->pmu->read(leader);
64282
64283- values[n++] = atomic64_read(&leader->count);
64284+ values[n++] = atomic64_read_unchecked(&leader->count);
64285 if (read_format & PERF_FORMAT_ID)
64286 values[n++] = primary_event_id(leader);
64287
64288@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
64289 if (sub != event)
64290 sub->pmu->read(sub);
64291
64292- values[n++] = atomic64_read(&sub->count);
64293+ values[n++] = atomic64_read_unchecked(&sub->count);
64294 if (read_format & PERF_FORMAT_ID)
64295 values[n++] = primary_event_id(sub);
64296
64297@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf
64298 {
64299 struct hw_perf_event *hwc = &event->hw;
64300
64301- atomic64_add(nr, &event->count);
64302+ atomic64_add_unchecked(nr, &event->count);
64303
64304 if (!hwc->sample_period)
64305 return;
64306@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(
64307 u64 now;
64308
64309 now = cpu_clock(cpu);
64310- prev = atomic64_read(&event->hw.prev_count);
64311- atomic64_set(&event->hw.prev_count, now);
64312- atomic64_add(now - prev, &event->count);
64313+ prev = atomic64_read_unchecked(&event->hw.prev_count);
64314+ atomic64_set_unchecked(&event->hw.prev_count, now);
64315+ atomic64_add_unchecked(now - prev, &event->count);
64316 }
64317
64318 static int cpu_clock_perf_event_enable(struct perf_event *event)
64319@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(s
64320 struct hw_perf_event *hwc = &event->hw;
64321 int cpu = raw_smp_processor_id();
64322
64323- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
64324+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
64325 perf_swevent_start_hrtimer(event);
64326
64327 return 0;
64328@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update
64329 u64 prev;
64330 s64 delta;
64331
64332- prev = atomic64_xchg(&event->hw.prev_count, now);
64333+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
64334 delta = now - prev;
64335- atomic64_add(delta, &event->count);
64336+ atomic64_add_unchecked(delta, &event->count);
64337 }
64338
64339 static int task_clock_perf_event_enable(struct perf_event *event)
64340@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(
64341
64342 now = event->ctx->time;
64343
64344- atomic64_set(&hwc->prev_count, now);
64345+ atomic64_set_unchecked(&hwc->prev_count, now);
64346
64347 perf_swevent_start_hrtimer(event);
64348
64349@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr
64350 event->parent = parent_event;
64351
64352 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64353- event->id = atomic64_inc_return(&perf_event_id);
64354+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
64355
64356 event->state = PERF_EVENT_STATE_INACTIVE;
64357
64358@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf
64359 if (child_event->attr.inherit_stat)
64360 perf_event_read_event(child_event, child);
64361
64362- child_val = atomic64_read(&child_event->count);
64363+ child_val = atomic64_read_unchecked(&child_event->count);
64364
64365 /*
64366 * Add back the child's count to the parent's count:
64367 */
64368- atomic64_add(child_val, &parent_event->count);
64369- atomic64_add(child_event->total_time_enabled,
64370+ atomic64_add_unchecked(child_val, &parent_event->count);
64371+ atomic64_add_unchecked(child_event->total_time_enabled,
64372 &parent_event->child_total_time_enabled);
64373- atomic64_add(child_event->total_time_running,
64374+ atomic64_add_unchecked(child_event->total_time_running,
64375 &parent_event->child_total_time_running);
64376
64377 /*
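
Note on the perf_event.c hunks above: they convert pure statistics counters from atomic64_t to the PaX atomic64_unchecked_t type, which is exempt from reference-count overflow checking. A user-space illustration of the checked-versus-unchecked distinction, with made-up helper names, is sketched below.

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* "Checked" increment: refuse to wrap, the behaviour enforced for
 * reference counters. */
static int refcount_inc_checked(atomic_long *v)
{
	long old = atomic_load(v);

	do {
		if (old == LONG_MAX) {
			fprintf(stderr, "refcount saturated, increment dropped\n");
			return 0;
		}
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));
	return 1;
}

/* "Unchecked" add: plain modular arithmetic, acceptable for statistics such
 * as event counts, which is what the _unchecked conversions above mark. */
static unsigned long stat_add_unchecked(_Atomic unsigned long *v, unsigned long n)
{
	return atomic_fetch_add(v, n) + n;
}

int main(void)
{
	atomic_long refs = LONG_MAX;		/* about to overflow */
	_Atomic unsigned long events = 0;

	refcount_inc_checked(&refs);		/* detected and refused */
	stat_add_unchecked(&events, 7);		/* just counts */
	printf("refs=%ld events=%lu\n", atomic_load(&refs), atomic_load(&events));
	return 0;
}
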
64378diff -urNp linux-2.6.32.45/kernel/pid.c linux-2.6.32.45/kernel/pid.c
64379--- linux-2.6.32.45/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
64380+++ linux-2.6.32.45/kernel/pid.c 2011-08-21 19:11:29.000000000 -0400
64381@@ -33,6 +33,7 @@
64382 #include <linux/rculist.h>
64383 #include <linux/bootmem.h>
64384 #include <linux/hash.h>
64385+#include <linux/security.h>
64386 #include <linux/pid_namespace.h>
64387 #include <linux/init_task.h>
64388 #include <linux/syscalls.h>
64389@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64390
64391 int pid_max = PID_MAX_DEFAULT;
64392
64393-#define RESERVED_PIDS 300
64394+#define RESERVED_PIDS 500
64395
64396 int pid_max_min = RESERVED_PIDS + 1;
64397 int pid_max_max = PID_MAX_LIMIT;
64398@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
64399 */
64400 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64401 {
64402- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64403+ struct task_struct *task;
64404+
64405+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64406+
64407+ if (gr_pid_is_chrooted(task))
64408+ return NULL;
64409+
64410+ return task;
64411 }
64412
64413 struct task_struct *find_task_by_vpid(pid_t vnr)
64414@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pi
64415 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64416 }
64417
64418+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64419+{
64420+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64421+}
64422+
64423 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64424 {
64425 struct pid *pid;
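
Note on the pid.c hunks above: they raise RESERVED_PIDS, filter find_task_by_pid_ns() through gr_pid_is_chrooted(), and add find_task_by_vpid_unrestricted() for callers that must bypass the filter. A small user-space sketch of that filtered-lookup-plus-raw-accessor pattern follows; the types and policy check are invented.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task { int pid; bool outside_caller_chroot; };

static struct task table[] = {
	{ 1, true }, { 500, false }, { 501, true },
};

static struct task *raw_lookup(int pid)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].pid == pid)
			return &table[i];
	return NULL;
}

/* Policy check standing in for gr_pid_is_chrooted(). */
static bool hidden_from_caller(const struct task *t)
{
	return t && t->outside_caller_chroot;
}

/* Filtered lookup: what most callers should use. */
static struct task *find_task(int pid)
{
	struct task *t = raw_lookup(pid);

	return hidden_from_caller(t) ? NULL : t;
}

/* Unrestricted lookup: for the few places that must see everything. */
static struct task *find_task_unrestricted(int pid)
{
	return raw_lookup(pid);
}

int main(void)
{
	printf("filtered(501)=%p unrestricted(501)=%p\n",
	       (void *)find_task(501), (void *)find_task_unrestricted(501));
	return 0;
}
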
64426diff -urNp linux-2.6.32.45/kernel/posix-cpu-timers.c linux-2.6.32.45/kernel/posix-cpu-timers.c
64427--- linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
64428+++ linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-08-06 09:33:44.000000000 -0400
64429@@ -6,6 +6,7 @@
64430 #include <linux/posix-timers.h>
64431 #include <linux/errno.h>
64432 #include <linux/math64.h>
64433+#include <linux/security.h>
64434 #include <asm/uaccess.h>
64435 #include <linux/kernel_stat.h>
64436 #include <trace/events/timer.h>
64437@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(st
64438
64439 static __init int init_posix_cpu_timers(void)
64440 {
64441- struct k_clock process = {
64442+ static struct k_clock process = {
64443 .clock_getres = process_cpu_clock_getres,
64444 .clock_get = process_cpu_clock_get,
64445 .clock_set = do_posix_clock_nosettime,
64446@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(
64447 .nsleep = process_cpu_nsleep,
64448 .nsleep_restart = process_cpu_nsleep_restart,
64449 };
64450- struct k_clock thread = {
64451+ static struct k_clock thread = {
64452 .clock_getres = thread_cpu_clock_getres,
64453 .clock_get = thread_cpu_clock_get,
64454 .clock_set = do_posix_clock_nosettime,
64455diff -urNp linux-2.6.32.45/kernel/posix-timers.c linux-2.6.32.45/kernel/posix-timers.c
64456--- linux-2.6.32.45/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
64457+++ linux-2.6.32.45/kernel/posix-timers.c 2011-08-23 20:22:38.000000000 -0400
64458@@ -42,6 +42,7 @@
64459 #include <linux/compiler.h>
64460 #include <linux/idr.h>
64461 #include <linux/posix-timers.h>
64462+#include <linux/grsecurity.h>
64463 #include <linux/syscalls.h>
64464 #include <linux/wait.h>
64465 #include <linux/workqueue.h>
64466@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
64467 * which we beg off on and pass to do_sys_settimeofday().
64468 */
64469
64470-static struct k_clock posix_clocks[MAX_CLOCKS];
64471+static struct k_clock *posix_clocks[MAX_CLOCKS];
64472
64473 /*
64474 * These ones are defined below.
64475@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k
64476 */
64477 #define CLOCK_DISPATCH(clock, call, arglist) \
64478 ((clock) < 0 ? posix_cpu_##call arglist : \
64479- (posix_clocks[clock].call != NULL \
64480- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
64481+ (posix_clocks[clock]->call != NULL \
64482+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
64483
64484 /*
64485 * Default clock hook functions when the struct k_clock passed
64486@@ -172,7 +173,7 @@ static inline int common_clock_getres(co
64487 struct timespec *tp)
64488 {
64489 tp->tv_sec = 0;
64490- tp->tv_nsec = posix_clocks[which_clock].res;
64491+ tp->tv_nsec = posix_clocks[which_clock]->res;
64492 return 0;
64493 }
64494
64495@@ -217,9 +218,11 @@ static inline int invalid_clockid(const
64496 return 0;
64497 if ((unsigned) which_clock >= MAX_CLOCKS)
64498 return 1;
64499- if (posix_clocks[which_clock].clock_getres != NULL)
64500+ if (posix_clocks[which_clock] == NULL)
64501 return 0;
64502- if (posix_clocks[which_clock].res != 0)
64503+ if (posix_clocks[which_clock]->clock_getres != NULL)
64504+ return 0;
64505+ if (posix_clocks[which_clock]->res != 0)
64506 return 0;
64507 return 1;
64508 }
64509@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t
64510 */
64511 static __init int init_posix_timers(void)
64512 {
64513- struct k_clock clock_realtime = {
64514+ static struct k_clock clock_realtime = {
64515 .clock_getres = hrtimer_get_res,
64516 };
64517- struct k_clock clock_monotonic = {
64518+ static struct k_clock clock_monotonic = {
64519 .clock_getres = hrtimer_get_res,
64520 .clock_get = posix_ktime_get_ts,
64521 .clock_set = do_posix_clock_nosettime,
64522 };
64523- struct k_clock clock_monotonic_raw = {
64524+ static struct k_clock clock_monotonic_raw = {
64525 .clock_getres = hrtimer_get_res,
64526 .clock_get = posix_get_monotonic_raw,
64527 .clock_set = do_posix_clock_nosettime,
64528 .timer_create = no_timer_create,
64529 .nsleep = no_nsleep,
64530 };
64531- struct k_clock clock_realtime_coarse = {
64532+ static struct k_clock clock_realtime_coarse = {
64533 .clock_getres = posix_get_coarse_res,
64534 .clock_get = posix_get_realtime_coarse,
64535 .clock_set = do_posix_clock_nosettime,
64536 .timer_create = no_timer_create,
64537 .nsleep = no_nsleep,
64538 };
64539- struct k_clock clock_monotonic_coarse = {
64540+ static struct k_clock clock_monotonic_coarse = {
64541 .clock_getres = posix_get_coarse_res,
64542 .clock_get = posix_get_monotonic_coarse,
64543 .clock_set = do_posix_clock_nosettime,
64544@@ -296,6 +299,8 @@ static __init int init_posix_timers(void
64545 .nsleep = no_nsleep,
64546 };
64547
64548+ pax_track_stack();
64549+
64550 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
64551 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
64552 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64553@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_
64554 return;
64555 }
64556
64557- posix_clocks[clock_id] = *new_clock;
64558+ posix_clocks[clock_id] = new_clock;
64559 }
64560 EXPORT_SYMBOL_GPL(register_posix_clock);
64561
64562@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64563 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64564 return -EFAULT;
64565
64566+ /* only the CLOCK_REALTIME clock can be set, all other clocks
64567+ have their clock_set fptr set to a nosettime dummy function
64568+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
64569+ call common_clock_set, which calls do_sys_settimeofday, which
64570+ we hook
64571+ */
64572+
64573 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
64574 }
64575
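
Note on the posix-timers.c hunks above: they change posix_clocks[] from an array of k_clock copies to an array of pointers to static k_clock objects, and make the dispatch/validation paths NULL-check the slot before use. A compact sketch of that registry-of-pointers pattern (generic names, not the kernel API) is below.

#include <stdio.h>
#include <stddef.h>

struct clock_ops {
	int (*getres)(void);
	const char *name;
};

#define MAX_CLOCKS 4
static const struct clock_ops *clock_registry[MAX_CLOCKS];

static void register_clock(int id, const struct clock_ops *ops)
{
	if (id < 0 || id >= MAX_CLOCKS) {
		fprintf(stderr, "bad clock id %d\n", id);
		return;
	}
	clock_registry[id] = ops;	/* store the pointer, no struct copy */
}

static int realtime_getres(void) { return 1; }

/* The registered object must outlive the registry, hence static. */
static const struct clock_ops clock_realtime = {
	.getres = realtime_getres,
	.name = "CLOCK_REALTIME",
};

int main(void)
{
	register_clock(0, &clock_realtime);

	/* Dispatch now checks for a NULL slot before dereferencing,
	 * mirroring the invalid_clockid() change above. */
	if (clock_registry[0] && clock_registry[0]->getres)
		printf("%s res=%d\n", clock_registry[0]->name,
		       clock_registry[0]->getres());
	return 0;
}
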
64576diff -urNp linux-2.6.32.45/kernel/power/hibernate.c linux-2.6.32.45/kernel/power/hibernate.c
64577--- linux-2.6.32.45/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
64578+++ linux-2.6.32.45/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
64579@@ -48,14 +48,14 @@ enum {
64580
64581 static int hibernation_mode = HIBERNATION_SHUTDOWN;
64582
64583-static struct platform_hibernation_ops *hibernation_ops;
64584+static const struct platform_hibernation_ops *hibernation_ops;
64585
64586 /**
64587 * hibernation_set_ops - set the global hibernate operations
64588 * @ops: the hibernation operations to use in subsequent hibernation transitions
64589 */
64590
64591-void hibernation_set_ops(struct platform_hibernation_ops *ops)
64592+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
64593 {
64594 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
64595 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
64596diff -urNp linux-2.6.32.45/kernel/power/poweroff.c linux-2.6.32.45/kernel/power/poweroff.c
64597--- linux-2.6.32.45/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
64598+++ linux-2.6.32.45/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
64599@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64600 .enable_mask = SYSRQ_ENABLE_BOOT,
64601 };
64602
64603-static int pm_sysrq_init(void)
64604+static int __init pm_sysrq_init(void)
64605 {
64606 register_sysrq_key('o', &sysrq_poweroff_op);
64607 return 0;
64608diff -urNp linux-2.6.32.45/kernel/power/process.c linux-2.6.32.45/kernel/power/process.c
64609--- linux-2.6.32.45/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
64610+++ linux-2.6.32.45/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
64611@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
64612 struct timeval start, end;
64613 u64 elapsed_csecs64;
64614 unsigned int elapsed_csecs;
64615+ bool timedout = false;
64616
64617 do_gettimeofday(&start);
64618
64619 end_time = jiffies + TIMEOUT;
64620 do {
64621 todo = 0;
64622+ if (time_after(jiffies, end_time))
64623+ timedout = true;
64624 read_lock(&tasklist_lock);
64625 do_each_thread(g, p) {
64626 if (frozen(p) || !freezeable(p))
64627@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
64628 * It is "frozen enough". If the task does wake
64629 * up, it will immediately call try_to_freeze.
64630 */
64631- if (!task_is_stopped_or_traced(p) &&
64632- !freezer_should_skip(p))
64633+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64634 todo++;
64635+ if (timedout) {
64636+ printk(KERN_ERR "Task refusing to freeze:\n");
64637+ sched_show_task(p);
64638+ }
64639+ }
64640 } while_each_thread(g, p);
64641 read_unlock(&tasklist_lock);
64642 yield(); /* Yield is okay here */
64643- if (time_after(jiffies, end_time))
64644- break;
64645- } while (todo);
64646+ } while (todo && !timedout);
64647
64648 do_gettimeofday(&end);
64649 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
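
Note on the power/process.c hunk above: it reworks the freezer loop so it records a timeout flag, keeps iterating until everything froze or the deadline passed, and prints the tasks that refused to freeze on the final pass. A stripped-down model of that loop shape, with invented task data, follows.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_task { const char *name; int freezes_after; };

int main(void)
{
	struct fake_task tasks[] = { { "worker", 2 }, { "stuck", 1000 } };
	const int timeout_iters = 5;	/* stand-in for the jiffies deadline */
	bool timedout = false;
	int iter = 0, todo;

	do {
		todo = 0;
		if (iter >= timeout_iters)
			timedout = true;
		for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++) {
			if (iter >= tasks[i].freezes_after)
				continue;	/* already "frozen" */
			todo++;
			if (timedout)
				printf("Task refusing to freeze: %s\n",
				       tasks[i].name);
		}
		iter++;
	} while (todo && !timedout);	/* same exit condition as the patch */

	printf("done: todo=%d timedout=%d\n", todo, timedout);
	return 0;
}
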
64650diff -urNp linux-2.6.32.45/kernel/power/suspend.c linux-2.6.32.45/kernel/power/suspend.c
64651--- linux-2.6.32.45/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
64652+++ linux-2.6.32.45/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
64653@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
64654 [PM_SUSPEND_MEM] = "mem",
64655 };
64656
64657-static struct platform_suspend_ops *suspend_ops;
64658+static const struct platform_suspend_ops *suspend_ops;
64659
64660 /**
64661 * suspend_set_ops - Set the global suspend method table.
64662 * @ops: Pointer to ops structure.
64663 */
64664-void suspend_set_ops(struct platform_suspend_ops *ops)
64665+void suspend_set_ops(const struct platform_suspend_ops *ops)
64666 {
64667 mutex_lock(&pm_mutex);
64668 suspend_ops = ops;
64669diff -urNp linux-2.6.32.45/kernel/printk.c linux-2.6.32.45/kernel/printk.c
64670--- linux-2.6.32.45/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
64671+++ linux-2.6.32.45/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
64672@@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
64673 char c;
64674 int error = 0;
64675
64676+#ifdef CONFIG_GRKERNSEC_DMESG
64677+ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
64678+ return -EPERM;
64679+#endif
64680+
64681 error = security_syslog(type);
64682 if (error)
64683 return error;
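
Note on the printk.c hunk above: it adds an early capability gate to do_syslog() when CONFIG_GRKERNSEC_DMESG is enabled. A tiny sketch of that kind of deny-before-processing check, with stand-ins for grsec_enable_dmesg and capable(), is below.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool restrict_dmesg = true;	/* stand-in for grsec_enable_dmesg */

static bool caller_is_admin(void)	/* stand-in for capable(CAP_SYS_ADMIN) */
{
	return false;
}

static int do_syslog_checked(int type)
{
	/* Deny unprivileged readers before any other processing. */
	if (restrict_dmesg && !caller_is_admin())
		return -EPERM;

	printf("syslog op %d allowed\n", type);
	return 0;
}

int main(void)
{
	int ret = do_syslog_checked(3);

	if (ret < 0)
		fprintf(stderr, "syslog denied: %d\n", ret);
	return 0;
}
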
64684diff -urNp linux-2.6.32.45/kernel/profile.c linux-2.6.32.45/kernel/profile.c
64685--- linux-2.6.32.45/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
64686+++ linux-2.6.32.45/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
64687@@ -39,7 +39,7 @@ struct profile_hit {
64688 /* Oprofile timer tick hook */
64689 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64690
64691-static atomic_t *prof_buffer;
64692+static atomic_unchecked_t *prof_buffer;
64693 static unsigned long prof_len, prof_shift;
64694
64695 int prof_on __read_mostly;
64696@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
64697 hits[i].pc = 0;
64698 continue;
64699 }
64700- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64701+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64702 hits[i].hits = hits[i].pc = 0;
64703 }
64704 }
64705@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
64706 * Add the current hit(s) and flush the write-queue out
64707 * to the global buffer:
64708 */
64709- atomic_add(nr_hits, &prof_buffer[pc]);
64710+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64711 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64712- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64713+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64714 hits[i].pc = hits[i].hits = 0;
64715 }
64716 out:
64717@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
64718 if (prof_on != type || !prof_buffer)
64719 return;
64720 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64721- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64722+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64723 }
64724 #endif /* !CONFIG_SMP */
64725 EXPORT_SYMBOL_GPL(profile_hits);
64726@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64727 return -EFAULT;
64728 buf++; p++; count--; read++;
64729 }
64730- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64731+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64732 if (copy_to_user(buf, (void *)pnt, count))
64733 return -EFAULT;
64734 read += count;
64735@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64736 }
64737 #endif
64738 profile_discard_flip_buffers();
64739- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64740+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64741 return count;
64742 }
64743
64744diff -urNp linux-2.6.32.45/kernel/ptrace.c linux-2.6.32.45/kernel/ptrace.c
64745--- linux-2.6.32.45/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
64746+++ linux-2.6.32.45/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
64747@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
64748 return ret;
64749 }
64750
64751-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64752+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64753+ unsigned int log)
64754 {
64755 const struct cred *cred = current_cred(), *tcred;
64756
64757@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
64758 cred->gid != tcred->egid ||
64759 cred->gid != tcred->sgid ||
64760 cred->gid != tcred->gid) &&
64761- !capable(CAP_SYS_PTRACE)) {
64762+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64763+ (log && !capable(CAP_SYS_PTRACE)))
64764+ ) {
64765 rcu_read_unlock();
64766 return -EPERM;
64767 }
64768@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
64769 smp_rmb();
64770 if (task->mm)
64771 dumpable = get_dumpable(task->mm);
64772- if (!dumpable && !capable(CAP_SYS_PTRACE))
64773+ if (!dumpable &&
64774+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64775+ (log && !capable(CAP_SYS_PTRACE))))
64776 return -EPERM;
64777
64778 return security_ptrace_access_check(task, mode);
64779@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
64780 {
64781 int err;
64782 task_lock(task);
64783- err = __ptrace_may_access(task, mode);
64784+ err = __ptrace_may_access(task, mode, 0);
64785+ task_unlock(task);
64786+ return !err;
64787+}
64788+
64789+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64790+{
64791+ int err;
64792+ task_lock(task);
64793+ err = __ptrace_may_access(task, mode, 1);
64794 task_unlock(task);
64795 return !err;
64796 }
64797@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
64798 goto out;
64799
64800 task_lock(task);
64801- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64802+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64803 task_unlock(task);
64804 if (retval)
64805 goto unlock_creds;
64806@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
64807 goto unlock_tasklist;
64808
64809 task->ptrace = PT_PTRACED;
64810- if (capable(CAP_SYS_PTRACE))
64811+ if (capable_nolog(CAP_SYS_PTRACE))
64812 task->ptrace |= PT_PTRACE_CAP;
64813
64814 __ptrace_link(task, current);
64815@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
64816 {
64817 int copied = 0;
64818
64819+ pax_track_stack();
64820+
64821 while (len > 0) {
64822 char buf[128];
64823 int this_len, retval;
64824@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
64825 {
64826 int copied = 0;
64827
64828+ pax_track_stack();
64829+
64830 while (len > 0) {
64831 char buf[128];
64832 int this_len, retval;
64833@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
64834 int ret = -EIO;
64835 siginfo_t siginfo;
64836
64837+ pax_track_stack();
64838+
64839 switch (request) {
64840 case PTRACE_PEEKTEXT:
64841 case PTRACE_PEEKDATA:
64842@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
64843 ret = ptrace_setoptions(child, data);
64844 break;
64845 case PTRACE_GETEVENTMSG:
64846- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
64847+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
64848 break;
64849
64850 case PTRACE_GETSIGINFO:
64851 ret = ptrace_getsiginfo(child, &siginfo);
64852 if (!ret)
64853- ret = copy_siginfo_to_user((siginfo_t __user *) data,
64854+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
64855 &siginfo);
64856 break;
64857
64858 case PTRACE_SETSIGINFO:
64859- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
64860+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
64861 sizeof siginfo))
64862 ret = -EFAULT;
64863 else
64864@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
64865 goto out;
64866 }
64867
64868+ if (gr_handle_ptrace(child, request)) {
64869+ ret = -EPERM;
64870+ goto out_put_task_struct;
64871+ }
64872+
64873 if (request == PTRACE_ATTACH) {
64874 ret = ptrace_attach(child);
64875 /*
64876 * Some architectures need to do book-keeping after
64877 * a ptrace attach.
64878 */
64879- if (!ret)
64880+ if (!ret) {
64881 arch_ptrace_attach(child);
64882+ gr_audit_ptrace(child);
64883+ }
64884 goto out_put_task_struct;
64885 }
64886
64887@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
64888 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
64889 if (copied != sizeof(tmp))
64890 return -EIO;
64891- return put_user(tmp, (unsigned long __user *)data);
64892+ return put_user(tmp, (__force unsigned long __user *)data);
64893 }
64894
64895 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
64896@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
64897 siginfo_t siginfo;
64898 int ret;
64899
64900+ pax_track_stack();
64901+
64902 switch (request) {
64903 case PTRACE_PEEKTEXT:
64904 case PTRACE_PEEKDATA:
64905@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
64906 goto out;
64907 }
64908
64909+ if (gr_handle_ptrace(child, request)) {
64910+ ret = -EPERM;
64911+ goto out_put_task_struct;
64912+ }
64913+
64914 if (request == PTRACE_ATTACH) {
64915 ret = ptrace_attach(child);
64916 /*
64917 * Some architectures need to do book-keeping after
64918 * a ptrace attach.
64919 */
64920- if (!ret)
64921+ if (!ret) {
64922 arch_ptrace_attach(child);
64923+ gr_audit_ptrace(child);
64924+ }
64925 goto out_put_task_struct;
64926 }
64927
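
Note on the ptrace.c hunks above: they make __ptrace_may_access() static, add a log parameter that selects between capable() and capable_nolog(), and export ptrace_may_access_log() for the attach path. A small sketch of an access check parameterised by a logging flag follows; the helpers and types are hypothetical.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool has_cap(bool log)	/* stand-in for capable()/capable_nolog() */
{
	if (log)
		fprintf(stderr, "audit: capability check for ptrace\n");
	return false;
}

struct task { int uid; };

static int may_access(const struct task *caller, const struct task *target,
		      bool log)
{
	if (caller->uid == target->uid)
		return 0;
	/* Fall back to the privileged path; only log when asked to. */
	return has_cap(log) ? 0 : -EPERM;
}

/* Silent variant for informational callers... */
static bool may_access_quiet(const struct task *c, const struct task *t)
{
	return may_access(c, t, false) == 0;
}

/* ...and a logging variant for the actual attach path. */
static bool may_access_log(const struct task *c, const struct task *t)
{
	return may_access(c, t, true) == 0;
}

int main(void)
{
	struct task a = { 1000 }, b = { 0 };

	printf("quiet=%d logged=%d\n", may_access_quiet(&a, &b),
	       may_access_log(&a, &b));
	return 0;
}
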
64928diff -urNp linux-2.6.32.45/kernel/rcutorture.c linux-2.6.32.45/kernel/rcutorture.c
64929--- linux-2.6.32.45/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
64930+++ linux-2.6.32.45/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
64931@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
64932 { 0 };
64933 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
64934 { 0 };
64935-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64936-static atomic_t n_rcu_torture_alloc;
64937-static atomic_t n_rcu_torture_alloc_fail;
64938-static atomic_t n_rcu_torture_free;
64939-static atomic_t n_rcu_torture_mberror;
64940-static atomic_t n_rcu_torture_error;
64941+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64942+static atomic_unchecked_t n_rcu_torture_alloc;
64943+static atomic_unchecked_t n_rcu_torture_alloc_fail;
64944+static atomic_unchecked_t n_rcu_torture_free;
64945+static atomic_unchecked_t n_rcu_torture_mberror;
64946+static atomic_unchecked_t n_rcu_torture_error;
64947 static long n_rcu_torture_timers;
64948 static struct list_head rcu_torture_removed;
64949 static cpumask_var_t shuffle_tmp_mask;
64950@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
64951
64952 spin_lock_bh(&rcu_torture_lock);
64953 if (list_empty(&rcu_torture_freelist)) {
64954- atomic_inc(&n_rcu_torture_alloc_fail);
64955+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
64956 spin_unlock_bh(&rcu_torture_lock);
64957 return NULL;
64958 }
64959- atomic_inc(&n_rcu_torture_alloc);
64960+ atomic_inc_unchecked(&n_rcu_torture_alloc);
64961 p = rcu_torture_freelist.next;
64962 list_del_init(p);
64963 spin_unlock_bh(&rcu_torture_lock);
64964@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
64965 static void
64966 rcu_torture_free(struct rcu_torture *p)
64967 {
64968- atomic_inc(&n_rcu_torture_free);
64969+ atomic_inc_unchecked(&n_rcu_torture_free);
64970 spin_lock_bh(&rcu_torture_lock);
64971 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
64972 spin_unlock_bh(&rcu_torture_lock);
64973@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
64974 i = rp->rtort_pipe_count;
64975 if (i > RCU_TORTURE_PIPE_LEN)
64976 i = RCU_TORTURE_PIPE_LEN;
64977- atomic_inc(&rcu_torture_wcount[i]);
64978+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64979 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64980 rp->rtort_mbtest = 0;
64981 rcu_torture_free(rp);
64982@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
64983 i = rp->rtort_pipe_count;
64984 if (i > RCU_TORTURE_PIPE_LEN)
64985 i = RCU_TORTURE_PIPE_LEN;
64986- atomic_inc(&rcu_torture_wcount[i]);
64987+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64988 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64989 rp->rtort_mbtest = 0;
64990 list_del(&rp->rtort_free);
64991@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
64992 i = old_rp->rtort_pipe_count;
64993 if (i > RCU_TORTURE_PIPE_LEN)
64994 i = RCU_TORTURE_PIPE_LEN;
64995- atomic_inc(&rcu_torture_wcount[i]);
64996+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64997 old_rp->rtort_pipe_count++;
64998 cur_ops->deferred_free(old_rp);
64999 }
65000@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
65001 return;
65002 }
65003 if (p->rtort_mbtest == 0)
65004- atomic_inc(&n_rcu_torture_mberror);
65005+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65006 spin_lock(&rand_lock);
65007 cur_ops->read_delay(&rand);
65008 n_rcu_torture_timers++;
65009@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
65010 continue;
65011 }
65012 if (p->rtort_mbtest == 0)
65013- atomic_inc(&n_rcu_torture_mberror);
65014+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65015 cur_ops->read_delay(&rand);
65016 preempt_disable();
65017 pipe_count = p->rtort_pipe_count;
65018@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
65019 rcu_torture_current,
65020 rcu_torture_current_version,
65021 list_empty(&rcu_torture_freelist),
65022- atomic_read(&n_rcu_torture_alloc),
65023- atomic_read(&n_rcu_torture_alloc_fail),
65024- atomic_read(&n_rcu_torture_free),
65025- atomic_read(&n_rcu_torture_mberror),
65026+ atomic_read_unchecked(&n_rcu_torture_alloc),
65027+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65028+ atomic_read_unchecked(&n_rcu_torture_free),
65029+ atomic_read_unchecked(&n_rcu_torture_mberror),
65030 n_rcu_torture_timers);
65031- if (atomic_read(&n_rcu_torture_mberror) != 0)
65032+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
65033 cnt += sprintf(&page[cnt], " !!!");
65034 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65035 if (i > 1) {
65036 cnt += sprintf(&page[cnt], "!!! ");
65037- atomic_inc(&n_rcu_torture_error);
65038+ atomic_inc_unchecked(&n_rcu_torture_error);
65039 WARN_ON_ONCE(1);
65040 }
65041 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65042@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
65043 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65044 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65045 cnt += sprintf(&page[cnt], " %d",
65046- atomic_read(&rcu_torture_wcount[i]));
65047+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65048 }
65049 cnt += sprintf(&page[cnt], "\n");
65050 if (cur_ops->stats)
65051@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
65052
65053 if (cur_ops->cleanup)
65054 cur_ops->cleanup();
65055- if (atomic_read(&n_rcu_torture_error))
65056+ if (atomic_read_unchecked(&n_rcu_torture_error))
65057 rcu_torture_print_module_parms("End of test: FAILURE");
65058 else
65059 rcu_torture_print_module_parms("End of test: SUCCESS");
65060@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
65061
65062 rcu_torture_current = NULL;
65063 rcu_torture_current_version = 0;
65064- atomic_set(&n_rcu_torture_alloc, 0);
65065- atomic_set(&n_rcu_torture_alloc_fail, 0);
65066- atomic_set(&n_rcu_torture_free, 0);
65067- atomic_set(&n_rcu_torture_mberror, 0);
65068- atomic_set(&n_rcu_torture_error, 0);
65069+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65070+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65071+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65072+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65073+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65074 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65075- atomic_set(&rcu_torture_wcount[i], 0);
65076+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65077 for_each_possible_cpu(cpu) {
65078 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65079 per_cpu(rcu_torture_count, cpu)[i] = 0;
65080diff -urNp linux-2.6.32.45/kernel/rcutree.c linux-2.6.32.45/kernel/rcutree.c
65081--- linux-2.6.32.45/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
65082+++ linux-2.6.32.45/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
65083@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
65084 /*
65085 * Do softirq processing for the current CPU.
65086 */
65087-static void rcu_process_callbacks(struct softirq_action *unused)
65088+static void rcu_process_callbacks(void)
65089 {
65090 /*
65091 * Memory references from any prior RCU read-side critical sections
65092diff -urNp linux-2.6.32.45/kernel/rcutree_plugin.h linux-2.6.32.45/kernel/rcutree_plugin.h
65093--- linux-2.6.32.45/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
65094+++ linux-2.6.32.45/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
65095@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
65096 */
65097 void __rcu_read_lock(void)
65098 {
65099- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
65100+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
65101 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
65102 }
65103 EXPORT_SYMBOL_GPL(__rcu_read_lock);
65104@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
65105 struct task_struct *t = current;
65106
65107 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
65108- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
65109+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
65110 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
65111 rcu_read_unlock_special(t);
65112 }
65113diff -urNp linux-2.6.32.45/kernel/relay.c linux-2.6.32.45/kernel/relay.c
65114--- linux-2.6.32.45/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
65115+++ linux-2.6.32.45/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
65116@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
65117 unsigned int flags,
65118 int *nonpad_ret)
65119 {
65120- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
65121+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
65122 struct rchan_buf *rbuf = in->private_data;
65123 unsigned int subbuf_size = rbuf->chan->subbuf_size;
65124 uint64_t pos = (uint64_t) *ppos;
65125@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
65126 .ops = &relay_pipe_buf_ops,
65127 .spd_release = relay_page_release,
65128 };
65129+ ssize_t ret;
65130+
65131+ pax_track_stack();
65132
65133 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65134 return 0;
65135diff -urNp linux-2.6.32.45/kernel/resource.c linux-2.6.32.45/kernel/resource.c
65136--- linux-2.6.32.45/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
65137+++ linux-2.6.32.45/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
65138@@ -132,8 +132,18 @@ static const struct file_operations proc
65139
65140 static int __init ioresources_init(void)
65141 {
65142+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65143+#ifdef CONFIG_GRKERNSEC_PROC_USER
65144+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65145+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65146+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65147+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65148+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65149+#endif
65150+#else
65151 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65152 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65153+#endif
65154 return 0;
65155 }
65156 __initcall(ioresources_init);
65157diff -urNp linux-2.6.32.45/kernel/rtmutex.c linux-2.6.32.45/kernel/rtmutex.c
65158--- linux-2.6.32.45/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
65159+++ linux-2.6.32.45/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
65160@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
65161 */
65162 spin_lock_irqsave(&pendowner->pi_lock, flags);
65163
65164- WARN_ON(!pendowner->pi_blocked_on);
65165+ BUG_ON(!pendowner->pi_blocked_on);
65166 WARN_ON(pendowner->pi_blocked_on != waiter);
65167 WARN_ON(pendowner->pi_blocked_on->lock != lock);
65168
65169diff -urNp linux-2.6.32.45/kernel/rtmutex-tester.c linux-2.6.32.45/kernel/rtmutex-tester.c
65170--- linux-2.6.32.45/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
65171+++ linux-2.6.32.45/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
65172@@ -21,7 +21,7 @@
65173 #define MAX_RT_TEST_MUTEXES 8
65174
65175 static spinlock_t rttest_lock;
65176-static atomic_t rttest_event;
65177+static atomic_unchecked_t rttest_event;
65178
65179 struct test_thread_data {
65180 int opcode;
65181@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
65182
65183 case RTTEST_LOCKCONT:
65184 td->mutexes[td->opdata] = 1;
65185- td->event = atomic_add_return(1, &rttest_event);
65186+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65187 return 0;
65188
65189 case RTTEST_RESET:
65190@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
65191 return 0;
65192
65193 case RTTEST_RESETEVENT:
65194- atomic_set(&rttest_event, 0);
65195+ atomic_set_unchecked(&rttest_event, 0);
65196 return 0;
65197
65198 default:
65199@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
65200 return ret;
65201
65202 td->mutexes[id] = 1;
65203- td->event = atomic_add_return(1, &rttest_event);
65204+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65205 rt_mutex_lock(&mutexes[id]);
65206- td->event = atomic_add_return(1, &rttest_event);
65207+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65208 td->mutexes[id] = 4;
65209 return 0;
65210
65211@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
65212 return ret;
65213
65214 td->mutexes[id] = 1;
65215- td->event = atomic_add_return(1, &rttest_event);
65216+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65217 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65218- td->event = atomic_add_return(1, &rttest_event);
65219+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65220 td->mutexes[id] = ret ? 0 : 4;
65221 return ret ? -EINTR : 0;
65222
65223@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
65224 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65225 return ret;
65226
65227- td->event = atomic_add_return(1, &rttest_event);
65228+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65229 rt_mutex_unlock(&mutexes[id]);
65230- td->event = atomic_add_return(1, &rttest_event);
65231+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65232 td->mutexes[id] = 0;
65233 return 0;
65234
65235@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
65236 break;
65237
65238 td->mutexes[dat] = 2;
65239- td->event = atomic_add_return(1, &rttest_event);
65240+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65241 break;
65242
65243 case RTTEST_LOCKBKL:
65244@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
65245 return;
65246
65247 td->mutexes[dat] = 3;
65248- td->event = atomic_add_return(1, &rttest_event);
65249+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65250 break;
65251
65252 case RTTEST_LOCKNOWAIT:
65253@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
65254 return;
65255
65256 td->mutexes[dat] = 1;
65257- td->event = atomic_add_return(1, &rttest_event);
65258+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65259 return;
65260
65261 case RTTEST_LOCKBKL:
65262diff -urNp linux-2.6.32.45/kernel/sched.c linux-2.6.32.45/kernel/sched.c
65263--- linux-2.6.32.45/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
65264+++ linux-2.6.32.45/kernel/sched.c 2011-08-21 19:29:25.000000000 -0400
65265@@ -2764,9 +2764,10 @@ void wake_up_new_task(struct task_struct
65266 {
65267 unsigned long flags;
65268 struct rq *rq;
65269- int cpu = get_cpu();
65270
65271 #ifdef CONFIG_SMP
65272+ int cpu = get_cpu();
65273+
65274 rq = task_rq_lock(p, &flags);
65275 p->state = TASK_WAKING;
65276
65277@@ -5043,7 +5044,7 @@ out:
65278 * In CONFIG_NO_HZ case, the idle load balance owner will do the
65279 * rebalancing for all the cpus for whom scheduler ticks are stopped.
65280 */
65281-static void run_rebalance_domains(struct softirq_action *h)
65282+static void run_rebalance_domains(void)
65283 {
65284 int this_cpu = smp_processor_id();
65285 struct rq *this_rq = cpu_rq(this_cpu);
65286@@ -5700,6 +5701,8 @@ asmlinkage void __sched schedule(void)
65287 struct rq *rq;
65288 int cpu;
65289
65290+ pax_track_stack();
65291+
65292 need_resched:
65293 preempt_disable();
65294 cpu = smp_processor_id();
65295@@ -5770,7 +5773,7 @@ EXPORT_SYMBOL(schedule);
65296 * Look out! "owner" is an entirely speculative pointer
65297 * access and not reliable.
65298 */
65299-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
65300+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
65301 {
65302 unsigned int cpu;
65303 struct rq *rq;
65304@@ -5784,10 +5787,10 @@ int mutex_spin_on_owner(struct mutex *lo
65305 * DEBUG_PAGEALLOC could have unmapped it if
65306 * the mutex owner just released it and exited.
65307 */
65308- if (probe_kernel_address(&owner->cpu, cpu))
65309+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
65310 return 0;
65311 #else
65312- cpu = owner->cpu;
65313+ cpu = task_thread_info(owner)->cpu;
65314 #endif
65315
65316 /*
65317@@ -5816,7 +5819,7 @@ int mutex_spin_on_owner(struct mutex *lo
65318 /*
65319 * Is that owner really running on that cpu?
65320 */
65321- if (task_thread_info(rq->curr) != owner || need_resched())
65322+ if (rq->curr != owner || need_resched())
65323 return 0;
65324
65325 cpu_relax();
65326@@ -6359,6 +6362,8 @@ int can_nice(const struct task_struct *p
65327 /* convert nice value [19,-20] to rlimit style value [1,40] */
65328 int nice_rlim = 20 - nice;
65329
65330+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65331+
65332 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
65333 capable(CAP_SYS_NICE));
65334 }
65335@@ -6392,7 +6397,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65336 if (nice > 19)
65337 nice = 19;
65338
65339- if (increment < 0 && !can_nice(current, nice))
65340+ if (increment < 0 && (!can_nice(current, nice) ||
65341+ gr_handle_chroot_nice()))
65342 return -EPERM;
65343
65344 retval = security_task_setnice(current, nice);
65345@@ -8774,7 +8780,7 @@ static void init_sched_groups_power(int
65346 long power;
65347 int weight;
65348
65349- WARN_ON(!sd || !sd->groups);
65350+ BUG_ON(!sd || !sd->groups);
65351
65352 if (cpu != group_first_cpu(sd->groups))
65353 return;
65354diff -urNp linux-2.6.32.45/kernel/signal.c linux-2.6.32.45/kernel/signal.c
65355--- linux-2.6.32.45/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
65356+++ linux-2.6.32.45/kernel/signal.c 2011-08-16 21:15:58.000000000 -0400
65357@@ -41,12 +41,12 @@
65358
65359 static struct kmem_cache *sigqueue_cachep;
65360
65361-static void __user *sig_handler(struct task_struct *t, int sig)
65362+static __sighandler_t sig_handler(struct task_struct *t, int sig)
65363 {
65364 return t->sighand->action[sig - 1].sa.sa_handler;
65365 }
65366
65367-static int sig_handler_ignored(void __user *handler, int sig)
65368+static int sig_handler_ignored(__sighandler_t handler, int sig)
65369 {
65370 /* Is it explicitly or implicitly ignored? */
65371 return handler == SIG_IGN ||
65372@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
65373 static int sig_task_ignored(struct task_struct *t, int sig,
65374 int from_ancestor_ns)
65375 {
65376- void __user *handler;
65377+ __sighandler_t handler;
65378
65379 handler = sig_handler(t, sig);
65380
65381@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
65382 */
65383 user = get_uid(__task_cred(t)->user);
65384 atomic_inc(&user->sigpending);
65385+
65386+ if (!override_rlimit)
65387+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65388 if (override_rlimit ||
65389 atomic_read(&user->sigpending) <=
65390 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
65391@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
65392
65393 int unhandled_signal(struct task_struct *tsk, int sig)
65394 {
65395- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65396+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65397 if (is_global_init(tsk))
65398 return 1;
65399 if (handler != SIG_IGN && handler != SIG_DFL)
65400@@ -627,6 +630,13 @@ static int check_kill_permission(int sig
65401 }
65402 }
65403
65404+ /* allow glibc communication via tgkill to other threads in our
65405+ thread group */
65406+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65407+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65408+ && gr_handle_signal(t, sig))
65409+ return -EPERM;
65410+
65411 return security_task_kill(t, info, sig, 0);
65412 }
65413
65414@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct si
65415 return send_signal(sig, info, p, 1);
65416 }
65417
65418-static int
65419+int
65420 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65421 {
65422 return send_signal(sig, info, t, 0);
65423@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *
65424 unsigned long int flags;
65425 int ret, blocked, ignored;
65426 struct k_sigaction *action;
65427+ int is_unhandled = 0;
65428
65429 spin_lock_irqsave(&t->sighand->siglock, flags);
65430 action = &t->sighand->action[sig-1];
65431@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *
65432 }
65433 if (action->sa.sa_handler == SIG_DFL)
65434 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65435+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65436+ is_unhandled = 1;
65437 ret = specific_send_sig_info(sig, info, t);
65438 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65439
65440+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
65441+ normal operation */
65442+ if (is_unhandled) {
65443+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65444+ gr_handle_crash(t, sig);
65445+ }
65446+
65447 return ret;
65448 }
65449
65450@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct
65451 {
65452 int ret = check_kill_permission(sig, info, p);
65453
65454- if (!ret && sig)
65455+ if (!ret && sig) {
65456 ret = do_send_sig_info(sig, info, p, true);
65457+ if (!ret)
65458+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65459+ }
65460
65461 return ret;
65462 }
65463@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
65464 {
65465 siginfo_t info;
65466
65467+ pax_track_stack();
65468+
65469 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65470
65471 memset(&info, 0, sizeof info);
65472@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65473 int error = -ESRCH;
65474
65475 rcu_read_lock();
65476- p = find_task_by_vpid(pid);
65477+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65478+ /* allow glibc communication via tgkill to other threads in our
65479+ thread group */
65480+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65481+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
65482+ p = find_task_by_vpid_unrestricted(pid);
65483+ else
65484+#endif
65485+ p = find_task_by_vpid(pid);
65486 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65487 error = check_kill_permission(sig, info, p);
65488 /*
65489diff -urNp linux-2.6.32.45/kernel/smp.c linux-2.6.32.45/kernel/smp.c
65490--- linux-2.6.32.45/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
65491+++ linux-2.6.32.45/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
65492@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
65493 }
65494 EXPORT_SYMBOL(smp_call_function);
65495
65496-void ipi_call_lock(void)
65497+void ipi_call_lock(void) __acquires(call_function.lock)
65498 {
65499 spin_lock(&call_function.lock);
65500 }
65501
65502-void ipi_call_unlock(void)
65503+void ipi_call_unlock(void) __releases(call_function.lock)
65504 {
65505 spin_unlock(&call_function.lock);
65506 }
65507
65508-void ipi_call_lock_irq(void)
65509+void ipi_call_lock_irq(void) __acquires(call_function.lock)
65510 {
65511 spin_lock_irq(&call_function.lock);
65512 }
65513
65514-void ipi_call_unlock_irq(void)
65515+void ipi_call_unlock_irq(void) __releases(call_function.lock)
65516 {
65517 spin_unlock_irq(&call_function.lock);
65518 }
65519diff -urNp linux-2.6.32.45/kernel/softirq.c linux-2.6.32.45/kernel/softirq.c
65520--- linux-2.6.32.45/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
65521+++ linux-2.6.32.45/kernel/softirq.c 2011-08-05 20:33:55.000000000 -0400
65522@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65523
65524 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65525
65526-char *softirq_to_name[NR_SOFTIRQS] = {
65527+const char * const softirq_to_name[NR_SOFTIRQS] = {
65528 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65529 "TASKLET", "SCHED", "HRTIMER", "RCU"
65530 };
65531@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
65532
65533 asmlinkage void __do_softirq(void)
65534 {
65535- struct softirq_action *h;
65536+ const struct softirq_action *h;
65537 __u32 pending;
65538 int max_restart = MAX_SOFTIRQ_RESTART;
65539 int cpu;
65540@@ -233,7 +233,7 @@ restart:
65541 kstat_incr_softirqs_this_cpu(h - softirq_vec);
65542
65543 trace_softirq_entry(h, softirq_vec);
65544- h->action(h);
65545+ h->action();
65546 trace_softirq_exit(h, softirq_vec);
65547 if (unlikely(prev_count != preempt_count())) {
65548 printk(KERN_ERR "huh, entered softirq %td %s %p"
65549@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
65550 local_irq_restore(flags);
65551 }
65552
65553-void open_softirq(int nr, void (*action)(struct softirq_action *))
65554+void open_softirq(int nr, void (*action)(void))
65555 {
65556- softirq_vec[nr].action = action;
65557+ pax_open_kernel();
65558+ *(void **)&softirq_vec[nr].action = action;
65559+ pax_close_kernel();
65560 }
65561
65562 /*
65563@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct
65564
65565 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65566
65567-static void tasklet_action(struct softirq_action *a)
65568+static void tasklet_action(void)
65569 {
65570 struct tasklet_struct *list;
65571
65572@@ -454,7 +456,7 @@ static void tasklet_action(struct softir
65573 }
65574 }
65575
65576-static void tasklet_hi_action(struct softirq_action *a)
65577+static void tasklet_hi_action(void)
65578 {
65579 struct tasklet_struct *list;
65580
65581diff -urNp linux-2.6.32.45/kernel/sys.c linux-2.6.32.45/kernel/sys.c
65582--- linux-2.6.32.45/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
65583+++ linux-2.6.32.45/kernel/sys.c 2011-08-11 19:51:54.000000000 -0400
65584@@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
65585 error = -EACCES;
65586 goto out;
65587 }
65588+
65589+ if (gr_handle_chroot_setpriority(p, niceval)) {
65590+ error = -EACCES;
65591+ goto out;
65592+ }
65593+
65594 no_nice = security_task_setnice(p, niceval);
65595 if (no_nice) {
65596 error = no_nice;
65597@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
65598 !(user = find_user(who)))
65599 goto out_unlock; /* No processes for this user */
65600
65601- do_each_thread(g, p)
65602+ do_each_thread(g, p) {
65603 if (__task_cred(p)->uid == who)
65604 error = set_one_prio(p, niceval, error);
65605- while_each_thread(g, p);
65606+ } while_each_thread(g, p);
65607 if (who != cred->uid)
65608 free_uid(user); /* For find_user() */
65609 break;
65610@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
65611 !(user = find_user(who)))
65612 goto out_unlock; /* No processes for this user */
65613
65614- do_each_thread(g, p)
65615+ do_each_thread(g, p) {
65616 if (__task_cred(p)->uid == who) {
65617 niceval = 20 - task_nice(p);
65618 if (niceval > retval)
65619 retval = niceval;
65620 }
65621- while_each_thread(g, p);
65622+ } while_each_thread(g, p);
65623 if (who != cred->uid)
65624 free_uid(user); /* for find_user() */
65625 break;
65626@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65627 goto error;
65628 }
65629
65630+ if (gr_check_group_change(new->gid, new->egid, -1))
65631+ goto error;
65632+
65633 if (rgid != (gid_t) -1 ||
65634 (egid != (gid_t) -1 && egid != old->gid))
65635 new->sgid = new->egid;
65636@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65637 goto error;
65638
65639 retval = -EPERM;
65640+
65641+ if (gr_check_group_change(gid, gid, gid))
65642+ goto error;
65643+
65644 if (capable(CAP_SETGID))
65645 new->gid = new->egid = new->sgid = new->fsgid = gid;
65646 else if (gid == old->gid || gid == old->sgid)
65647@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
65648 if (!new_user)
65649 return -EAGAIN;
65650
65651+ /*
65652+ * We don't fail in case of NPROC limit excess here because too many
65653+ * poorly written programs don't check set*uid() return code, assuming
65654+ * it never fails if called by root. We may still enforce NPROC limit
65655+ * for programs doing set*uid()+execve() by harmlessly deferring the
65656+ * failure to the execve() stage.
65657+ */
65658 if (atomic_read(&new_user->processes) >=
65659 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
65660- new_user != INIT_USER) {
65661- free_uid(new_user);
65662- return -EAGAIN;
65663- }
65664+ new_user != INIT_USER)
65665+ current->flags |= PF_NPROC_EXCEEDED;
65666+ else
65667+ current->flags &= ~PF_NPROC_EXCEEDED;
65668
65669 free_uid(new->user);
65670 new->user = new_user;
65671@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65672 goto error;
65673 }
65674
65675+ if (gr_check_user_change(new->uid, new->euid, -1))
65676+ goto error;
65677+
65678 if (new->uid != old->uid) {
65679 retval = set_user(new);
65680 if (retval < 0)
65681@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65682 goto error;
65683
65684 retval = -EPERM;
65685+
65686+ if (gr_check_crash_uid(uid))
65687+ goto error;
65688+ if (gr_check_user_change(uid, uid, uid))
65689+ goto error;
65690+
65691 if (capable(CAP_SETUID)) {
65692 new->suid = new->uid = uid;
65693 if (uid != old->uid) {
65694@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65695 goto error;
65696 }
65697
65698+ if (gr_check_user_change(ruid, euid, -1))
65699+ goto error;
65700+
65701 if (ruid != (uid_t) -1) {
65702 new->uid = ruid;
65703 if (ruid != old->uid) {
65704@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65705 goto error;
65706 }
65707
65708+ if (gr_check_group_change(rgid, egid, -1))
65709+ goto error;
65710+
65711 if (rgid != (gid_t) -1)
65712 new->gid = rgid;
65713 if (egid != (gid_t) -1)
65714@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65715 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
65716 goto error;
65717
65718+ if (gr_check_user_change(-1, -1, uid))
65719+ goto error;
65720+
65721 if (uid == old->uid || uid == old->euid ||
65722 uid == old->suid || uid == old->fsuid ||
65723 capable(CAP_SETUID)) {
65724@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65725 if (gid == old->gid || gid == old->egid ||
65726 gid == old->sgid || gid == old->fsgid ||
65727 capable(CAP_SETGID)) {
65728+ if (gr_check_group_change(-1, -1, gid))
65729+ goto error;
65730+
65731 if (gid != old_fsgid) {
65732 new->fsgid = gid;
65733 goto change_okay;
65734@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65735 error = get_dumpable(me->mm);
65736 break;
65737 case PR_SET_DUMPABLE:
65738- if (arg2 < 0 || arg2 > 1) {
65739+ if (arg2 > 1) {
65740 error = -EINVAL;
65741 break;
65742 }
65743diff -urNp linux-2.6.32.45/kernel/sysctl.c linux-2.6.32.45/kernel/sysctl.c
65744--- linux-2.6.32.45/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
65745+++ linux-2.6.32.45/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
65746@@ -63,6 +63,13 @@
65747 static int deprecated_sysctl_warning(struct __sysctl_args *args);
65748
65749 #if defined(CONFIG_SYSCTL)
65750+#include <linux/grsecurity.h>
65751+#include <linux/grinternal.h>
65752+
65753+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65754+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65755+ const int op);
65756+extern int gr_handle_chroot_sysctl(const int op);
65757
65758 /* External variables not in a header file. */
65759 extern int C_A_D;
65760@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
65761 static int proc_taint(struct ctl_table *table, int write,
65762 void __user *buffer, size_t *lenp, loff_t *ppos);
65763 #endif
65764+extern ctl_table grsecurity_table[];
65765
65766 static struct ctl_table root_table[];
65767 static struct ctl_table_root sysctl_table_root;
65768@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
65769 int sysctl_legacy_va_layout;
65770 #endif
65771
65772+#ifdef CONFIG_PAX_SOFTMODE
65773+static ctl_table pax_table[] = {
65774+ {
65775+ .ctl_name = CTL_UNNUMBERED,
65776+ .procname = "softmode",
65777+ .data = &pax_softmode,
65778+ .maxlen = sizeof(unsigned int),
65779+ .mode = 0600,
65780+ .proc_handler = &proc_dointvec,
65781+ },
65782+
65783+ { .ctl_name = 0 }
65784+};
65785+#endif
65786+
65787 extern int prove_locking;
65788 extern int lock_stat;
65789
65790@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
65791 #endif
65792
65793 static struct ctl_table kern_table[] = {
65794+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65795+ {
65796+ .ctl_name = CTL_UNNUMBERED,
65797+ .procname = "grsecurity",
65798+ .mode = 0500,
65799+ .child = grsecurity_table,
65800+ },
65801+#endif
65802+
65803+#ifdef CONFIG_PAX_SOFTMODE
65804+ {
65805+ .ctl_name = CTL_UNNUMBERED,
65806+ .procname = "pax",
65807+ .mode = 0500,
65808+ .child = pax_table,
65809+ },
65810+#endif
65811+
65812 {
65813 .ctl_name = CTL_UNNUMBERED,
65814 .procname = "sched_child_runs_first",
65815@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
65816 .data = &modprobe_path,
65817 .maxlen = KMOD_PATH_LEN,
65818 .mode = 0644,
65819- .proc_handler = &proc_dostring,
65820- .strategy = &sysctl_string,
65821+ .proc_handler = &proc_dostring_modpriv,
65822+ .strategy = &sysctl_string_modpriv,
65823 },
65824 {
65825 .ctl_name = CTL_UNNUMBERED,
65826@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
65827 .mode = 0644,
65828 .proc_handler = &proc_dointvec
65829 },
65830+ {
65831+ .procname = "heap_stack_gap",
65832+ .data = &sysctl_heap_stack_gap,
65833+ .maxlen = sizeof(sysctl_heap_stack_gap),
65834+ .mode = 0644,
65835+ .proc_handler = proc_doulongvec_minmax,
65836+ },
65837 #else
65838 {
65839 .ctl_name = CTL_UNNUMBERED,
65840@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
65841 return 0;
65842 }
65843
65844+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
65845+
65846 static int parse_table(int __user *name, int nlen,
65847 void __user *oldval, size_t __user *oldlenp,
65848 void __user *newval, size_t newlen,
65849@@ -1821,7 +1871,7 @@ repeat:
65850 if (n == table->ctl_name) {
65851 int error;
65852 if (table->child) {
65853- if (sysctl_perm(root, table, MAY_EXEC))
65854+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
65855 return -EPERM;
65856 name++;
65857 nlen--;
65858@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
65859 int error;
65860 int mode;
65861
65862+ if (table->parent != NULL && table->parent->procname != NULL &&
65863+ table->procname != NULL &&
65864+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
65865+ return -EACCES;
65866+ if (gr_handle_chroot_sysctl(op))
65867+ return -EACCES;
65868+ error = gr_handle_sysctl(table, op);
65869+ if (error)
65870+ return error;
65871+
65872+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
65873+ if (error)
65874+ return error;
65875+
65876+ if (root->permissions)
65877+ mode = root->permissions(root, current->nsproxy, table);
65878+ else
65879+ mode = table->mode;
65880+
65881+ return test_perm(mode, op);
65882+}
65883+
65884+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
65885+{
65886+ int error;
65887+ int mode;
65888+
65889 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
65890 if (error)
65891 return error;
65892@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
65893 buffer, lenp, ppos);
65894 }
65895
65896+int proc_dostring_modpriv(struct ctl_table *table, int write,
65897+ void __user *buffer, size_t *lenp, loff_t *ppos)
65898+{
65899+ if (write && !capable(CAP_SYS_MODULE))
65900+ return -EPERM;
65901+
65902+ return _proc_do_string(table->data, table->maxlen, write,
65903+ buffer, lenp, ppos);
65904+}
65905+
65906
65907 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
65908 int *valp,
65909@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
65910 vleft = table->maxlen / sizeof(unsigned long);
65911 left = *lenp;
65912
65913- for (; left && vleft--; i++, min++, max++, first=0) {
65914+ for (; left && vleft--; i++, first=0) {
65915 if (write) {
65916 while (left) {
65917 char c;
65918@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
65919 return -ENOSYS;
65920 }
65921
65922+int proc_dostring_modpriv(struct ctl_table *table, int write,
65923+ void __user *buffer, size_t *lenp, loff_t *ppos)
65924+{
65925+ return -ENOSYS;
65926+}
65927+
65928 int proc_dointvec(struct ctl_table *table, int write,
65929 void __user *buffer, size_t *lenp, loff_t *ppos)
65930 {
65931@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
65932 return 1;
65933 }
65934
65935+int sysctl_string_modpriv(struct ctl_table *table,
65936+ void __user *oldval, size_t __user *oldlenp,
65937+ void __user *newval, size_t newlen)
65938+{
65939+ if (newval && newlen && !capable(CAP_SYS_MODULE))
65940+ return -EPERM;
65941+
65942+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
65943+}
65944+
65945 /*
65946 * This function makes sure that all of the integers in the vector
65947 * are between the minimum and maximum values given in the arrays
65948@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
65949 return -ENOSYS;
65950 }
65951
65952+int sysctl_string_modpriv(struct ctl_table *table,
65953+ void __user *oldval, size_t __user *oldlenp,
65954+ void __user *newval, size_t newlen)
65955+{
65956+ return -ENOSYS;
65957+}
65958+
65959 int sysctl_intvec(struct ctl_table *table,
65960 void __user *oldval, size_t __user *oldlenp,
65961 void __user *newval, size_t newlen)
65962@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
65963 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
65964 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
65965 EXPORT_SYMBOL(proc_dostring);
65966+EXPORT_SYMBOL(proc_dostring_modpriv);
65967 EXPORT_SYMBOL(proc_doulongvec_minmax);
65968 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
65969 EXPORT_SYMBOL(register_sysctl_table);
65970@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
65971 EXPORT_SYMBOL(sysctl_jiffies);
65972 EXPORT_SYMBOL(sysctl_ms_jiffies);
65973 EXPORT_SYMBOL(sysctl_string);
65974+EXPORT_SYMBOL(sysctl_string_modpriv);
65975 EXPORT_SYMBOL(sysctl_data);
65976 EXPORT_SYMBOL(unregister_sysctl_table);
65977diff -urNp linux-2.6.32.45/kernel/sysctl_check.c linux-2.6.32.45/kernel/sysctl_check.c
65978--- linux-2.6.32.45/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
65979+++ linux-2.6.32.45/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
65980@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
65981 } else {
65982 if ((table->strategy == sysctl_data) ||
65983 (table->strategy == sysctl_string) ||
65984+ (table->strategy == sysctl_string_modpriv) ||
65985 (table->strategy == sysctl_intvec) ||
65986 (table->strategy == sysctl_jiffies) ||
65987 (table->strategy == sysctl_ms_jiffies) ||
65988 (table->proc_handler == proc_dostring) ||
65989+ (table->proc_handler == proc_dostring_modpriv) ||
65990 (table->proc_handler == proc_dointvec) ||
65991 (table->proc_handler == proc_dointvec_minmax) ||
65992 (table->proc_handler == proc_dointvec_jiffies) ||
65993diff -urNp linux-2.6.32.45/kernel/taskstats.c linux-2.6.32.45/kernel/taskstats.c
65994--- linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
65995+++ linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
65996@@ -26,9 +26,12 @@
65997 #include <linux/cgroup.h>
65998 #include <linux/fs.h>
65999 #include <linux/file.h>
66000+#include <linux/grsecurity.h>
66001 #include <net/genetlink.h>
66002 #include <asm/atomic.h>
66003
66004+extern int gr_is_taskstats_denied(int pid);
66005+
66006 /*
66007 * Maximum length of a cpumask that can be specified in
66008 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66009@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
66010 size_t size;
66011 cpumask_var_t mask;
66012
66013+ if (gr_is_taskstats_denied(current->pid))
66014+ return -EACCES;
66015+
66016 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
66017 return -ENOMEM;
66018
66019diff -urNp linux-2.6.32.45/kernel/time/tick-broadcast.c linux-2.6.32.45/kernel/time/tick-broadcast.c
66020--- linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
66021+++ linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
66022@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
66023 * then clear the broadcast bit.
66024 */
66025 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66026- int cpu = smp_processor_id();
66027+ cpu = smp_processor_id();
66028
66029 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66030 tick_broadcast_clear_oneshot(cpu);
66031diff -urNp linux-2.6.32.45/kernel/time/timekeeping.c linux-2.6.32.45/kernel/time/timekeeping.c
66032--- linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
66033+++ linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
66034@@ -14,6 +14,7 @@
66035 #include <linux/init.h>
66036 #include <linux/mm.h>
66037 #include <linux/sched.h>
66038+#include <linux/grsecurity.h>
66039 #include <linux/sysdev.h>
66040 #include <linux/clocksource.h>
66041 #include <linux/jiffies.h>
66042@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
66043 */
66044 struct timespec ts = xtime;
66045 timespec_add_ns(&ts, nsec);
66046- ACCESS_ONCE(xtime_cache) = ts;
66047+ ACCESS_ONCE_RW(xtime_cache) = ts;
66048 }
66049
66050 /* must hold xtime_lock */
66051@@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
66052 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66053 return -EINVAL;
66054
66055+ gr_log_timechange();
66056+
66057 write_seqlock_irqsave(&xtime_lock, flags);
66058
66059 timekeeping_forward_now();
66060diff -urNp linux-2.6.32.45/kernel/time/timer_list.c linux-2.6.32.45/kernel/time/timer_list.c
66061--- linux-2.6.32.45/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
66062+++ linux-2.6.32.45/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
66063@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66064
66065 static void print_name_offset(struct seq_file *m, void *sym)
66066 {
66067+#ifdef CONFIG_GRKERNSEC_HIDESYM
66068+ SEQ_printf(m, "<%p>", NULL);
66069+#else
66070 char symname[KSYM_NAME_LEN];
66071
66072 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66073 SEQ_printf(m, "<%p>", sym);
66074 else
66075 SEQ_printf(m, "%s", symname);
66076+#endif
66077 }
66078
66079 static void
66080@@ -112,7 +116,11 @@ next_one:
66081 static void
66082 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66083 {
66084+#ifdef CONFIG_GRKERNSEC_HIDESYM
66085+ SEQ_printf(m, " .base: %p\n", NULL);
66086+#else
66087 SEQ_printf(m, " .base: %p\n", base);
66088+#endif
66089 SEQ_printf(m, " .index: %d\n",
66090 base->index);
66091 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66092@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
66093 {
66094 struct proc_dir_entry *pe;
66095
66096+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66097+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66098+#else
66099 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66100+#endif
66101 if (!pe)
66102 return -ENOMEM;
66103 return 0;
66104diff -urNp linux-2.6.32.45/kernel/time/timer_stats.c linux-2.6.32.45/kernel/time/timer_stats.c
66105--- linux-2.6.32.45/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
66106+++ linux-2.6.32.45/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
66107@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66108 static unsigned long nr_entries;
66109 static struct entry entries[MAX_ENTRIES];
66110
66111-static atomic_t overflow_count;
66112+static atomic_unchecked_t overflow_count;
66113
66114 /*
66115 * The entries are in a hash-table, for fast lookup:
66116@@ -140,7 +140,7 @@ static void reset_entries(void)
66117 nr_entries = 0;
66118 memset(entries, 0, sizeof(entries));
66119 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66120- atomic_set(&overflow_count, 0);
66121+ atomic_set_unchecked(&overflow_count, 0);
66122 }
66123
66124 static struct entry *alloc_entry(void)
66125@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66126 if (likely(entry))
66127 entry->count++;
66128 else
66129- atomic_inc(&overflow_count);
66130+ atomic_inc_unchecked(&overflow_count);
66131
66132 out_unlock:
66133 spin_unlock_irqrestore(lock, flags);
66134@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66135
66136 static void print_name_offset(struct seq_file *m, unsigned long addr)
66137 {
66138+#ifdef CONFIG_GRKERNSEC_HIDESYM
66139+ seq_printf(m, "<%p>", NULL);
66140+#else
66141 char symname[KSYM_NAME_LEN];
66142
66143 if (lookup_symbol_name(addr, symname) < 0)
66144 seq_printf(m, "<%p>", (void *)addr);
66145 else
66146 seq_printf(m, "%s", symname);
66147+#endif
66148 }
66149
66150 static int tstats_show(struct seq_file *m, void *v)
66151@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66152
66153 seq_puts(m, "Timer Stats Version: v0.2\n");
66154 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66155- if (atomic_read(&overflow_count))
66156+ if (atomic_read_unchecked(&overflow_count))
66157 seq_printf(m, "Overflow: %d entries\n",
66158- atomic_read(&overflow_count));
66159+ atomic_read_unchecked(&overflow_count));
66160
66161 for (i = 0; i < nr_entries; i++) {
66162 entry = entries + i;
66163@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
66164 {
66165 struct proc_dir_entry *pe;
66166
66167+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66168+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66169+#else
66170 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66171+#endif
66172 if (!pe)
66173 return -ENOMEM;
66174 return 0;
66175diff -urNp linux-2.6.32.45/kernel/time.c linux-2.6.32.45/kernel/time.c
66176--- linux-2.6.32.45/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
66177+++ linux-2.6.32.45/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
66178@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
66179 return error;
66180
66181 if (tz) {
66182+ /* we log in do_settimeofday called below, so don't log twice
66183+ */
66184+ if (!tv)
66185+ gr_log_timechange();
66186+
66187 /* SMP safe, global irq locking makes it work. */
66188 sys_tz = *tz;
66189 update_vsyscall_tz();
66190@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
66191 * Avoid unnecessary multiplications/divisions in the
66192 * two most common HZ cases:
66193 */
66194-unsigned int inline jiffies_to_msecs(const unsigned long j)
66195+inline unsigned int jiffies_to_msecs(const unsigned long j)
66196 {
66197 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
66198 return (MSEC_PER_SEC / HZ) * j;
66199@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
66200 }
66201 EXPORT_SYMBOL(jiffies_to_msecs);
66202
66203-unsigned int inline jiffies_to_usecs(const unsigned long j)
66204+inline unsigned int jiffies_to_usecs(const unsigned long j)
66205 {
66206 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
66207 return (USEC_PER_SEC / HZ) * j;
66208diff -urNp linux-2.6.32.45/kernel/timer.c linux-2.6.32.45/kernel/timer.c
66209--- linux-2.6.32.45/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
66210+++ linux-2.6.32.45/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
66211@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
66212 /*
66213 * This function runs timers and the timer-tq in bottom half context.
66214 */
66215-static void run_timer_softirq(struct softirq_action *h)
66216+static void run_timer_softirq(void)
66217 {
66218 struct tvec_base *base = __get_cpu_var(tvec_bases);
66219
66220diff -urNp linux-2.6.32.45/kernel/trace/blktrace.c linux-2.6.32.45/kernel/trace/blktrace.c
66221--- linux-2.6.32.45/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
66222+++ linux-2.6.32.45/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
66223@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
66224 struct blk_trace *bt = filp->private_data;
66225 char buf[16];
66226
66227- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66228+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66229
66230 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66231 }
66232@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
66233 return 1;
66234
66235 bt = buf->chan->private_data;
66236- atomic_inc(&bt->dropped);
66237+ atomic_inc_unchecked(&bt->dropped);
66238 return 0;
66239 }
66240
66241@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
66242
66243 bt->dir = dir;
66244 bt->dev = dev;
66245- atomic_set(&bt->dropped, 0);
66246+ atomic_set_unchecked(&bt->dropped, 0);
66247
66248 ret = -EIO;
66249 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66250diff -urNp linux-2.6.32.45/kernel/trace/ftrace.c linux-2.6.32.45/kernel/trace/ftrace.c
66251--- linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
66252+++ linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
66253@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
66254
66255 ip = rec->ip;
66256
66257+ ret = ftrace_arch_code_modify_prepare();
66258+ FTRACE_WARN_ON(ret);
66259+ if (ret)
66260+ return 0;
66261+
66262 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66263+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66264 if (ret) {
66265 ftrace_bug(ret, ip);
66266 rec->flags |= FTRACE_FL_FAILED;
66267- return 0;
66268 }
66269- return 1;
66270+ return ret ? 0 : 1;
66271 }
66272
66273 /*
66274diff -urNp linux-2.6.32.45/kernel/trace/ring_buffer.c linux-2.6.32.45/kernel/trace/ring_buffer.c
66275--- linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
66276+++ linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
66277@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
66278 * the reader page). But if the next page is a header page,
66279 * its flags will be non zero.
66280 */
66281-static int inline
66282+static inline int
66283 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
66284 struct buffer_page *page, struct list_head *list)
66285 {
66286diff -urNp linux-2.6.32.45/kernel/trace/trace.c linux-2.6.32.45/kernel/trace/trace.c
66287--- linux-2.6.32.45/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
66288+++ linux-2.6.32.45/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
66289@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
66290 size_t rem;
66291 unsigned int i;
66292
66293+ pax_track_stack();
66294+
66295 /* copy the tracer to avoid using a global lock all around */
66296 mutex_lock(&trace_types_lock);
66297 if (unlikely(old_tracer != current_trace && current_trace)) {
66298@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
66299 int entries, size, i;
66300 size_t ret;
66301
66302+ pax_track_stack();
66303+
66304 if (*ppos & (PAGE_SIZE - 1)) {
66305 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
66306 return -EINVAL;
66307@@ -3816,10 +3820,9 @@ static const struct file_operations trac
66308 };
66309 #endif
66310
66311-static struct dentry *d_tracer;
66312-
66313 struct dentry *tracing_init_dentry(void)
66314 {
66315+ static struct dentry *d_tracer;
66316 static int once;
66317
66318 if (d_tracer)
66319@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
66320 return d_tracer;
66321 }
66322
66323-static struct dentry *d_percpu;
66324-
66325 struct dentry *tracing_dentry_percpu(void)
66326 {
66327+ static struct dentry *d_percpu;
66328 static int once;
66329 struct dentry *d_tracer;
66330
66331diff -urNp linux-2.6.32.45/kernel/trace/trace_events.c linux-2.6.32.45/kernel/trace/trace_events.c
66332--- linux-2.6.32.45/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
66333+++ linux-2.6.32.45/kernel/trace/trace_events.c 2011-08-05 20:33:55.000000000 -0400
66334@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list
66335 * Modules must own their file_operations to keep up with
66336 * reference counting.
66337 */
66338+
66339 struct ftrace_module_file_ops {
66340 struct list_head list;
66341 struct module *mod;
66342- struct file_operations id;
66343- struct file_operations enable;
66344- struct file_operations format;
66345- struct file_operations filter;
66346 };
66347
66348 static void remove_subsystem_dir(const char *name)
66349@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod
66350
66351 file_ops->mod = mod;
66352
66353- file_ops->id = ftrace_event_id_fops;
66354- file_ops->id.owner = mod;
66355-
66356- file_ops->enable = ftrace_enable_fops;
66357- file_ops->enable.owner = mod;
66358-
66359- file_ops->filter = ftrace_event_filter_fops;
66360- file_ops->filter.owner = mod;
66361-
66362- file_ops->format = ftrace_event_format_fops;
66363- file_ops->format.owner = mod;
66364+ pax_open_kernel();
66365+ *(void **)&mod->trace_id.owner = mod;
66366+ *(void **)&mod->trace_enable.owner = mod;
66367+ *(void **)&mod->trace_filter.owner = mod;
66368+ *(void **)&mod->trace_format.owner = mod;
66369+ pax_close_kernel();
66370
66371 list_add(&file_ops->list, &ftrace_module_file_list);
66372
66373@@ -1063,8 +1055,8 @@ static void trace_module_add_events(stru
66374 call->mod = mod;
66375 list_add(&call->list, &ftrace_events);
66376 event_create_dir(call, d_events,
66377- &file_ops->id, &file_ops->enable,
66378- &file_ops->filter, &file_ops->format);
66379+ &mod->trace_id, &mod->trace_enable,
66380+ &mod->trace_filter, &mod->trace_format);
66381 }
66382 }
66383
66384diff -urNp linux-2.6.32.45/kernel/trace/trace_mmiotrace.c linux-2.6.32.45/kernel/trace/trace_mmiotrace.c
66385--- linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
66386+++ linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
66387@@ -23,7 +23,7 @@ struct header_iter {
66388 static struct trace_array *mmio_trace_array;
66389 static bool overrun_detected;
66390 static unsigned long prev_overruns;
66391-static atomic_t dropped_count;
66392+static atomic_unchecked_t dropped_count;
66393
66394 static void mmio_reset_data(struct trace_array *tr)
66395 {
66396@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
66397
66398 static unsigned long count_overruns(struct trace_iterator *iter)
66399 {
66400- unsigned long cnt = atomic_xchg(&dropped_count, 0);
66401+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66402 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66403
66404 if (over > prev_overruns)
66405@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
66406 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66407 sizeof(*entry), 0, pc);
66408 if (!event) {
66409- atomic_inc(&dropped_count);
66410+ atomic_inc_unchecked(&dropped_count);
66411 return;
66412 }
66413 entry = ring_buffer_event_data(event);
66414@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
66415 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66416 sizeof(*entry), 0, pc);
66417 if (!event) {
66418- atomic_inc(&dropped_count);
66419+ atomic_inc_unchecked(&dropped_count);
66420 return;
66421 }
66422 entry = ring_buffer_event_data(event);
66423diff -urNp linux-2.6.32.45/kernel/trace/trace_output.c linux-2.6.32.45/kernel/trace/trace_output.c
66424--- linux-2.6.32.45/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
66425+++ linux-2.6.32.45/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
66426@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
66427 return 0;
66428 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66429 if (!IS_ERR(p)) {
66430- p = mangle_path(s->buffer + s->len, p, "\n");
66431+ p = mangle_path(s->buffer + s->len, p, "\n\\");
66432 if (p) {
66433 s->len = p - s->buffer;
66434 return 1;
66435diff -urNp linux-2.6.32.45/kernel/trace/trace_stack.c linux-2.6.32.45/kernel/trace/trace_stack.c
66436--- linux-2.6.32.45/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
66437+++ linux-2.6.32.45/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
66438@@ -50,7 +50,7 @@ static inline void check_stack(void)
66439 return;
66440
66441 /* we do not handle interrupt stacks yet */
66442- if (!object_is_on_stack(&this_size))
66443+ if (!object_starts_on_stack(&this_size))
66444 return;
66445
66446 local_irq_save(flags);
66447diff -urNp linux-2.6.32.45/kernel/trace/trace_workqueue.c linux-2.6.32.45/kernel/trace/trace_workqueue.c
66448--- linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
66449+++ linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
66450@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
66451 int cpu;
66452 pid_t pid;
66453 /* Can be inserted from interrupt or user context, need to be atomic */
66454- atomic_t inserted;
66455+ atomic_unchecked_t inserted;
66456 /*
66457 * Don't need to be atomic, works are serialized in a single workqueue thread
66458 * on a single CPU.
66459@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
66460 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66461 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66462 if (node->pid == wq_thread->pid) {
66463- atomic_inc(&node->inserted);
66464+ atomic_inc_unchecked(&node->inserted);
66465 goto found;
66466 }
66467 }
66468@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
66469 tsk = get_pid_task(pid, PIDTYPE_PID);
66470 if (tsk) {
66471 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66472- atomic_read(&cws->inserted), cws->executed,
66473+ atomic_read_unchecked(&cws->inserted), cws->executed,
66474 tsk->comm);
66475 put_task_struct(tsk);
66476 }
66477diff -urNp linux-2.6.32.45/kernel/user.c linux-2.6.32.45/kernel/user.c
66478--- linux-2.6.32.45/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
66479+++ linux-2.6.32.45/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
66480@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
66481 spin_lock_irq(&uidhash_lock);
66482 up = uid_hash_find(uid, hashent);
66483 if (up) {
66484+ put_user_ns(ns);
66485 key_put(new->uid_keyring);
66486 key_put(new->session_keyring);
66487 kmem_cache_free(uid_cachep, new);
66488diff -urNp linux-2.6.32.45/lib/bug.c linux-2.6.32.45/lib/bug.c
66489--- linux-2.6.32.45/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
66490+++ linux-2.6.32.45/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
66491@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
66492 return BUG_TRAP_TYPE_NONE;
66493
66494 bug = find_bug(bugaddr);
66495+ if (!bug)
66496+ return BUG_TRAP_TYPE_NONE;
66497
66498 printk(KERN_EMERG "------------[ cut here ]------------\n");
66499
66500diff -urNp linux-2.6.32.45/lib/debugobjects.c linux-2.6.32.45/lib/debugobjects.c
66501--- linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
66502+++ linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
66503@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
66504 if (limit > 4)
66505 return;
66506
66507- is_on_stack = object_is_on_stack(addr);
66508+ is_on_stack = object_starts_on_stack(addr);
66509 if (is_on_stack == onstack)
66510 return;
66511
66512diff -urNp linux-2.6.32.45/lib/dma-debug.c linux-2.6.32.45/lib/dma-debug.c
66513--- linux-2.6.32.45/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
66514+++ linux-2.6.32.45/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
66515@@ -861,7 +861,7 @@ out:
66516
66517 static void check_for_stack(struct device *dev, void *addr)
66518 {
66519- if (object_is_on_stack(addr))
66520+ if (object_starts_on_stack(addr))
66521 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66522 "stack [addr=%p]\n", addr);
66523 }
66524diff -urNp linux-2.6.32.45/lib/idr.c linux-2.6.32.45/lib/idr.c
66525--- linux-2.6.32.45/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
66526+++ linux-2.6.32.45/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
66527@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
66528 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
66529
66530 /* if already at the top layer, we need to grow */
66531- if (id >= 1 << (idp->layers * IDR_BITS)) {
66532+ if (id >= (1 << (idp->layers * IDR_BITS))) {
66533 *starting_id = id;
66534 return IDR_NEED_TO_GROW;
66535 }
66536diff -urNp linux-2.6.32.45/lib/inflate.c linux-2.6.32.45/lib/inflate.c
66537--- linux-2.6.32.45/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
66538+++ linux-2.6.32.45/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
66539@@ -266,7 +266,7 @@ static void free(void *where)
66540 malloc_ptr = free_mem_ptr;
66541 }
66542 #else
66543-#define malloc(a) kmalloc(a, GFP_KERNEL)
66544+#define malloc(a) kmalloc((a), GFP_KERNEL)
66545 #define free(a) kfree(a)
66546 #endif
66547
66548diff -urNp linux-2.6.32.45/lib/Kconfig.debug linux-2.6.32.45/lib/Kconfig.debug
66549--- linux-2.6.32.45/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
66550+++ linux-2.6.32.45/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
66551@@ -905,7 +905,7 @@ config LATENCYTOP
66552 select STACKTRACE
66553 select SCHEDSTATS
66554 select SCHED_DEBUG
66555- depends on HAVE_LATENCYTOP_SUPPORT
66556+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
66557 help
66558 Enable this option if you want to use the LatencyTOP tool
66559 to find out which userspace is blocking on what kernel operations.
66560diff -urNp linux-2.6.32.45/lib/kobject.c linux-2.6.32.45/lib/kobject.c
66561--- linux-2.6.32.45/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
66562+++ linux-2.6.32.45/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
66563@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
66564 return ret;
66565 }
66566
66567-struct sysfs_ops kobj_sysfs_ops = {
66568+const struct sysfs_ops kobj_sysfs_ops = {
66569 .show = kobj_attr_show,
66570 .store = kobj_attr_store,
66571 };
66572@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
66573 * If the kset was not able to be created, NULL will be returned.
66574 */
66575 static struct kset *kset_create(const char *name,
66576- struct kset_uevent_ops *uevent_ops,
66577+ const struct kset_uevent_ops *uevent_ops,
66578 struct kobject *parent_kobj)
66579 {
66580 struct kset *kset;
66581@@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
66582 * If the kset was not able to be created, NULL will be returned.
66583 */
66584 struct kset *kset_create_and_add(const char *name,
66585- struct kset_uevent_ops *uevent_ops,
66586+ const struct kset_uevent_ops *uevent_ops,
66587 struct kobject *parent_kobj)
66588 {
66589 struct kset *kset;
66590diff -urNp linux-2.6.32.45/lib/kobject_uevent.c linux-2.6.32.45/lib/kobject_uevent.c
66591--- linux-2.6.32.45/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
66592+++ linux-2.6.32.45/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
66593@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
66594 const char *subsystem;
66595 struct kobject *top_kobj;
66596 struct kset *kset;
66597- struct kset_uevent_ops *uevent_ops;
66598+ const struct kset_uevent_ops *uevent_ops;
66599 u64 seq;
66600 int i = 0;
66601 int retval = 0;
66602diff -urNp linux-2.6.32.45/lib/kref.c linux-2.6.32.45/lib/kref.c
66603--- linux-2.6.32.45/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
66604+++ linux-2.6.32.45/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
66605@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
66606 */
66607 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66608 {
66609- WARN_ON(release == NULL);
66610+ BUG_ON(release == NULL);
66611 WARN_ON(release == (void (*)(struct kref *))kfree);
66612
66613 if (atomic_dec_and_test(&kref->refcount)) {
66614diff -urNp linux-2.6.32.45/lib/parser.c linux-2.6.32.45/lib/parser.c
66615--- linux-2.6.32.45/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
66616+++ linux-2.6.32.45/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
66617@@ -126,7 +126,7 @@ static int match_number(substring_t *s,
66618 char *buf;
66619 int ret;
66620
66621- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
66622+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
66623 if (!buf)
66624 return -ENOMEM;
66625 memcpy(buf, s->from, s->to - s->from);
66626diff -urNp linux-2.6.32.45/lib/radix-tree.c linux-2.6.32.45/lib/radix-tree.c
66627--- linux-2.6.32.45/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
66628+++ linux-2.6.32.45/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
66629@@ -81,7 +81,7 @@ struct radix_tree_preload {
66630 int nr;
66631 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66632 };
66633-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66634+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66635
66636 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
66637 {
66638diff -urNp linux-2.6.32.45/lib/random32.c linux-2.6.32.45/lib/random32.c
66639--- linux-2.6.32.45/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
66640+++ linux-2.6.32.45/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
66641@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
66642 */
66643 static inline u32 __seed(u32 x, u32 m)
66644 {
66645- return (x < m) ? x + m : x;
66646+ return (x <= m) ? x + m + 1 : x;
66647 }
66648
66649 /**
66650diff -urNp linux-2.6.32.45/lib/vsprintf.c linux-2.6.32.45/lib/vsprintf.c
66651--- linux-2.6.32.45/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
66652+++ linux-2.6.32.45/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
66653@@ -16,6 +16,9 @@
66654 * - scnprintf and vscnprintf
66655 */
66656
66657+#ifdef CONFIG_GRKERNSEC_HIDESYM
66658+#define __INCLUDED_BY_HIDESYM 1
66659+#endif
66660 #include <stdarg.h>
66661 #include <linux/module.h>
66662 #include <linux/types.h>
66663@@ -546,12 +549,12 @@ static char *number(char *buf, char *end
66664 return buf;
66665 }
66666
66667-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
66668+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
66669 {
66670 int len, i;
66671
66672 if ((unsigned long)s < PAGE_SIZE)
66673- s = "<NULL>";
66674+ s = "(null)";
66675
66676 len = strnlen(s, spec.precision);
66677
66678@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
66679 unsigned long value = (unsigned long) ptr;
66680 #ifdef CONFIG_KALLSYMS
66681 char sym[KSYM_SYMBOL_LEN];
66682- if (ext != 'f' && ext != 's')
66683+ if (ext != 'f' && ext != 's' && ext != 'a')
66684 sprint_symbol(sym, value);
66685 else
66686 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66687@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
66688 * - 'f' For simple symbolic function names without offset
66689 * - 'S' For symbolic direct pointers with offset
66690 * - 's' For symbolic direct pointers without offset
66691+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66692+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66693 * - 'R' For a struct resource pointer, it prints the range of
66694 * addresses (not the name nor the flags)
66695 * - 'M' For a 6-byte MAC address, it prints the address in the
66696@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
66697 struct printf_spec spec)
66698 {
66699 if (!ptr)
66700- return string(buf, end, "(null)", spec);
66701+ return string(buf, end, "(nil)", spec);
66702
66703 switch (*fmt) {
66704 case 'F':
66705@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
66706 case 's':
66707 /* Fallthrough */
66708 case 'S':
66709+#ifdef CONFIG_GRKERNSEC_HIDESYM
66710+ break;
66711+#else
66712+ return symbol_string(buf, end, ptr, spec, *fmt);
66713+#endif
66714+ case 'a':
66715+ /* Fallthrough */
66716+ case 'A':
66717 return symbol_string(buf, end, ptr, spec, *fmt);
66718 case 'R':
66719 return resource_string(buf, end, ptr, spec);
66720@@ -1445,7 +1458,7 @@ do { \
66721 size_t len;
66722 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
66723 || (unsigned long)save_str < PAGE_SIZE)
66724- save_str = "<NULL>";
66725+ save_str = "(null)";
66726 len = strlen(save_str);
66727 if (str + len + 1 < end)
66728 memcpy(str, save_str, len + 1);
66729@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
66730 typeof(type) value; \
66731 if (sizeof(type) == 8) { \
66732 args = PTR_ALIGN(args, sizeof(u32)); \
66733- *(u32 *)&value = *(u32 *)args; \
66734- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66735+ *(u32 *)&value = *(const u32 *)args; \
66736+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66737 } else { \
66738 args = PTR_ALIGN(args, sizeof(type)); \
66739- value = *(typeof(type) *)args; \
66740+ value = *(const typeof(type) *)args; \
66741 } \
66742 args += sizeof(type); \
66743 value; \
66744@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
66745 const char *str_arg = args;
66746 size_t len = strlen(str_arg);
66747 args += len + 1;
66748- str = string(str, end, (char *)str_arg, spec);
66749+ str = string(str, end, str_arg, spec);
66750 break;
66751 }
66752
66753diff -urNp linux-2.6.32.45/localversion-grsec linux-2.6.32.45/localversion-grsec
66754--- linux-2.6.32.45/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66755+++ linux-2.6.32.45/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
66756@@ -0,0 +1 @@
66757+-grsec
66758diff -urNp linux-2.6.32.45/Makefile linux-2.6.32.45/Makefile
66759--- linux-2.6.32.45/Makefile 2011-08-16 20:37:25.000000000 -0400
66760+++ linux-2.6.32.45/Makefile 2011-08-24 18:35:52.000000000 -0400
66761@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66762
66763 HOSTCC = gcc
66764 HOSTCXX = g++
66765-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66766-HOSTCXXFLAGS = -O2
66767+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66768+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66769+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
66770
66771 # Decide whether to build built-in, modular, or both.
66772 # Normally, just do built-in.
66773@@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
66774 KBUILD_CPPFLAGS := -D__KERNEL__
66775
66776 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
66777+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
66778 -fno-strict-aliasing -fno-common \
66779 -Werror-implicit-function-declaration \
66780 -Wno-format-security \
66781 -fno-delete-null-pointer-checks
66782+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
66783 KBUILD_AFLAGS := -D__ASSEMBLY__
66784
66785 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
66786@@ -376,9 +379,10 @@ export RCS_TAR_IGNORE := --exclude SCCS
66787 # Rules shared between *config targets and build targets
66788
66789 # Basic helpers built in scripts/
66790-PHONY += scripts_basic
66791-scripts_basic:
66792+PHONY += scripts_basic0 scripts_basic gcc-plugins
66793+scripts_basic0:
66794 $(Q)$(MAKE) $(build)=scripts/basic
66795+scripts_basic: scripts_basic0 gcc-plugins
66796
66797 # To avoid any implicit rule to kick in, define an empty command.
66798 scripts/basic/%: scripts_basic ;
66799@@ -403,7 +407,7 @@ endif
66800 # of make so .config is not included in this case either (for *config).
66801
66802 no-dot-config-targets := clean mrproper distclean \
66803- cscope TAGS tags help %docs check% \
66804+ cscope gtags TAGS tags help %docs check% \
66805 include/linux/version.h headers_% \
66806 kernelrelease kernelversion
66807
66808@@ -526,6 +530,24 @@ else
66809 KBUILD_CFLAGS += -O2
66810 endif
66811
66812+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
66813+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
66814+ifdef CONFIG_PAX_MEMORY_STACKLEAK
66815+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
66816+endif
66817+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
66818+gcc-plugins:
66819+ $(Q)$(MAKE) $(build)=tools/gcc
66820+else
66821+gcc-plugins:
66822+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
66823+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
66824+else
66825+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
66826+endif
66827+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
66828+endif
66829+
66830 include $(srctree)/arch/$(SRCARCH)/Makefile
66831
66832 ifneq ($(CONFIG_FRAME_WARN),0)
66833@@ -644,7 +666,7 @@ export mod_strip_cmd
66834
66835
66836 ifeq ($(KBUILD_EXTMOD),)
66837-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
66838+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
66839
66840 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
66841 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
66842@@ -840,6 +862,7 @@ define rule_vmlinux-modpost
66843 endef
66844
66845 # vmlinux image - including updated kernel symbols
66846+vmlinux: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
66847 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
66848 ifdef CONFIG_HEADERS_CHECK
66849 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
66850@@ -970,7 +993,7 @@ ifneq ($(KBUILD_SRC),)
66851 endif
66852
66853 # prepare2 creates a makefile if using a separate output directory
66854-prepare2: prepare3 outputmakefile
66855+prepare2: prepare3 outputmakefile gcc-plugins
66856
66857 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
66858 include/asm include/config/auto.conf
66859@@ -1124,6 +1147,7 @@ all: modules
66860 # using awk while concatenating to the final file.
66861
66862 PHONY += modules
66863+modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
66864 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
66865 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
66866 @$(kecho) ' Building modules, stage 2.';
66867@@ -1198,7 +1222,7 @@ MRPROPER_FILES += .config .config.old in
66868 include/linux/autoconf.h include/linux/version.h \
66869 include/linux/utsrelease.h \
66870 include/linux/bounds.h include/asm*/asm-offsets.h \
66871- Module.symvers Module.markers tags TAGS cscope*
66872+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
66873
66874 # clean - Delete most, but leave enough to build external modules
66875 #
66876@@ -1242,7 +1266,7 @@ distclean: mrproper
66877 @find $(srctree) $(RCS_FIND_IGNORE) \
66878 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
66879 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
66880- -o -name '.*.rej' -o -size 0 \
66881+ -o -name '.*.rej' -o -size 0 -o -name '*.so' \
66882 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
66883 -type f -print | xargs rm -f
66884
66885@@ -1289,6 +1313,7 @@ help:
66886 @echo ' modules_prepare - Set up for building external modules'
66887 @echo ' tags/TAGS - Generate tags file for editors'
66888 @echo ' cscope - Generate cscope index'
66889+ @echo ' gtags - Generate GNU GLOBAL index'
66890 @echo ' kernelrelease - Output the release version string'
66891 @echo ' kernelversion - Output the version stored in Makefile'
66892 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
66893@@ -1421,7 +1446,7 @@ clean: $(clean-dirs)
66894 $(call cmd,rmdirs)
66895 $(call cmd,rmfiles)
66896 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
66897- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
66898+ \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
66899 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
66900 -o -name '*.gcno' \) -type f -print | xargs rm -f
66901
66902@@ -1445,7 +1470,7 @@ endif # KBUILD_EXTMOD
66903 quiet_cmd_tags = GEN $@
66904 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
66905
66906-tags TAGS cscope: FORCE
66907+tags TAGS cscope gtags: FORCE
66908 $(call cmd,tags)
66909
66910 # Scripts to check various things for consistency
66911diff -urNp linux-2.6.32.45/mm/backing-dev.c linux-2.6.32.45/mm/backing-dev.c
66912--- linux-2.6.32.45/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
66913+++ linux-2.6.32.45/mm/backing-dev.c 2011-08-11 19:48:17.000000000 -0400
66914@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing
66915 list_add_tail_rcu(&wb->list, &bdi->wb_list);
66916 spin_unlock(&bdi->wb_lock);
66917
66918- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
66919+ tsk->flags |= PF_SWAPWRITE;
66920 set_freezable();
66921
66922 /*
66923@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
66924 * Add the default flusher task that gets created for any bdi
66925 * that has dirty data pending writeout
66926 */
66927-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
66928+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
66929 {
66930 if (!bdi_cap_writeback_dirty(bdi))
66931 return;
66932diff -urNp linux-2.6.32.45/mm/filemap.c linux-2.6.32.45/mm/filemap.c
66933--- linux-2.6.32.45/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
66934+++ linux-2.6.32.45/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
66935@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
66936 struct address_space *mapping = file->f_mapping;
66937
66938 if (!mapping->a_ops->readpage)
66939- return -ENOEXEC;
66940+ return -ENODEV;
66941 file_accessed(file);
66942 vma->vm_ops = &generic_file_vm_ops;
66943 vma->vm_flags |= VM_CAN_NONLINEAR;
66944@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
66945 *pos = i_size_read(inode);
66946
66947 if (limit != RLIM_INFINITY) {
66948+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
66949 if (*pos >= limit) {
66950 send_sig(SIGXFSZ, current, 0);
66951 return -EFBIG;
66952diff -urNp linux-2.6.32.45/mm/fremap.c linux-2.6.32.45/mm/fremap.c
66953--- linux-2.6.32.45/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
66954+++ linux-2.6.32.45/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
66955@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
66956 retry:
66957 vma = find_vma(mm, start);
66958
66959+#ifdef CONFIG_PAX_SEGMEXEC
66960+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
66961+ goto out;
66962+#endif
66963+
66964 /*
66965 * Make sure the vma is shared, that it supports prefaulting,
66966 * and that the remapped range is valid and fully within
66967@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
66968 /*
66969 * drop PG_Mlocked flag for over-mapped range
66970 */
66971- unsigned int saved_flags = vma->vm_flags;
66972+ unsigned long saved_flags = vma->vm_flags;
66973 munlock_vma_pages_range(vma, start, start + size);
66974 vma->vm_flags = saved_flags;
66975 }
66976diff -urNp linux-2.6.32.45/mm/highmem.c linux-2.6.32.45/mm/highmem.c
66977--- linux-2.6.32.45/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
66978+++ linux-2.6.32.45/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
66979@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
66980 * So no dangers, even with speculative execution.
66981 */
66982 page = pte_page(pkmap_page_table[i]);
66983+ pax_open_kernel();
66984 pte_clear(&init_mm, (unsigned long)page_address(page),
66985 &pkmap_page_table[i]);
66986-
66987+ pax_close_kernel();
66988 set_page_address(page, NULL);
66989 need_flush = 1;
66990 }
66991@@ -177,9 +178,11 @@ start:
66992 }
66993 }
66994 vaddr = PKMAP_ADDR(last_pkmap_nr);
66995+
66996+ pax_open_kernel();
66997 set_pte_at(&init_mm, vaddr,
66998 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
66999-
67000+ pax_close_kernel();
67001 pkmap_count[last_pkmap_nr] = 1;
67002 set_page_address(page, (void *)vaddr);
67003
67004diff -urNp linux-2.6.32.45/mm/hugetlb.c linux-2.6.32.45/mm/hugetlb.c
67005--- linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
67006+++ linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
67007@@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
67008 return 1;
67009 }
67010
67011+#ifdef CONFIG_PAX_SEGMEXEC
67012+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67013+{
67014+ struct mm_struct *mm = vma->vm_mm;
67015+ struct vm_area_struct *vma_m;
67016+ unsigned long address_m;
67017+ pte_t *ptep_m;
67018+
67019+ vma_m = pax_find_mirror_vma(vma);
67020+ if (!vma_m)
67021+ return;
67022+
67023+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67024+ address_m = address + SEGMEXEC_TASK_SIZE;
67025+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67026+ get_page(page_m);
67027+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67028+}
67029+#endif
67030+
67031 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
67032 unsigned long address, pte_t *ptep, pte_t pte,
67033 struct page *pagecache_page)
67034@@ -2004,6 +2024,11 @@ retry_avoidcopy:
67035 huge_ptep_clear_flush(vma, address, ptep);
67036 set_huge_pte_at(mm, address, ptep,
67037 make_huge_pte(vma, new_page, 1));
67038+
67039+#ifdef CONFIG_PAX_SEGMEXEC
67040+ pax_mirror_huge_pte(vma, address, new_page);
67041+#endif
67042+
67043 /* Make the old page be freed below */
67044 new_page = old_page;
67045 }
67046@@ -2135,6 +2160,10 @@ retry:
67047 && (vma->vm_flags & VM_SHARED)));
67048 set_huge_pte_at(mm, address, ptep, new_pte);
67049
67050+#ifdef CONFIG_PAX_SEGMEXEC
67051+ pax_mirror_huge_pte(vma, address, page);
67052+#endif
67053+
67054 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67055 /* Optimization, do the COW without a second fault */
67056 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67057@@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
67058 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67059 struct hstate *h = hstate_vma(vma);
67060
67061+#ifdef CONFIG_PAX_SEGMEXEC
67062+ struct vm_area_struct *vma_m;
67063+
67064+ vma_m = pax_find_mirror_vma(vma);
67065+ if (vma_m) {
67066+ unsigned long address_m;
67067+
67068+ if (vma->vm_start > vma_m->vm_start) {
67069+ address_m = address;
67070+ address -= SEGMEXEC_TASK_SIZE;
67071+ vma = vma_m;
67072+ h = hstate_vma(vma);
67073+ } else
67074+ address_m = address + SEGMEXEC_TASK_SIZE;
67075+
67076+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67077+ return VM_FAULT_OOM;
67078+ address_m &= HPAGE_MASK;
67079+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67080+ }
67081+#endif
67082+
67083 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67084 if (!ptep)
67085 return VM_FAULT_OOM;
67086diff -urNp linux-2.6.32.45/mm/internal.h linux-2.6.32.45/mm/internal.h
67087--- linux-2.6.32.45/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
67088+++ linux-2.6.32.45/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
67089@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
67090 * in mm/page_alloc.c
67091 */
67092 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67093+extern void free_compound_page(struct page *page);
67094 extern void prep_compound_page(struct page *page, unsigned long order);
67095
67096
67097diff -urNp linux-2.6.32.45/mm/Kconfig linux-2.6.32.45/mm/Kconfig
67098--- linux-2.6.32.45/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
67099+++ linux-2.6.32.45/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
67100@@ -228,7 +228,7 @@ config KSM
67101 config DEFAULT_MMAP_MIN_ADDR
67102 int "Low address space to protect from user allocation"
67103 depends on MMU
67104- default 4096
67105+ default 65536
67106 help
67107 This is the portion of low virtual memory which should be protected
67108 from userspace allocation. Keeping a user from writing to low pages
67109diff -urNp linux-2.6.32.45/mm/kmemleak.c linux-2.6.32.45/mm/kmemleak.c
67110--- linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
67111+++ linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
67112@@ -358,7 +358,7 @@ static void print_unreferenced(struct se
67113
67114 for (i = 0; i < object->trace_len; i++) {
67115 void *ptr = (void *)object->trace[i];
67116- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67117+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67118 }
67119 }
67120
67121diff -urNp linux-2.6.32.45/mm/maccess.c linux-2.6.32.45/mm/maccess.c
67122--- linux-2.6.32.45/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
67123+++ linux-2.6.32.45/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
67124@@ -14,7 +14,7 @@
67125 * Safely read from address @src to the buffer at @dst. If a kernel fault
67126 * happens, handle that and return -EFAULT.
67127 */
67128-long probe_kernel_read(void *dst, void *src, size_t size)
67129+long probe_kernel_read(void *dst, const void *src, size_t size)
67130 {
67131 long ret;
67132 mm_segment_t old_fs = get_fs();
67133@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
67134 * Safely write to address @dst from the buffer at @src. If a kernel fault
67135 * happens, handle that and return -EFAULT.
67136 */
67137-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
67138+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
67139 {
67140 long ret;
67141 mm_segment_t old_fs = get_fs();
67142diff -urNp linux-2.6.32.45/mm/madvise.c linux-2.6.32.45/mm/madvise.c
67143--- linux-2.6.32.45/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
67144+++ linux-2.6.32.45/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
67145@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
67146 pgoff_t pgoff;
67147 unsigned long new_flags = vma->vm_flags;
67148
67149+#ifdef CONFIG_PAX_SEGMEXEC
67150+ struct vm_area_struct *vma_m;
67151+#endif
67152+
67153 switch (behavior) {
67154 case MADV_NORMAL:
67155 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67156@@ -103,6 +107,13 @@ success:
67157 /*
67158 * vm_flags is protected by the mmap_sem held in write mode.
67159 */
67160+
67161+#ifdef CONFIG_PAX_SEGMEXEC
67162+ vma_m = pax_find_mirror_vma(vma);
67163+ if (vma_m)
67164+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67165+#endif
67166+
67167 vma->vm_flags = new_flags;
67168
67169 out:
67170@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
67171 struct vm_area_struct ** prev,
67172 unsigned long start, unsigned long end)
67173 {
67174+
67175+#ifdef CONFIG_PAX_SEGMEXEC
67176+ struct vm_area_struct *vma_m;
67177+#endif
67178+
67179 *prev = vma;
67180 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67181 return -EINVAL;
67182@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
67183 zap_page_range(vma, start, end - start, &details);
67184 } else
67185 zap_page_range(vma, start, end - start, NULL);
67186+
67187+#ifdef CONFIG_PAX_SEGMEXEC
67188+ vma_m = pax_find_mirror_vma(vma);
67189+ if (vma_m) {
67190+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67191+ struct zap_details details = {
67192+ .nonlinear_vma = vma_m,
67193+ .last_index = ULONG_MAX,
67194+ };
67195+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67196+ } else
67197+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67198+ }
67199+#endif
67200+
67201 return 0;
67202 }
67203
67204@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67205 if (end < start)
67206 goto out;
67207
67208+#ifdef CONFIG_PAX_SEGMEXEC
67209+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67210+ if (end > SEGMEXEC_TASK_SIZE)
67211+ goto out;
67212+ } else
67213+#endif
67214+
67215+ if (end > TASK_SIZE)
67216+ goto out;
67217+
67218 error = 0;
67219 if (end == start)
67220 goto out;
67221diff -urNp linux-2.6.32.45/mm/memory.c linux-2.6.32.45/mm/memory.c
67222--- linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
67223+++ linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
67224@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
67225 return;
67226
67227 pmd = pmd_offset(pud, start);
67228+
67229+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67230 pud_clear(pud);
67231 pmd_free_tlb(tlb, pmd, start);
67232+#endif
67233+
67234 }
67235
67236 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67237@@ -219,9 +223,12 @@ static inline void free_pud_range(struct
67238 if (end - 1 > ceiling - 1)
67239 return;
67240
67241+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67242 pud = pud_offset(pgd, start);
67243 pgd_clear(pgd);
67244 pud_free_tlb(tlb, pud, start);
67245+#endif
67246+
67247 }
67248
67249 /*
67250@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
67251 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67252 i = 0;
67253
67254- do {
67255+ while (nr_pages) {
67256 struct vm_area_struct *vma;
67257
67258- vma = find_extend_vma(mm, start);
67259+ vma = find_vma(mm, start);
67260 if (!vma && in_gate_area(tsk, start)) {
67261 unsigned long pg = start & PAGE_MASK;
67262 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
67263@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
67264 continue;
67265 }
67266
67267- if (!vma ||
67268+ if (!vma || start < vma->vm_start ||
67269 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67270 !(vm_flags & vma->vm_flags))
67271 return i ? : -EFAULT;
67272@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
67273 start += PAGE_SIZE;
67274 nr_pages--;
67275 } while (nr_pages && start < vma->vm_end);
67276- } while (nr_pages);
67277+ }
67278 return i;
67279 }
67280
67281@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
67282 page_add_file_rmap(page);
67283 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67284
67285+#ifdef CONFIG_PAX_SEGMEXEC
67286+ pax_mirror_file_pte(vma, addr, page, ptl);
67287+#endif
67288+
67289 retval = 0;
67290 pte_unmap_unlock(pte, ptl);
67291 return retval;
67292@@ -1560,10 +1571,22 @@ out:
67293 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67294 struct page *page)
67295 {
67296+
67297+#ifdef CONFIG_PAX_SEGMEXEC
67298+ struct vm_area_struct *vma_m;
67299+#endif
67300+
67301 if (addr < vma->vm_start || addr >= vma->vm_end)
67302 return -EFAULT;
67303 if (!page_count(page))
67304 return -EINVAL;
67305+
67306+#ifdef CONFIG_PAX_SEGMEXEC
67307+ vma_m = pax_find_mirror_vma(vma);
67308+ if (vma_m)
67309+ vma_m->vm_flags |= VM_INSERTPAGE;
67310+#endif
67311+
67312 vma->vm_flags |= VM_INSERTPAGE;
67313 return insert_page(vma, addr, page, vma->vm_page_prot);
67314 }
67315@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
67316 unsigned long pfn)
67317 {
67318 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67319+ BUG_ON(vma->vm_mirror);
67320
67321 if (addr < vma->vm_start || addr >= vma->vm_end)
67322 return -EFAULT;
67323@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
67324 copy_user_highpage(dst, src, va, vma);
67325 }
67326
67327+#ifdef CONFIG_PAX_SEGMEXEC
67328+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67329+{
67330+ struct mm_struct *mm = vma->vm_mm;
67331+ spinlock_t *ptl;
67332+ pte_t *pte, entry;
67333+
67334+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67335+ entry = *pte;
67336+ if (!pte_present(entry)) {
67337+ if (!pte_none(entry)) {
67338+ BUG_ON(pte_file(entry));
67339+ free_swap_and_cache(pte_to_swp_entry(entry));
67340+ pte_clear_not_present_full(mm, address, pte, 0);
67341+ }
67342+ } else {
67343+ struct page *page;
67344+
67345+ flush_cache_page(vma, address, pte_pfn(entry));
67346+ entry = ptep_clear_flush(vma, address, pte);
67347+ BUG_ON(pte_dirty(entry));
67348+ page = vm_normal_page(vma, address, entry);
67349+ if (page) {
67350+ update_hiwater_rss(mm);
67351+ if (PageAnon(page))
67352+ dec_mm_counter(mm, anon_rss);
67353+ else
67354+ dec_mm_counter(mm, file_rss);
67355+ page_remove_rmap(page);
67356+ page_cache_release(page);
67357+ }
67358+ }
67359+ pte_unmap_unlock(pte, ptl);
67360+}
67361+
67362+/* PaX: if vma is mirrored, synchronize the mirror's PTE
67363+ *
67364+ * the ptl of the lower mapped page is held on entry and is not released on exit
67365+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67366+ */
67367+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67368+{
67369+ struct mm_struct *mm = vma->vm_mm;
67370+ unsigned long address_m;
67371+ spinlock_t *ptl_m;
67372+ struct vm_area_struct *vma_m;
67373+ pmd_t *pmd_m;
67374+ pte_t *pte_m, entry_m;
67375+
67376+ BUG_ON(!page_m || !PageAnon(page_m));
67377+
67378+ vma_m = pax_find_mirror_vma(vma);
67379+ if (!vma_m)
67380+ return;
67381+
67382+ BUG_ON(!PageLocked(page_m));
67383+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67384+ address_m = address + SEGMEXEC_TASK_SIZE;
67385+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67386+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67387+ ptl_m = pte_lockptr(mm, pmd_m);
67388+ if (ptl != ptl_m) {
67389+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67390+ if (!pte_none(*pte_m))
67391+ goto out;
67392+ }
67393+
67394+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67395+ page_cache_get(page_m);
67396+ page_add_anon_rmap(page_m, vma_m, address_m);
67397+ inc_mm_counter(mm, anon_rss);
67398+ set_pte_at(mm, address_m, pte_m, entry_m);
67399+ update_mmu_cache(vma_m, address_m, entry_m);
67400+out:
67401+ if (ptl != ptl_m)
67402+ spin_unlock(ptl_m);
67403+ pte_unmap_nested(pte_m);
67404+ unlock_page(page_m);
67405+}
67406+
67407+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67408+{
67409+ struct mm_struct *mm = vma->vm_mm;
67410+ unsigned long address_m;
67411+ spinlock_t *ptl_m;
67412+ struct vm_area_struct *vma_m;
67413+ pmd_t *pmd_m;
67414+ pte_t *pte_m, entry_m;
67415+
67416+ BUG_ON(!page_m || PageAnon(page_m));
67417+
67418+ vma_m = pax_find_mirror_vma(vma);
67419+ if (!vma_m)
67420+ return;
67421+
67422+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67423+ address_m = address + SEGMEXEC_TASK_SIZE;
67424+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67425+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67426+ ptl_m = pte_lockptr(mm, pmd_m);
67427+ if (ptl != ptl_m) {
67428+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67429+ if (!pte_none(*pte_m))
67430+ goto out;
67431+ }
67432+
67433+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67434+ page_cache_get(page_m);
67435+ page_add_file_rmap(page_m);
67436+ inc_mm_counter(mm, file_rss);
67437+ set_pte_at(mm, address_m, pte_m, entry_m);
67438+ update_mmu_cache(vma_m, address_m, entry_m);
67439+out:
67440+ if (ptl != ptl_m)
67441+ spin_unlock(ptl_m);
67442+ pte_unmap_nested(pte_m);
67443+}
67444+
67445+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67446+{
67447+ struct mm_struct *mm = vma->vm_mm;
67448+ unsigned long address_m;
67449+ spinlock_t *ptl_m;
67450+ struct vm_area_struct *vma_m;
67451+ pmd_t *pmd_m;
67452+ pte_t *pte_m, entry_m;
67453+
67454+ vma_m = pax_find_mirror_vma(vma);
67455+ if (!vma_m)
67456+ return;
67457+
67458+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67459+ address_m = address + SEGMEXEC_TASK_SIZE;
67460+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67461+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67462+ ptl_m = pte_lockptr(mm, pmd_m);
67463+ if (ptl != ptl_m) {
67464+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67465+ if (!pte_none(*pte_m))
67466+ goto out;
67467+ }
67468+
67469+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67470+ set_pte_at(mm, address_m, pte_m, entry_m);
67471+out:
67472+ if (ptl != ptl_m)
67473+ spin_unlock(ptl_m);
67474+ pte_unmap_nested(pte_m);
67475+}
67476+
67477+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67478+{
67479+ struct page *page_m;
67480+ pte_t entry;
67481+
67482+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67483+ goto out;
67484+
67485+ entry = *pte;
67486+ page_m = vm_normal_page(vma, address, entry);
67487+ if (!page_m)
67488+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67489+ else if (PageAnon(page_m)) {
67490+ if (pax_find_mirror_vma(vma)) {
67491+ pte_unmap_unlock(pte, ptl);
67492+ lock_page(page_m);
67493+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67494+ if (pte_same(entry, *pte))
67495+ pax_mirror_anon_pte(vma, address, page_m, ptl);
67496+ else
67497+ unlock_page(page_m);
67498+ }
67499+ } else
67500+ pax_mirror_file_pte(vma, address, page_m, ptl);
67501+
67502+out:
67503+ pte_unmap_unlock(pte, ptl);
67504+}
67505+#endif
67506+
67507 /*
67508 * This routine handles present pages, when users try to write
67509 * to a shared page. It is done by copying the page to a new address
67510@@ -2156,6 +2360,12 @@ gotten:
67511 */
67512 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67513 if (likely(pte_same(*page_table, orig_pte))) {
67514+
67515+#ifdef CONFIG_PAX_SEGMEXEC
67516+ if (pax_find_mirror_vma(vma))
67517+ BUG_ON(!trylock_page(new_page));
67518+#endif
67519+
67520 if (old_page) {
67521 if (!PageAnon(old_page)) {
67522 dec_mm_counter(mm, file_rss);
67523@@ -2207,6 +2417,10 @@ gotten:
67524 page_remove_rmap(old_page);
67525 }
67526
67527+#ifdef CONFIG_PAX_SEGMEXEC
67528+ pax_mirror_anon_pte(vma, address, new_page, ptl);
67529+#endif
67530+
67531 /* Free the old page.. */
67532 new_page = old_page;
67533 ret |= VM_FAULT_WRITE;
67534@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
67535 swap_free(entry);
67536 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67537 try_to_free_swap(page);
67538+
67539+#ifdef CONFIG_PAX_SEGMEXEC
67540+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67541+#endif
67542+
67543 unlock_page(page);
67544
67545 if (flags & FAULT_FLAG_WRITE) {
67546@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
67547
67548 /* No need to invalidate - it was non-present before */
67549 update_mmu_cache(vma, address, pte);
67550+
67551+#ifdef CONFIG_PAX_SEGMEXEC
67552+ pax_mirror_anon_pte(vma, address, page, ptl);
67553+#endif
67554+
67555 unlock:
67556 pte_unmap_unlock(page_table, ptl);
67557 out:
67558@@ -2632,40 +2856,6 @@ out_release:
67559 }
67560
67561 /*
67562- * This is like a special single-page "expand_{down|up}wards()",
67563- * except we must first make sure that 'address{-|+}PAGE_SIZE'
67564- * doesn't hit another vma.
67565- */
67566-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67567-{
67568- address &= PAGE_MASK;
67569- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67570- struct vm_area_struct *prev = vma->vm_prev;
67571-
67572- /*
67573- * Is there a mapping abutting this one below?
67574- *
67575- * That's only ok if it's the same stack mapping
67576- * that has gotten split..
67577- */
67578- if (prev && prev->vm_end == address)
67579- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67580-
67581- expand_stack(vma, address - PAGE_SIZE);
67582- }
67583- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67584- struct vm_area_struct *next = vma->vm_next;
67585-
67586- /* As VM_GROWSDOWN but s/below/above/ */
67587- if (next && next->vm_start == address + PAGE_SIZE)
67588- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67589-
67590- expand_upwards(vma, address + PAGE_SIZE);
67591- }
67592- return 0;
67593-}
67594-
67595-/*
67596 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67597 * but allow concurrent faults), and pte mapped but not yet locked.
67598 * We return with mmap_sem still held, but pte unmapped and unlocked.
67599@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
67600 unsigned long address, pte_t *page_table, pmd_t *pmd,
67601 unsigned int flags)
67602 {
67603- struct page *page;
67604+ struct page *page = NULL;
67605 spinlock_t *ptl;
67606 pte_t entry;
67607
67608- pte_unmap(page_table);
67609-
67610- /* Check if we need to add a guard page to the stack */
67611- if (check_stack_guard_page(vma, address) < 0)
67612- return VM_FAULT_SIGBUS;
67613-
67614- /* Use the zero-page for reads */
67615 if (!(flags & FAULT_FLAG_WRITE)) {
67616 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67617 vma->vm_page_prot));
67618- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67619+ ptl = pte_lockptr(mm, pmd);
67620+ spin_lock(ptl);
67621 if (!pte_none(*page_table))
67622 goto unlock;
67623 goto setpte;
67624 }
67625
67626 /* Allocate our own private page. */
67627+ pte_unmap(page_table);
67628+
67629 if (unlikely(anon_vma_prepare(vma)))
67630 goto oom;
67631 page = alloc_zeroed_user_highpage_movable(vma, address);
67632@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
67633 if (!pte_none(*page_table))
67634 goto release;
67635
67636+#ifdef CONFIG_PAX_SEGMEXEC
67637+ if (pax_find_mirror_vma(vma))
67638+ BUG_ON(!trylock_page(page));
67639+#endif
67640+
67641 inc_mm_counter(mm, anon_rss);
67642 page_add_new_anon_rmap(page, vma, address);
67643 setpte:
67644@@ -2720,6 +2911,12 @@ setpte:
67645
67646 /* No need to invalidate - it was non-present before */
67647 update_mmu_cache(vma, address, entry);
67648+
67649+#ifdef CONFIG_PAX_SEGMEXEC
67650+ if (page)
67651+ pax_mirror_anon_pte(vma, address, page, ptl);
67652+#endif
67653+
67654 unlock:
67655 pte_unmap_unlock(page_table, ptl);
67656 return 0;
67657@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
67658 */
67659 /* Only go through if we didn't race with anybody else... */
67660 if (likely(pte_same(*page_table, orig_pte))) {
67661+
67662+#ifdef CONFIG_PAX_SEGMEXEC
67663+ if (anon && pax_find_mirror_vma(vma))
67664+ BUG_ON(!trylock_page(page));
67665+#endif
67666+
67667 flush_icache_page(vma, page);
67668 entry = mk_pte(page, vma->vm_page_prot);
67669 if (flags & FAULT_FLAG_WRITE)
67670@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
67671
67672 /* no need to invalidate: a not-present page won't be cached */
67673 update_mmu_cache(vma, address, entry);
67674+
67675+#ifdef CONFIG_PAX_SEGMEXEC
67676+ if (anon)
67677+ pax_mirror_anon_pte(vma, address, page, ptl);
67678+ else
67679+ pax_mirror_file_pte(vma, address, page, ptl);
67680+#endif
67681+
67682 } else {
67683 if (charged)
67684 mem_cgroup_uncharge_page(page);
67685@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
67686 if (flags & FAULT_FLAG_WRITE)
67687 flush_tlb_page(vma, address);
67688 }
67689+
67690+#ifdef CONFIG_PAX_SEGMEXEC
67691+ pax_mirror_pte(vma, address, pte, pmd, ptl);
67692+ return 0;
67693+#endif
67694+
67695 unlock:
67696 pte_unmap_unlock(pte, ptl);
67697 return 0;
67698@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
67699 pmd_t *pmd;
67700 pte_t *pte;
67701
67702+#ifdef CONFIG_PAX_SEGMEXEC
67703+ struct vm_area_struct *vma_m;
67704+#endif
67705+
67706 __set_current_state(TASK_RUNNING);
67707
67708 count_vm_event(PGFAULT);
67709@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
67710 if (unlikely(is_vm_hugetlb_page(vma)))
67711 return hugetlb_fault(mm, vma, address, flags);
67712
67713+#ifdef CONFIG_PAX_SEGMEXEC
67714+ vma_m = pax_find_mirror_vma(vma);
67715+ if (vma_m) {
67716+ unsigned long address_m;
67717+ pgd_t *pgd_m;
67718+ pud_t *pud_m;
67719+ pmd_t *pmd_m;
67720+
67721+ if (vma->vm_start > vma_m->vm_start) {
67722+ address_m = address;
67723+ address -= SEGMEXEC_TASK_SIZE;
67724+ vma = vma_m;
67725+ } else
67726+ address_m = address + SEGMEXEC_TASK_SIZE;
67727+
67728+ pgd_m = pgd_offset(mm, address_m);
67729+ pud_m = pud_alloc(mm, pgd_m, address_m);
67730+ if (!pud_m)
67731+ return VM_FAULT_OOM;
67732+ pmd_m = pmd_alloc(mm, pud_m, address_m);
67733+ if (!pmd_m)
67734+ return VM_FAULT_OOM;
67735+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
67736+ return VM_FAULT_OOM;
67737+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67738+ }
67739+#endif
67740+
67741 pgd = pgd_offset(mm, address);
67742 pud = pud_alloc(mm, pgd, address);
67743 if (!pud)
67744@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
67745 gate_vma.vm_start = FIXADDR_USER_START;
67746 gate_vma.vm_end = FIXADDR_USER_END;
67747 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67748- gate_vma.vm_page_prot = __P101;
67749+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67750 /*
67751 * Make sure the vDSO gets into every core dump.
67752 * Dumping its contents makes post-mortem fully interpretable later
67753diff -urNp linux-2.6.32.45/mm/memory-failure.c linux-2.6.32.45/mm/memory-failure.c
67754--- linux-2.6.32.45/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
67755+++ linux-2.6.32.45/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
67756@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
67757
67758 int sysctl_memory_failure_recovery __read_mostly = 1;
67759
67760-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67761+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67762
67763 /*
67764 * Send all the processes who have the page mapped an ``action optional''
67765@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
67766 return 0;
67767 }
67768
67769- atomic_long_add(1, &mce_bad_pages);
67770+ atomic_long_add_unchecked(1, &mce_bad_pages);
67771
67772 /*
67773 * We need/can do nothing about count=0 pages.
67774diff -urNp linux-2.6.32.45/mm/mempolicy.c linux-2.6.32.45/mm/mempolicy.c
67775--- linux-2.6.32.45/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
67776+++ linux-2.6.32.45/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
67777@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
67778 struct vm_area_struct *next;
67779 int err;
67780
67781+#ifdef CONFIG_PAX_SEGMEXEC
67782+ struct vm_area_struct *vma_m;
67783+#endif
67784+
67785 err = 0;
67786 for (; vma && vma->vm_start < end; vma = next) {
67787 next = vma->vm_next;
67788@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
67789 err = policy_vma(vma, new);
67790 if (err)
67791 break;
67792+
67793+#ifdef CONFIG_PAX_SEGMEXEC
67794+ vma_m = pax_find_mirror_vma(vma);
67795+ if (vma_m) {
67796+ err = policy_vma(vma_m, new);
67797+ if (err)
67798+ break;
67799+ }
67800+#endif
67801+
67802 }
67803 return err;
67804 }
67805@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
67806
67807 if (end < start)
67808 return -EINVAL;
67809+
67810+#ifdef CONFIG_PAX_SEGMEXEC
67811+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67812+ if (end > SEGMEXEC_TASK_SIZE)
67813+ return -EINVAL;
67814+ } else
67815+#endif
67816+
67817+ if (end > TASK_SIZE)
67818+ return -EINVAL;
67819+
67820 if (end == start)
67821 return 0;
67822
67823@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67824 if (!mm)
67825 return -EINVAL;
67826
67827+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67828+ if (mm != current->mm &&
67829+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67830+ err = -EPERM;
67831+ goto out;
67832+ }
67833+#endif
67834+
67835 /*
67836 * Check if this process has the right to modify the specified
67837 * process. The right exists if the process has administrative
67838@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67839 rcu_read_lock();
67840 tcred = __task_cred(task);
67841 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67842- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67843- !capable(CAP_SYS_NICE)) {
67844+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67845 rcu_read_unlock();
67846 err = -EPERM;
67847 goto out;
67848@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
67849
67850 if (file) {
67851 seq_printf(m, " file=");
67852- seq_path(m, &file->f_path, "\n\t= ");
67853+ seq_path(m, &file->f_path, "\n\t\\= ");
67854 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67855 seq_printf(m, " heap");
67856 } else if (vma->vm_start <= mm->start_stack &&
67857diff -urNp linux-2.6.32.45/mm/migrate.c linux-2.6.32.45/mm/migrate.c
67858--- linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
67859+++ linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
67860@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
67861 unsigned long chunk_start;
67862 int err;
67863
67864+ pax_track_stack();
67865+
67866 task_nodes = cpuset_mems_allowed(task);
67867
67868 err = -ENOMEM;
67869@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67870 if (!mm)
67871 return -EINVAL;
67872
67873+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67874+ if (mm != current->mm &&
67875+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67876+ err = -EPERM;
67877+ goto out;
67878+ }
67879+#endif
67880+
67881 /*
67882 * Check if this process has the right to modify the specified
67883 * process. The right exists if the process has administrative
67884@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67885 rcu_read_lock();
67886 tcred = __task_cred(task);
67887 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67888- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67889- !capable(CAP_SYS_NICE)) {
67890+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67891 rcu_read_unlock();
67892 err = -EPERM;
67893 goto out;
67894diff -urNp linux-2.6.32.45/mm/mlock.c linux-2.6.32.45/mm/mlock.c
67895--- linux-2.6.32.45/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
67896+++ linux-2.6.32.45/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
67897@@ -13,6 +13,7 @@
67898 #include <linux/pagemap.h>
67899 #include <linux/mempolicy.h>
67900 #include <linux/syscalls.h>
67901+#include <linux/security.h>
67902 #include <linux/sched.h>
67903 #include <linux/module.h>
67904 #include <linux/rmap.h>
67905@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
67906 }
67907 }
67908
67909-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67910-{
67911- return (vma->vm_flags & VM_GROWSDOWN) &&
67912- (vma->vm_start == addr) &&
67913- !vma_stack_continue(vma->vm_prev, addr);
67914-}
67915-
67916 /**
67917 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
67918 * @vma: target vma
67919@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
67920 if (vma->vm_flags & VM_WRITE)
67921 gup_flags |= FOLL_WRITE;
67922
67923- /* We don't try to access the guard page of a stack vma */
67924- if (stack_guard_page(vma, start)) {
67925- addr += PAGE_SIZE;
67926- nr_pages--;
67927- }
67928-
67929 while (nr_pages > 0) {
67930 int i;
67931
67932@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
67933 {
67934 unsigned long nstart, end, tmp;
67935 struct vm_area_struct * vma, * prev;
67936- int error;
67937+ int error = -EINVAL;
67938
67939 len = PAGE_ALIGN(len);
67940 end = start + len;
67941@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
67942 return -EINVAL;
67943 if (end == start)
67944 return 0;
67945+ if (end > TASK_SIZE)
67946+ return -EINVAL;
67947+
67948 vma = find_vma_prev(current->mm, start, &prev);
67949 if (!vma || vma->vm_start > start)
67950 return -ENOMEM;
67951@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
67952 for (nstart = start ; ; ) {
67953 unsigned int newflags;
67954
67955+#ifdef CONFIG_PAX_SEGMEXEC
67956+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67957+ break;
67958+#endif
67959+
67960 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
67961
67962 newflags = vma->vm_flags | VM_LOCKED;
67963@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
67964 lock_limit >>= PAGE_SHIFT;
67965
67966 /* check against resource limits */
67967+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
67968 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
67969 error = do_mlock(start, len, 1);
67970 up_write(&current->mm->mmap_sem);
67971@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
67972 static int do_mlockall(int flags)
67973 {
67974 struct vm_area_struct * vma, * prev = NULL;
67975- unsigned int def_flags = 0;
67976
67977 if (flags & MCL_FUTURE)
67978- def_flags = VM_LOCKED;
67979- current->mm->def_flags = def_flags;
67980+ current->mm->def_flags |= VM_LOCKED;
67981+ else
67982+ current->mm->def_flags &= ~VM_LOCKED;
67983 if (flags == MCL_FUTURE)
67984 goto out;
67985
67986 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
67987- unsigned int newflags;
67988+ unsigned long newflags;
67989+
67990+#ifdef CONFIG_PAX_SEGMEXEC
67991+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67992+ break;
67993+#endif
67994
67995+ BUG_ON(vma->vm_end > TASK_SIZE);
67996 newflags = vma->vm_flags | VM_LOCKED;
67997 if (!(flags & MCL_CURRENT))
67998 newflags &= ~VM_LOCKED;
67999@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68000 lock_limit >>= PAGE_SHIFT;
68001
68002 ret = -ENOMEM;
68003+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68004 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68005 capable(CAP_IPC_LOCK))
68006 ret = do_mlockall(flags);
68007diff -urNp linux-2.6.32.45/mm/mmap.c linux-2.6.32.45/mm/mmap.c
68008--- linux-2.6.32.45/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
68009+++ linux-2.6.32.45/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
68010@@ -45,6 +45,16 @@
68011 #define arch_rebalance_pgtables(addr, len) (addr)
68012 #endif
68013
68014+static inline void verify_mm_writelocked(struct mm_struct *mm)
68015+{
68016+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68017+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68018+ up_read(&mm->mmap_sem);
68019+ BUG();
68020+ }
68021+#endif
68022+}
68023+
68024 static void unmap_region(struct mm_struct *mm,
68025 struct vm_area_struct *vma, struct vm_area_struct *prev,
68026 unsigned long start, unsigned long end);
68027@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
68028 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68029 *
68030 */
68031-pgprot_t protection_map[16] = {
68032+pgprot_t protection_map[16] __read_only = {
68033 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68034 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68035 };
68036
68037 pgprot_t vm_get_page_prot(unsigned long vm_flags)
68038 {
68039- return __pgprot(pgprot_val(protection_map[vm_flags &
68040+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68041 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68042 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68043+
68044+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68045+ if (!nx_enabled &&
68046+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68047+ (vm_flags & (VM_READ | VM_WRITE)))
68048+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68049+#endif
68050+
68051+ return prot;
68052 }
68053 EXPORT_SYMBOL(vm_get_page_prot);
68054
68055 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
68056 int sysctl_overcommit_ratio = 50; /* default is 50% */
68057 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68058+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68059 struct percpu_counter vm_committed_as;
68060
68061 /*
68062@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
68063 struct vm_area_struct *next = vma->vm_next;
68064
68065 might_sleep();
68066+ BUG_ON(vma->vm_mirror);
68067 if (vma->vm_ops && vma->vm_ops->close)
68068 vma->vm_ops->close(vma);
68069 if (vma->vm_file) {
68070@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68071 * not page aligned -Ram Gupta
68072 */
68073 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
68074+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68075 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68076 (mm->end_data - mm->start_data) > rlim)
68077 goto out;
68078@@ -704,6 +726,12 @@ static int
68079 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68080 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68081 {
68082+
68083+#ifdef CONFIG_PAX_SEGMEXEC
68084+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68085+ return 0;
68086+#endif
68087+
68088 if (is_mergeable_vma(vma, file, vm_flags) &&
68089 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68090 if (vma->vm_pgoff == vm_pgoff)
68091@@ -723,6 +751,12 @@ static int
68092 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68093 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68094 {
68095+
68096+#ifdef CONFIG_PAX_SEGMEXEC
68097+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68098+ return 0;
68099+#endif
68100+
68101 if (is_mergeable_vma(vma, file, vm_flags) &&
68102 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68103 pgoff_t vm_pglen;
68104@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
68105 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68106 struct vm_area_struct *prev, unsigned long addr,
68107 unsigned long end, unsigned long vm_flags,
68108- struct anon_vma *anon_vma, struct file *file,
68109+ struct anon_vma *anon_vma, struct file *file,
68110 pgoff_t pgoff, struct mempolicy *policy)
68111 {
68112 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68113 struct vm_area_struct *area, *next;
68114
68115+#ifdef CONFIG_PAX_SEGMEXEC
68116+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68117+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68118+
68119+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68120+#endif
68121+
68122 /*
68123 * We later require that vma->vm_flags == vm_flags,
68124 * so this tests vma->vm_flags & VM_SPECIAL, too.
68125@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
68126 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68127 next = next->vm_next;
68128
68129+#ifdef CONFIG_PAX_SEGMEXEC
68130+ if (prev)
68131+ prev_m = pax_find_mirror_vma(prev);
68132+ if (area)
68133+ area_m = pax_find_mirror_vma(area);
68134+ if (next)
68135+ next_m = pax_find_mirror_vma(next);
68136+#endif
68137+
68138 /*
68139 * Can it merge with the predecessor?
68140 */
68141@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
68142 /* cases 1, 6 */
68143 vma_adjust(prev, prev->vm_start,
68144 next->vm_end, prev->vm_pgoff, NULL);
68145- } else /* cases 2, 5, 7 */
68146+
68147+#ifdef CONFIG_PAX_SEGMEXEC
68148+ if (prev_m)
68149+ vma_adjust(prev_m, prev_m->vm_start,
68150+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68151+#endif
68152+
68153+ } else { /* cases 2, 5, 7 */
68154 vma_adjust(prev, prev->vm_start,
68155 end, prev->vm_pgoff, NULL);
68156+
68157+#ifdef CONFIG_PAX_SEGMEXEC
68158+ if (prev_m)
68159+ vma_adjust(prev_m, prev_m->vm_start,
68160+ end_m, prev_m->vm_pgoff, NULL);
68161+#endif
68162+
68163+ }
68164 return prev;
68165 }
68166
68167@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
68168 mpol_equal(policy, vma_policy(next)) &&
68169 can_vma_merge_before(next, vm_flags,
68170 anon_vma, file, pgoff+pglen)) {
68171- if (prev && addr < prev->vm_end) /* case 4 */
68172+ if (prev && addr < prev->vm_end) { /* case 4 */
68173 vma_adjust(prev, prev->vm_start,
68174 addr, prev->vm_pgoff, NULL);
68175- else /* cases 3, 8 */
68176+
68177+#ifdef CONFIG_PAX_SEGMEXEC
68178+ if (prev_m)
68179+ vma_adjust(prev_m, prev_m->vm_start,
68180+ addr_m, prev_m->vm_pgoff, NULL);
68181+#endif
68182+
68183+ } else { /* cases 3, 8 */
68184 vma_adjust(area, addr, next->vm_end,
68185 next->vm_pgoff - pglen, NULL);
68186+
68187+#ifdef CONFIG_PAX_SEGMEXEC
68188+ if (area_m)
68189+ vma_adjust(area_m, addr_m, next_m->vm_end,
68190+ next_m->vm_pgoff - pglen, NULL);
68191+#endif
68192+
68193+ }
68194 return area;
68195 }
68196
68197@@ -898,14 +978,11 @@ none:
68198 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68199 struct file *file, long pages)
68200 {
68201- const unsigned long stack_flags
68202- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68203-
68204 if (file) {
68205 mm->shared_vm += pages;
68206 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68207 mm->exec_vm += pages;
68208- } else if (flags & stack_flags)
68209+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68210 mm->stack_vm += pages;
68211 if (flags & (VM_RESERVED|VM_IO))
68212 mm->reserved_vm += pages;
68213@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
68214 * (the exception is when the underlying filesystem is noexec
68215 * mounted, in which case we dont add PROT_EXEC.)
68216 */
68217- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68218+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68219 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68220 prot |= PROT_EXEC;
68221
68222@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
68223 /* Obtain the address to map to. we verify (or select) it and ensure
68224 * that it represents a valid section of the address space.
68225 */
68226- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68227+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68228 if (addr & ~PAGE_MASK)
68229 return addr;
68230
68231@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
68232 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68233 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68234
68235+#ifdef CONFIG_PAX_MPROTECT
68236+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68237+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68238+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68239+ gr_log_rwxmmap(file);
68240+
68241+#ifdef CONFIG_PAX_EMUPLT
68242+ vm_flags &= ~VM_EXEC;
68243+#else
68244+ return -EPERM;
68245+#endif
68246+
68247+ }
68248+
68249+ if (!(vm_flags & VM_EXEC))
68250+ vm_flags &= ~VM_MAYEXEC;
68251+#else
68252+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68253+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68254+#endif
68255+ else
68256+ vm_flags &= ~VM_MAYWRITE;
68257+ }
68258+#endif
68259+
68260+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68261+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68262+ vm_flags &= ~VM_PAGEEXEC;
68263+#endif
68264+
68265 if (flags & MAP_LOCKED)
68266 if (!can_do_mlock())
68267 return -EPERM;
68268@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
68269 locked += mm->locked_vm;
68270 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
68271 lock_limit >>= PAGE_SHIFT;
68272+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68273 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68274 return -EAGAIN;
68275 }
68276@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
68277 if (error)
68278 return error;
68279
68280+ if (!gr_acl_handle_mmap(file, prot))
68281+ return -EACCES;
68282+
68283 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68284 }
68285 EXPORT_SYMBOL(do_mmap_pgoff);
68286@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
68287 */
68288 int vma_wants_writenotify(struct vm_area_struct *vma)
68289 {
68290- unsigned int vm_flags = vma->vm_flags;
68291+ unsigned long vm_flags = vma->vm_flags;
68292
68293 /* If it was private or non-writable, the write bit is already clear */
68294- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68295+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68296 return 0;
68297
68298 /* The backer wishes to know when pages are first written to? */
68299@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
68300 unsigned long charged = 0;
68301 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68302
68303+#ifdef CONFIG_PAX_SEGMEXEC
68304+ struct vm_area_struct *vma_m = NULL;
68305+#endif
68306+
68307+ /*
68308+ * mm->mmap_sem is required to protect against another thread
68309+ * changing the mappings in case we sleep.
68310+ */
68311+ verify_mm_writelocked(mm);
68312+
68313 /* Clear old maps */
68314 error = -ENOMEM;
68315-munmap_back:
68316 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68317 if (vma && vma->vm_start < addr + len) {
68318 if (do_munmap(mm, addr, len))
68319 return -ENOMEM;
68320- goto munmap_back;
68321+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68322+ BUG_ON(vma && vma->vm_start < addr + len);
68323 }
68324
68325 /* Check against address space limit. */
68326@@ -1173,6 +1294,16 @@ munmap_back:
68327 goto unacct_error;
68328 }
68329
68330+#ifdef CONFIG_PAX_SEGMEXEC
68331+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68332+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68333+ if (!vma_m) {
68334+ error = -ENOMEM;
68335+ goto free_vma;
68336+ }
68337+ }
68338+#endif
68339+
68340 vma->vm_mm = mm;
68341 vma->vm_start = addr;
68342 vma->vm_end = addr + len;
68343@@ -1195,6 +1326,19 @@ munmap_back:
68344 error = file->f_op->mmap(file, vma);
68345 if (error)
68346 goto unmap_and_free_vma;
68347+
68348+#ifdef CONFIG_PAX_SEGMEXEC
68349+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68350+ added_exe_file_vma(mm);
68351+#endif
68352+
68353+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68354+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68355+ vma->vm_flags |= VM_PAGEEXEC;
68356+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68357+ }
68358+#endif
68359+
68360 if (vm_flags & VM_EXECUTABLE)
68361 added_exe_file_vma(mm);
68362
68363@@ -1218,6 +1362,11 @@ munmap_back:
68364 vma_link(mm, vma, prev, rb_link, rb_parent);
68365 file = vma->vm_file;
68366
68367+#ifdef CONFIG_PAX_SEGMEXEC
68368+ if (vma_m)
68369+ pax_mirror_vma(vma_m, vma);
68370+#endif
68371+
68372 /* Once vma denies write, undo our temporary denial count */
68373 if (correct_wcount)
68374 atomic_inc(&inode->i_writecount);
68375@@ -1226,6 +1375,7 @@ out:
68376
68377 mm->total_vm += len >> PAGE_SHIFT;
68378 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68379+ track_exec_limit(mm, addr, addr + len, vm_flags);
68380 if (vm_flags & VM_LOCKED) {
68381 /*
68382 * makes pages present; downgrades, drops, reacquires mmap_sem
68383@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
68384 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68385 charged = 0;
68386 free_vma:
68387+
68388+#ifdef CONFIG_PAX_SEGMEXEC
68389+ if (vma_m)
68390+ kmem_cache_free(vm_area_cachep, vma_m);
68391+#endif
68392+
68393 kmem_cache_free(vm_area_cachep, vma);
68394 unacct_error:
68395 if (charged)
68396@@ -1255,6 +1411,44 @@ unacct_error:
68397 return error;
68398 }
68399
68400+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68401+{
68402+ if (!vma) {
68403+#ifdef CONFIG_STACK_GROWSUP
68404+ if (addr > sysctl_heap_stack_gap)
68405+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68406+ else
68407+ vma = find_vma(current->mm, 0);
68408+ if (vma && (vma->vm_flags & VM_GROWSUP))
68409+ return false;
68410+#endif
68411+ return true;
68412+ }
68413+
68414+ if (addr + len > vma->vm_start)
68415+ return false;
68416+
68417+ if (vma->vm_flags & VM_GROWSDOWN)
68418+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68419+#ifdef CONFIG_STACK_GROWSUP
68420+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68421+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68422+#endif
68423+
68424+ return true;
68425+}
68426+
68427+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68428+{
68429+ if (vma->vm_start < len)
68430+ return -ENOMEM;
68431+ if (!(vma->vm_flags & VM_GROWSDOWN))
68432+ return vma->vm_start - len;
68433+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
68434+ return vma->vm_start - len - sysctl_heap_stack_gap;
68435+ return -ENOMEM;
68436+}
68437+
68438 /* Get an address range which is currently unmapped.
68439 * For shmat() with addr=0.
68440 *
68441@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
68442 if (flags & MAP_FIXED)
68443 return addr;
68444
68445+#ifdef CONFIG_PAX_RANDMMAP
68446+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68447+#endif
68448+
68449 if (addr) {
68450 addr = PAGE_ALIGN(addr);
68451- vma = find_vma(mm, addr);
68452- if (TASK_SIZE - len >= addr &&
68453- (!vma || addr + len <= vma->vm_start))
68454- return addr;
68455+ if (TASK_SIZE - len >= addr) {
68456+ vma = find_vma(mm, addr);
68457+ if (check_heap_stack_gap(vma, addr, len))
68458+ return addr;
68459+ }
68460 }
68461 if (len > mm->cached_hole_size) {
68462- start_addr = addr = mm->free_area_cache;
68463+ start_addr = addr = mm->free_area_cache;
68464 } else {
68465- start_addr = addr = TASK_UNMAPPED_BASE;
68466- mm->cached_hole_size = 0;
68467+ start_addr = addr = mm->mmap_base;
68468+ mm->cached_hole_size = 0;
68469 }
68470
68471 full_search:
68472@@ -1303,34 +1502,40 @@ full_search:
68473 * Start a new search - just in case we missed
68474 * some holes.
68475 */
68476- if (start_addr != TASK_UNMAPPED_BASE) {
68477- addr = TASK_UNMAPPED_BASE;
68478- start_addr = addr;
68479+ if (start_addr != mm->mmap_base) {
68480+ start_addr = addr = mm->mmap_base;
68481 mm->cached_hole_size = 0;
68482 goto full_search;
68483 }
68484 return -ENOMEM;
68485 }
68486- if (!vma || addr + len <= vma->vm_start) {
68487- /*
68488- * Remember the place where we stopped the search:
68489- */
68490- mm->free_area_cache = addr + len;
68491- return addr;
68492- }
68493+ if (check_heap_stack_gap(vma, addr, len))
68494+ break;
68495 if (addr + mm->cached_hole_size < vma->vm_start)
68496 mm->cached_hole_size = vma->vm_start - addr;
68497 addr = vma->vm_end;
68498 }
68499+
68500+ /*
68501+ * Remember the place where we stopped the search:
68502+ */
68503+ mm->free_area_cache = addr + len;
68504+ return addr;
68505 }
68506 #endif
68507
68508 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68509 {
68510+
68511+#ifdef CONFIG_PAX_SEGMEXEC
68512+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68513+ return;
68514+#endif
68515+
68516 /*
68517 * Is this a new hole at the lowest possible address?
68518 */
68519- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68520+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68521 mm->free_area_cache = addr;
68522 mm->cached_hole_size = ~0UL;
68523 }
68524@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
68525 {
68526 struct vm_area_struct *vma;
68527 struct mm_struct *mm = current->mm;
68528- unsigned long addr = addr0;
68529+ unsigned long base = mm->mmap_base, addr = addr0;
68530
68531 /* requested length too big for entire address space */
68532 if (len > TASK_SIZE)
68533@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
68534 if (flags & MAP_FIXED)
68535 return addr;
68536
68537+#ifdef CONFIG_PAX_RANDMMAP
68538+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68539+#endif
68540+
68541 /* requesting a specific address */
68542 if (addr) {
68543 addr = PAGE_ALIGN(addr);
68544- vma = find_vma(mm, addr);
68545- if (TASK_SIZE - len >= addr &&
68546- (!vma || addr + len <= vma->vm_start))
68547- return addr;
68548+ if (TASK_SIZE - len >= addr) {
68549+ vma = find_vma(mm, addr);
68550+ if (check_heap_stack_gap(vma, addr, len))
68551+ return addr;
68552+ }
68553 }
68554
68555 /* check if free_area_cache is useful for us */
68556@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
68557 /* make sure it can fit in the remaining address space */
68558 if (addr > len) {
68559 vma = find_vma(mm, addr-len);
68560- if (!vma || addr <= vma->vm_start)
68561+ if (check_heap_stack_gap(vma, addr - len, len))
68562 /* remember the address as a hint for next time */
68563 return (mm->free_area_cache = addr-len);
68564 }
68565@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
68566 * return with success:
68567 */
68568 vma = find_vma(mm, addr);
68569- if (!vma || addr+len <= vma->vm_start)
68570+ if (check_heap_stack_gap(vma, addr, len))
68571 /* remember the address as a hint for next time */
68572 return (mm->free_area_cache = addr);
68573
68574@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
68575 mm->cached_hole_size = vma->vm_start - addr;
68576
68577 /* try just below the current vma->vm_start */
68578- addr = vma->vm_start-len;
68579- } while (len < vma->vm_start);
68580+ addr = skip_heap_stack_gap(vma, len);
68581+ } while (!IS_ERR_VALUE(addr));
68582
68583 bottomup:
68584 /*
68585@@ -1414,13 +1624,21 @@ bottomup:
68586 * can happen with large stack limits and large mmap()
68587 * allocations.
68588 */
68589+ mm->mmap_base = TASK_UNMAPPED_BASE;
68590+
68591+#ifdef CONFIG_PAX_RANDMMAP
68592+ if (mm->pax_flags & MF_PAX_RANDMMAP)
68593+ mm->mmap_base += mm->delta_mmap;
68594+#endif
68595+
68596+ mm->free_area_cache = mm->mmap_base;
68597 mm->cached_hole_size = ~0UL;
68598- mm->free_area_cache = TASK_UNMAPPED_BASE;
68599 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68600 /*
68601 * Restore the topdown base:
68602 */
68603- mm->free_area_cache = mm->mmap_base;
68604+ mm->mmap_base = base;
68605+ mm->free_area_cache = base;
68606 mm->cached_hole_size = ~0UL;
68607
68608 return addr;
68609@@ -1429,6 +1647,12 @@ bottomup:
68610
68611 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68612 {
68613+
68614+#ifdef CONFIG_PAX_SEGMEXEC
68615+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68616+ return;
68617+#endif
68618+
68619 /*
68620 * Is this a new hole at the highest possible address?
68621 */
68622@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
68623 mm->free_area_cache = addr;
68624
68625 /* dont allow allocations above current base */
68626- if (mm->free_area_cache > mm->mmap_base)
68627+ if (mm->free_area_cache > mm->mmap_base) {
68628 mm->free_area_cache = mm->mmap_base;
68629+ mm->cached_hole_size = ~0UL;
68630+ }
68631 }
68632
68633 unsigned long
68634@@ -1545,6 +1771,27 @@ out:
68635 return prev ? prev->vm_next : vma;
68636 }
68637
68638+#ifdef CONFIG_PAX_SEGMEXEC
68639+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68640+{
68641+ struct vm_area_struct *vma_m;
68642+
68643+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68644+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68645+ BUG_ON(vma->vm_mirror);
68646+ return NULL;
68647+ }
68648+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68649+ vma_m = vma->vm_mirror;
68650+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68651+ BUG_ON(vma->vm_file != vma_m->vm_file);
68652+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68653+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
68654+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68655+ return vma_m;
68656+}
68657+#endif
68658+
68659 /*
68660 * Verify that the stack growth is acceptable and
68661 * update accounting. This is shared with both the
68662@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
68663 return -ENOMEM;
68664
68665 /* Stack limit test */
68666+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
68667 if (size > rlim[RLIMIT_STACK].rlim_cur)
68668 return -ENOMEM;
68669
68670@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
68671 unsigned long limit;
68672 locked = mm->locked_vm + grow;
68673 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
68674+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68675 if (locked > limit && !capable(CAP_IPC_LOCK))
68676 return -ENOMEM;
68677 }
68678@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
68679 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68680 * vma is the last one with address > vma->vm_end. Have to extend vma.
68681 */
68682+#ifndef CONFIG_IA64
68683+static
68684+#endif
68685 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68686 {
68687 int error;
68688+ bool locknext;
68689
68690 if (!(vma->vm_flags & VM_GROWSUP))
68691 return -EFAULT;
68692
68693+ /* Also guard against wrapping around to address 0. */
68694+ if (address < PAGE_ALIGN(address+1))
68695+ address = PAGE_ALIGN(address+1);
68696+ else
68697+ return -ENOMEM;
68698+
68699 /*
68700 * We must make sure the anon_vma is allocated
68701 * so that the anon_vma locking is not a noop.
68702 */
68703 if (unlikely(anon_vma_prepare(vma)))
68704 return -ENOMEM;
68705+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68706+ if (locknext && anon_vma_prepare(vma->vm_next))
68707+ return -ENOMEM;
68708 anon_vma_lock(vma);
68709+ if (locknext)
68710+ anon_vma_lock(vma->vm_next);
68711
68712 /*
68713 * vma->vm_start/vm_end cannot change under us because the caller
68714 * is required to hold the mmap_sem in read mode. We need the
68715- * anon_vma lock to serialize against concurrent expand_stacks.
68716- * Also guard against wrapping around to address 0.
68717+ * anon_vma locks to serialize against concurrent expand_stacks
68718+ * and expand_upwards.
68719 */
68720- if (address < PAGE_ALIGN(address+4))
68721- address = PAGE_ALIGN(address+4);
68722- else {
68723- anon_vma_unlock(vma);
68724- return -ENOMEM;
68725- }
68726 error = 0;
68727
68728 /* Somebody else might have raced and expanded it already */
68729- if (address > vma->vm_end) {
68730+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68731+ error = -ENOMEM;
68732+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68733 unsigned long size, grow;
68734
68735 size = address - vma->vm_start;
68736@@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
68737 if (!error)
68738 vma->vm_end = address;
68739 }
68740+ if (locknext)
68741+ anon_vma_unlock(vma->vm_next);
68742 anon_vma_unlock(vma);
68743 return error;
68744 }
68745@@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
68746 unsigned long address)
68747 {
68748 int error;
68749+ bool lockprev = false;
68750+ struct vm_area_struct *prev;
68751
68752 /*
68753 * We must make sure the anon_vma is allocated
68754@@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
68755 if (error)
68756 return error;
68757
68758+ prev = vma->vm_prev;
68759+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68760+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68761+#endif
68762+ if (lockprev && anon_vma_prepare(prev))
68763+ return -ENOMEM;
68764+ if (lockprev)
68765+ anon_vma_lock(prev);
68766+
68767 anon_vma_lock(vma);
68768
68769 /*
68770@@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
68771 */
68772
68773 /* Somebody else might have raced and expanded it already */
68774- if (address < vma->vm_start) {
68775+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68776+ error = -ENOMEM;
68777+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68778 unsigned long size, grow;
68779
68780+#ifdef CONFIG_PAX_SEGMEXEC
68781+ struct vm_area_struct *vma_m;
68782+
68783+ vma_m = pax_find_mirror_vma(vma);
68784+#endif
68785+
68786 size = vma->vm_end - address;
68787 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68788
68789@@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
68790 if (!error) {
68791 vma->vm_start = address;
68792 vma->vm_pgoff -= grow;
68793+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68794+
68795+#ifdef CONFIG_PAX_SEGMEXEC
68796+ if (vma_m) {
68797+ vma_m->vm_start -= grow << PAGE_SHIFT;
68798+ vma_m->vm_pgoff -= grow;
68799+ }
68800+#endif
68801+
68802 }
68803 }
68804 anon_vma_unlock(vma);
68805+ if (lockprev)
68806+ anon_vma_unlock(prev);
68807 return error;
68808 }
68809
68810@@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
68811 do {
68812 long nrpages = vma_pages(vma);
68813
68814+#ifdef CONFIG_PAX_SEGMEXEC
68815+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
68816+ vma = remove_vma(vma);
68817+ continue;
68818+ }
68819+#endif
68820+
68821 mm->total_vm -= nrpages;
68822 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
68823 vma = remove_vma(vma);
68824@@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
68825 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
68826 vma->vm_prev = NULL;
68827 do {
68828+
68829+#ifdef CONFIG_PAX_SEGMEXEC
68830+ if (vma->vm_mirror) {
68831+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
68832+ vma->vm_mirror->vm_mirror = NULL;
68833+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
68834+ vma->vm_mirror = NULL;
68835+ }
68836+#endif
68837+
68838 rb_erase(&vma->vm_rb, &mm->mm_rb);
68839 mm->map_count--;
68840 tail_vma = vma;
68841@@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
68842 struct mempolicy *pol;
68843 struct vm_area_struct *new;
68844
68845+#ifdef CONFIG_PAX_SEGMEXEC
68846+ struct vm_area_struct *vma_m, *new_m = NULL;
68847+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
68848+#endif
68849+
68850 if (is_vm_hugetlb_page(vma) && (addr &
68851 ~(huge_page_mask(hstate_vma(vma)))))
68852 return -EINVAL;
68853
68854+#ifdef CONFIG_PAX_SEGMEXEC
68855+ vma_m = pax_find_mirror_vma(vma);
68856+
68857+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68858+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
68859+ if (mm->map_count >= sysctl_max_map_count-1)
68860+ return -ENOMEM;
68861+ } else
68862+#endif
68863+
68864 if (mm->map_count >= sysctl_max_map_count)
68865 return -ENOMEM;
68866
68867@@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
68868 if (!new)
68869 return -ENOMEM;
68870
68871+#ifdef CONFIG_PAX_SEGMEXEC
68872+ if (vma_m) {
68873+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68874+ if (!new_m) {
68875+ kmem_cache_free(vm_area_cachep, new);
68876+ return -ENOMEM;
68877+ }
68878+ }
68879+#endif
68880+
68881 /* most fields are the same, copy all, and then fixup */
68882 *new = *vma;
68883
68884@@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
68885 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
68886 }
68887
68888+#ifdef CONFIG_PAX_SEGMEXEC
68889+ if (vma_m) {
68890+ *new_m = *vma_m;
68891+ new_m->vm_mirror = new;
68892+ new->vm_mirror = new_m;
68893+
68894+ if (new_below)
68895+ new_m->vm_end = addr_m;
68896+ else {
68897+ new_m->vm_start = addr_m;
68898+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
68899+ }
68900+ }
68901+#endif
68902+
68903 pol = mpol_dup(vma_policy(vma));
68904 if (IS_ERR(pol)) {
68905+
68906+#ifdef CONFIG_PAX_SEGMEXEC
68907+ if (new_m)
68908+ kmem_cache_free(vm_area_cachep, new_m);
68909+#endif
68910+
68911 kmem_cache_free(vm_area_cachep, new);
68912 return PTR_ERR(pol);
68913 }
68914@@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
68915 else
68916 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
68917
68918+#ifdef CONFIG_PAX_SEGMEXEC
68919+ if (vma_m) {
68920+ mpol_get(pol);
68921+ vma_set_policy(new_m, pol);
68922+
68923+ if (new_m->vm_file) {
68924+ get_file(new_m->vm_file);
68925+ if (vma_m->vm_flags & VM_EXECUTABLE)
68926+ added_exe_file_vma(mm);
68927+ }
68928+
68929+ if (new_m->vm_ops && new_m->vm_ops->open)
68930+ new_m->vm_ops->open(new_m);
68931+
68932+ if (new_below)
68933+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
68934+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
68935+ else
68936+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
68937+ }
68938+#endif
68939+
68940 return 0;
68941 }
68942
68943@@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
68944 * work. This now handles partial unmappings.
68945 * Jeremy Fitzhardinge <jeremy@goop.org>
68946 */
68947+#ifdef CONFIG_PAX_SEGMEXEC
68948+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68949+{
68950+ int ret = __do_munmap(mm, start, len);
68951+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
68952+ return ret;
68953+
68954+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
68955+}
68956+
68957+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68958+#else
68959 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68960+#endif
68961 {
68962 unsigned long end;
68963 struct vm_area_struct *vma, *prev, *last;
68964
68965+ /*
68966+ * mm->mmap_sem is required to protect against another thread
68967+ * changing the mappings in case we sleep.
68968+ */
68969+ verify_mm_writelocked(mm);
68970+
68971 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
68972 return -EINVAL;
68973
68974@@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
68975 /* Fix up all other VM information */
68976 remove_vma_list(mm, vma);
68977
68978+ track_exec_limit(mm, start, end, 0UL);
68979+
68980 return 0;
68981 }
68982
68983@@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
68984
68985 profile_munmap(addr);
68986
68987+#ifdef CONFIG_PAX_SEGMEXEC
68988+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
68989+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
68990+ return -EINVAL;
68991+#endif
68992+
68993 down_write(&mm->mmap_sem);
68994 ret = do_munmap(mm, addr, len);
68995 up_write(&mm->mmap_sem);
68996 return ret;
68997 }
68998
68999-static inline void verify_mm_writelocked(struct mm_struct *mm)
69000-{
69001-#ifdef CONFIG_DEBUG_VM
69002- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69003- WARN_ON(1);
69004- up_read(&mm->mmap_sem);
69005- }
69006-#endif
69007-}
69008-
69009 /*
69010 * this is really a simplified "do_mmap". it only handles
69011 * anonymous maps. eventually we may be able to do some
69012@@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
69013 struct rb_node ** rb_link, * rb_parent;
69014 pgoff_t pgoff = addr >> PAGE_SHIFT;
69015 int error;
69016+ unsigned long charged;
69017
69018 len = PAGE_ALIGN(len);
69019 if (!len)
69020@@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
69021
69022 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69023
69024+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69025+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69026+ flags &= ~VM_EXEC;
69027+
69028+#ifdef CONFIG_PAX_MPROTECT
69029+ if (mm->pax_flags & MF_PAX_MPROTECT)
69030+ flags &= ~VM_MAYEXEC;
69031+#endif
69032+
69033+ }
69034+#endif
69035+
69036 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69037 if (error & ~PAGE_MASK)
69038 return error;
69039
69040+ charged = len >> PAGE_SHIFT;
69041+
69042 /*
69043 * mlock MCL_FUTURE?
69044 */
69045 if (mm->def_flags & VM_LOCKED) {
69046 unsigned long locked, lock_limit;
69047- locked = len >> PAGE_SHIFT;
69048+ locked = charged;
69049 locked += mm->locked_vm;
69050 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
69051 lock_limit >>= PAGE_SHIFT;
69052@@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
69053 /*
69054 * Clear old maps. this also does some error checking for us
69055 */
69056- munmap_back:
69057 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69058 if (vma && vma->vm_start < addr + len) {
69059 if (do_munmap(mm, addr, len))
69060 return -ENOMEM;
69061- goto munmap_back;
69062+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69063+ BUG_ON(vma && vma->vm_start < addr + len);
69064 }
69065
69066 /* Check against address space limits *after* clearing old maps... */
69067- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69068+ if (!may_expand_vm(mm, charged))
69069 return -ENOMEM;
69070
69071 if (mm->map_count > sysctl_max_map_count)
69072 return -ENOMEM;
69073
69074- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69075+ if (security_vm_enough_memory(charged))
69076 return -ENOMEM;
69077
69078 /* Can we just expand an old private anonymous mapping? */
69079@@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
69080 */
69081 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69082 if (!vma) {
69083- vm_unacct_memory(len >> PAGE_SHIFT);
69084+ vm_unacct_memory(charged);
69085 return -ENOMEM;
69086 }
69087
69088@@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
69089 vma->vm_page_prot = vm_get_page_prot(flags);
69090 vma_link(mm, vma, prev, rb_link, rb_parent);
69091 out:
69092- mm->total_vm += len >> PAGE_SHIFT;
69093+ mm->total_vm += charged;
69094 if (flags & VM_LOCKED) {
69095 if (!mlock_vma_pages_range(vma, addr, addr + len))
69096- mm->locked_vm += (len >> PAGE_SHIFT);
69097+ mm->locked_vm += charged;
69098 }
69099+ track_exec_limit(mm, addr, addr + len, flags);
69100 return addr;
69101 }
69102
69103@@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
69104 * Walk the list again, actually closing and freeing it,
69105 * with preemption enabled, without holding any MM locks.
69106 */
69107- while (vma)
69108+ while (vma) {
69109+ vma->vm_mirror = NULL;
69110 vma = remove_vma(vma);
69111+ }
69112
69113 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69114 }
69115@@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
69116 struct vm_area_struct * __vma, * prev;
69117 struct rb_node ** rb_link, * rb_parent;
69118
69119+#ifdef CONFIG_PAX_SEGMEXEC
69120+ struct vm_area_struct *vma_m = NULL;
69121+#endif
69122+
69123 /*
69124 * The vm_pgoff of a purely anonymous vma should be irrelevant
69125 * until its first write fault, when page's anon_vma and index
69126@@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
69127 if ((vma->vm_flags & VM_ACCOUNT) &&
69128 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69129 return -ENOMEM;
69130+
69131+#ifdef CONFIG_PAX_SEGMEXEC
69132+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69133+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69134+ if (!vma_m)
69135+ return -ENOMEM;
69136+ }
69137+#endif
69138+
69139 vma_link(mm, vma, prev, rb_link, rb_parent);
69140+
69141+#ifdef CONFIG_PAX_SEGMEXEC
69142+ if (vma_m)
69143+ pax_mirror_vma(vma_m, vma);
69144+#endif
69145+
69146 return 0;
69147 }
69148
69149@@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
69150 struct rb_node **rb_link, *rb_parent;
69151 struct mempolicy *pol;
69152
69153+ BUG_ON(vma->vm_mirror);
69154+
69155 /*
69156 * If anonymous vma has not yet been faulted, update new pgoff
69157 * to match new location, to increase its chance of merging.
69158@@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
69159 return new_vma;
69160 }
69161
69162+#ifdef CONFIG_PAX_SEGMEXEC
69163+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69164+{
69165+ struct vm_area_struct *prev_m;
69166+ struct rb_node **rb_link_m, *rb_parent_m;
69167+ struct mempolicy *pol_m;
69168+
69169+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69170+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69171+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69172+ *vma_m = *vma;
69173+ pol_m = vma_policy(vma_m);
69174+ mpol_get(pol_m);
69175+ vma_set_policy(vma_m, pol_m);
69176+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69177+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69178+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69179+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69180+ if (vma_m->vm_file)
69181+ get_file(vma_m->vm_file);
69182+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69183+ vma_m->vm_ops->open(vma_m);
69184+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69185+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69186+ vma_m->vm_mirror = vma;
69187+ vma->vm_mirror = vma_m;
69188+}
69189+#endif
69190+
69191 /*
69192 * Return true if the calling process may expand its vm space by the passed
69193 * number of pages
69194@@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
69195 unsigned long lim;
69196
69197 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
69198-
69199+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69200 if (cur + npages > lim)
69201 return 0;
69202 return 1;
69203@@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
69204 vma->vm_start = addr;
69205 vma->vm_end = addr + len;
69206
69207+#ifdef CONFIG_PAX_MPROTECT
69208+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69209+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69210+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69211+ return -EPERM;
69212+ if (!(vm_flags & VM_EXEC))
69213+ vm_flags &= ~VM_MAYEXEC;
69214+#else
69215+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69216+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69217+#endif
69218+ else
69219+ vm_flags &= ~VM_MAYWRITE;
69220+ }
69221+#endif
69222+
69223 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69224 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69225
69226diff -urNp linux-2.6.32.45/mm/mprotect.c linux-2.6.32.45/mm/mprotect.c
69227--- linux-2.6.32.45/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
69228+++ linux-2.6.32.45/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
69229@@ -24,10 +24,16 @@
69230 #include <linux/mmu_notifier.h>
69231 #include <linux/migrate.h>
69232 #include <linux/perf_event.h>
69233+
69234+#ifdef CONFIG_PAX_MPROTECT
69235+#include <linux/elf.h>
69236+#endif
69237+
69238 #include <asm/uaccess.h>
69239 #include <asm/pgtable.h>
69240 #include <asm/cacheflush.h>
69241 #include <asm/tlbflush.h>
69242+#include <asm/mmu_context.h>
69243
69244 #ifndef pgprot_modify
69245 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69246@@ -132,6 +138,48 @@ static void change_protection(struct vm_
69247 flush_tlb_range(vma, start, end);
69248 }
69249
69250+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69251+/* called while holding the mmap semaphore for writing except stack expansion */
69252+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69253+{
69254+ unsigned long oldlimit, newlimit = 0UL;
69255+
69256+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
69257+ return;
69258+
69259+ spin_lock(&mm->page_table_lock);
69260+ oldlimit = mm->context.user_cs_limit;
69261+ if ((prot & VM_EXEC) && oldlimit < end)
69262+ /* USER_CS limit moved up */
69263+ newlimit = end;
69264+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69265+ /* USER_CS limit moved down */
69266+ newlimit = start;
69267+
69268+ if (newlimit) {
69269+ mm->context.user_cs_limit = newlimit;
69270+
69271+#ifdef CONFIG_SMP
69272+ wmb();
69273+ cpus_clear(mm->context.cpu_user_cs_mask);
69274+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69275+#endif
69276+
69277+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69278+ }
69279+ spin_unlock(&mm->page_table_lock);
69280+ if (newlimit == end) {
69281+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69282+
69283+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69284+ if (is_vm_hugetlb_page(vma))
69285+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69286+ else
69287+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69288+ }
69289+}
69290+#endif
69291+
69292 int
69293 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69294 unsigned long start, unsigned long end, unsigned long newflags)
69295@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
69296 int error;
69297 int dirty_accountable = 0;
69298
69299+#ifdef CONFIG_PAX_SEGMEXEC
69300+ struct vm_area_struct *vma_m = NULL;
69301+ unsigned long start_m, end_m;
69302+
69303+ start_m = start + SEGMEXEC_TASK_SIZE;
69304+ end_m = end + SEGMEXEC_TASK_SIZE;
69305+#endif
69306+
69307 if (newflags == oldflags) {
69308 *pprev = vma;
69309 return 0;
69310 }
69311
69312+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69313+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69314+
69315+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69316+ return -ENOMEM;
69317+
69318+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69319+ return -ENOMEM;
69320+ }
69321+
69322 /*
69323 * If we make a private mapping writable we increase our commit;
69324 * but (without finer accounting) cannot reduce our commit if we
69325@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
69326 }
69327 }
69328
69329+#ifdef CONFIG_PAX_SEGMEXEC
69330+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69331+ if (start != vma->vm_start) {
69332+ error = split_vma(mm, vma, start, 1);
69333+ if (error)
69334+ goto fail;
69335+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69336+ *pprev = (*pprev)->vm_next;
69337+ }
69338+
69339+ if (end != vma->vm_end) {
69340+ error = split_vma(mm, vma, end, 0);
69341+ if (error)
69342+ goto fail;
69343+ }
69344+
69345+ if (pax_find_mirror_vma(vma)) {
69346+ error = __do_munmap(mm, start_m, end_m - start_m);
69347+ if (error)
69348+ goto fail;
69349+ } else {
69350+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69351+ if (!vma_m) {
69352+ error = -ENOMEM;
69353+ goto fail;
69354+ }
69355+ vma->vm_flags = newflags;
69356+ pax_mirror_vma(vma_m, vma);
69357+ }
69358+ }
69359+#endif
69360+
69361 /*
69362 * First try to merge with previous and/or next vma.
69363 */
69364@@ -195,9 +293,21 @@ success:
69365 * vm_flags and vm_page_prot are protected by the mmap_sem
69366 * held in write mode.
69367 */
69368+
69369+#ifdef CONFIG_PAX_SEGMEXEC
69370+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69371+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69372+#endif
69373+
69374 vma->vm_flags = newflags;
69375+
69376+#ifdef CONFIG_PAX_MPROTECT
69377+ if (mm->binfmt && mm->binfmt->handle_mprotect)
69378+ mm->binfmt->handle_mprotect(vma, newflags);
69379+#endif
69380+
69381 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69382- vm_get_page_prot(newflags));
69383+ vm_get_page_prot(vma->vm_flags));
69384
69385 if (vma_wants_writenotify(vma)) {
69386 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69387@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69388 end = start + len;
69389 if (end <= start)
69390 return -ENOMEM;
69391+
69392+#ifdef CONFIG_PAX_SEGMEXEC
69393+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69394+ if (end > SEGMEXEC_TASK_SIZE)
69395+ return -EINVAL;
69396+ } else
69397+#endif
69398+
69399+ if (end > TASK_SIZE)
69400+ return -EINVAL;
69401+
69402 if (!arch_validate_prot(prot))
69403 return -EINVAL;
69404
69405@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69406 /*
69407 * Does the application expect PROT_READ to imply PROT_EXEC:
69408 */
69409- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69410+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69411 prot |= PROT_EXEC;
69412
69413 vm_flags = calc_vm_prot_bits(prot);
69414@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69415 if (start > vma->vm_start)
69416 prev = vma;
69417
69418+#ifdef CONFIG_PAX_MPROTECT
69419+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69420+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
69421+#endif
69422+
69423 for (nstart = start ; ; ) {
69424 unsigned long newflags;
69425
69426@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69427
69428 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69429 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69430+ if (prot & (PROT_WRITE | PROT_EXEC))
69431+ gr_log_rwxmprotect(vma->vm_file);
69432+
69433+ error = -EACCES;
69434+ goto out;
69435+ }
69436+
69437+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69438 error = -EACCES;
69439 goto out;
69440 }
69441@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69442 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69443 if (error)
69444 goto out;
69445+
69446+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
69447+
69448 nstart = tmp;
69449
69450 if (nstart < prev->vm_end)
69451diff -urNp linux-2.6.32.45/mm/mremap.c linux-2.6.32.45/mm/mremap.c
69452--- linux-2.6.32.45/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
69453+++ linux-2.6.32.45/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
69454@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
69455 continue;
69456 pte = ptep_clear_flush(vma, old_addr, old_pte);
69457 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69458+
69459+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69460+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69461+ pte = pte_exprotect(pte);
69462+#endif
69463+
69464 set_pte_at(mm, new_addr, new_pte, pte);
69465 }
69466
69467@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
69468 if (is_vm_hugetlb_page(vma))
69469 goto Einval;
69470
69471+#ifdef CONFIG_PAX_SEGMEXEC
69472+ if (pax_find_mirror_vma(vma))
69473+ goto Einval;
69474+#endif
69475+
69476 /* We can't remap across vm area boundaries */
69477 if (old_len > vma->vm_end - addr)
69478 goto Efault;
69479@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
69480 unsigned long ret = -EINVAL;
69481 unsigned long charged = 0;
69482 unsigned long map_flags;
69483+ unsigned long pax_task_size = TASK_SIZE;
69484
69485 if (new_addr & ~PAGE_MASK)
69486 goto out;
69487
69488- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69489+#ifdef CONFIG_PAX_SEGMEXEC
69490+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69491+ pax_task_size = SEGMEXEC_TASK_SIZE;
69492+#endif
69493+
69494+ pax_task_size -= PAGE_SIZE;
69495+
69496+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69497 goto out;
69498
69499 /* Check if the location we're moving into overlaps the
69500 * old location at all, and fail if it does.
69501 */
69502- if ((new_addr <= addr) && (new_addr+new_len) > addr)
69503- goto out;
69504-
69505- if ((addr <= new_addr) && (addr+old_len) > new_addr)
69506+ if (addr + old_len > new_addr && new_addr + new_len > addr)
69507 goto out;
69508
69509 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69510@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
69511 struct vm_area_struct *vma;
69512 unsigned long ret = -EINVAL;
69513 unsigned long charged = 0;
69514+ unsigned long pax_task_size = TASK_SIZE;
69515
69516 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69517 goto out;
69518@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
69519 if (!new_len)
69520 goto out;
69521
69522+#ifdef CONFIG_PAX_SEGMEXEC
69523+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69524+ pax_task_size = SEGMEXEC_TASK_SIZE;
69525+#endif
69526+
69527+ pax_task_size -= PAGE_SIZE;
69528+
69529+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69530+ old_len > pax_task_size || addr > pax_task_size-old_len)
69531+ goto out;
69532+
69533 if (flags & MREMAP_FIXED) {
69534 if (flags & MREMAP_MAYMOVE)
69535 ret = mremap_to(addr, old_len, new_addr, new_len);
69536@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
69537 addr + new_len);
69538 }
69539 ret = addr;
69540+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69541 goto out;
69542 }
69543 }
69544@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
69545 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69546 if (ret)
69547 goto out;
69548+
69549+ map_flags = vma->vm_flags;
69550 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69551+ if (!(ret & ~PAGE_MASK)) {
69552+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69553+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69554+ }
69555 }
69556 out:
69557 if (ret & ~PAGE_MASK)
69558diff -urNp linux-2.6.32.45/mm/nommu.c linux-2.6.32.45/mm/nommu.c
69559--- linux-2.6.32.45/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
69560+++ linux-2.6.32.45/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
69561@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69562 int sysctl_overcommit_ratio = 50; /* default is 50% */
69563 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69564 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69565-int heap_stack_gap = 0;
69566
69567 atomic_long_t mmap_pages_allocated;
69568
69569@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
69570 EXPORT_SYMBOL(find_vma);
69571
69572 /*
69573- * find a VMA
69574- * - we don't extend stack VMAs under NOMMU conditions
69575- */
69576-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69577-{
69578- return find_vma(mm, addr);
69579-}
69580-
69581-/*
69582 * expand a stack to a given address
69583 * - not supported under NOMMU conditions
69584 */
69585diff -urNp linux-2.6.32.45/mm/page_alloc.c linux-2.6.32.45/mm/page_alloc.c
69586--- linux-2.6.32.45/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
69587+++ linux-2.6.32.45/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
69588@@ -289,7 +289,7 @@ out:
69589 * This usage means that zero-order pages may not be compound.
69590 */
69591
69592-static void free_compound_page(struct page *page)
69593+void free_compound_page(struct page *page)
69594 {
69595 __free_pages_ok(page, compound_order(page));
69596 }
69597@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
69598 int bad = 0;
69599 int wasMlocked = __TestClearPageMlocked(page);
69600
69601+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69602+ unsigned long index = 1UL << order;
69603+#endif
69604+
69605 kmemcheck_free_shadow(page, order);
69606
69607 for (i = 0 ; i < (1 << order) ; ++i)
69608@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
69609 debug_check_no_obj_freed(page_address(page),
69610 PAGE_SIZE << order);
69611 }
69612+
69613+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69614+ for (; index; --index)
69615+ sanitize_highpage(page + index - 1);
69616+#endif
69617+
69618 arch_free_page(page, order);
69619 kernel_map_pages(page, 1 << order, 0);
69620
69621@@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
69622 arch_alloc_page(page, order);
69623 kernel_map_pages(page, 1 << order, 1);
69624
69625+#ifndef CONFIG_PAX_MEMORY_SANITIZE
69626 if (gfp_flags & __GFP_ZERO)
69627 prep_zero_page(page, order, gfp_flags);
69628+#endif
69629
69630 if (order && (gfp_flags & __GFP_COMP))
69631 prep_compound_page(page, order);
69632@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
69633 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
69634 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
69635 }
69636+
69637+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69638+ sanitize_highpage(page);
69639+#endif
69640+
69641 arch_free_page(page, 0);
69642 kernel_map_pages(page, 1, 0);
69643
69644@@ -2179,6 +2196,8 @@ void show_free_areas(void)
69645 int cpu;
69646 struct zone *zone;
69647
69648+ pax_track_stack();
69649+
69650 for_each_populated_zone(zone) {
69651 show_node(zone);
69652 printk("%s per-cpu:\n", zone->name);
69653@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
69654 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
69655 }
69656 #else
69657-static void inline setup_usemap(struct pglist_data *pgdat,
69658+static inline void setup_usemap(struct pglist_data *pgdat,
69659 struct zone *zone, unsigned long zonesize) {}
69660 #endif /* CONFIG_SPARSEMEM */
69661
69662diff -urNp linux-2.6.32.45/mm/percpu.c linux-2.6.32.45/mm/percpu.c
69663--- linux-2.6.32.45/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
69664+++ linux-2.6.32.45/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
69665@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
69666 static unsigned int pcpu_last_unit_cpu __read_mostly;
69667
69668 /* the address of the first chunk which starts with the kernel static area */
69669-void *pcpu_base_addr __read_mostly;
69670+void *pcpu_base_addr __read_only;
69671 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69672
69673 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69674diff -urNp linux-2.6.32.45/mm/rmap.c linux-2.6.32.45/mm/rmap.c
69675--- linux-2.6.32.45/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
69676+++ linux-2.6.32.45/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
69677@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
69678 /* page_table_lock to protect against threads */
69679 spin_lock(&mm->page_table_lock);
69680 if (likely(!vma->anon_vma)) {
69681+
69682+#ifdef CONFIG_PAX_SEGMEXEC
69683+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
69684+
69685+ if (vma_m) {
69686+ BUG_ON(vma_m->anon_vma);
69687+ vma_m->anon_vma = anon_vma;
69688+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
69689+ }
69690+#endif
69691+
69692 vma->anon_vma = anon_vma;
69693 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
69694 allocated = NULL;
69695diff -urNp linux-2.6.32.45/mm/shmem.c linux-2.6.32.45/mm/shmem.c
69696--- linux-2.6.32.45/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
69697+++ linux-2.6.32.45/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
69698@@ -31,7 +31,7 @@
69699 #include <linux/swap.h>
69700 #include <linux/ima.h>
69701
69702-static struct vfsmount *shm_mnt;
69703+struct vfsmount *shm_mnt;
69704
69705 #ifdef CONFIG_SHMEM
69706 /*
69707@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
69708 goto unlock;
69709 }
69710 entry = shmem_swp_entry(info, index, NULL);
69711+ if (!entry)
69712+ goto unlock;
69713 if (entry->val) {
69714 /*
69715 * The more uptodate page coming down from a stacked
69716@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
69717 struct vm_area_struct pvma;
69718 struct page *page;
69719
69720+ pax_track_stack();
69721+
69722 spol = mpol_cond_copy(&mpol,
69723 mpol_shared_policy_lookup(&info->policy, idx));
69724
69725@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
69726
69727 info = SHMEM_I(inode);
69728 inode->i_size = len-1;
69729- if (len <= (char *)inode - (char *)info) {
69730+ if (len <= (char *)inode - (char *)info && len <= 64) {
69731 /* do it inline */
69732 memcpy(info, symname, len);
69733 inode->i_op = &shmem_symlink_inline_operations;
69734@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
69735 int err = -ENOMEM;
69736
69737 /* Round up to L1_CACHE_BYTES to resist false sharing */
69738- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
69739- L1_CACHE_BYTES), GFP_KERNEL);
69740+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
69741 if (!sbinfo)
69742 return -ENOMEM;
69743
69744diff -urNp linux-2.6.32.45/mm/slab.c linux-2.6.32.45/mm/slab.c
69745--- linux-2.6.32.45/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
69746+++ linux-2.6.32.45/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
69747@@ -174,7 +174,7 @@
69748
69749 /* Legal flag mask for kmem_cache_create(). */
69750 #if DEBUG
69751-# define CREATE_MASK (SLAB_RED_ZONE | \
69752+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
69753 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
69754 SLAB_CACHE_DMA | \
69755 SLAB_STORE_USER | \
69756@@ -182,7 +182,7 @@
69757 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69758 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
69759 #else
69760-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
69761+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
69762 SLAB_CACHE_DMA | \
69763 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
69764 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69765@@ -308,7 +308,7 @@ struct kmem_list3 {
69766 * Need this for bootstrapping a per node allocator.
69767 */
69768 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
69769-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
69770+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
69771 #define CACHE_CACHE 0
69772 #define SIZE_AC MAX_NUMNODES
69773 #define SIZE_L3 (2 * MAX_NUMNODES)
69774@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
69775 if ((x)->max_freeable < i) \
69776 (x)->max_freeable = i; \
69777 } while (0)
69778-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
69779-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
69780-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
69781-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
69782+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
69783+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
69784+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
69785+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
69786 #else
69787 #define STATS_INC_ACTIVE(x) do { } while (0)
69788 #define STATS_DEC_ACTIVE(x) do { } while (0)
69789@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
69790 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
69791 */
69792 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
69793- const struct slab *slab, void *obj)
69794+ const struct slab *slab, const void *obj)
69795 {
69796 u32 offset = (obj - slab->s_mem);
69797 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
69798@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
69799 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
69800 sizes[INDEX_AC].cs_size,
69801 ARCH_KMALLOC_MINALIGN,
69802- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69803+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69804 NULL);
69805
69806 if (INDEX_AC != INDEX_L3) {
69807@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
69808 kmem_cache_create(names[INDEX_L3].name,
69809 sizes[INDEX_L3].cs_size,
69810 ARCH_KMALLOC_MINALIGN,
69811- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69812+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69813 NULL);
69814 }
69815
69816@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
69817 sizes->cs_cachep = kmem_cache_create(names->name,
69818 sizes->cs_size,
69819 ARCH_KMALLOC_MINALIGN,
69820- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69821+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69822 NULL);
69823 }
69824 #ifdef CONFIG_ZONE_DMA
69825@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
69826 }
69827 /* cpu stats */
69828 {
69829- unsigned long allochit = atomic_read(&cachep->allochit);
69830- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
69831- unsigned long freehit = atomic_read(&cachep->freehit);
69832- unsigned long freemiss = atomic_read(&cachep->freemiss);
69833+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
69834+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
69835+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
69836+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
69837
69838 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
69839 allochit, allocmiss, freehit, freemiss);
69840@@ -4471,15 +4471,66 @@ static const struct file_operations proc
69841
69842 static int __init slab_proc_init(void)
69843 {
69844- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
69845+ mode_t gr_mode = S_IRUGO;
69846+
69847+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69848+ gr_mode = S_IRUSR;
69849+#endif
69850+
69851+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
69852 #ifdef CONFIG_DEBUG_SLAB_LEAK
69853- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
69854+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
69855 #endif
69856 return 0;
69857 }
69858 module_init(slab_proc_init);
69859 #endif
69860
69861+void check_object_size(const void *ptr, unsigned long n, bool to)
69862+{
69863+
69864+#ifdef CONFIG_PAX_USERCOPY
69865+ struct page *page;
69866+ struct kmem_cache *cachep = NULL;
69867+ struct slab *slabp;
69868+ unsigned int objnr;
69869+ unsigned long offset;
69870+
69871+ if (!n)
69872+ return;
69873+
69874+ if (ZERO_OR_NULL_PTR(ptr))
69875+ goto report;
69876+
69877+ if (!virt_addr_valid(ptr))
69878+ return;
69879+
69880+ page = virt_to_head_page(ptr);
69881+
69882+ if (!PageSlab(page)) {
69883+ if (object_is_on_stack(ptr, n) == -1)
69884+ goto report;
69885+ return;
69886+ }
69887+
69888+ cachep = page_get_cache(page);
69889+ if (!(cachep->flags & SLAB_USERCOPY))
69890+ goto report;
69891+
69892+ slabp = page_get_slab(page);
69893+ objnr = obj_to_index(cachep, slabp, ptr);
69894+ BUG_ON(objnr >= cachep->num);
69895+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
69896+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
69897+ return;
69898+
69899+report:
69900+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
69901+#endif
69902+
69903+}
69904+EXPORT_SYMBOL(check_object_size);
69905+
69906 /**
69907 * ksize - get the actual amount of memory allocated for a given object
69908 * @objp: Pointer to the object
69909diff -urNp linux-2.6.32.45/mm/slob.c linux-2.6.32.45/mm/slob.c
69910--- linux-2.6.32.45/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
69911+++ linux-2.6.32.45/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
69912@@ -29,7 +29,7 @@
69913 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
69914 * alloc_pages() directly, allocating compound pages so the page order
69915 * does not have to be separately tracked, and also stores the exact
69916- * allocation size in page->private so that it can be used to accurately
69917+ * allocation size in slob_page->size so that it can be used to accurately
69918 * provide ksize(). These objects are detected in kfree() because slob_page()
69919 * is false for them.
69920 *
69921@@ -58,6 +58,7 @@
69922 */
69923
69924 #include <linux/kernel.h>
69925+#include <linux/sched.h>
69926 #include <linux/slab.h>
69927 #include <linux/mm.h>
69928 #include <linux/swap.h> /* struct reclaim_state */
69929@@ -100,7 +101,8 @@ struct slob_page {
69930 unsigned long flags; /* mandatory */
69931 atomic_t _count; /* mandatory */
69932 slobidx_t units; /* free units left in page */
69933- unsigned long pad[2];
69934+ unsigned long pad[1];
69935+ unsigned long size; /* size when >=PAGE_SIZE */
69936 slob_t *free; /* first free slob_t in page */
69937 struct list_head list; /* linked list of free pages */
69938 };
69939@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
69940 */
69941 static inline int is_slob_page(struct slob_page *sp)
69942 {
69943- return PageSlab((struct page *)sp);
69944+ return PageSlab((struct page *)sp) && !sp->size;
69945 }
69946
69947 static inline void set_slob_page(struct slob_page *sp)
69948@@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
69949
69950 static inline struct slob_page *slob_page(const void *addr)
69951 {
69952- return (struct slob_page *)virt_to_page(addr);
69953+ return (struct slob_page *)virt_to_head_page(addr);
69954 }
69955
69956 /*
69957@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
69958 /*
69959 * Return the size of a slob block.
69960 */
69961-static slobidx_t slob_units(slob_t *s)
69962+static slobidx_t slob_units(const slob_t *s)
69963 {
69964 if (s->units > 0)
69965 return s->units;
69966@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
69967 /*
69968 * Return the next free slob block pointer after this one.
69969 */
69970-static slob_t *slob_next(slob_t *s)
69971+static slob_t *slob_next(const slob_t *s)
69972 {
69973 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
69974 slobidx_t next;
69975@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
69976 /*
69977 * Returns true if s is the last free block in its page.
69978 */
69979-static int slob_last(slob_t *s)
69980+static int slob_last(const slob_t *s)
69981 {
69982 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
69983 }
69984@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
69985 if (!page)
69986 return NULL;
69987
69988+ set_slob_page(page);
69989 return page_address(page);
69990 }
69991
69992@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
69993 if (!b)
69994 return NULL;
69995 sp = slob_page(b);
69996- set_slob_page(sp);
69997
69998 spin_lock_irqsave(&slob_lock, flags);
69999 sp->units = SLOB_UNITS(PAGE_SIZE);
70000 sp->free = b;
70001+ sp->size = 0;
70002 INIT_LIST_HEAD(&sp->list);
70003 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70004 set_slob_page_free(sp, slob_list);
70005@@ -475,10 +478,9 @@ out:
70006 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
70007 #endif
70008
70009-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70010+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70011 {
70012- unsigned int *m;
70013- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70014+ slob_t *m;
70015 void *ret;
70016
70017 lockdep_trace_alloc(gfp);
70018@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
70019
70020 if (!m)
70021 return NULL;
70022- *m = size;
70023+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70024+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70025+ m[0].units = size;
70026+ m[1].units = align;
70027 ret = (void *)m + align;
70028
70029 trace_kmalloc_node(_RET_IP_, ret,
70030@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
70031
70032 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
70033 if (ret) {
70034- struct page *page;
70035- page = virt_to_page(ret);
70036- page->private = size;
70037+ struct slob_page *sp;
70038+ sp = slob_page(ret);
70039+ sp->size = size;
70040 }
70041
70042 trace_kmalloc_node(_RET_IP_, ret,
70043 size, PAGE_SIZE << order, gfp, node);
70044 }
70045
70046- kmemleak_alloc(ret, size, 1, gfp);
70047+ return ret;
70048+}
70049+
70050+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70051+{
70052+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70053+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70054+
70055+ if (!ZERO_OR_NULL_PTR(ret))
70056+ kmemleak_alloc(ret, size, 1, gfp);
70057 return ret;
70058 }
70059 EXPORT_SYMBOL(__kmalloc_node);
70060@@ -528,13 +542,88 @@ void kfree(const void *block)
70061 sp = slob_page(block);
70062 if (is_slob_page(sp)) {
70063 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70064- unsigned int *m = (unsigned int *)(block - align);
70065- slob_free(m, *m + align);
70066- } else
70067+ slob_t *m = (slob_t *)(block - align);
70068+ slob_free(m, m[0].units + align);
70069+ } else {
70070+ clear_slob_page(sp);
70071+ free_slob_page(sp);
70072+ sp->size = 0;
70073 put_page(&sp->page);
70074+ }
70075 }
70076 EXPORT_SYMBOL(kfree);
70077
70078+void check_object_size(const void *ptr, unsigned long n, bool to)
70079+{
70080+
70081+#ifdef CONFIG_PAX_USERCOPY
70082+ struct slob_page *sp;
70083+ const slob_t *free;
70084+ const void *base;
70085+ unsigned long flags;
70086+
70087+ if (!n)
70088+ return;
70089+
70090+ if (ZERO_OR_NULL_PTR(ptr))
70091+ goto report;
70092+
70093+ if (!virt_addr_valid(ptr))
70094+ return;
70095+
70096+ sp = slob_page(ptr);
70097+ if (!PageSlab((struct page*)sp)) {
70098+ if (object_is_on_stack(ptr, n) == -1)
70099+ goto report;
70100+ return;
70101+ }
70102+
70103+ if (sp->size) {
70104+ base = page_address(&sp->page);
70105+ if (base <= ptr && n <= sp->size - (ptr - base))
70106+ return;
70107+ goto report;
70108+ }
70109+
70110+ /* some tricky double walking to find the chunk */
70111+ spin_lock_irqsave(&slob_lock, flags);
70112+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70113+ free = sp->free;
70114+
70115+ while (!slob_last(free) && (void *)free <= ptr) {
70116+ base = free + slob_units(free);
70117+ free = slob_next(free);
70118+ }
70119+
70120+ while (base < (void *)free) {
70121+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70122+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70123+ int offset;
70124+
70125+ if (ptr < base + align)
70126+ break;
70127+
70128+ offset = ptr - base - align;
70129+ if (offset >= m) {
70130+ base += size;
70131+ continue;
70132+ }
70133+
70134+ if (n > m - offset)
70135+ break;
70136+
70137+ spin_unlock_irqrestore(&slob_lock, flags);
70138+ return;
70139+ }
70140+
70141+ spin_unlock_irqrestore(&slob_lock, flags);
70142+report:
70143+ pax_report_usercopy(ptr, n, to, NULL);
70144+#endif
70145+
70146+}
70147+EXPORT_SYMBOL(check_object_size);
70148+
70149 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70150 size_t ksize(const void *block)
70151 {
70152@@ -547,10 +636,10 @@ size_t ksize(const void *block)
70153 sp = slob_page(block);
70154 if (is_slob_page(sp)) {
70155 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70156- unsigned int *m = (unsigned int *)(block - align);
70157- return SLOB_UNITS(*m) * SLOB_UNIT;
70158+ slob_t *m = (slob_t *)(block - align);
70159+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70160 } else
70161- return sp->page.private;
70162+ return sp->size;
70163 }
70164 EXPORT_SYMBOL(ksize);
70165
70166@@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
70167 {
70168 struct kmem_cache *c;
70169
70170+#ifdef CONFIG_PAX_USERCOPY
70171+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70172+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70173+#else
70174 c = slob_alloc(sizeof(struct kmem_cache),
70175 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70176+#endif
70177
70178 if (c) {
70179 c->name = name;
70180@@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
70181 {
70182 void *b;
70183
70184+#ifdef CONFIG_PAX_USERCOPY
70185+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70186+#else
70187 if (c->size < PAGE_SIZE) {
70188 b = slob_alloc(c->size, flags, c->align, node);
70189 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70190 SLOB_UNITS(c->size) * SLOB_UNIT,
70191 flags, node);
70192 } else {
70193+ struct slob_page *sp;
70194+
70195 b = slob_new_pages(flags, get_order(c->size), node);
70196+ sp = slob_page(b);
70197+ sp->size = c->size;
70198 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70199 PAGE_SIZE << get_order(c->size),
70200 flags, node);
70201 }
70202+#endif
70203
70204 if (c->ctor)
70205 c->ctor(b);
70206@@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70207
70208 static void __kmem_cache_free(void *b, int size)
70209 {
70210- if (size < PAGE_SIZE)
70211+ struct slob_page *sp = slob_page(b);
70212+
70213+ if (is_slob_page(sp))
70214 slob_free(b, size);
70215- else
70216+ else {
70217+ clear_slob_page(sp);
70218+ free_slob_page(sp);
70219+ sp->size = 0;
70220 slob_free_pages(b, get_order(size));
70221+ }
70222 }
70223
70224 static void kmem_rcu_free(struct rcu_head *head)
70225@@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
70226
70227 void kmem_cache_free(struct kmem_cache *c, void *b)
70228 {
70229+ int size = c->size;
70230+
70231+#ifdef CONFIG_PAX_USERCOPY
70232+ if (size + c->align < PAGE_SIZE) {
70233+ size += c->align;
70234+ b -= c->align;
70235+ }
70236+#endif
70237+
70238 kmemleak_free_recursive(b, c->flags);
70239 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70240 struct slob_rcu *slob_rcu;
70241- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70242+ slob_rcu = b + (size - sizeof(struct slob_rcu));
70243 INIT_RCU_HEAD(&slob_rcu->head);
70244- slob_rcu->size = c->size;
70245+ slob_rcu->size = size;
70246 call_rcu(&slob_rcu->head, kmem_rcu_free);
70247 } else {
70248- __kmem_cache_free(b, c->size);
70249+ __kmem_cache_free(b, size);
70250 }
70251
70252+#ifdef CONFIG_PAX_USERCOPY
70253+ trace_kfree(_RET_IP_, b);
70254+#else
70255 trace_kmem_cache_free(_RET_IP_, b);
70256+#endif
70257+
70258 }
70259 EXPORT_SYMBOL(kmem_cache_free);
70260
70261diff -urNp linux-2.6.32.45/mm/slub.c linux-2.6.32.45/mm/slub.c
70262--- linux-2.6.32.45/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
70263+++ linux-2.6.32.45/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
70264@@ -410,7 +410,7 @@ static void print_track(const char *s, s
70265 if (!t->addr)
70266 return;
70267
70268- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70269+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70270 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70271 }
70272
70273@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
70274
70275 page = virt_to_head_page(x);
70276
70277+ BUG_ON(!PageSlab(page));
70278+
70279 slab_free(s, page, x, _RET_IP_);
70280
70281 trace_kmem_cache_free(_RET_IP_, x);
70282@@ -1937,7 +1939,7 @@ static int slub_min_objects;
70283 * Merge control. If this is set then no merging of slab caches will occur.
70284 * (Could be removed. This was introduced to pacify the merge skeptics.)
70285 */
70286-static int slub_nomerge;
70287+static int slub_nomerge = 1;
70288
70289 /*
70290 * Calculate the order of allocation given an slab object size.
70291@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
70292 * list to avoid pounding the page allocator excessively.
70293 */
70294 set_min_partial(s, ilog2(s->size));
70295- s->refcount = 1;
70296+ atomic_set(&s->refcount, 1);
70297 #ifdef CONFIG_NUMA
70298 s->remote_node_defrag_ratio = 1000;
70299 #endif
70300@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
70301 void kmem_cache_destroy(struct kmem_cache *s)
70302 {
70303 down_write(&slub_lock);
70304- s->refcount--;
70305- if (!s->refcount) {
70306+ if (atomic_dec_and_test(&s->refcount)) {
70307 list_del(&s->list);
70308 up_write(&slub_lock);
70309 if (kmem_cache_close(s)) {
70310@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
70311 __setup("slub_nomerge", setup_slub_nomerge);
70312
70313 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
70314- const char *name, int size, gfp_t gfp_flags)
70315+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
70316 {
70317- unsigned int flags = 0;
70318-
70319 if (gfp_flags & SLUB_DMA)
70320- flags = SLAB_CACHE_DMA;
70321+ flags |= SLAB_CACHE_DMA;
70322
70323 /*
70324 * This function is called with IRQs disabled during early-boot on
70325@@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
70326 EXPORT_SYMBOL(__kmalloc_node);
70327 #endif
70328
70329+void check_object_size(const void *ptr, unsigned long n, bool to)
70330+{
70331+
70332+#ifdef CONFIG_PAX_USERCOPY
70333+ struct page *page;
70334+ struct kmem_cache *s = NULL;
70335+ unsigned long offset;
70336+
70337+ if (!n)
70338+ return;
70339+
70340+ if (ZERO_OR_NULL_PTR(ptr))
70341+ goto report;
70342+
70343+ if (!virt_addr_valid(ptr))
70344+ return;
70345+
70346+ page = get_object_page(ptr);
70347+
70348+ if (!page) {
70349+ if (object_is_on_stack(ptr, n) == -1)
70350+ goto report;
70351+ return;
70352+ }
70353+
70354+ s = page->slab;
70355+ if (!(s->flags & SLAB_USERCOPY))
70356+ goto report;
70357+
70358+ offset = (ptr - page_address(page)) % s->size;
70359+ if (offset <= s->objsize && n <= s->objsize - offset)
70360+ return;
70361+
70362+report:
70363+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70364+#endif
70365+
70366+}
70367+EXPORT_SYMBOL(check_object_size);
70368+
70369 size_t ksize(const void *object)
70370 {
70371 struct page *page;
70372@@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
70373 * kmem_cache_open for slab_state == DOWN.
70374 */
70375 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
70376- sizeof(struct kmem_cache_node), GFP_NOWAIT);
70377- kmalloc_caches[0].refcount = -1;
70378+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
70379+ atomic_set(&kmalloc_caches[0].refcount, -1);
70380 caches++;
70381
70382 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
70383@@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
70384 /* Caches that are not of the two-to-the-power-of size */
70385 if (KMALLOC_MIN_SIZE <= 32) {
70386 create_kmalloc_cache(&kmalloc_caches[1],
70387- "kmalloc-96", 96, GFP_NOWAIT);
70388+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
70389 caches++;
70390 }
70391 if (KMALLOC_MIN_SIZE <= 64) {
70392 create_kmalloc_cache(&kmalloc_caches[2],
70393- "kmalloc-192", 192, GFP_NOWAIT);
70394+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
70395 caches++;
70396 }
70397
70398 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70399 create_kmalloc_cache(&kmalloc_caches[i],
70400- "kmalloc", 1 << i, GFP_NOWAIT);
70401+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
70402 caches++;
70403 }
70404
70405@@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
70406 /*
70407 * We may have set a slab to be unmergeable during bootstrap.
70408 */
70409- if (s->refcount < 0)
70410+ if (atomic_read(&s->refcount) < 0)
70411 return 1;
70412
70413 return 0;
70414@@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
70415 if (s) {
70416 int cpu;
70417
70418- s->refcount++;
70419+ atomic_inc(&s->refcount);
70420 /*
70421 * Adjust the object sizes so that we clear
70422 * the complete object on kzalloc.
70423@@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
70424
70425 if (sysfs_slab_alias(s, name)) {
70426 down_write(&slub_lock);
70427- s->refcount--;
70428+ atomic_dec(&s->refcount);
70429 up_write(&slub_lock);
70430 goto err;
70431 }
70432@@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
70433
70434 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70435 {
70436- return sprintf(buf, "%d\n", s->refcount - 1);
70437+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70438 }
70439 SLAB_ATTR_RO(aliases);
70440
70441@@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
70442 kfree(s);
70443 }
70444
70445-static struct sysfs_ops slab_sysfs_ops = {
70446+static const struct sysfs_ops slab_sysfs_ops = {
70447 .show = slab_attr_show,
70448 .store = slab_attr_store,
70449 };
70450@@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
70451 return 0;
70452 }
70453
70454-static struct kset_uevent_ops slab_uevent_ops = {
70455+static const struct kset_uevent_ops slab_uevent_ops = {
70456 .filter = uevent_filter,
70457 };
70458
70459@@ -4785,7 +4824,13 @@ static const struct file_operations proc
70460
70461 static int __init slab_proc_init(void)
70462 {
70463- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70464+ mode_t gr_mode = S_IRUGO;
70465+
70466+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70467+ gr_mode = S_IRUSR;
70468+#endif
70469+
70470+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70471 return 0;
70472 }
70473 module_init(slab_proc_init);
70474diff -urNp linux-2.6.32.45/mm/swap.c linux-2.6.32.45/mm/swap.c
70475--- linux-2.6.32.45/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
70476+++ linux-2.6.32.45/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
70477@@ -30,6 +30,7 @@
70478 #include <linux/notifier.h>
70479 #include <linux/backing-dev.h>
70480 #include <linux/memcontrol.h>
70481+#include <linux/hugetlb.h>
70482
70483 #include "internal.h"
70484
70485@@ -65,6 +66,8 @@ static void put_compound_page(struct pag
70486 compound_page_dtor *dtor;
70487
70488 dtor = get_compound_page_dtor(page);
70489+ if (!PageHuge(page))
70490+ BUG_ON(dtor != free_compound_page);
70491 (*dtor)(page);
70492 }
70493 }
70494diff -urNp linux-2.6.32.45/mm/util.c linux-2.6.32.45/mm/util.c
70495--- linux-2.6.32.45/mm/util.c 2011-03-27 14:31:47.000000000 -0400
70496+++ linux-2.6.32.45/mm/util.c 2011-04-17 15:56:46.000000000 -0400
70497@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
70498 void arch_pick_mmap_layout(struct mm_struct *mm)
70499 {
70500 mm->mmap_base = TASK_UNMAPPED_BASE;
70501+
70502+#ifdef CONFIG_PAX_RANDMMAP
70503+ if (mm->pax_flags & MF_PAX_RANDMMAP)
70504+ mm->mmap_base += mm->delta_mmap;
70505+#endif
70506+
70507 mm->get_unmapped_area = arch_get_unmapped_area;
70508 mm->unmap_area = arch_unmap_area;
70509 }
70510diff -urNp linux-2.6.32.45/mm/vmalloc.c linux-2.6.32.45/mm/vmalloc.c
70511--- linux-2.6.32.45/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
70512+++ linux-2.6.32.45/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
70513@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70514
70515 pte = pte_offset_kernel(pmd, addr);
70516 do {
70517- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70518- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70519+
70520+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70521+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70522+ BUG_ON(!pte_exec(*pte));
70523+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70524+ continue;
70525+ }
70526+#endif
70527+
70528+ {
70529+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70530+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70531+ }
70532 } while (pte++, addr += PAGE_SIZE, addr != end);
70533 }
70534
70535@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70536 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70537 {
70538 pte_t *pte;
70539+ int ret = -ENOMEM;
70540
70541 /*
70542 * nr is a running index into the array which helps higher level
70543@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
70544 pte = pte_alloc_kernel(pmd, addr);
70545 if (!pte)
70546 return -ENOMEM;
70547+
70548+ pax_open_kernel();
70549 do {
70550 struct page *page = pages[*nr];
70551
70552- if (WARN_ON(!pte_none(*pte)))
70553- return -EBUSY;
70554- if (WARN_ON(!page))
70555- return -ENOMEM;
70556+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70557+ if (!(pgprot_val(prot) & _PAGE_NX))
70558+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
70559+ else
70560+#endif
70561+
70562+ if (WARN_ON(!pte_none(*pte))) {
70563+ ret = -EBUSY;
70564+ goto out;
70565+ }
70566+ if (WARN_ON(!page)) {
70567+ ret = -ENOMEM;
70568+ goto out;
70569+ }
70570 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70571 (*nr)++;
70572 } while (pte++, addr += PAGE_SIZE, addr != end);
70573- return 0;
70574+ ret = 0;
70575+out:
70576+ pax_close_kernel();
70577+ return ret;
70578 }
70579
70580 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70581@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
70582 * and fall back on vmalloc() if that fails. Others
70583 * just put it in the vmalloc space.
70584 */
70585-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70586+#ifdef CONFIG_MODULES
70587+#ifdef MODULES_VADDR
70588 unsigned long addr = (unsigned long)x;
70589 if (addr >= MODULES_VADDR && addr < MODULES_END)
70590 return 1;
70591 #endif
70592+
70593+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70594+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70595+ return 1;
70596+#endif
70597+
70598+#endif
70599+
70600 return is_vmalloc_addr(x);
70601 }
70602
70603@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
70604
70605 if (!pgd_none(*pgd)) {
70606 pud_t *pud = pud_offset(pgd, addr);
70607+#ifdef CONFIG_X86
70608+ if (!pud_large(*pud))
70609+#endif
70610 if (!pud_none(*pud)) {
70611 pmd_t *pmd = pmd_offset(pud, addr);
70612+#ifdef CONFIG_X86
70613+ if (!pmd_large(*pmd))
70614+#endif
70615 if (!pmd_none(*pmd)) {
70616 pte_t *ptep, pte;
70617
70618@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
70619 struct rb_node *tmp;
70620
70621 while (*p) {
70622- struct vmap_area *tmp;
70623+ struct vmap_area *varea;
70624
70625 parent = *p;
70626- tmp = rb_entry(parent, struct vmap_area, rb_node);
70627- if (va->va_start < tmp->va_end)
70628+ varea = rb_entry(parent, struct vmap_area, rb_node);
70629+ if (va->va_start < varea->va_end)
70630 p = &(*p)->rb_left;
70631- else if (va->va_end > tmp->va_start)
70632+ else if (va->va_end > varea->va_start)
70633 p = &(*p)->rb_right;
70634 else
70635 BUG();
70636@@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
70637 struct vm_struct *area;
70638
70639 BUG_ON(in_interrupt());
70640+
70641+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70642+ if (flags & VM_KERNEXEC) {
70643+ if (start != VMALLOC_START || end != VMALLOC_END)
70644+ return NULL;
70645+ start = (unsigned long)MODULES_EXEC_VADDR;
70646+ end = (unsigned long)MODULES_EXEC_END;
70647+ }
70648+#endif
70649+
70650 if (flags & VM_IOREMAP) {
70651 int bit = fls(size);
70652
70653@@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
70654 if (count > totalram_pages)
70655 return NULL;
70656
70657+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70658+ if (!(pgprot_val(prot) & _PAGE_NX))
70659+ flags |= VM_KERNEXEC;
70660+#endif
70661+
70662 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
70663 __builtin_return_address(0));
70664 if (!area)
70665@@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
70666 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
70667 return NULL;
70668
70669+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70670+ if (!(pgprot_val(prot) & _PAGE_NX))
70671+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
70672+ node, gfp_mask, caller);
70673+ else
70674+#endif
70675+
70676 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
70677 VMALLOC_END, node, gfp_mask, caller);
70678
70679@@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
70680 return addr;
70681 }
70682
70683+#undef __vmalloc
70684 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
70685 {
70686 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
70687@@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
70688 * For tight control over page level allocator and protection flags
70689 * use __vmalloc() instead.
70690 */
70691+#undef vmalloc
70692 void *vmalloc(unsigned long size)
70693 {
70694 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70695@@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
70696 * The resulting memory area is zeroed so it can be mapped to userspace
70697 * without leaking data.
70698 */
70699+#undef vmalloc_user
70700 void *vmalloc_user(unsigned long size)
70701 {
70702 struct vm_struct *area;
70703@@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
70704 * For tight control over page level allocator and protection flags
70705 * use __vmalloc() instead.
70706 */
70707+#undef vmalloc_node
70708 void *vmalloc_node(unsigned long size, int node)
70709 {
70710 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70711@@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
70712 * For tight control over page level allocator and protection flags
70713 * use __vmalloc() instead.
70714 */
70715-
70716+#undef vmalloc_exec
70717 void *vmalloc_exec(unsigned long size)
70718 {
70719- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
70720+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
70721 -1, __builtin_return_address(0));
70722 }
70723
70724@@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
70725 * Allocate enough 32bit PA addressable pages to cover @size from the
70726 * page level allocator and map them into contiguous kernel virtual space.
70727 */
70728+#undef vmalloc_32
70729 void *vmalloc_32(unsigned long size)
70730 {
70731 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
70732@@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
70733 * The resulting memory area is 32bit addressable and zeroed so it can be
70734 * mapped to userspace without leaking data.
70735 */
70736+#undef vmalloc_32_user
70737 void *vmalloc_32_user(unsigned long size)
70738 {
70739 struct vm_struct *area;
70740@@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
70741 unsigned long uaddr = vma->vm_start;
70742 unsigned long usize = vma->vm_end - vma->vm_start;
70743
70744+ BUG_ON(vma->vm_mirror);
70745+
70746 if ((PAGE_SIZE-1) & (unsigned long)addr)
70747 return -EINVAL;
70748
70749diff -urNp linux-2.6.32.45/mm/vmstat.c linux-2.6.32.45/mm/vmstat.c
70750--- linux-2.6.32.45/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
70751+++ linux-2.6.32.45/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
70752@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
70753 *
70754 * vm_stat contains the global counters
70755 */
70756-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70757+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70758 EXPORT_SYMBOL(vm_stat);
70759
70760 #ifdef CONFIG_SMP
70761@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
70762 v = p->vm_stat_diff[i];
70763 p->vm_stat_diff[i] = 0;
70764 local_irq_restore(flags);
70765- atomic_long_add(v, &zone->vm_stat[i]);
70766+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
70767 global_diff[i] += v;
70768 #ifdef CONFIG_NUMA
70769 /* 3 seconds idle till flush */
70770@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
70771
70772 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
70773 if (global_diff[i])
70774- atomic_long_add(global_diff[i], &vm_stat[i]);
70775+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
70776 }
70777
70778 #endif
70779@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
70780 start_cpu_timer(cpu);
70781 #endif
70782 #ifdef CONFIG_PROC_FS
70783- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
70784- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
70785- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
70786- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
70787+ {
70788+ mode_t gr_mode = S_IRUGO;
70789+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70790+ gr_mode = S_IRUSR;
70791+#endif
70792+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
70793+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
70794+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70795+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
70796+#else
70797+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
70798+#endif
70799+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
70800+ }
70801 #endif
70802 return 0;
70803 }
70804diff -urNp linux-2.6.32.45/net/8021q/vlan.c linux-2.6.32.45/net/8021q/vlan.c
70805--- linux-2.6.32.45/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
70806+++ linux-2.6.32.45/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
70807@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
70808 err = -EPERM;
70809 if (!capable(CAP_NET_ADMIN))
70810 break;
70811- if ((args.u.name_type >= 0) &&
70812- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
70813+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
70814 struct vlan_net *vn;
70815
70816 vn = net_generic(net, vlan_net_id);
70817diff -urNp linux-2.6.32.45/net/atm/atm_misc.c linux-2.6.32.45/net/atm/atm_misc.c
70818--- linux-2.6.32.45/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
70819+++ linux-2.6.32.45/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
70820@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
70821 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
70822 return 1;
70823 atm_return(vcc,truesize);
70824- atomic_inc(&vcc->stats->rx_drop);
70825+ atomic_inc_unchecked(&vcc->stats->rx_drop);
70826 return 0;
70827 }
70828
70829@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
70830 }
70831 }
70832 atm_return(vcc,guess);
70833- atomic_inc(&vcc->stats->rx_drop);
70834+ atomic_inc_unchecked(&vcc->stats->rx_drop);
70835 return NULL;
70836 }
70837
70838@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
70839
70840 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70841 {
70842-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
70843+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
70844 __SONET_ITEMS
70845 #undef __HANDLE_ITEM
70846 }
70847@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
70848
70849 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70850 {
70851-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
70852+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
70853 __SONET_ITEMS
70854 #undef __HANDLE_ITEM
70855 }
70856diff -urNp linux-2.6.32.45/net/atm/lec.h linux-2.6.32.45/net/atm/lec.h
70857--- linux-2.6.32.45/net/atm/lec.h 2011-03-27 14:31:47.000000000 -0400
70858+++ linux-2.6.32.45/net/atm/lec.h 2011-08-05 20:33:55.000000000 -0400
70859@@ -48,7 +48,7 @@ struct lane2_ops {
70860 const u8 *tlvs, u32 sizeoftlvs);
70861 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
70862 const u8 *tlvs, u32 sizeoftlvs);
70863-};
70864+} __no_const;
70865
70866 /*
70867 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
70868diff -urNp linux-2.6.32.45/net/atm/mpc.h linux-2.6.32.45/net/atm/mpc.h
70869--- linux-2.6.32.45/net/atm/mpc.h 2011-03-27 14:31:47.000000000 -0400
70870+++ linux-2.6.32.45/net/atm/mpc.h 2011-08-23 21:22:38.000000000 -0400
70871@@ -33,7 +33,7 @@ struct mpoa_client {
70872 struct mpc_parameters parameters; /* parameters for this client */
70873
70874 const struct net_device_ops *old_ops;
70875- struct net_device_ops new_ops;
70876+ net_device_ops_no_const new_ops;
70877 };
70878
70879
70880diff -urNp linux-2.6.32.45/net/atm/mpoa_caches.c linux-2.6.32.45/net/atm/mpoa_caches.c
70881--- linux-2.6.32.45/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
70882+++ linux-2.6.32.45/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
70883@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
70884 struct timeval now;
70885 struct k_message msg;
70886
70887+ pax_track_stack();
70888+
70889 do_gettimeofday(&now);
70890
70891 write_lock_irq(&client->egress_lock);
70892diff -urNp linux-2.6.32.45/net/atm/proc.c linux-2.6.32.45/net/atm/proc.c
70893--- linux-2.6.32.45/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
70894+++ linux-2.6.32.45/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
70895@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
70896 const struct k_atm_aal_stats *stats)
70897 {
70898 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
70899- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
70900- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
70901- atomic_read(&stats->rx_drop));
70902+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
70903+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
70904+ atomic_read_unchecked(&stats->rx_drop));
70905 }
70906
70907 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
70908@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
70909 {
70910 struct sock *sk = sk_atm(vcc);
70911
70912+#ifdef CONFIG_GRKERNSEC_HIDESYM
70913+ seq_printf(seq, "%p ", NULL);
70914+#else
70915 seq_printf(seq, "%p ", vcc);
70916+#endif
70917+
70918 if (!vcc->dev)
70919 seq_printf(seq, "Unassigned ");
70920 else
70921@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
70922 {
70923 if (!vcc->dev)
70924 seq_printf(seq, sizeof(void *) == 4 ?
70925+#ifdef CONFIG_GRKERNSEC_HIDESYM
70926+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
70927+#else
70928 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
70929+#endif
70930 else
70931 seq_printf(seq, "%3d %3d %5d ",
70932 vcc->dev->number, vcc->vpi, vcc->vci);
70933diff -urNp linux-2.6.32.45/net/atm/resources.c linux-2.6.32.45/net/atm/resources.c
70934--- linux-2.6.32.45/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
70935+++ linux-2.6.32.45/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
70936@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
70937 static void copy_aal_stats(struct k_atm_aal_stats *from,
70938 struct atm_aal_stats *to)
70939 {
70940-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
70941+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
70942 __AAL_STAT_ITEMS
70943 #undef __HANDLE_ITEM
70944 }
70945@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
70946 static void subtract_aal_stats(struct k_atm_aal_stats *from,
70947 struct atm_aal_stats *to)
70948 {
70949-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
70950+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
70951 __AAL_STAT_ITEMS
70952 #undef __HANDLE_ITEM
70953 }
70954diff -urNp linux-2.6.32.45/net/bluetooth/l2cap.c linux-2.6.32.45/net/bluetooth/l2cap.c
70955--- linux-2.6.32.45/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
70956+++ linux-2.6.32.45/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
70957@@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
70958 err = -ENOTCONN;
70959 break;
70960 }
70961-
70962+ memset(&cinfo, 0, sizeof(cinfo));
70963 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
70964 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
70965
70966@@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
70967
70968 /* Reject if config buffer is too small. */
70969 len = cmd_len - sizeof(*req);
70970- if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
70971+ if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
70972 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
70973 l2cap_build_conf_rsp(sk, rsp,
70974 L2CAP_CONF_REJECT, flags), rsp);
70975diff -urNp linux-2.6.32.45/net/bluetooth/rfcomm/sock.c linux-2.6.32.45/net/bluetooth/rfcomm/sock.c
70976--- linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
70977+++ linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
70978@@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
70979
70980 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
70981
70982+ memset(&cinfo, 0, sizeof(cinfo));
70983 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
70984 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
70985
70986diff -urNp linux-2.6.32.45/net/bridge/br_private.h linux-2.6.32.45/net/bridge/br_private.h
70987--- linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:35:30.000000000 -0400
70988+++ linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:34:01.000000000 -0400
70989@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event,
70990
70991 #ifdef CONFIG_SYSFS
70992 /* br_sysfs_if.c */
70993-extern struct sysfs_ops brport_sysfs_ops;
70994+extern const struct sysfs_ops brport_sysfs_ops;
70995 extern int br_sysfs_addif(struct net_bridge_port *p);
70996
70997 /* br_sysfs_br.c */
70998diff -urNp linux-2.6.32.45/net/bridge/br_stp_if.c linux-2.6.32.45/net/bridge/br_stp_if.c
70999--- linux-2.6.32.45/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
71000+++ linux-2.6.32.45/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
71001@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
71002 char *envp[] = { NULL };
71003
71004 if (br->stp_enabled == BR_USER_STP) {
71005- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
71006+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
71007 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
71008 br->dev->name, r);
71009
71010diff -urNp linux-2.6.32.45/net/bridge/br_sysfs_if.c linux-2.6.32.45/net/bridge/br_sysfs_if.c
71011--- linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
71012+++ linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
71013@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
71014 return ret;
71015 }
71016
71017-struct sysfs_ops brport_sysfs_ops = {
71018+const struct sysfs_ops brport_sysfs_ops = {
71019 .show = brport_show,
71020 .store = brport_store,
71021 };
71022diff -urNp linux-2.6.32.45/net/bridge/netfilter/ebtables.c linux-2.6.32.45/net/bridge/netfilter/ebtables.c
71023--- linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
71024+++ linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
71025@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
71026 unsigned int entries_size, nentries;
71027 char *entries;
71028
71029+ pax_track_stack();
71030+
71031 if (cmd == EBT_SO_GET_ENTRIES) {
71032 entries_size = t->private->entries_size;
71033 nentries = t->private->nentries;
71034diff -urNp linux-2.6.32.45/net/can/bcm.c linux-2.6.32.45/net/can/bcm.c
71035--- linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
71036+++ linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
71037@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
71038 struct bcm_sock *bo = bcm_sk(sk);
71039 struct bcm_op *op;
71040
71041+#ifdef CONFIG_GRKERNSEC_HIDESYM
71042+ seq_printf(m, ">>> socket %p", NULL);
71043+ seq_printf(m, " / sk %p", NULL);
71044+ seq_printf(m, " / bo %p", NULL);
71045+#else
71046 seq_printf(m, ">>> socket %p", sk->sk_socket);
71047 seq_printf(m, " / sk %p", sk);
71048 seq_printf(m, " / bo %p", bo);
71049+#endif
71050 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
71051 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
71052 seq_printf(m, " <<<\n");
71053diff -urNp linux-2.6.32.45/net/core/dev.c linux-2.6.32.45/net/core/dev.c
71054--- linux-2.6.32.45/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
71055+++ linux-2.6.32.45/net/core/dev.c 2011-08-05 20:33:55.000000000 -0400
71056@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
71057 if (no_module && capable(CAP_NET_ADMIN))
71058 no_module = request_module("netdev-%s", name);
71059 if (no_module && capable(CAP_SYS_MODULE)) {
71060+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71061+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
71062+#else
71063 if (!request_module("%s", name))
71064 pr_err("Loading kernel module for a network device "
71065 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71066 "instead\n", name);
71067+#endif
71068 }
71069 }
71070 EXPORT_SYMBOL(dev_load);
71071@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct
71072
71073 struct dev_gso_cb {
71074 void (*destructor)(struct sk_buff *skb);
71075-};
71076+} __no_const;
71077
71078 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71079
71080@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
71081 }
71082 EXPORT_SYMBOL(netif_rx_ni);
71083
71084-static void net_tx_action(struct softirq_action *h)
71085+static void net_tx_action(void)
71086 {
71087 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71088
71089@@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
71090 EXPORT_SYMBOL(netif_napi_del);
71091
71092
71093-static void net_rx_action(struct softirq_action *h)
71094+static void net_rx_action(void)
71095 {
71096 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
71097 unsigned long time_limit = jiffies + 2;
71098diff -urNp linux-2.6.32.45/net/core/flow.c linux-2.6.32.45/net/core/flow.c
71099--- linux-2.6.32.45/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
71100+++ linux-2.6.32.45/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
71101@@ -35,11 +35,11 @@ struct flow_cache_entry {
71102 atomic_t *object_ref;
71103 };
71104
71105-atomic_t flow_cache_genid = ATOMIC_INIT(0);
71106+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71107
71108 static u32 flow_hash_shift;
71109 #define flow_hash_size (1 << flow_hash_shift)
71110-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
71111+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
71112
71113 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
71114
71115@@ -52,7 +52,7 @@ struct flow_percpu_info {
71116 u32 hash_rnd;
71117 int count;
71118 };
71119-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
71120+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
71121
71122 #define flow_hash_rnd_recalc(cpu) \
71123 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
71124@@ -69,7 +69,7 @@ struct flow_flush_info {
71125 atomic_t cpuleft;
71126 struct completion completion;
71127 };
71128-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
71129+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
71130
71131 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
71132
71133@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
71134 if (fle->family == family &&
71135 fle->dir == dir &&
71136 flow_key_compare(key, &fle->key) == 0) {
71137- if (fle->genid == atomic_read(&flow_cache_genid)) {
71138+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
71139 void *ret = fle->object;
71140
71141 if (ret)
71142@@ -228,7 +228,7 @@ nocache:
71143 err = resolver(net, key, family, dir, &obj, &obj_ref);
71144
71145 if (fle && !err) {
71146- fle->genid = atomic_read(&flow_cache_genid);
71147+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
71148
71149 if (fle->object)
71150 atomic_dec(fle->object_ref);
71151@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
71152
71153 fle = flow_table(cpu)[i];
71154 for (; fle; fle = fle->next) {
71155- unsigned genid = atomic_read(&flow_cache_genid);
71156+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
71157
71158 if (!fle->object || fle->genid == genid)
71159 continue;
71160diff -urNp linux-2.6.32.45/net/core/rtnetlink.c linux-2.6.32.45/net/core/rtnetlink.c
71161--- linux-2.6.32.45/net/core/rtnetlink.c 2011-03-27 14:31:47.000000000 -0400
71162+++ linux-2.6.32.45/net/core/rtnetlink.c 2011-08-05 20:33:55.000000000 -0400
71163@@ -57,7 +57,7 @@ struct rtnl_link
71164 {
71165 rtnl_doit_func doit;
71166 rtnl_dumpit_func dumpit;
71167-};
71168+} __no_const;
71169
71170 static DEFINE_MUTEX(rtnl_mutex);
71171
71172diff -urNp linux-2.6.32.45/net/core/secure_seq.c linux-2.6.32.45/net/core/secure_seq.c
71173--- linux-2.6.32.45/net/core/secure_seq.c 2011-08-16 20:37:25.000000000 -0400
71174+++ linux-2.6.32.45/net/core/secure_seq.c 2011-08-07 19:48:09.000000000 -0400
71175@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be3
71176 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
71177
71178 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
71179- __be16 dport)
71180+ __be16 dport)
71181 {
71182 u32 secret[MD5_MESSAGE_BYTES / 4];
71183 u32 hash[MD5_DIGEST_WORDS];
71184@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __b
71185 secret[i] = net_secret[i];
71186
71187 md5_transform(hash, secret);
71188-
71189 return hash[0];
71190 }
71191 #endif
71192diff -urNp linux-2.6.32.45/net/core/skbuff.c linux-2.6.32.45/net/core/skbuff.c
71193--- linux-2.6.32.45/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
71194+++ linux-2.6.32.45/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
71195@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
71196 struct sk_buff *frag_iter;
71197 struct sock *sk = skb->sk;
71198
71199+ pax_track_stack();
71200+
71201 /*
71202 * __skb_splice_bits() only fails if the output has no room left,
71203 * so no point in going over the frag_list for the error case.
71204diff -urNp linux-2.6.32.45/net/core/sock.c linux-2.6.32.45/net/core/sock.c
71205--- linux-2.6.32.45/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
71206+++ linux-2.6.32.45/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
71207@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
71208 break;
71209
71210 case SO_PEERCRED:
71211+ {
71212+ struct ucred peercred;
71213 if (len > sizeof(sk->sk_peercred))
71214 len = sizeof(sk->sk_peercred);
71215- if (copy_to_user(optval, &sk->sk_peercred, len))
71216+ peercred = sk->sk_peercred;
71217+ if (copy_to_user(optval, &peercred, len))
71218 return -EFAULT;
71219 goto lenout;
71220+ }
71221
71222 case SO_PEERNAME:
71223 {
71224@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
71225 */
71226 smp_wmb();
71227 atomic_set(&sk->sk_refcnt, 1);
71228- atomic_set(&sk->sk_drops, 0);
71229+ atomic_set_unchecked(&sk->sk_drops, 0);
71230 }
71231 EXPORT_SYMBOL(sock_init_data);
71232
71233diff -urNp linux-2.6.32.45/net/decnet/sysctl_net_decnet.c linux-2.6.32.45/net/decnet/sysctl_net_decnet.c
71234--- linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
71235+++ linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
71236@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
71237
71238 if (len > *lenp) len = *lenp;
71239
71240- if (copy_to_user(buffer, addr, len))
71241+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
71242 return -EFAULT;
71243
71244 *lenp = len;
71245@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
71246
71247 if (len > *lenp) len = *lenp;
71248
71249- if (copy_to_user(buffer, devname, len))
71250+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
71251 return -EFAULT;
71252
71253 *lenp = len;
71254diff -urNp linux-2.6.32.45/net/econet/Kconfig linux-2.6.32.45/net/econet/Kconfig
71255--- linux-2.6.32.45/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
71256+++ linux-2.6.32.45/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
71257@@ -4,7 +4,7 @@
71258
71259 config ECONET
71260 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71261- depends on EXPERIMENTAL && INET
71262+ depends on EXPERIMENTAL && INET && BROKEN
71263 ---help---
71264 Econet is a fairly old and slow networking protocol mainly used by
71265 Acorn computers to access file and print servers. It uses native
71266diff -urNp linux-2.6.32.45/net/ieee802154/dgram.c linux-2.6.32.45/net/ieee802154/dgram.c
71267--- linux-2.6.32.45/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
71268+++ linux-2.6.32.45/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
71269@@ -318,7 +318,7 @@ out:
71270 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
71271 {
71272 if (sock_queue_rcv_skb(sk, skb) < 0) {
71273- atomic_inc(&sk->sk_drops);
71274+ atomic_inc_unchecked(&sk->sk_drops);
71275 kfree_skb(skb);
71276 return NET_RX_DROP;
71277 }
71278diff -urNp linux-2.6.32.45/net/ieee802154/raw.c linux-2.6.32.45/net/ieee802154/raw.c
71279--- linux-2.6.32.45/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
71280+++ linux-2.6.32.45/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
71281@@ -206,7 +206,7 @@ out:
71282 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
71283 {
71284 if (sock_queue_rcv_skb(sk, skb) < 0) {
71285- atomic_inc(&sk->sk_drops);
71286+ atomic_inc_unchecked(&sk->sk_drops);
71287 kfree_skb(skb);
71288 return NET_RX_DROP;
71289 }
71290diff -urNp linux-2.6.32.45/net/ipv4/inet_diag.c linux-2.6.32.45/net/ipv4/inet_diag.c
71291--- linux-2.6.32.45/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
71292+++ linux-2.6.32.45/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
71293@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
71294 r->idiag_retrans = 0;
71295
71296 r->id.idiag_if = sk->sk_bound_dev_if;
71297+#ifdef CONFIG_GRKERNSEC_HIDESYM
71298+ r->id.idiag_cookie[0] = 0;
71299+ r->id.idiag_cookie[1] = 0;
71300+#else
71301 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71302 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71303+#endif
71304
71305 r->id.idiag_sport = inet->sport;
71306 r->id.idiag_dport = inet->dport;
71307@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
71308 r->idiag_family = tw->tw_family;
71309 r->idiag_retrans = 0;
71310 r->id.idiag_if = tw->tw_bound_dev_if;
71311+
71312+#ifdef CONFIG_GRKERNSEC_HIDESYM
71313+ r->id.idiag_cookie[0] = 0;
71314+ r->id.idiag_cookie[1] = 0;
71315+#else
71316 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71317 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71318+#endif
71319+
71320 r->id.idiag_sport = tw->tw_sport;
71321 r->id.idiag_dport = tw->tw_dport;
71322 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71323@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
71324 if (sk == NULL)
71325 goto unlock;
71326
71327+#ifndef CONFIG_GRKERNSEC_HIDESYM
71328 err = -ESTALE;
71329 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71330 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71331 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71332 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71333 goto out;
71334+#endif
71335
71336 err = -ENOMEM;
71337 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71338@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
71339 r->idiag_retrans = req->retrans;
71340
71341 r->id.idiag_if = sk->sk_bound_dev_if;
71342+
71343+#ifdef CONFIG_GRKERNSEC_HIDESYM
71344+ r->id.idiag_cookie[0] = 0;
71345+ r->id.idiag_cookie[1] = 0;
71346+#else
71347 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71348 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71349+#endif
71350
71351 tmo = req->expires - jiffies;
71352 if (tmo < 0)
71353diff -urNp linux-2.6.32.45/net/ipv4/inet_hashtables.c linux-2.6.32.45/net/ipv4/inet_hashtables.c
71354--- linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71355+++ linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:42:30.000000000 -0400
71356@@ -18,12 +18,15 @@
71357 #include <linux/sched.h>
71358 #include <linux/slab.h>
71359 #include <linux/wait.h>
71360+#include <linux/security.h>
71361
71362 #include <net/inet_connection_sock.h>
71363 #include <net/inet_hashtables.h>
71364 #include <net/secure_seq.h>
71365 #include <net/ip.h>
71366
71367+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
71368+
71369 /*
71370 * Allocate and initialize a new local port bind bucket.
71371 * The bindhash mutex for snum's hash chain must be held here.
71372@@ -491,6 +494,8 @@ ok:
71373 }
71374 spin_unlock(&head->lock);
71375
71376+ gr_update_task_in_ip_table(current, inet_sk(sk));
71377+
71378 if (tw) {
71379 inet_twsk_deschedule(tw, death_row);
71380 inet_twsk_put(tw);
71381diff -urNp linux-2.6.32.45/net/ipv4/inetpeer.c linux-2.6.32.45/net/ipv4/inetpeer.c
71382--- linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-16 20:37:25.000000000 -0400
71383+++ linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-07 19:48:09.000000000 -0400
71384@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 da
71385 struct inet_peer *p, *n;
71386 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
71387
71388+ pax_track_stack();
71389+
71390 /* Look up for the address quickly. */
71391 read_lock_bh(&peer_pool_lock);
71392 p = lookup(daddr, NULL);
71393@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 da
71394 return NULL;
71395 n->v4daddr = daddr;
71396 atomic_set(&n->refcnt, 1);
71397- atomic_set(&n->rid, 0);
71398+ atomic_set_unchecked(&n->rid, 0);
71399 n->ip_id_count = secure_ip_id(daddr);
71400 n->tcp_ts_stamp = 0;
71401
71402diff -urNp linux-2.6.32.45/net/ipv4/ip_fragment.c linux-2.6.32.45/net/ipv4/ip_fragment.c
71403--- linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
71404+++ linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
71405@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
71406 return 0;
71407
71408 start = qp->rid;
71409- end = atomic_inc_return(&peer->rid);
71410+ end = atomic_inc_return_unchecked(&peer->rid);
71411 qp->rid = end;
71412
71413 rc = qp->q.fragments && (end - start) > max;
71414diff -urNp linux-2.6.32.45/net/ipv4/ip_sockglue.c linux-2.6.32.45/net/ipv4/ip_sockglue.c
71415--- linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71416+++ linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71417@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
71418 int val;
71419 int len;
71420
71421+ pax_track_stack();
71422+
71423 if (level != SOL_IP)
71424 return -EOPNOTSUPP;
71425
71426diff -urNp linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c
71427--- linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
71428+++ linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
71429@@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
71430 private = &tmp;
71431 }
71432 #endif
71433+ memset(&info, 0, sizeof(info));
71434 info.valid_hooks = t->valid_hooks;
71435 memcpy(info.hook_entry, private->hook_entry,
71436 sizeof(info.hook_entry));
71437diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c
71438--- linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c 2011-03-27 14:31:47.000000000 -0400
71439+++ linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c 2011-08-21 18:42:53.000000000 -0400
71440@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, st
71441
71442 if (v->data_len < sizeof(*user_iph))
71443 return 0;
71444+ if (v->data_len > 65535)
71445+ return -EMSGSIZE;
71446+
71447 diff = v->data_len - e->skb->len;
71448 if (diff < 0) {
71449 if (pskb_trim(e->skb, v->data_len))
71450@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
71451 static inline void
71452 __ipq_rcv_skb(struct sk_buff *skb)
71453 {
71454- int status, type, pid, flags, nlmsglen, skblen;
71455+ int status, type, pid, flags;
71456+ unsigned int nlmsglen, skblen;
71457 struct nlmsghdr *nlh;
71458
71459 skblen = skb->len;
71460diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c
71461--- linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
71462+++ linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
71463@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
71464 private = &tmp;
71465 }
71466 #endif
71467+ memset(&info, 0, sizeof(info));
71468 info.valid_hooks = t->valid_hooks;
71469 memcpy(info.hook_entry, private->hook_entry,
71470 sizeof(info.hook_entry));
71471diff -urNp linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c
71472--- linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
71473+++ linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
71474@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
71475
71476 *len = 0;
71477
71478- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
71479+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
71480 if (*octets == NULL) {
71481 if (net_ratelimit())
71482 printk("OOM in bsalg (%d)\n", __LINE__);
71483diff -urNp linux-2.6.32.45/net/ipv4/raw.c linux-2.6.32.45/net/ipv4/raw.c
71484--- linux-2.6.32.45/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
71485+++ linux-2.6.32.45/net/ipv4/raw.c 2011-08-14 11:46:51.000000000 -0400
71486@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
71487 /* Charge it to the socket. */
71488
71489 if (sock_queue_rcv_skb(sk, skb) < 0) {
71490- atomic_inc(&sk->sk_drops);
71491+ atomic_inc_unchecked(&sk->sk_drops);
71492 kfree_skb(skb);
71493 return NET_RX_DROP;
71494 }
71495@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
71496 int raw_rcv(struct sock *sk, struct sk_buff *skb)
71497 {
71498 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
71499- atomic_inc(&sk->sk_drops);
71500+ atomic_inc_unchecked(&sk->sk_drops);
71501 kfree_skb(skb);
71502 return NET_RX_DROP;
71503 }
71504@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
71505
71506 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
71507 {
71508+ struct icmp_filter filter;
71509+
71510+ if (optlen < 0)
71511+ return -EINVAL;
71512 if (optlen > sizeof(struct icmp_filter))
71513 optlen = sizeof(struct icmp_filter);
71514- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
71515+ if (copy_from_user(&filter, optval, optlen))
71516 return -EFAULT;
71517+ raw_sk(sk)->filter = filter;
71518+
71519 return 0;
71520 }
71521
71522 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
71523 {
71524 int len, ret = -EFAULT;
71525+ struct icmp_filter filter;
71526
71527 if (get_user(len, optlen))
71528 goto out;
71529@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
71530 if (len > sizeof(struct icmp_filter))
71531 len = sizeof(struct icmp_filter);
71532 ret = -EFAULT;
71533- if (put_user(len, optlen) ||
71534- copy_to_user(optval, &raw_sk(sk)->filter, len))
71535+ filter = raw_sk(sk)->filter;
71536+ if (put_user(len, optlen) || len > sizeof filter ||
71537+ copy_to_user(optval, &filter, len))
71538 goto out;
71539 ret = 0;
71540 out: return ret;
71541@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
71542 sk_wmem_alloc_get(sp),
71543 sk_rmem_alloc_get(sp),
71544 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71545- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
71546+ atomic_read(&sp->sk_refcnt),
71547+#ifdef CONFIG_GRKERNSEC_HIDESYM
71548+ NULL,
71549+#else
71550+ sp,
71551+#endif
71552+ atomic_read_unchecked(&sp->sk_drops));
71553 }
71554
71555 static int raw_seq_show(struct seq_file *seq, void *v)
71556diff -urNp linux-2.6.32.45/net/ipv4/route.c linux-2.6.32.45/net/ipv4/route.c
71557--- linux-2.6.32.45/net/ipv4/route.c 2011-08-16 20:37:25.000000000 -0400
71558+++ linux-2.6.32.45/net/ipv4/route.c 2011-08-07 19:48:09.000000000 -0400
71559@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be3
71560
71561 static inline int rt_genid(struct net *net)
71562 {
71563- return atomic_read(&net->ipv4.rt_genid);
71564+ return atomic_read_unchecked(&net->ipv4.rt_genid);
71565 }
71566
71567 #ifdef CONFIG_PROC_FS
71568@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct n
71569 unsigned char shuffle;
71570
71571 get_random_bytes(&shuffle, sizeof(shuffle));
71572- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
71573+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
71574 }
71575
71576 /*
71577@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_oper
71578
71579 static __net_init int rt_secret_timer_init(struct net *net)
71580 {
71581- atomic_set(&net->ipv4.rt_genid,
71582+ atomic_set_unchecked(&net->ipv4.rt_genid,
71583 (int) ((num_physpages ^ (num_physpages>>8)) ^
71584 (jiffies ^ (jiffies >> 7))));
71585
71586diff -urNp linux-2.6.32.45/net/ipv4/tcp.c linux-2.6.32.45/net/ipv4/tcp.c
71587--- linux-2.6.32.45/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
71588+++ linux-2.6.32.45/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
71589@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
71590 int val;
71591 int err = 0;
71592
71593+ pax_track_stack();
71594+
71595 /* This is a string value all the others are int's */
71596 if (optname == TCP_CONGESTION) {
71597 char name[TCP_CA_NAME_MAX];
71598@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
71599 struct tcp_sock *tp = tcp_sk(sk);
71600 int val, len;
71601
71602+ pax_track_stack();
71603+
71604 if (get_user(len, optlen))
71605 return -EFAULT;
71606
71607diff -urNp linux-2.6.32.45/net/ipv4/tcp_ipv4.c linux-2.6.32.45/net/ipv4/tcp_ipv4.c
71608--- linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-16 20:37:25.000000000 -0400
71609+++ linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-23 21:22:32.000000000 -0400
71610@@ -85,6 +85,9 @@
71611 int sysctl_tcp_tw_reuse __read_mostly;
71612 int sysctl_tcp_low_latency __read_mostly;
71613
71614+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71615+extern int grsec_enable_blackhole;
71616+#endif
71617
71618 #ifdef CONFIG_TCP_MD5SIG
71619 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
71620@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
71621 return 0;
71622
71623 reset:
71624+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71625+ if (!grsec_enable_blackhole)
71626+#endif
71627 tcp_v4_send_reset(rsk, skb);
71628 discard:
71629 kfree_skb(skb);
71630@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
71631 TCP_SKB_CB(skb)->sacked = 0;
71632
71633 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
71634- if (!sk)
71635+ if (!sk) {
71636+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71637+ ret = 1;
71638+#endif
71639 goto no_tcp_socket;
71640+ }
71641
71642 process:
71643- if (sk->sk_state == TCP_TIME_WAIT)
71644+ if (sk->sk_state == TCP_TIME_WAIT) {
71645+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71646+ ret = 2;
71647+#endif
71648 goto do_time_wait;
71649+ }
71650
71651 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
71652 goto discard_and_relse;
71653@@ -1651,6 +1665,10 @@ no_tcp_socket:
71654 bad_packet:
71655 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
71656 } else {
71657+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71658+ if (!grsec_enable_blackhole || (ret == 1 &&
71659+ (skb->dev->flags & IFF_LOOPBACK)))
71660+#endif
71661 tcp_v4_send_reset(NULL, skb);
71662 }
71663
71664@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk
71665 0, /* non standard timer */
71666 0, /* open_requests have no inode */
71667 atomic_read(&sk->sk_refcnt),
71668+#ifdef CONFIG_GRKERNSEC_HIDESYM
71669+ NULL,
71670+#else
71671 req,
71672+#endif
71673 len);
71674 }
71675
71676@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *s
71677 sock_i_uid(sk),
71678 icsk->icsk_probes_out,
71679 sock_i_ino(sk),
71680- atomic_read(&sk->sk_refcnt), sk,
71681+ atomic_read(&sk->sk_refcnt),
71682+#ifdef CONFIG_GRKERNSEC_HIDESYM
71683+ NULL,
71684+#else
71685+ sk,
71686+#endif
71687 jiffies_to_clock_t(icsk->icsk_rto),
71688 jiffies_to_clock_t(icsk->icsk_ack.ato),
71689 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
71690@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct in
71691 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
71692 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
71693 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
71694- atomic_read(&tw->tw_refcnt), tw, len);
71695+ atomic_read(&tw->tw_refcnt),
71696+#ifdef CONFIG_GRKERNSEC_HIDESYM
71697+ NULL,
71698+#else
71699+ tw,
71700+#endif
71701+ len);
71702 }
71703
71704 #define TMPSZ 150
71705diff -urNp linux-2.6.32.45/net/ipv4/tcp_minisocks.c linux-2.6.32.45/net/ipv4/tcp_minisocks.c
71706--- linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
71707+++ linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
71708@@ -26,6 +26,10 @@
71709 #include <net/inet_common.h>
71710 #include <net/xfrm.h>
71711
71712+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71713+extern int grsec_enable_blackhole;
71714+#endif
71715+
71716 #ifdef CONFIG_SYSCTL
71717 #define SYNC_INIT 0 /* let the user enable it */
71718 #else
71719@@ -672,6 +676,10 @@ listen_overflow:
71720
71721 embryonic_reset:
71722 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
71723+
71724+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71725+ if (!grsec_enable_blackhole)
71726+#endif
71727 if (!(flg & TCP_FLAG_RST))
71728 req->rsk_ops->send_reset(sk, skb);
71729
71730diff -urNp linux-2.6.32.45/net/ipv4/tcp_output.c linux-2.6.32.45/net/ipv4/tcp_output.c
71731--- linux-2.6.32.45/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
71732+++ linux-2.6.32.45/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
71733@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
71734 __u8 *md5_hash_location;
71735 int mss;
71736
71737+ pax_track_stack();
71738+
71739 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
71740 if (skb == NULL)
71741 return NULL;
71742diff -urNp linux-2.6.32.45/net/ipv4/tcp_probe.c linux-2.6.32.45/net/ipv4/tcp_probe.c
71743--- linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
71744+++ linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
71745@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
71746 if (cnt + width >= len)
71747 break;
71748
71749- if (copy_to_user(buf + cnt, tbuf, width))
71750+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
71751 return -EFAULT;
71752 cnt += width;
71753 }
71754diff -urNp linux-2.6.32.45/net/ipv4/tcp_timer.c linux-2.6.32.45/net/ipv4/tcp_timer.c
71755--- linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
71756+++ linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
71757@@ -21,6 +21,10 @@
71758 #include <linux/module.h>
71759 #include <net/tcp.h>
71760
71761+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71762+extern int grsec_lastack_retries;
71763+#endif
71764+
71765 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
71766 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
71767 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
71768@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
71769 }
71770 }
71771
71772+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71773+ if ((sk->sk_state == TCP_LAST_ACK) &&
71774+ (grsec_lastack_retries > 0) &&
71775+ (grsec_lastack_retries < retry_until))
71776+ retry_until = grsec_lastack_retries;
71777+#endif
71778+
71779 if (retransmits_timed_out(sk, retry_until)) {
71780 /* Has it gone just too far? */
71781 tcp_write_err(sk);
71782diff -urNp linux-2.6.32.45/net/ipv4/udp.c linux-2.6.32.45/net/ipv4/udp.c
71783--- linux-2.6.32.45/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
71784+++ linux-2.6.32.45/net/ipv4/udp.c 2011-08-23 21:22:32.000000000 -0400
71785@@ -86,6 +86,7 @@
71786 #include <linux/types.h>
71787 #include <linux/fcntl.h>
71788 #include <linux/module.h>
71789+#include <linux/security.h>
71790 #include <linux/socket.h>
71791 #include <linux/sockios.h>
71792 #include <linux/igmp.h>
71793@@ -106,6 +107,10 @@
71794 #include <net/xfrm.h>
71795 #include "udp_impl.h"
71796
71797+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71798+extern int grsec_enable_blackhole;
71799+#endif
71800+
71801 struct udp_table udp_table;
71802 EXPORT_SYMBOL(udp_table);
71803
71804@@ -371,6 +376,9 @@ found:
71805 return s;
71806 }
71807
71808+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
71809+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
71810+
71811 /*
71812 * This routine is called by the ICMP module when it gets some
71813 * sort of error condition. If err < 0 then the socket should
71814@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
71815 dport = usin->sin_port;
71816 if (dport == 0)
71817 return -EINVAL;
71818+
71819+ err = gr_search_udp_sendmsg(sk, usin);
71820+ if (err)
71821+ return err;
71822 } else {
71823 if (sk->sk_state != TCP_ESTABLISHED)
71824 return -EDESTADDRREQ;
71825+
71826+ err = gr_search_udp_sendmsg(sk, NULL);
71827+ if (err)
71828+ return err;
71829+
71830 daddr = inet->daddr;
71831 dport = inet->dport;
71832 /* Open fast path for connected socket.
71833@@ -945,6 +962,10 @@ try_again:
71834 if (!skb)
71835 goto out;
71836
71837+ err = gr_search_udp_recvmsg(sk, skb);
71838+ if (err)
71839+ goto out_free;
71840+
71841 ulen = skb->len - sizeof(struct udphdr);
71842 copied = len;
71843 if (copied > ulen)
71844@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
71845 if (rc == -ENOMEM) {
71846 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
71847 is_udplite);
71848- atomic_inc(&sk->sk_drops);
71849+ atomic_inc_unchecked(&sk->sk_drops);
71850 }
71851 goto drop;
71852 }
71853@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
71854 goto csum_error;
71855
71856 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
71857+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71858+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
71859+#endif
71860 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
71861
71862 /*
71863@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
71864 sk_wmem_alloc_get(sp),
71865 sk_rmem_alloc_get(sp),
71866 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71867- atomic_read(&sp->sk_refcnt), sp,
71868- atomic_read(&sp->sk_drops), len);
71869+ atomic_read(&sp->sk_refcnt),
71870+#ifdef CONFIG_GRKERNSEC_HIDESYM
71871+ NULL,
71872+#else
71873+ sp,
71874+#endif
71875+ atomic_read_unchecked(&sp->sk_drops), len);
71876 }
71877
71878 int udp4_seq_show(struct seq_file *seq, void *v)
71879diff -urNp linux-2.6.32.45/net/ipv6/inet6_connection_sock.c linux-2.6.32.45/net/ipv6/inet6_connection_sock.c
71880--- linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
71881+++ linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
71882@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
71883 #ifdef CONFIG_XFRM
71884 {
71885 struct rt6_info *rt = (struct rt6_info *)dst;
71886- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
71887+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
71888 }
71889 #endif
71890 }
71891@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
71892 #ifdef CONFIG_XFRM
71893 if (dst) {
71894 struct rt6_info *rt = (struct rt6_info *)dst;
71895- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
71896+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
71897 sk->sk_dst_cache = NULL;
71898 dst_release(dst);
71899 dst = NULL;
71900diff -urNp linux-2.6.32.45/net/ipv6/inet6_hashtables.c linux-2.6.32.45/net/ipv6/inet6_hashtables.c
71901--- linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71902+++ linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-07 19:48:09.000000000 -0400
71903@@ -119,7 +119,7 @@ out:
71904 }
71905 EXPORT_SYMBOL(__inet6_lookup_established);
71906
71907-static int inline compute_score(struct sock *sk, struct net *net,
71908+static inline int compute_score(struct sock *sk, struct net *net,
71909 const unsigned short hnum,
71910 const struct in6_addr *daddr,
71911 const int dif)
71912diff -urNp linux-2.6.32.45/net/ipv6/ip6_tunnel.c linux-2.6.32.45/net/ipv6/ip6_tunnel.c
71913--- linux-2.6.32.45/net/ipv6/ip6_tunnel.c 2011-08-09 18:35:30.000000000 -0400
71914+++ linux-2.6.32.45/net/ipv6/ip6_tunnel.c 2011-08-24 18:52:25.000000000 -0400
71915@@ -1466,7 +1466,7 @@ static int __init ip6_tunnel_init(void)
71916 {
71917 int err;
71918
71919- err = register_pernet_device(&ip6_tnl_net_ops);
71920+ err = register_pernet_gen_device(&ip6_tnl_net_id, &ip6_tnl_net_ops);
71921 if (err < 0)
71922 goto out_pernet;
71923
71924@@ -1487,7 +1487,7 @@ static int __init ip6_tunnel_init(void)
71925 out_ip6ip6:
71926 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
71927 out_ip4ip6:
71928- unregister_pernet_device(&ip6_tnl_net_ops);
71929+ unregister_pernet_gen_device(ip6_tnl_net_id, &ip6_tnl_net_ops);
71930 out_pernet:
71931 return err;
71932 }
71933diff -urNp linux-2.6.32.45/net/ipv6/ipv6_sockglue.c linux-2.6.32.45/net/ipv6/ipv6_sockglue.c
71934--- linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71935+++ linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71936@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
71937 int val, valbool;
71938 int retv = -ENOPROTOOPT;
71939
71940+ pax_track_stack();
71941+
71942 if (optval == NULL)
71943 val=0;
71944 else {
71945@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
71946 int len;
71947 int val;
71948
71949+ pax_track_stack();
71950+
71951 if (ip6_mroute_opt(optname))
71952 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
71953
71954diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c
71955--- linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c 2011-03-27 14:31:47.000000000 -0400
71956+++ linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c 2011-08-21 18:43:32.000000000 -0400
71957@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, st
71958
71959 if (v->data_len < sizeof(*user_iph))
71960 return 0;
71961+ if (v->data_len > 65535)
71962+ return -EMSGSIZE;
71963+
71964 diff = v->data_len - e->skb->len;
71965 if (diff < 0) {
71966 if (pskb_trim(e->skb, v->data_len))
71967@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
71968 static inline void
71969 __ipq_rcv_skb(struct sk_buff *skb)
71970 {
71971- int status, type, pid, flags, nlmsglen, skblen;
71972+ int status, type, pid, flags;
71973+ unsigned int nlmsglen, skblen;
71974 struct nlmsghdr *nlh;
71975
71976 skblen = skb->len;
71977diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c
71978--- linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
71979+++ linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
71980@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
71981 private = &tmp;
71982 }
71983 #endif
71984+ memset(&info, 0, sizeof(info));
71985 info.valid_hooks = t->valid_hooks;
71986 memcpy(info.hook_entry, private->hook_entry,
71987 sizeof(info.hook_entry));
71988diff -urNp linux-2.6.32.45/net/ipv6/raw.c linux-2.6.32.45/net/ipv6/raw.c
71989--- linux-2.6.32.45/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
71990+++ linux-2.6.32.45/net/ipv6/raw.c 2011-08-14 11:48:20.000000000 -0400
71991@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
71992 {
71993 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
71994 skb_checksum_complete(skb)) {
71995- atomic_inc(&sk->sk_drops);
71996+ atomic_inc_unchecked(&sk->sk_drops);
71997 kfree_skb(skb);
71998 return NET_RX_DROP;
71999 }
72000
72001 /* Charge it to the socket. */
72002 if (sock_queue_rcv_skb(sk,skb)<0) {
72003- atomic_inc(&sk->sk_drops);
72004+ atomic_inc_unchecked(&sk->sk_drops);
72005 kfree_skb(skb);
72006 return NET_RX_DROP;
72007 }
72008@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72009 struct raw6_sock *rp = raw6_sk(sk);
72010
72011 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72012- atomic_inc(&sk->sk_drops);
72013+ atomic_inc_unchecked(&sk->sk_drops);
72014 kfree_skb(skb);
72015 return NET_RX_DROP;
72016 }
72017@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72018
72019 if (inet->hdrincl) {
72020 if (skb_checksum_complete(skb)) {
72021- atomic_inc(&sk->sk_drops);
72022+ atomic_inc_unchecked(&sk->sk_drops);
72023 kfree_skb(skb);
72024 return NET_RX_DROP;
72025 }
72026@@ -518,7 +518,7 @@ csum_copy_err:
72027 as some normal condition.
72028 */
72029 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
72030- atomic_inc(&sk->sk_drops);
72031+ atomic_inc_unchecked(&sk->sk_drops);
72032 goto out;
72033 }
72034
72035@@ -600,7 +600,7 @@ out:
72036 return err;
72037 }
72038
72039-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72040+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72041 struct flowi *fl, struct rt6_info *rt,
72042 unsigned int flags)
72043 {
72044@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
72045 u16 proto;
72046 int err;
72047
72048+ pax_track_stack();
72049+
72050 /* Rough check on arithmetic overflow,
72051 better check is made in ip6_append_data().
72052 */
72053@@ -916,12 +918,17 @@ do_confirm:
72054 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72055 char __user *optval, int optlen)
72056 {
72057+ struct icmp6_filter filter;
72058+
72059 switch (optname) {
72060 case ICMPV6_FILTER:
72061+ if (optlen < 0)
72062+ return -EINVAL;
72063 if (optlen > sizeof(struct icmp6_filter))
72064 optlen = sizeof(struct icmp6_filter);
72065- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72066+ if (copy_from_user(&filter, optval, optlen))
72067 return -EFAULT;
72068+ raw6_sk(sk)->filter = filter;
72069 return 0;
72070 default:
72071 return -ENOPROTOOPT;
72072@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct so
72073 char __user *optval, int __user *optlen)
72074 {
72075 int len;
72076+ struct icmp6_filter filter;
72077
72078 switch (optname) {
72079 case ICMPV6_FILTER:
72080@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
72081 len = sizeof(struct icmp6_filter);
72082 if (put_user(len, optlen))
72083 return -EFAULT;
72084- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72085+ filter = raw6_sk(sk)->filter;
72086+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
72087 return -EFAULT;
72088 return 0;
72089 default:
72090@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
72091 0, 0L, 0,
72092 sock_i_uid(sp), 0,
72093 sock_i_ino(sp),
72094- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72095+ atomic_read(&sp->sk_refcnt),
72096+#ifdef CONFIG_GRKERNSEC_HIDESYM
72097+ NULL,
72098+#else
72099+ sp,
72100+#endif
72101+ atomic_read_unchecked(&sp->sk_drops));
72102 }
72103
72104 static int raw6_seq_show(struct seq_file *seq, void *v)
72105diff -urNp linux-2.6.32.45/net/ipv6/tcp_ipv6.c linux-2.6.32.45/net/ipv6/tcp_ipv6.c
72106--- linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-16 20:37:25.000000000 -0400
72107+++ linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-07 19:48:09.000000000 -0400
72108@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72109 }
72110 #endif
72111
72112+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72113+extern int grsec_enable_blackhole;
72114+#endif
72115+
72116 static void tcp_v6_hash(struct sock *sk)
72117 {
72118 if (sk->sk_state != TCP_CLOSE) {
72119@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72120 return 0;
72121
72122 reset:
72123+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72124+ if (!grsec_enable_blackhole)
72125+#endif
72126 tcp_v6_send_reset(sk, skb);
72127 discard:
72128 if (opt_skb)
72129@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72130 TCP_SKB_CB(skb)->sacked = 0;
72131
72132 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72133- if (!sk)
72134+ if (!sk) {
72135+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72136+ ret = 1;
72137+#endif
72138 goto no_tcp_socket;
72139+ }
72140
72141 process:
72142- if (sk->sk_state == TCP_TIME_WAIT)
72143+ if (sk->sk_state == TCP_TIME_WAIT) {
72144+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72145+ ret = 2;
72146+#endif
72147 goto do_time_wait;
72148+ }
72149
72150 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
72151 goto discard_and_relse;
72152@@ -1701,6 +1716,10 @@ no_tcp_socket:
72153 bad_packet:
72154 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72155 } else {
72156+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72157+ if (!grsec_enable_blackhole || (ret == 1 &&
72158+ (skb->dev->flags & IFF_LOOPBACK)))
72159+#endif
72160 tcp_v6_send_reset(NULL, skb);
72161 }
72162
72163@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file
72164 uid,
72165 0, /* non standard timer */
72166 0, /* open_requests have no inode */
72167- 0, req);
72168+ 0,
72169+#ifdef CONFIG_GRKERNSEC_HIDESYM
72170+ NULL
72171+#else
72172+ req
72173+#endif
72174+ );
72175 }
72176
72177 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72178@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_fil
72179 sock_i_uid(sp),
72180 icsk->icsk_probes_out,
72181 sock_i_ino(sp),
72182- atomic_read(&sp->sk_refcnt), sp,
72183+ atomic_read(&sp->sk_refcnt),
72184+#ifdef CONFIG_GRKERNSEC_HIDESYM
72185+ NULL,
72186+#else
72187+ sp,
72188+#endif
72189 jiffies_to_clock_t(icsk->icsk_rto),
72190 jiffies_to_clock_t(icsk->icsk_ack.ato),
72191 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72192@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct se
72193 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72194 tw->tw_substate, 0, 0,
72195 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72196- atomic_read(&tw->tw_refcnt), tw);
72197+ atomic_read(&tw->tw_refcnt),
72198+#ifdef CONFIG_GRKERNSEC_HIDESYM
72199+ NULL
72200+#else
72201+ tw
72202+#endif
72203+ );
72204 }
72205
72206 static int tcp6_seq_show(struct seq_file *seq, void *v)
72207diff -urNp linux-2.6.32.45/net/ipv6/udp.c linux-2.6.32.45/net/ipv6/udp.c
72208--- linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
72209+++ linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
72210@@ -49,6 +49,10 @@
72211 #include <linux/seq_file.h>
72212 #include "udp_impl.h"
72213
72214+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72215+extern int grsec_enable_blackhole;
72216+#endif
72217+
72218 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72219 {
72220 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72221@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72222 if (rc == -ENOMEM) {
72223 UDP6_INC_STATS_BH(sock_net(sk),
72224 UDP_MIB_RCVBUFERRORS, is_udplite);
72225- atomic_inc(&sk->sk_drops);
72226+ atomic_inc_unchecked(&sk->sk_drops);
72227 }
72228 goto drop;
72229 }
72230@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72231 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72232 proto == IPPROTO_UDPLITE);
72233
72234+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72235+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72236+#endif
72237 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
72238
72239 kfree_skb(skb);
72240@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
72241 0, 0L, 0,
72242 sock_i_uid(sp), 0,
72243 sock_i_ino(sp),
72244- atomic_read(&sp->sk_refcnt), sp,
72245- atomic_read(&sp->sk_drops));
72246+ atomic_read(&sp->sk_refcnt),
72247+#ifdef CONFIG_GRKERNSEC_HIDESYM
72248+ NULL,
72249+#else
72250+ sp,
72251+#endif
72252+ atomic_read_unchecked(&sp->sk_drops));
72253 }
72254
72255 int udp6_seq_show(struct seq_file *seq, void *v)
72256diff -urNp linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c
72257--- linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
72258+++ linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
72259@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
72260 add_wait_queue(&self->open_wait, &wait);
72261
72262 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72263- __FILE__,__LINE__, tty->driver->name, self->open_count );
72264+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72265
72266 /* As far as I can see, we protect open_count - Jean II */
72267 spin_lock_irqsave(&self->spinlock, flags);
72268 if (!tty_hung_up_p(filp)) {
72269 extra_count = 1;
72270- self->open_count--;
72271+ local_dec(&self->open_count);
72272 }
72273 spin_unlock_irqrestore(&self->spinlock, flags);
72274- self->blocked_open++;
72275+ local_inc(&self->blocked_open);
72276
72277 while (1) {
72278 if (tty->termios->c_cflag & CBAUD) {
72279@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
72280 }
72281
72282 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72283- __FILE__,__LINE__, tty->driver->name, self->open_count );
72284+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72285
72286 schedule();
72287 }
72288@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
72289 if (extra_count) {
72290 /* ++ is not atomic, so this should be protected - Jean II */
72291 spin_lock_irqsave(&self->spinlock, flags);
72292- self->open_count++;
72293+ local_inc(&self->open_count);
72294 spin_unlock_irqrestore(&self->spinlock, flags);
72295 }
72296- self->blocked_open--;
72297+ local_dec(&self->blocked_open);
72298
72299 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72300- __FILE__,__LINE__, tty->driver->name, self->open_count);
72301+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72302
72303 if (!retval)
72304 self->flags |= ASYNC_NORMAL_ACTIVE;
72305@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
72306 }
72307 /* ++ is not atomic, so this should be protected - Jean II */
72308 spin_lock_irqsave(&self->spinlock, flags);
72309- self->open_count++;
72310+ local_inc(&self->open_count);
72311
72312 tty->driver_data = self;
72313 self->tty = tty;
72314 spin_unlock_irqrestore(&self->spinlock, flags);
72315
72316 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72317- self->line, self->open_count);
72318+ self->line, local_read(&self->open_count));
72319
72320 /* Not really used by us, but lets do it anyway */
72321 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72322@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
72323 return;
72324 }
72325
72326- if ((tty->count == 1) && (self->open_count != 1)) {
72327+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72328 /*
72329 * Uh, oh. tty->count is 1, which means that the tty
72330 * structure will be freed. state->count should always
72331@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
72332 */
72333 IRDA_DEBUG(0, "%s(), bad serial port count; "
72334 "tty->count is 1, state->count is %d\n", __func__ ,
72335- self->open_count);
72336- self->open_count = 1;
72337+ local_read(&self->open_count));
72338+ local_set(&self->open_count, 1);
72339 }
72340
72341- if (--self->open_count < 0) {
72342+ if (local_dec_return(&self->open_count) < 0) {
72343 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72344- __func__, self->line, self->open_count);
72345- self->open_count = 0;
72346+ __func__, self->line, local_read(&self->open_count));
72347+ local_set(&self->open_count, 0);
72348 }
72349- if (self->open_count) {
72350+ if (local_read(&self->open_count)) {
72351 spin_unlock_irqrestore(&self->spinlock, flags);
72352
72353 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72354@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
72355 tty->closing = 0;
72356 self->tty = NULL;
72357
72358- if (self->blocked_open) {
72359+ if (local_read(&self->blocked_open)) {
72360 if (self->close_delay)
72361 schedule_timeout_interruptible(self->close_delay);
72362 wake_up_interruptible(&self->open_wait);
72363@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
72364 spin_lock_irqsave(&self->spinlock, flags);
72365 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72366 self->tty = NULL;
72367- self->open_count = 0;
72368+ local_set(&self->open_count, 0);
72369 spin_unlock_irqrestore(&self->spinlock, flags);
72370
72371 wake_up_interruptible(&self->open_wait);
72372@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
72373 seq_putc(m, '\n');
72374
72375 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72376- seq_printf(m, "Open count: %d\n", self->open_count);
72377+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72378 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72379 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72380
72381diff -urNp linux-2.6.32.45/net/iucv/af_iucv.c linux-2.6.32.45/net/iucv/af_iucv.c
72382--- linux-2.6.32.45/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
72383+++ linux-2.6.32.45/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
72384@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
72385
72386 write_lock_bh(&iucv_sk_list.lock);
72387
72388- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
72389+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72390 while (__iucv_get_sock_by_name(name)) {
72391 sprintf(name, "%08x",
72392- atomic_inc_return(&iucv_sk_list.autobind_name));
72393+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72394 }
72395
72396 write_unlock_bh(&iucv_sk_list.lock);
72397diff -urNp linux-2.6.32.45/net/key/af_key.c linux-2.6.32.45/net/key/af_key.c
72398--- linux-2.6.32.45/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
72399+++ linux-2.6.32.45/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
72400@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
72401 struct xfrm_migrate m[XFRM_MAX_DEPTH];
72402 struct xfrm_kmaddress k;
72403
72404+ pax_track_stack();
72405+
72406 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
72407 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
72408 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
72409@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
72410 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
72411 else
72412 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
72413+#ifdef CONFIG_GRKERNSEC_HIDESYM
72414+ NULL,
72415+#else
72416 s,
72417+#endif
72418 atomic_read(&s->sk_refcnt),
72419 sk_rmem_alloc_get(s),
72420 sk_wmem_alloc_get(s),
72421diff -urNp linux-2.6.32.45/net/lapb/lapb_iface.c linux-2.6.32.45/net/lapb/lapb_iface.c
72422--- linux-2.6.32.45/net/lapb/lapb_iface.c 2011-03-27 14:31:47.000000000 -0400
72423+++ linux-2.6.32.45/net/lapb/lapb_iface.c 2011-08-05 20:33:55.000000000 -0400
72424@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev
72425 goto out;
72426
72427 lapb->dev = dev;
72428- lapb->callbacks = *callbacks;
72429+ lapb->callbacks = callbacks;
72430
72431 __lapb_insert_cb(lapb);
72432
72433@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device
72434
72435 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
72436 {
72437- if (lapb->callbacks.connect_confirmation)
72438- lapb->callbacks.connect_confirmation(lapb->dev, reason);
72439+ if (lapb->callbacks->connect_confirmation)
72440+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
72441 }
72442
72443 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
72444 {
72445- if (lapb->callbacks.connect_indication)
72446- lapb->callbacks.connect_indication(lapb->dev, reason);
72447+ if (lapb->callbacks->connect_indication)
72448+ lapb->callbacks->connect_indication(lapb->dev, reason);
72449 }
72450
72451 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
72452 {
72453- if (lapb->callbacks.disconnect_confirmation)
72454- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
72455+ if (lapb->callbacks->disconnect_confirmation)
72456+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
72457 }
72458
72459 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
72460 {
72461- if (lapb->callbacks.disconnect_indication)
72462- lapb->callbacks.disconnect_indication(lapb->dev, reason);
72463+ if (lapb->callbacks->disconnect_indication)
72464+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
72465 }
72466
72467 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
72468 {
72469- if (lapb->callbacks.data_indication)
72470- return lapb->callbacks.data_indication(lapb->dev, skb);
72471+ if (lapb->callbacks->data_indication)
72472+ return lapb->callbacks->data_indication(lapb->dev, skb);
72473
72474 kfree_skb(skb);
72475 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
72476@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *l
72477 {
72478 int used = 0;
72479
72480- if (lapb->callbacks.data_transmit) {
72481- lapb->callbacks.data_transmit(lapb->dev, skb);
72482+ if (lapb->callbacks->data_transmit) {
72483+ lapb->callbacks->data_transmit(lapb->dev, skb);
72484 used = 1;
72485 }
72486
72487diff -urNp linux-2.6.32.45/net/mac80211/cfg.c linux-2.6.32.45/net/mac80211/cfg.c
72488--- linux-2.6.32.45/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
72489+++ linux-2.6.32.45/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
72490@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
72491 return err;
72492 }
72493
72494-struct cfg80211_ops mac80211_config_ops = {
72495+const struct cfg80211_ops mac80211_config_ops = {
72496 .add_virtual_intf = ieee80211_add_iface,
72497 .del_virtual_intf = ieee80211_del_iface,
72498 .change_virtual_intf = ieee80211_change_iface,
72499diff -urNp linux-2.6.32.45/net/mac80211/cfg.h linux-2.6.32.45/net/mac80211/cfg.h
72500--- linux-2.6.32.45/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
72501+++ linux-2.6.32.45/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
72502@@ -4,6 +4,6 @@
72503 #ifndef __CFG_H
72504 #define __CFG_H
72505
72506-extern struct cfg80211_ops mac80211_config_ops;
72507+extern const struct cfg80211_ops mac80211_config_ops;
72508
72509 #endif /* __CFG_H */
72510diff -urNp linux-2.6.32.45/net/mac80211/debugfs_key.c linux-2.6.32.45/net/mac80211/debugfs_key.c
72511--- linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
72512+++ linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
72513@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
72514 size_t count, loff_t *ppos)
72515 {
72516 struct ieee80211_key *key = file->private_data;
72517- int i, res, bufsize = 2 * key->conf.keylen + 2;
72518+ int i, bufsize = 2 * key->conf.keylen + 2;
72519 char *buf = kmalloc(bufsize, GFP_KERNEL);
72520 char *p = buf;
72521+ ssize_t res;
72522+
72523+ if (buf == NULL)
72524+ return -ENOMEM;
72525
72526 for (i = 0; i < key->conf.keylen; i++)
72527 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
72528diff -urNp linux-2.6.32.45/net/mac80211/debugfs_sta.c linux-2.6.32.45/net/mac80211/debugfs_sta.c
72529--- linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
72530+++ linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
72531@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
72532 int i;
72533 struct sta_info *sta = file->private_data;
72534
72535+ pax_track_stack();
72536+
72537 spin_lock_bh(&sta->lock);
72538 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
72539 sta->ampdu_mlme.dialog_token_allocator + 1);
72540diff -urNp linux-2.6.32.45/net/mac80211/ieee80211_i.h linux-2.6.32.45/net/mac80211/ieee80211_i.h
72541--- linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
72542+++ linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
72543@@ -25,6 +25,7 @@
72544 #include <linux/etherdevice.h>
72545 #include <net/cfg80211.h>
72546 #include <net/mac80211.h>
72547+#include <asm/local.h>
72548 #include "key.h"
72549 #include "sta_info.h"
72550
72551@@ -635,7 +636,7 @@ struct ieee80211_local {
72552 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
72553 spinlock_t queue_stop_reason_lock;
72554
72555- int open_count;
72556+ local_t open_count;
72557 int monitors, cooked_mntrs;
72558 /* number of interfaces with corresponding FIF_ flags */
72559 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
72560diff -urNp linux-2.6.32.45/net/mac80211/iface.c linux-2.6.32.45/net/mac80211/iface.c
72561--- linux-2.6.32.45/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
72562+++ linux-2.6.32.45/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
72563@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
72564 break;
72565 }
72566
72567- if (local->open_count == 0) {
72568+ if (local_read(&local->open_count) == 0) {
72569 res = drv_start(local);
72570 if (res)
72571 goto err_del_bss;
72572@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
72573 * Validate the MAC address for this device.
72574 */
72575 if (!is_valid_ether_addr(dev->dev_addr)) {
72576- if (!local->open_count)
72577+ if (!local_read(&local->open_count))
72578 drv_stop(local);
72579 return -EADDRNOTAVAIL;
72580 }
72581@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
72582
72583 hw_reconf_flags |= __ieee80211_recalc_idle(local);
72584
72585- local->open_count++;
72586+ local_inc(&local->open_count);
72587 if (hw_reconf_flags) {
72588 ieee80211_hw_config(local, hw_reconf_flags);
72589 /*
72590@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
72591 err_del_interface:
72592 drv_remove_interface(local, &conf);
72593 err_stop:
72594- if (!local->open_count)
72595+ if (!local_read(&local->open_count))
72596 drv_stop(local);
72597 err_del_bss:
72598 sdata->bss = NULL;
72599@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
72600 WARN_ON(!list_empty(&sdata->u.ap.vlans));
72601 }
72602
72603- local->open_count--;
72604+ local_dec(&local->open_count);
72605
72606 switch (sdata->vif.type) {
72607 case NL80211_IFTYPE_AP_VLAN:
72608@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
72609
72610 ieee80211_recalc_ps(local, -1);
72611
72612- if (local->open_count == 0) {
72613+ if (local_read(&local->open_count) == 0) {
72614 ieee80211_clear_tx_pending(local);
72615 ieee80211_stop_device(local);
72616
72617diff -urNp linux-2.6.32.45/net/mac80211/main.c linux-2.6.32.45/net/mac80211/main.c
72618--- linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
72619+++ linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
72620@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
72621 local->hw.conf.power_level = power;
72622 }
72623
72624- if (changed && local->open_count) {
72625+ if (changed && local_read(&local->open_count)) {
72626 ret = drv_config(local, changed);
72627 /*
72628 * Goal:
72629diff -urNp linux-2.6.32.45/net/mac80211/mlme.c linux-2.6.32.45/net/mac80211/mlme.c
72630--- linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:35:30.000000000 -0400
72631+++ linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:34:01.000000000 -0400
72632@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
72633 bool have_higher_than_11mbit = false, newsta = false;
72634 u16 ap_ht_cap_flags;
72635
72636+ pax_track_stack();
72637+
72638 /*
72639 * AssocResp and ReassocResp have identical structure, so process both
72640 * of them in this function.
72641diff -urNp linux-2.6.32.45/net/mac80211/pm.c linux-2.6.32.45/net/mac80211/pm.c
72642--- linux-2.6.32.45/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
72643+++ linux-2.6.32.45/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
72644@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
72645 }
72646
72647 /* stop hardware - this must stop RX */
72648- if (local->open_count)
72649+ if (local_read(&local->open_count))
72650 ieee80211_stop_device(local);
72651
72652 local->suspended = true;
72653diff -urNp linux-2.6.32.45/net/mac80211/rate.c linux-2.6.32.45/net/mac80211/rate.c
72654--- linux-2.6.32.45/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
72655+++ linux-2.6.32.45/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
72656@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
72657 struct rate_control_ref *ref, *old;
72658
72659 ASSERT_RTNL();
72660- if (local->open_count)
72661+ if (local_read(&local->open_count))
72662 return -EBUSY;
72663
72664 ref = rate_control_alloc(name, local);
72665diff -urNp linux-2.6.32.45/net/mac80211/tx.c linux-2.6.32.45/net/mac80211/tx.c
72666--- linux-2.6.32.45/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
72667+++ linux-2.6.32.45/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
72668@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
72669 return cpu_to_le16(dur);
72670 }
72671
72672-static int inline is_ieee80211_device(struct ieee80211_local *local,
72673+static inline int is_ieee80211_device(struct ieee80211_local *local,
72674 struct net_device *dev)
72675 {
72676 return local == wdev_priv(dev->ieee80211_ptr);
72677diff -urNp linux-2.6.32.45/net/mac80211/util.c linux-2.6.32.45/net/mac80211/util.c
72678--- linux-2.6.32.45/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
72679+++ linux-2.6.32.45/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
72680@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
72681 local->resuming = true;
72682
72683 /* restart hardware */
72684- if (local->open_count) {
72685+ if (local_read(&local->open_count)) {
72686 /*
72687 * Upon resume hardware can sometimes be goofy due to
72688 * various platform / driver / bus issues, so restarting
72689diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c
72690--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
72691+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
72692@@ -564,7 +564,7 @@ static const struct file_operations ip_v
72693 .open = ip_vs_app_open,
72694 .read = seq_read,
72695 .llseek = seq_lseek,
72696- .release = seq_release,
72697+ .release = seq_release_net,
72698 };
72699 #endif
72700
72701diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c
72702--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
72703+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
72704@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
72705 /* if the connection is not template and is created
72706 * by sync, preserve the activity flag.
72707 */
72708- cp->flags |= atomic_read(&dest->conn_flags) &
72709+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
72710 (~IP_VS_CONN_F_INACTIVE);
72711 else
72712- cp->flags |= atomic_read(&dest->conn_flags);
72713+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
72714 cp->dest = dest;
72715
72716 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
72717@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
72718 atomic_set(&cp->refcnt, 1);
72719
72720 atomic_set(&cp->n_control, 0);
72721- atomic_set(&cp->in_pkts, 0);
72722+ atomic_set_unchecked(&cp->in_pkts, 0);
72723
72724 atomic_inc(&ip_vs_conn_count);
72725 if (flags & IP_VS_CONN_F_NO_CPORT)
72726@@ -871,7 +871,7 @@ static const struct file_operations ip_v
72727 .open = ip_vs_conn_open,
72728 .read = seq_read,
72729 .llseek = seq_lseek,
72730- .release = seq_release,
72731+ .release = seq_release_net,
72732 };
72733
72734 static const char *ip_vs_origin_name(unsigned flags)
72735@@ -934,7 +934,7 @@ static const struct file_operations ip_v
72736 .open = ip_vs_conn_sync_open,
72737 .read = seq_read,
72738 .llseek = seq_lseek,
72739- .release = seq_release,
72740+ .release = seq_release_net,
72741 };
72742
72743 #endif
72744@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
72745
72746 /* Don't drop the entry if its number of incoming packets is not
72747 located in [0, 8] */
72748- i = atomic_read(&cp->in_pkts);
72749+ i = atomic_read_unchecked(&cp->in_pkts);
72750 if (i > 8 || i < 0) return 0;
72751
72752 if (!todrop_rate[i]) return 0;
72753diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c
72754--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
72755+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
72756@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
72757 ret = cp->packet_xmit(skb, cp, pp);
72758 /* do not touch skb anymore */
72759
72760- atomic_inc(&cp->in_pkts);
72761+ atomic_inc_unchecked(&cp->in_pkts);
72762 ip_vs_conn_put(cp);
72763 return ret;
72764 }
72765@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
72766 * Sync connection if it is about to close to
72767 * encorage the standby servers to update the connections timeout
72768 */
72769- pkts = atomic_add_return(1, &cp->in_pkts);
72770+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
72771 if (af == AF_INET &&
72772 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
72773 (((cp->protocol != IPPROTO_TCP ||
72774diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c
72775--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
72776+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
72777@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
72778 ip_vs_rs_hash(dest);
72779 write_unlock_bh(&__ip_vs_rs_lock);
72780 }
72781- atomic_set(&dest->conn_flags, conn_flags);
72782+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
72783
72784 /* bind the service */
72785 if (!dest->svc) {
72786@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
72787 " %-7s %-6d %-10d %-10d\n",
72788 &dest->addr.in6,
72789 ntohs(dest->port),
72790- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72791+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72792 atomic_read(&dest->weight),
72793 atomic_read(&dest->activeconns),
72794 atomic_read(&dest->inactconns));
72795@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
72796 "%-7s %-6d %-10d %-10d\n",
72797 ntohl(dest->addr.ip),
72798 ntohs(dest->port),
72799- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72800+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72801 atomic_read(&dest->weight),
72802 atomic_read(&dest->activeconns),
72803 atomic_read(&dest->inactconns));
72804@@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
72805 .open = ip_vs_info_open,
72806 .read = seq_read,
72807 .llseek = seq_lseek,
72808- .release = seq_release_private,
72809+ .release = seq_release_net,
72810 };
72811
72812 #endif
72813@@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
72814 .open = ip_vs_stats_seq_open,
72815 .read = seq_read,
72816 .llseek = seq_lseek,
72817- .release = single_release,
72818+ .release = single_release_net,
72819 };
72820
72821 #endif
72822@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
72823
72824 entry.addr = dest->addr.ip;
72825 entry.port = dest->port;
72826- entry.conn_flags = atomic_read(&dest->conn_flags);
72827+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
72828 entry.weight = atomic_read(&dest->weight);
72829 entry.u_threshold = dest->u_threshold;
72830 entry.l_threshold = dest->l_threshold;
72831@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
72832 unsigned char arg[128];
72833 int ret = 0;
72834
72835+ pax_track_stack();
72836+
72837 if (!capable(CAP_NET_ADMIN))
72838 return -EPERM;
72839
72840@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
72841 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
72842
72843 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
72844- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72845+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72846 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
72847 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
72848 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
72849diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c
72850--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
72851+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
72852@@ -438,7 +438,7 @@ static void ip_vs_process_message(const
72853
72854 if (opt)
72855 memcpy(&cp->in_seq, opt, sizeof(*opt));
72856- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72857+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72858 cp->state = state;
72859 cp->old_state = cp->state;
72860 /*
72861diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c
72862--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
72863+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
72864@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
72865 else
72866 rc = NF_ACCEPT;
72867 /* do not touch skb anymore */
72868- atomic_inc(&cp->in_pkts);
72869+ atomic_inc_unchecked(&cp->in_pkts);
72870 goto out;
72871 }
72872
72873@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
72874 else
72875 rc = NF_ACCEPT;
72876 /* do not touch skb anymore */
72877- atomic_inc(&cp->in_pkts);
72878+ atomic_inc_unchecked(&cp->in_pkts);
72879 goto out;
72880 }
72881
72882diff -urNp linux-2.6.32.45/net/netfilter/Kconfig linux-2.6.32.45/net/netfilter/Kconfig
72883--- linux-2.6.32.45/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
72884+++ linux-2.6.32.45/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
72885@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
72886
72887 To compile it as a module, choose M here. If unsure, say N.
72888
72889+config NETFILTER_XT_MATCH_GRADM
72890+ tristate '"gradm" match support'
72891+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
72892+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
72893+ ---help---
72894+	  The gradm match allows matching on grsecurity RBAC being enabled.
72895+ It is useful when iptables rules are applied early on bootup to
72896+ prevent connections to the machine (except from a trusted host)
72897+ while the RBAC system is disabled.
72898+
72899 config NETFILTER_XT_MATCH_HASHLIMIT
72900 tristate '"hashlimit" match support'
72901 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
72902diff -urNp linux-2.6.32.45/net/netfilter/Makefile linux-2.6.32.45/net/netfilter/Makefile
72903--- linux-2.6.32.45/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
72904+++ linux-2.6.32.45/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
72905@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
72906 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
72907 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
72908 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
72909+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
72910 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
72911 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
72912 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
72913diff -urNp linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c
72914--- linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
72915+++ linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
72916@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
72917 static int
72918 ctnetlink_parse_tuple(const struct nlattr * const cda[],
72919 struct nf_conntrack_tuple *tuple,
72920- enum ctattr_tuple type, u_int8_t l3num)
72921+ enum ctattr_type type, u_int8_t l3num)
72922 {
72923 struct nlattr *tb[CTA_TUPLE_MAX+1];
72924 int err;
72925diff -urNp linux-2.6.32.45/net/netfilter/nfnetlink_log.c linux-2.6.32.45/net/netfilter/nfnetlink_log.c
72926--- linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
72927+++ linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
72928@@ -68,7 +68,7 @@ struct nfulnl_instance {
72929 };
72930
72931 static DEFINE_RWLOCK(instances_lock);
72932-static atomic_t global_seq;
72933+static atomic_unchecked_t global_seq;
72934
72935 #define INSTANCE_BUCKETS 16
72936 static struct hlist_head instance_table[INSTANCE_BUCKETS];
72937@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
72938 /* global sequence number */
72939 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
72940 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
72941- htonl(atomic_inc_return(&global_seq)));
72942+ htonl(atomic_inc_return_unchecked(&global_seq)));
72943
72944 if (data_len) {
72945 struct nlattr *nla;
72946diff -urNp linux-2.6.32.45/net/netfilter/xt_gradm.c linux-2.6.32.45/net/netfilter/xt_gradm.c
72947--- linux-2.6.32.45/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
72948+++ linux-2.6.32.45/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
72949@@ -0,0 +1,51 @@
72950+/*
72951+ * gradm match for netfilter
72952