test/grsecurity-2.2.2-2.6.32.45-201108251825.patch
1diff -urNp linux-2.6.32.45/arch/alpha/include/asm/elf.h linux-2.6.32.45/arch/alpha/include/asm/elf.h
2--- linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3+++ linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-2.6.32.45/arch/alpha/include/asm/pgtable.h linux-2.6.32.45/arch/alpha/include/asm/pgtable.h
19--- linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20+++ linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-2.6.32.45/arch/alpha/kernel/module.c linux-2.6.32.45/arch/alpha/kernel/module.c
40--- linux-2.6.32.45/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41+++ linux-2.6.32.45/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-2.6.32.45/arch/alpha/kernel/osf_sys.c linux-2.6.32.45/arch/alpha/kernel/osf_sys.c
52--- linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-08-09 18:35:28.000000000 -0400
53+++ linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-2.6.32.45/arch/alpha/mm/fault.c linux-2.6.32.45/arch/alpha/mm/fault.c
86--- linux-2.6.32.45/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87+++ linux-2.6.32.45/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-2.6.32.45/arch/arm/include/asm/elf.h linux-2.6.32.45/arch/arm/include/asm/elf.h
245--- linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246+++ linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263diff -urNp linux-2.6.32.45/arch/arm/include/asm/kmap_types.h linux-2.6.32.45/arch/arm/include/asm/kmap_types.h
264--- linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265+++ linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266@@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270+ KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274diff -urNp linux-2.6.32.45/arch/arm/include/asm/uaccess.h linux-2.6.32.45/arch/arm/include/asm/uaccess.h
275--- linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276+++ linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
277@@ -22,6 +22,8 @@
278 #define VERIFY_READ 0
279 #define VERIFY_WRITE 1
280
281+extern void check_object_size(const void *ptr, unsigned long n, bool to);
282+
283 /*
284 * The exception table consists of pairs of addresses: the first is the
285 * address of an instruction that is allowed to fault, and the second is
286@@ -387,8 +389,23 @@ do { \
287
288
289 #ifdef CONFIG_MMU
290-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
291-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
292+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
293+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
294+
295+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
296+{
297+ if (!__builtin_constant_p(n))
298+ check_object_size(to, n, false);
299+ return ___copy_from_user(to, from, n);
300+}
301+
302+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
303+{
304+ if (!__builtin_constant_p(n))
305+ check_object_size(from, n, true);
306+ return ___copy_to_user(to, from, n);
307+}
308+
309 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
310 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
311 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
312@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
313
314 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
315 {
316+ if ((long)n < 0)
317+ return n;
318+
319 if (access_ok(VERIFY_READ, from, n))
320 n = __copy_from_user(to, from, n);
321 else /* security hole - plug it */
322@@ -412,6 +432,9 @@ static inline unsigned long __must_check
323
324 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
325 {
326+ if ((long)n < 0)
327+ return n;
328+
329 if (access_ok(VERIFY_WRITE, to, n))
330 n = __copy_to_user(to, from, n);
331 return n;
332diff -urNp linux-2.6.32.45/arch/arm/kernel/armksyms.c linux-2.6.32.45/arch/arm/kernel/armksyms.c
333--- linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
334+++ linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
335@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
336 #ifdef CONFIG_MMU
337 EXPORT_SYMBOL(copy_page);
338
339-EXPORT_SYMBOL(__copy_from_user);
340-EXPORT_SYMBOL(__copy_to_user);
341+EXPORT_SYMBOL(___copy_from_user);
342+EXPORT_SYMBOL(___copy_to_user);
343 EXPORT_SYMBOL(__clear_user);
344
345 EXPORT_SYMBOL(__get_user_1);
346diff -urNp linux-2.6.32.45/arch/arm/kernel/kgdb.c linux-2.6.32.45/arch/arm/kernel/kgdb.c
347--- linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
348+++ linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
349@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
350 * and we handle the normal undef case within the do_undefinstr
351 * handler.
352 */
353-struct kgdb_arch arch_kgdb_ops = {
354+const struct kgdb_arch arch_kgdb_ops = {
355 #ifndef __ARMEB__
356 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
357 #else /* ! __ARMEB__ */
358diff -urNp linux-2.6.32.45/arch/arm/kernel/traps.c linux-2.6.32.45/arch/arm/kernel/traps.c
359--- linux-2.6.32.45/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
360+++ linux-2.6.32.45/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
361@@ -247,6 +247,8 @@ static void __die(const char *str, int e
362
363 DEFINE_SPINLOCK(die_lock);
364
365+extern void gr_handle_kernel_exploit(void);
366+
367 /*
368 * This function is protected against re-entrancy.
369 */
370@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
371 if (panic_on_oops)
372 panic("Fatal exception");
373
374+ gr_handle_kernel_exploit();
375+
376 do_exit(SIGSEGV);
377 }
378
379diff -urNp linux-2.6.32.45/arch/arm/lib/copy_from_user.S linux-2.6.32.45/arch/arm/lib/copy_from_user.S
380--- linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
381+++ linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
382@@ -16,7 +16,7 @@
383 /*
384 * Prototype:
385 *
386- * size_t __copy_from_user(void *to, const void *from, size_t n)
387+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
388 *
389 * Purpose:
390 *
391@@ -84,11 +84,11 @@
392
393 .text
394
395-ENTRY(__copy_from_user)
396+ENTRY(___copy_from_user)
397
398 #include "copy_template.S"
399
400-ENDPROC(__copy_from_user)
401+ENDPROC(___copy_from_user)
402
403 .section .fixup,"ax"
404 .align 0
405diff -urNp linux-2.6.32.45/arch/arm/lib/copy_to_user.S linux-2.6.32.45/arch/arm/lib/copy_to_user.S
406--- linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
407+++ linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
408@@ -16,7 +16,7 @@
409 /*
410 * Prototype:
411 *
412- * size_t __copy_to_user(void *to, const void *from, size_t n)
413+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
414 *
415 * Purpose:
416 *
417@@ -88,11 +88,11 @@
418 .text
419
420 ENTRY(__copy_to_user_std)
421-WEAK(__copy_to_user)
422+WEAK(___copy_to_user)
423
424 #include "copy_template.S"
425
426-ENDPROC(__copy_to_user)
427+ENDPROC(___copy_to_user)
428
429 .section .fixup,"ax"
430 .align 0
431diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess.S linux-2.6.32.45/arch/arm/lib/uaccess.S
432--- linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
433+++ linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
434@@ -19,7 +19,7 @@
435
436 #define PAGE_SHIFT 12
437
438-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
439+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
440 * Purpose : copy a block to user memory from kernel memory
441 * Params : to - user memory
442 * : from - kernel memory
443@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
444 sub r2, r2, ip
445 b .Lc2u_dest_aligned
446
447-ENTRY(__copy_to_user)
448+ENTRY(___copy_to_user)
449 stmfd sp!, {r2, r4 - r7, lr}
450 cmp r2, #4
451 blt .Lc2u_not_enough
452@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
453 ldrgtb r3, [r1], #0
454 USER( strgtbt r3, [r0], #1) @ May fault
455 b .Lc2u_finished
456-ENDPROC(__copy_to_user)
457+ENDPROC(___copy_to_user)
458
459 .section .fixup,"ax"
460 .align 0
461 9001: ldmfd sp!, {r0, r4 - r7, pc}
462 .previous
463
464-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
465+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
466 * Purpose : copy a block from user memory to kernel memory
467 * Params : to - kernel memory
468 * : from - user memory
469@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
470 sub r2, r2, ip
471 b .Lcfu_dest_aligned
472
473-ENTRY(__copy_from_user)
474+ENTRY(___copy_from_user)
475 stmfd sp!, {r0, r2, r4 - r7, lr}
476 cmp r2, #4
477 blt .Lcfu_not_enough
478@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
479 USER( ldrgtbt r3, [r1], #1) @ May fault
480 strgtb r3, [r0], #1
481 b .Lcfu_finished
482-ENDPROC(__copy_from_user)
483+ENDPROC(___copy_from_user)
484
485 .section .fixup,"ax"
486 .align 0
487diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c
488--- linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
489+++ linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
490@@ -97,7 +97,7 @@ out:
491 }
492
493 unsigned long
494-__copy_to_user(void __user *to, const void *from, unsigned long n)
495+___copy_to_user(void __user *to, const void *from, unsigned long n)
496 {
497 /*
498 * This test is stubbed out of the main function above to keep
499diff -urNp linux-2.6.32.45/arch/arm/mach-at91/pm.c linux-2.6.32.45/arch/arm/mach-at91/pm.c
500--- linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
501+++ linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
502@@ -348,7 +348,7 @@ static void at91_pm_end(void)
503 }
504
505
506-static struct platform_suspend_ops at91_pm_ops ={
507+static const struct platform_suspend_ops at91_pm_ops ={
508 .valid = at91_pm_valid_state,
509 .begin = at91_pm_begin,
510 .enter = at91_pm_enter,
511diff -urNp linux-2.6.32.45/arch/arm/mach-omap1/pm.c linux-2.6.32.45/arch/arm/mach-omap1/pm.c
512--- linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
513+++ linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
514@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
515
516
517
518-static struct platform_suspend_ops omap_pm_ops ={
519+static const struct platform_suspend_ops omap_pm_ops ={
520 .prepare = omap_pm_prepare,
521 .enter = omap_pm_enter,
522 .finish = omap_pm_finish,
523diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c
524--- linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
525+++ linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
526@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
527 enable_hlt();
528 }
529
530-static struct platform_suspend_ops omap_pm_ops = {
531+static const struct platform_suspend_ops omap_pm_ops = {
532 .prepare = omap2_pm_prepare,
533 .enter = omap2_pm_enter,
534 .finish = omap2_pm_finish,
535diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c
536--- linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
537+++ linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
538@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
539 return;
540 }
541
542-static struct platform_suspend_ops omap_pm_ops = {
543+static const struct platform_suspend_ops omap_pm_ops = {
544 .begin = omap3_pm_begin,
545 .end = omap3_pm_end,
546 .prepare = omap3_pm_prepare,
547diff -urNp linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c
548--- linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
549+++ linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
550@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
551 (state == PM_SUSPEND_MEM);
552 }
553
554-static struct platform_suspend_ops pnx4008_pm_ops = {
555+static const struct platform_suspend_ops pnx4008_pm_ops = {
556 .enter = pnx4008_pm_enter,
557 .valid = pnx4008_pm_valid,
558 };
559diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/pm.c linux-2.6.32.45/arch/arm/mach-pxa/pm.c
560--- linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
561+++ linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
562@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
563 pxa_cpu_pm_fns->finish();
564 }
565
566-static struct platform_suspend_ops pxa_pm_ops = {
567+static const struct platform_suspend_ops pxa_pm_ops = {
568 .valid = pxa_pm_valid,
569 .enter = pxa_pm_enter,
570 .prepare = pxa_pm_prepare,
571diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c
572--- linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
573+++ linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
574@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
575 }
576
577 #ifdef CONFIG_PM
578-static struct platform_suspend_ops sharpsl_pm_ops = {
579+static const struct platform_suspend_ops sharpsl_pm_ops = {
580 .prepare = pxa_pm_prepare,
581 .finish = pxa_pm_finish,
582 .enter = corgi_pxa_pm_enter,
583diff -urNp linux-2.6.32.45/arch/arm/mach-sa1100/pm.c linux-2.6.32.45/arch/arm/mach-sa1100/pm.c
584--- linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
585+++ linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
586@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
587 return virt_to_phys(sp);
588 }
589
590-static struct platform_suspend_ops sa11x0_pm_ops = {
591+static const struct platform_suspend_ops sa11x0_pm_ops = {
592 .enter = sa11x0_pm_enter,
593 .valid = suspend_valid_only_mem,
594 };
595diff -urNp linux-2.6.32.45/arch/arm/mm/fault.c linux-2.6.32.45/arch/arm/mm/fault.c
596--- linux-2.6.32.45/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
597+++ linux-2.6.32.45/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
598@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
599 }
600 #endif
601
602+#ifdef CONFIG_PAX_PAGEEXEC
603+ if (fsr & FSR_LNX_PF) {
604+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
605+ do_group_exit(SIGKILL);
606+ }
607+#endif
608+
609 tsk->thread.address = addr;
610 tsk->thread.error_code = fsr;
611 tsk->thread.trap_no = 14;
612@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
613 }
614 #endif /* CONFIG_MMU */
615
616+#ifdef CONFIG_PAX_PAGEEXEC
617+void pax_report_insns(void *pc, void *sp)
618+{
619+ long i;
620+
621+ printk(KERN_ERR "PAX: bytes at PC: ");
622+ for (i = 0; i < 20; i++) {
623+ unsigned char c;
624+ if (get_user(c, (__force unsigned char __user *)pc+i))
625+ printk(KERN_CONT "?? ");
626+ else
627+ printk(KERN_CONT "%02x ", c);
628+ }
629+ printk("\n");
630+
631+ printk(KERN_ERR "PAX: bytes at SP-4: ");
632+ for (i = -1; i < 20; i++) {
633+ unsigned long c;
634+ if (get_user(c, (__force unsigned long __user *)sp+i))
635+ printk(KERN_CONT "???????? ");
636+ else
637+ printk(KERN_CONT "%08lx ", c);
638+ }
639+ printk("\n");
640+}
641+#endif
642+
643 /*
644 * First Level Translation Fault Handler
645 *
646diff -urNp linux-2.6.32.45/arch/arm/mm/mmap.c linux-2.6.32.45/arch/arm/mm/mmap.c
647--- linux-2.6.32.45/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
648+++ linux-2.6.32.45/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
649@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
650 if (len > TASK_SIZE)
651 return -ENOMEM;
652
653+#ifdef CONFIG_PAX_RANDMMAP
654+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
655+#endif
656+
657 if (addr) {
658 if (do_align)
659 addr = COLOUR_ALIGN(addr, pgoff);
660@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
661 addr = PAGE_ALIGN(addr);
662
663 vma = find_vma(mm, addr);
664- if (TASK_SIZE - len >= addr &&
665- (!vma || addr + len <= vma->vm_start))
666+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
667 return addr;
668 }
669 if (len > mm->cached_hole_size) {
670- start_addr = addr = mm->free_area_cache;
671+ start_addr = addr = mm->free_area_cache;
672 } else {
673- start_addr = addr = TASK_UNMAPPED_BASE;
674- mm->cached_hole_size = 0;
675+ start_addr = addr = mm->mmap_base;
676+ mm->cached_hole_size = 0;
677 }
678
679 full_search:
680@@ -94,14 +97,14 @@ full_search:
681 * Start a new search - just in case we missed
682 * some holes.
683 */
684- if (start_addr != TASK_UNMAPPED_BASE) {
685- start_addr = addr = TASK_UNMAPPED_BASE;
686+ if (start_addr != mm->mmap_base) {
687+ start_addr = addr = mm->mmap_base;
688 mm->cached_hole_size = 0;
689 goto full_search;
690 }
691 return -ENOMEM;
692 }
693- if (!vma || addr + len <= vma->vm_start) {
694+ if (check_heap_stack_gap(vma, addr, len)) {
695 /*
696 * Remember the place where we stopped the search:
697 */
698diff -urNp linux-2.6.32.45/arch/arm/plat-s3c/pm.c linux-2.6.32.45/arch/arm/plat-s3c/pm.c
699--- linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
700+++ linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
701@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
702 s3c_pm_check_cleanup();
703 }
704
705-static struct platform_suspend_ops s3c_pm_ops = {
706+static const struct platform_suspend_ops s3c_pm_ops = {
707 .enter = s3c_pm_enter,
708 .prepare = s3c_pm_prepare,
709 .finish = s3c_pm_finish,
710diff -urNp linux-2.6.32.45/arch/avr32/include/asm/elf.h linux-2.6.32.45/arch/avr32/include/asm/elf.h
711--- linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
712+++ linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
713@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719
720+#ifdef CONFIG_PAX_ASLR
721+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
722+
723+#define PAX_DELTA_MMAP_LEN 15
724+#define PAX_DELTA_STACK_LEN 15
725+#endif
726
727 /* This yields a mask that user programs can use to figure out what
728 instruction set this CPU supports. This could be done in user space,
729diff -urNp linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h
730--- linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
731+++ linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
732@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
733 D(11) KM_IRQ1,
734 D(12) KM_SOFTIRQ0,
735 D(13) KM_SOFTIRQ1,
736-D(14) KM_TYPE_NR
737+D(14) KM_CLEARPAGE,
738+D(15) KM_TYPE_NR
739 };
740
741 #undef D
742diff -urNp linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c
743--- linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
744+++ linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
745@@ -176,7 +176,7 @@ out:
746 return 0;
747 }
748
749-static struct platform_suspend_ops avr32_pm_ops = {
750+static const struct platform_suspend_ops avr32_pm_ops = {
751 .valid = avr32_pm_valid_state,
752 .enter = avr32_pm_enter,
753 };
754diff -urNp linux-2.6.32.45/arch/avr32/mm/fault.c linux-2.6.32.45/arch/avr32/mm/fault.c
755--- linux-2.6.32.45/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
756+++ linux-2.6.32.45/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
757@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
758
759 int exception_trace = 1;
760
761+#ifdef CONFIG_PAX_PAGEEXEC
762+void pax_report_insns(void *pc, void *sp)
763+{
764+ unsigned long i;
765+
766+ printk(KERN_ERR "PAX: bytes at PC: ");
767+ for (i = 0; i < 20; i++) {
768+ unsigned char c;
769+ if (get_user(c, (unsigned char *)pc+i))
770+ printk(KERN_CONT "???????? ");
771+ else
772+ printk(KERN_CONT "%02x ", c);
773+ }
774+ printk("\n");
775+}
776+#endif
777+
778 /*
779 * This routine handles page faults. It determines the address and the
780 * problem, and then passes it off to one of the appropriate routines.
781@@ -157,6 +174,16 @@ bad_area:
782 up_read(&mm->mmap_sem);
783
784 if (user_mode(regs)) {
785+
786+#ifdef CONFIG_PAX_PAGEEXEC
787+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
788+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
789+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
790+ do_group_exit(SIGKILL);
791+ }
792+ }
793+#endif
794+
795 if (exception_trace && printk_ratelimit())
796 printk("%s%s[%d]: segfault at %08lx pc %08lx "
797 "sp %08lx ecr %lu\n",
798diff -urNp linux-2.6.32.45/arch/blackfin/kernel/kgdb.c linux-2.6.32.45/arch/blackfin/kernel/kgdb.c
799--- linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
800+++ linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
801@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
802 return -1; /* this means that we do not want to exit from the handler */
803 }
804
805-struct kgdb_arch arch_kgdb_ops = {
806+const struct kgdb_arch arch_kgdb_ops = {
807 .gdb_bpt_instr = {0xa1},
808 #ifdef CONFIG_SMP
809 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
810diff -urNp linux-2.6.32.45/arch/blackfin/mach-common/pm.c linux-2.6.32.45/arch/blackfin/mach-common/pm.c
811--- linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
812+++ linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
813@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
814 return 0;
815 }
816
817-struct platform_suspend_ops bfin_pm_ops = {
818+const struct platform_suspend_ops bfin_pm_ops = {
819 .enter = bfin_pm_enter,
820 .valid = bfin_pm_valid,
821 };
822diff -urNp linux-2.6.32.45/arch/frv/include/asm/kmap_types.h linux-2.6.32.45/arch/frv/include/asm/kmap_types.h
823--- linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
824+++ linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
825@@ -23,6 +23,7 @@ enum km_type {
826 KM_IRQ1,
827 KM_SOFTIRQ0,
828 KM_SOFTIRQ1,
829+ KM_CLEARPAGE,
830 KM_TYPE_NR
831 };
832
833diff -urNp linux-2.6.32.45/arch/frv/mm/elf-fdpic.c linux-2.6.32.45/arch/frv/mm/elf-fdpic.c
834--- linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
835+++ linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
836@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
837 if (addr) {
838 addr = PAGE_ALIGN(addr);
839 vma = find_vma(current->mm, addr);
840- if (TASK_SIZE - len >= addr &&
841- (!vma || addr + len <= vma->vm_start))
842+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
843 goto success;
844 }
845
846@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
847 for (; vma; vma = vma->vm_next) {
848 if (addr > limit)
849 break;
850- if (addr + len <= vma->vm_start)
851+ if (check_heap_stack_gap(vma, addr, len))
852 goto success;
853 addr = vma->vm_end;
854 }
855@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
856 for (; vma; vma = vma->vm_next) {
857 if (addr > limit)
858 break;
859- if (addr + len <= vma->vm_start)
860+ if (check_heap_stack_gap(vma, addr, len))
861 goto success;
862 addr = vma->vm_end;
863 }
864diff -urNp linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c
865--- linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
866+++ linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
867@@ -17,7 +17,7 @@
868 #include <linux/swiotlb.h>
869 #include <asm/machvec.h>
870
871-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
872+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
873
874 /* swiotlb declarations & definitions: */
875 extern int swiotlb_late_init_with_default_size (size_t size);
876@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
877 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
878 }
879
880-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
881+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
882 {
883 if (use_swiotlb(dev))
884 return &swiotlb_dma_ops;
885diff -urNp linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c
886--- linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
887+++ linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
888@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
889 },
890 };
891
892-extern struct dma_map_ops swiotlb_dma_ops;
893+extern const struct dma_map_ops swiotlb_dma_ops;
894
895 static int __init
896 sba_init(void)
897@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
898
899 __setup("sbapagesize=",sba_page_override);
900
901-struct dma_map_ops sba_dma_ops = {
902+const struct dma_map_ops sba_dma_ops = {
903 .alloc_coherent = sba_alloc_coherent,
904 .free_coherent = sba_free_coherent,
905 .map_page = sba_map_page,
906diff -urNp linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c
907--- linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
908+++ linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
909@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
910
911 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
912
913+#ifdef CONFIG_PAX_ASLR
914+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
915+
916+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
917+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
918+#endif
919+
920 /* Ugly but avoids duplication */
921 #include "../../../fs/binfmt_elf.c"
922
923diff -urNp linux-2.6.32.45/arch/ia64/ia32/ia32priv.h linux-2.6.32.45/arch/ia64/ia32/ia32priv.h
924--- linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
925+++ linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
926@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
927 #define ELF_DATA ELFDATA2LSB
928 #define ELF_ARCH EM_386
929
930-#define IA32_STACK_TOP IA32_PAGE_OFFSET
931+#ifdef CONFIG_PAX_RANDUSTACK
932+#define __IA32_DELTA_STACK (current->mm->delta_stack)
933+#else
934+#define __IA32_DELTA_STACK 0UL
935+#endif
936+
937+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
938+
939 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
940 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
941
942diff -urNp linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h
943--- linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
944+++ linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
945@@ -12,7 +12,7 @@
946
947 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
948
949-extern struct dma_map_ops *dma_ops;
950+extern const struct dma_map_ops *dma_ops;
951 extern struct ia64_machine_vector ia64_mv;
952 extern void set_iommu_machvec(void);
953
954@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
955 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
956 dma_addr_t *daddr, gfp_t gfp)
957 {
958- struct dma_map_ops *ops = platform_dma_get_ops(dev);
959+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
960 void *caddr;
961
962 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
963@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
964 static inline void dma_free_coherent(struct device *dev, size_t size,
965 void *caddr, dma_addr_t daddr)
966 {
967- struct dma_map_ops *ops = platform_dma_get_ops(dev);
968+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
969 debug_dma_free_coherent(dev, size, caddr, daddr);
970 ops->free_coherent(dev, size, caddr, daddr);
971 }
972@@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
973
974 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
975 {
976- struct dma_map_ops *ops = platform_dma_get_ops(dev);
977+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
978 return ops->mapping_error(dev, daddr);
979 }
980
981 static inline int dma_supported(struct device *dev, u64 mask)
982 {
983- struct dma_map_ops *ops = platform_dma_get_ops(dev);
984+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
985 return ops->dma_supported(dev, mask);
986 }
987
988diff -urNp linux-2.6.32.45/arch/ia64/include/asm/elf.h linux-2.6.32.45/arch/ia64/include/asm/elf.h
989--- linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
990+++ linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
991@@ -43,6 +43,13 @@
992 */
993 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
994
995+#ifdef CONFIG_PAX_ASLR
996+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
997+
998+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
999+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1000+#endif
1001+
1002 #define PT_IA_64_UNWIND 0x70000001
1003
1004 /* IA-64 relocations: */
1005diff -urNp linux-2.6.32.45/arch/ia64/include/asm/machvec.h linux-2.6.32.45/arch/ia64/include/asm/machvec.h
1006--- linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1007+++ linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1008@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1009 /* DMA-mapping interface: */
1010 typedef void ia64_mv_dma_init (void);
1011 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1012-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1013+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1014
1015 /*
1016 * WARNING: The legacy I/O space is _architected_. Platforms are
1017@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1018 # endif /* CONFIG_IA64_GENERIC */
1019
1020 extern void swiotlb_dma_init(void);
1021-extern struct dma_map_ops *dma_get_ops(struct device *);
1022+extern const struct dma_map_ops *dma_get_ops(struct device *);
1023
1024 /*
1025 * Define default versions so we can extend machvec for new platforms without having
1026diff -urNp linux-2.6.32.45/arch/ia64/include/asm/pgtable.h linux-2.6.32.45/arch/ia64/include/asm/pgtable.h
1027--- linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1028+++ linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1029@@ -12,7 +12,7 @@
1030 * David Mosberger-Tang <davidm@hpl.hp.com>
1031 */
1032
1033-
1034+#include <linux/const.h>
1035 #include <asm/mman.h>
1036 #include <asm/page.h>
1037 #include <asm/processor.h>
1038@@ -143,6 +143,17 @@
1039 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1040 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1041 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1042+
1043+#ifdef CONFIG_PAX_PAGEEXEC
1044+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1045+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1046+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1047+#else
1048+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1049+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1050+# define PAGE_COPY_NOEXEC PAGE_COPY
1051+#endif
1052+
1053 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1054 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1055 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1056diff -urNp linux-2.6.32.45/arch/ia64/include/asm/spinlock.h linux-2.6.32.45/arch/ia64/include/asm/spinlock.h
1057--- linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1058+++ linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1059@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1060 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1061
1062 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1063- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1064+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1065 }
1066
1067 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1068diff -urNp linux-2.6.32.45/arch/ia64/include/asm/uaccess.h linux-2.6.32.45/arch/ia64/include/asm/uaccess.h
1069--- linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1070+++ linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1071@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1072 const void *__cu_from = (from); \
1073 long __cu_len = (n); \
1074 \
1075- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1076+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1077 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1078 __cu_len; \
1079 })
1080@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1081 long __cu_len = (n); \
1082 \
1083 __chk_user_ptr(__cu_from); \
1084- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1085+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1086 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1087 __cu_len; \
1088 })
1089diff -urNp linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c
1090--- linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1091+++ linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1092@@ -3,7 +3,7 @@
1093 /* Set this to 1 if there is a HW IOMMU in the system */
1094 int iommu_detected __read_mostly;
1095
1096-struct dma_map_ops *dma_ops;
1097+const struct dma_map_ops *dma_ops;
1098 EXPORT_SYMBOL(dma_ops);
1099
1100 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1101@@ -16,7 +16,7 @@ static int __init dma_init(void)
1102 }
1103 fs_initcall(dma_init);
1104
1105-struct dma_map_ops *dma_get_ops(struct device *dev)
1106+const struct dma_map_ops *dma_get_ops(struct device *dev)
1107 {
1108 return dma_ops;
1109 }
1110diff -urNp linux-2.6.32.45/arch/ia64/kernel/module.c linux-2.6.32.45/arch/ia64/kernel/module.c
1111--- linux-2.6.32.45/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1112+++ linux-2.6.32.45/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1113@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1114 void
1115 module_free (struct module *mod, void *module_region)
1116 {
1117- if (mod && mod->arch.init_unw_table &&
1118- module_region == mod->module_init) {
1119+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1120 unw_remove_unwind_table(mod->arch.init_unw_table);
1121 mod->arch.init_unw_table = NULL;
1122 }
1123@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1124 }
1125
1126 static inline int
1127+in_init_rx (const struct module *mod, uint64_t addr)
1128+{
1129+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1130+}
1131+
1132+static inline int
1133+in_init_rw (const struct module *mod, uint64_t addr)
1134+{
1135+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1136+}
1137+
1138+static inline int
1139 in_init (const struct module *mod, uint64_t addr)
1140 {
1141- return addr - (uint64_t) mod->module_init < mod->init_size;
1142+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1143+}
1144+
1145+static inline int
1146+in_core_rx (const struct module *mod, uint64_t addr)
1147+{
1148+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1149+}
1150+
1151+static inline int
1152+in_core_rw (const struct module *mod, uint64_t addr)
1153+{
1154+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1155 }
1156
1157 static inline int
1158 in_core (const struct module *mod, uint64_t addr)
1159 {
1160- return addr - (uint64_t) mod->module_core < mod->core_size;
1161+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1162 }
1163
1164 static inline int
1165@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1166 break;
1167
1168 case RV_BDREL:
1169- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1170+ if (in_init_rx(mod, val))
1171+ val -= (uint64_t) mod->module_init_rx;
1172+ else if (in_init_rw(mod, val))
1173+ val -= (uint64_t) mod->module_init_rw;
1174+ else if (in_core_rx(mod, val))
1175+ val -= (uint64_t) mod->module_core_rx;
1176+ else if (in_core_rw(mod, val))
1177+ val -= (uint64_t) mod->module_core_rw;
1178 break;
1179
1180 case RV_LTV:
1181@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1182 * addresses have been selected...
1183 */
1184 uint64_t gp;
1185- if (mod->core_size > MAX_LTOFF)
1186+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1187 /*
1188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1189 * at the end of the module.
1190 */
1191- gp = mod->core_size - MAX_LTOFF / 2;
1192+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1193 else
1194- gp = mod->core_size / 2;
1195- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1196+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1197+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1198 mod->arch.gp = gp;
1199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1200 }
1201diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-dma.c linux-2.6.32.45/arch/ia64/kernel/pci-dma.c
1202--- linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1203+++ linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1204@@ -43,7 +43,7 @@ struct device fallback_dev = {
1205 .dma_mask = &fallback_dev.coherent_dma_mask,
1206 };
1207
1208-extern struct dma_map_ops intel_dma_ops;
1209+extern const struct dma_map_ops intel_dma_ops;
1210
1211 static int __init pci_iommu_init(void)
1212 {
1213@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1214 }
1215 EXPORT_SYMBOL(iommu_dma_supported);
1216
1217+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1218+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1219+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1220+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1221+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1222+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1223+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1224+
1225+static const struct dma_map_ops intel_iommu_dma_ops = {
1226+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1227+ .alloc_coherent = intel_alloc_coherent,
1228+ .free_coherent = intel_free_coherent,
1229+ .map_sg = intel_map_sg,
1230+ .unmap_sg = intel_unmap_sg,
1231+ .map_page = intel_map_page,
1232+ .unmap_page = intel_unmap_page,
1233+ .mapping_error = intel_mapping_error,
1234+
1235+ .sync_single_for_cpu = machvec_dma_sync_single,
1236+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1237+ .sync_single_for_device = machvec_dma_sync_single,
1238+ .sync_sg_for_device = machvec_dma_sync_sg,
1239+ .dma_supported = iommu_dma_supported,
1240+};
1241+
1242 void __init pci_iommu_alloc(void)
1243 {
1244- dma_ops = &intel_dma_ops;
1245-
1246- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1247- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1248- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1249- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1250- dma_ops->dma_supported = iommu_dma_supported;
1251+ dma_ops = &intel_iommu_dma_ops;
1252
1253 /*
1254 * The order of these functions is important for
1255diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c
1256--- linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1257+++ linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1258@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1259 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1260 }
1261
1262-struct dma_map_ops swiotlb_dma_ops = {
1263+const struct dma_map_ops swiotlb_dma_ops = {
1264 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1265 .free_coherent = swiotlb_free_coherent,
1266 .map_page = swiotlb_map_page,
1267diff -urNp linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c
1268--- linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1269+++ linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1270@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1271 if (REGION_NUMBER(addr) == RGN_HPAGE)
1272 addr = 0;
1273 #endif
1274+
1275+#ifdef CONFIG_PAX_RANDMMAP
1276+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1277+ addr = mm->free_area_cache;
1278+ else
1279+#endif
1280+
1281 if (!addr)
1282 addr = mm->free_area_cache;
1283
1284@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1285 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1286 /* At this point: (!vma || addr < vma->vm_end). */
1287 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1288- if (start_addr != TASK_UNMAPPED_BASE) {
1289+ if (start_addr != mm->mmap_base) {
1290 /* Start a new search --- just in case we missed some holes. */
1291- addr = TASK_UNMAPPED_BASE;
1292+ addr = mm->mmap_base;
1293 goto full_search;
1294 }
1295 return -ENOMEM;
1296 }
1297- if (!vma || addr + len <= vma->vm_start) {
1298+ if (check_heap_stack_gap(vma, addr, len)) {
1299 /* Remember the address where we stopped this search: */
1300 mm->free_area_cache = addr + len;
1301 return addr;
1302diff -urNp linux-2.6.32.45/arch/ia64/kernel/topology.c linux-2.6.32.45/arch/ia64/kernel/topology.c
1303--- linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1304+++ linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1305@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1306 return ret;
1307 }
1308
1309-static struct sysfs_ops cache_sysfs_ops = {
1310+static const struct sysfs_ops cache_sysfs_ops = {
1311 .show = cache_show
1312 };
1313
1314diff -urNp linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S
1315--- linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1316+++ linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1317@@ -190,7 +190,7 @@ SECTIONS
1318 /* Per-cpu data: */
1319 . = ALIGN(PERCPU_PAGE_SIZE);
1320 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1321- __phys_per_cpu_start = __per_cpu_load;
1322+ __phys_per_cpu_start = per_cpu_load;
1323 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1324 * into percpu page size
1325 */
1326diff -urNp linux-2.6.32.45/arch/ia64/mm/fault.c linux-2.6.32.45/arch/ia64/mm/fault.c
1327--- linux-2.6.32.45/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1328+++ linux-2.6.32.45/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1329@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1330 return pte_present(pte);
1331 }
1332
1333+#ifdef CONFIG_PAX_PAGEEXEC
1334+void pax_report_insns(void *pc, void *sp)
1335+{
1336+ unsigned long i;
1337+
1338+ printk(KERN_ERR "PAX: bytes at PC: ");
1339+ for (i = 0; i < 8; i++) {
1340+ unsigned int c;
1341+ if (get_user(c, (unsigned int *)pc+i))
1342+ printk(KERN_CONT "???????? ");
1343+ else
1344+ printk(KERN_CONT "%08x ", c);
1345+ }
1346+ printk("\n");
1347+}
1348+#endif
1349+
1350 void __kprobes
1351 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1352 {
1353@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1354 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1355 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1356
1357- if ((vma->vm_flags & mask) != mask)
1358+ if ((vma->vm_flags & mask) != mask) {
1359+
1360+#ifdef CONFIG_PAX_PAGEEXEC
1361+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1362+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1363+ goto bad_area;
1364+
1365+ up_read(&mm->mmap_sem);
1366+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1367+ do_group_exit(SIGKILL);
1368+ }
1369+#endif
1370+
1371 goto bad_area;
1372
1373+ }
1374+
1375 survive:
1376 /*
1377 * If for any reason at all we couldn't handle the fault, make
1378diff -urNp linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c
1379--- linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1380+++ linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1381@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1382 /* At this point: (!vmm || addr < vmm->vm_end). */
1383 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1384 return -ENOMEM;
1385- if (!vmm || (addr + len) <= vmm->vm_start)
1386+ if (check_heap_stack_gap(vmm, addr, len))
1387 return addr;
1388 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1389 }
1390diff -urNp linux-2.6.32.45/arch/ia64/mm/init.c linux-2.6.32.45/arch/ia64/mm/init.c
1391--- linux-2.6.32.45/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1392+++ linux-2.6.32.45/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1393@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1394 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1395 vma->vm_end = vma->vm_start + PAGE_SIZE;
1396 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1397+
1398+#ifdef CONFIG_PAX_PAGEEXEC
1399+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1400+ vma->vm_flags &= ~VM_EXEC;
1401+
1402+#ifdef CONFIG_PAX_MPROTECT
1403+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1404+ vma->vm_flags &= ~VM_MAYEXEC;
1405+#endif
1406+
1407+ }
1408+#endif
1409+
1410 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1411 down_write(&current->mm->mmap_sem);
1412 if (insert_vm_struct(current->mm, vma)) {
1413diff -urNp linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c
1414--- linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1415+++ linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1416@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1417 return ret;
1418 }
1419
1420-static struct dma_map_ops sn_dma_ops = {
1421+static const struct dma_map_ops sn_dma_ops = {
1422 .alloc_coherent = sn_dma_alloc_coherent,
1423 .free_coherent = sn_dma_free_coherent,
1424 .map_page = sn_dma_map_page,
1425diff -urNp linux-2.6.32.45/arch/m32r/lib/usercopy.c linux-2.6.32.45/arch/m32r/lib/usercopy.c
1426--- linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1427+++ linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1428@@ -14,6 +14,9 @@
1429 unsigned long
1430 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1431 {
1432+ if ((long)n < 0)
1433+ return n;
1434+
1435 prefetch(from);
1436 if (access_ok(VERIFY_WRITE, to, n))
1437 __copy_user(to,from,n);
1438@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1439 unsigned long
1440 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1441 {
1442+ if ((long)n < 0)
1443+ return n;
1444+
1445 prefetchw(to);
1446 if (access_ok(VERIFY_READ, from, n))
1447 __copy_user_zeroing(to,from,n);
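The two m32r copy helpers above gain an early "(long)n < 0" bailout: a length that has underflowed to a huge unsigned value reads as negative once reinterpreted as signed, so it can be rejected before access_ok() is even consulted. A minimal userspace sketch of that reasoning (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long have = 16, want = 32;
	unsigned long n = have - want;	/* buggy length math underflows to a huge value */

	/* The same test the patched helpers perform before touching userspace. */
	if ((long)n < 0)
		printf("rejecting bogus copy length %lu (%ld as signed)\n", n, (long)n);
	return 0;
}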
1448diff -urNp linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c
1449--- linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1450+++ linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1451@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1452
1453 }
1454
1455-static struct platform_suspend_ops db1x_pm_ops = {
1456+static const struct platform_suspend_ops db1x_pm_ops = {
1457 .valid = suspend_valid_only_mem,
1458 .begin = db1x_pm_begin,
1459 .enter = db1x_pm_enter,
1460diff -urNp linux-2.6.32.45/arch/mips/include/asm/elf.h linux-2.6.32.45/arch/mips/include/asm/elf.h
1461--- linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1462+++ linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1463@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1464 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1465 #endif
1466
1467+#ifdef CONFIG_PAX_ASLR
1468+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1469+
1470+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1471+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1472+#endif
1473+
1474 #endif /* _ASM_ELF_H */
1475diff -urNp linux-2.6.32.45/arch/mips/include/asm/page.h linux-2.6.32.45/arch/mips/include/asm/page.h
1476--- linux-2.6.32.45/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1477+++ linux-2.6.32.45/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1478@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1479 #ifdef CONFIG_CPU_MIPS32
1480 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1481 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1482- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1483+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1484 #else
1485 typedef struct { unsigned long long pte; } pte_t;
1486 #define pte_val(x) ((x).pte)
1487diff -urNp linux-2.6.32.45/arch/mips/include/asm/reboot.h linux-2.6.32.45/arch/mips/include/asm/reboot.h
1488--- linux-2.6.32.45/arch/mips/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
1489+++ linux-2.6.32.45/arch/mips/include/asm/reboot.h 2011-08-21 17:35:02.000000000 -0400
1490@@ -9,7 +9,7 @@
1491 #ifndef _ASM_REBOOT_H
1492 #define _ASM_REBOOT_H
1493
1494-extern void (*_machine_restart)(char *command);
1495-extern void (*_machine_halt)(void);
1496+extern void (*__noreturn _machine_restart)(char *command);
1497+extern void (*__noreturn _machine_halt)(void);
1498
1499 #endif /* _ASM_REBOOT_H */
1500diff -urNp linux-2.6.32.45/arch/mips/include/asm/system.h linux-2.6.32.45/arch/mips/include/asm/system.h
1501--- linux-2.6.32.45/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1502+++ linux-2.6.32.45/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1503@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1504 */
1505 #define __ARCH_WANT_UNLOCKED_CTXSW
1506
1507-extern unsigned long arch_align_stack(unsigned long sp);
1508+#define arch_align_stack(x) ((x) & ~0xfUL)
1509
1510 #endif /* _ASM_SYSTEM_H */
1511diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c
1512--- linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1513+++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1514@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1515 #undef ELF_ET_DYN_BASE
1516 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1517
1518+#ifdef CONFIG_PAX_ASLR
1519+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1520+
1521+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1522+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1523+#endif
1524+
1525 #include <asm/processor.h>
1526 #include <linux/module.h>
1527 #include <linux/elfcore.h>
1528diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c
1529--- linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1530+++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1531@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1532 #undef ELF_ET_DYN_BASE
1533 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1534
1535+#ifdef CONFIG_PAX_ASLR
1536+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1537+
1538+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1539+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1540+#endif
1541+
1542 #include <asm/processor.h>
1543
1544 /*
1545diff -urNp linux-2.6.32.45/arch/mips/kernel/kgdb.c linux-2.6.32.45/arch/mips/kernel/kgdb.c
1546--- linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1547+++ linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1548@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1549 return -1;
1550 }
1551
1552+/* cannot be const */
1553 struct kgdb_arch arch_kgdb_ops;
1554
1555 /*
1556diff -urNp linux-2.6.32.45/arch/mips/kernel/process.c linux-2.6.32.45/arch/mips/kernel/process.c
1557--- linux-2.6.32.45/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1558+++ linux-2.6.32.45/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1559@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1560 out:
1561 return pc;
1562 }
1563-
1564-/*
1565- * Don't forget that the stack pointer must be aligned on a 8 bytes
1566- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1567- */
1568-unsigned long arch_align_stack(unsigned long sp)
1569-{
1570- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1571- sp -= get_random_int() & ~PAGE_MASK;
1572-
1573- return sp & ALMASK;
1574-}
1575diff -urNp linux-2.6.32.45/arch/mips/kernel/reset.c linux-2.6.32.45/arch/mips/kernel/reset.c
1576--- linux-2.6.32.45/arch/mips/kernel/reset.c 2011-03-27 14:31:47.000000000 -0400
1577+++ linux-2.6.32.45/arch/mips/kernel/reset.c 2011-08-21 17:35:26.000000000 -0400
1578@@ -19,8 +19,8 @@
1579 * So handle all using function pointers to machine specific
1580 * functions.
1581 */
1582-void (*_machine_restart)(char *command);
1583-void (*_machine_halt)(void);
1584+void (*__noreturn _machine_restart)(char *command);
1585+void (*__noreturn _machine_halt)(void);
1586 void (*pm_power_off)(void);
1587
1588 EXPORT_SYMBOL(pm_power_off);
1589@@ -29,16 +29,19 @@ void machine_restart(char *command)
1590 {
1591 if (_machine_restart)
1592 _machine_restart(command);
1593+ BUG();
1594 }
1595
1596 void machine_halt(void)
1597 {
1598 if (_machine_halt)
1599 _machine_halt();
1600+ BUG();
1601 }
1602
1603 void machine_power_off(void)
1604 {
1605 if (pm_power_off)
1606 pm_power_off();
1607+ BUG();
1608 }
1609diff -urNp linux-2.6.32.45/arch/mips/kernel/syscall.c linux-2.6.32.45/arch/mips/kernel/syscall.c
1610--- linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1611+++ linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1612@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1613 do_color_align = 0;
1614 if (filp || (flags & MAP_SHARED))
1615 do_color_align = 1;
1616+
1617+#ifdef CONFIG_PAX_RANDMMAP
1618+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1619+#endif
1620+
1621 if (addr) {
1622 if (do_color_align)
1623 addr = COLOUR_ALIGN(addr, pgoff);
1624 else
1625 addr = PAGE_ALIGN(addr);
1626 vmm = find_vma(current->mm, addr);
1627- if (task_size - len >= addr &&
1628- (!vmm || addr + len <= vmm->vm_start))
1629+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1630 return addr;
1631 }
1632- addr = TASK_UNMAPPED_BASE;
1633+ addr = current->mm->mmap_base;
1634 if (do_color_align)
1635 addr = COLOUR_ALIGN(addr, pgoff);
1636 else
1637@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1638 /* At this point: (!vmm || addr < vmm->vm_end). */
1639 if (task_size - len < addr)
1640 return -ENOMEM;
1641- if (!vmm || addr + len <= vmm->vm_start)
1642+ if (check_heap_stack_gap(vmm, addr, len))
1643 return addr;
1644 addr = vmm->vm_end;
1645 if (do_color_align)
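Throughout this patch the open-coded test "!vma || addr + len <= vma->vm_start" in the various arch_get_unmapped_area() implementations is replaced by check_heap_stack_gap(vma, addr, len), whose definition is added elsewhere in the patch and is not shown in this excerpt. Conceptually it accepts a candidate range only if it ends at or below the next mapping, and additionally keeps a guard gap when that next mapping is a grows-down stack. A standalone sketch of that idea — the struct, flag value and gap size below are illustrative stand-ins, not the patch's actual definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's vm_area_struct and guard gap. */
struct vma { unsigned long vm_start, vm_end, vm_flags; };
#define VM_GROWSDOWN	0x0100UL
#define GUARD_GAP	(64UL << 10)		/* hypothetical 64 KiB gap */

static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
	if (!next)				/* nothing mapped above: fits */
		return true;
	if (addr + len > next->vm_start)	/* overlaps the next mapping */
		return false;
	if (next->vm_flags & VM_GROWSDOWN)	/* leave room below a stack */
		return addr + len + GUARD_GAP <= next->vm_start;
	return true;
}

int main(void)
{
	struct vma stack = { 0x7f0000, 0x800000, VM_GROWSDOWN };

	printf("%d\n", gap_ok(&stack, 0x700000, 0x1000));	/* 1: ends well below the stack */
	printf("%d\n", gap_ok(&stack, 0x7e8000, 0x4000));	/* 0: too close to the grows-down stack */
	return 0;
}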
1646diff -urNp linux-2.6.32.45/arch/mips/Makefile linux-2.6.32.45/arch/mips/Makefile
1647--- linux-2.6.32.45/arch/mips/Makefile 2011-03-27 14:31:47.000000000 -0400
1648+++ linux-2.6.32.45/arch/mips/Makefile 2011-08-21 19:26:52.000000000 -0400
1649@@ -51,6 +51,8 @@ endif
1650 cflags-y := -ffunction-sections
1651 cflags-y += $(call cc-option, -mno-check-zero-division)
1652
1653+cflags-y += -Wno-sign-compare -Wno-extra
1654+
1655 ifdef CONFIG_32BIT
1656 ld-emul = $(32bit-emul)
1657 vmlinux-32 = vmlinux
1658diff -urNp linux-2.6.32.45/arch/mips/mm/fault.c linux-2.6.32.45/arch/mips/mm/fault.c
1659--- linux-2.6.32.45/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1660+++ linux-2.6.32.45/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1661@@ -26,6 +26,23 @@
1662 #include <asm/ptrace.h>
1663 #include <asm/highmem.h> /* For VMALLOC_END */
1664
1665+#ifdef CONFIG_PAX_PAGEEXEC
1666+void pax_report_insns(void *pc, void *sp)
1667+{
1668+ unsigned long i;
1669+
1670+ printk(KERN_ERR "PAX: bytes at PC: ");
1671+ for (i = 0; i < 5; i++) {
1672+ unsigned int c;
1673+ if (get_user(c, (unsigned int *)pc+i))
1674+ printk(KERN_CONT "???????? ");
1675+ else
1676+ printk(KERN_CONT "%08x ", c);
1677+ }
1678+ printk("\n");
1679+}
1680+#endif
1681+
1682 /*
1683 * This routine handles page faults. It determines the address,
1684 * and the problem, and then passes it off to one of the appropriate
1685diff -urNp linux-2.6.32.45/arch/parisc/include/asm/elf.h linux-2.6.32.45/arch/parisc/include/asm/elf.h
1686--- linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1687+++ linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1688@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1689
1690 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1691
1692+#ifdef CONFIG_PAX_ASLR
1693+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1694+
1695+#define PAX_DELTA_MMAP_LEN 16
1696+#define PAX_DELTA_STACK_LEN 16
1697+#endif
1698+
1699 /* This yields a mask that user programs can use to figure out what
1700 instruction set this CPU supports. This could be done in user space,
1701 but it's not easy, and we've already done it here. */
1702diff -urNp linux-2.6.32.45/arch/parisc/include/asm/pgtable.h linux-2.6.32.45/arch/parisc/include/asm/pgtable.h
1703--- linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1704+++ linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1705@@ -207,6 +207,17 @@
1706 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1707 #define PAGE_COPY PAGE_EXECREAD
1708 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1709+
1710+#ifdef CONFIG_PAX_PAGEEXEC
1711+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1712+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1713+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1714+#else
1715+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1716+# define PAGE_COPY_NOEXEC PAGE_COPY
1717+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1718+#endif
1719+
1720 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1721 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1722 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1723diff -urNp linux-2.6.32.45/arch/parisc/kernel/module.c linux-2.6.32.45/arch/parisc/kernel/module.c
1724--- linux-2.6.32.45/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1725+++ linux-2.6.32.45/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1726@@ -95,16 +95,38 @@
1727
1728 /* three functions to determine where in the module core
1729 * or init pieces the location is */
1730+static inline int in_init_rx(struct module *me, void *loc)
1731+{
1732+ return (loc >= me->module_init_rx &&
1733+ loc < (me->module_init_rx + me->init_size_rx));
1734+}
1735+
1736+static inline int in_init_rw(struct module *me, void *loc)
1737+{
1738+ return (loc >= me->module_init_rw &&
1739+ loc < (me->module_init_rw + me->init_size_rw));
1740+}
1741+
1742 static inline int in_init(struct module *me, void *loc)
1743 {
1744- return (loc >= me->module_init &&
1745- loc <= (me->module_init + me->init_size));
1746+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1747+}
1748+
1749+static inline int in_core_rx(struct module *me, void *loc)
1750+{
1751+ return (loc >= me->module_core_rx &&
1752+ loc < (me->module_core_rx + me->core_size_rx));
1753+}
1754+
1755+static inline int in_core_rw(struct module *me, void *loc)
1756+{
1757+ return (loc >= me->module_core_rw &&
1758+ loc < (me->module_core_rw + me->core_size_rw));
1759 }
1760
1761 static inline int in_core(struct module *me, void *loc)
1762 {
1763- return (loc >= me->module_core &&
1764- loc <= (me->module_core + me->core_size));
1765+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1766 }
1767
1768 static inline int in_local(struct module *me, void *loc)
1769@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1770 }
1771
1772 /* align things a bit */
1773- me->core_size = ALIGN(me->core_size, 16);
1774- me->arch.got_offset = me->core_size;
1775- me->core_size += gots * sizeof(struct got_entry);
1776-
1777- me->core_size = ALIGN(me->core_size, 16);
1778- me->arch.fdesc_offset = me->core_size;
1779- me->core_size += fdescs * sizeof(Elf_Fdesc);
1780+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1781+ me->arch.got_offset = me->core_size_rw;
1782+ me->core_size_rw += gots * sizeof(struct got_entry);
1783+
1784+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1785+ me->arch.fdesc_offset = me->core_size_rw;
1786+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1787
1788 me->arch.got_max = gots;
1789 me->arch.fdesc_max = fdescs;
1790@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1791
1792 BUG_ON(value == 0);
1793
1794- got = me->module_core + me->arch.got_offset;
1795+ got = me->module_core_rw + me->arch.got_offset;
1796 for (i = 0; got[i].addr; i++)
1797 if (got[i].addr == value)
1798 goto out;
1799@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1800 #ifdef CONFIG_64BIT
1801 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1802 {
1803- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1804+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1805
1806 if (!value) {
1807 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1808@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1809
1810 /* Create new one */
1811 fdesc->addr = value;
1812- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1813+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1814 return (Elf_Addr)fdesc;
1815 }
1816 #endif /* CONFIG_64BIT */
1817@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1818
1819 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1820 end = table + sechdrs[me->arch.unwind_section].sh_size;
1821- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1822+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1823
1824 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1825 me->arch.unwind_section, table, end, gp);
1826diff -urNp linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c
1827--- linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1828+++ linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1829@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1830 /* At this point: (!vma || addr < vma->vm_end). */
1831 if (TASK_SIZE - len < addr)
1832 return -ENOMEM;
1833- if (!vma || addr + len <= vma->vm_start)
1834+ if (check_heap_stack_gap(vma, addr, len))
1835 return addr;
1836 addr = vma->vm_end;
1837 }
1838@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1839 /* At this point: (!vma || addr < vma->vm_end). */
1840 if (TASK_SIZE - len < addr)
1841 return -ENOMEM;
1842- if (!vma || addr + len <= vma->vm_start)
1843+ if (check_heap_stack_gap(vma, addr, len))
1844 return addr;
1845 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1846 if (addr < vma->vm_end) /* handle wraparound */
1847@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1848 if (flags & MAP_FIXED)
1849 return addr;
1850 if (!addr)
1851- addr = TASK_UNMAPPED_BASE;
1852+ addr = current->mm->mmap_base;
1853
1854 if (filp) {
1855 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1856diff -urNp linux-2.6.32.45/arch/parisc/kernel/traps.c linux-2.6.32.45/arch/parisc/kernel/traps.c
1857--- linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1858+++ linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1859@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1860
1861 down_read(&current->mm->mmap_sem);
1862 vma = find_vma(current->mm,regs->iaoq[0]);
1863- if (vma && (regs->iaoq[0] >= vma->vm_start)
1864- && (vma->vm_flags & VM_EXEC)) {
1865-
1866+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1867 fault_address = regs->iaoq[0];
1868 fault_space = regs->iasq[0];
1869
1870diff -urNp linux-2.6.32.45/arch/parisc/mm/fault.c linux-2.6.32.45/arch/parisc/mm/fault.c
1871--- linux-2.6.32.45/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1872+++ linux-2.6.32.45/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1873@@ -15,6 +15,7 @@
1874 #include <linux/sched.h>
1875 #include <linux/interrupt.h>
1876 #include <linux/module.h>
1877+#include <linux/unistd.h>
1878
1879 #include <asm/uaccess.h>
1880 #include <asm/traps.h>
1881@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1882 static unsigned long
1883 parisc_acctyp(unsigned long code, unsigned int inst)
1884 {
1885- if (code == 6 || code == 16)
1886+ if (code == 6 || code == 7 || code == 16)
1887 return VM_EXEC;
1888
1889 switch (inst & 0xf0000000) {
1890@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1891 }
1892 #endif
1893
1894+#ifdef CONFIG_PAX_PAGEEXEC
1895+/*
1896+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1897+ *
1898+ * returns 1 when task should be killed
1899+ * 2 when rt_sigreturn trampoline was detected
1900+ * 3 when unpatched PLT trampoline was detected
1901+ */
1902+static int pax_handle_fetch_fault(struct pt_regs *regs)
1903+{
1904+
1905+#ifdef CONFIG_PAX_EMUPLT
1906+ int err;
1907+
1908+ do { /* PaX: unpatched PLT emulation */
1909+ unsigned int bl, depwi;
1910+
1911+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1912+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1913+
1914+ if (err)
1915+ break;
1916+
1917+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1918+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1919+
1920+ err = get_user(ldw, (unsigned int *)addr);
1921+ err |= get_user(bv, (unsigned int *)(addr+4));
1922+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1923+
1924+ if (err)
1925+ break;
1926+
1927+ if (ldw == 0x0E801096U &&
1928+ bv == 0xEAC0C000U &&
1929+ ldw2 == 0x0E881095U)
1930+ {
1931+ unsigned int resolver, map;
1932+
1933+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1934+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1935+ if (err)
1936+ break;
1937+
1938+ regs->gr[20] = instruction_pointer(regs)+8;
1939+ regs->gr[21] = map;
1940+ regs->gr[22] = resolver;
1941+ regs->iaoq[0] = resolver | 3UL;
1942+ regs->iaoq[1] = regs->iaoq[0] + 4;
1943+ return 3;
1944+ }
1945+ }
1946+ } while (0);
1947+#endif
1948+
1949+#ifdef CONFIG_PAX_EMUTRAMP
1950+
1951+#ifndef CONFIG_PAX_EMUSIGRT
1952+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1953+ return 1;
1954+#endif
1955+
1956+ do { /* PaX: rt_sigreturn emulation */
1957+ unsigned int ldi1, ldi2, bel, nop;
1958+
1959+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1960+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1961+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1962+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1963+
1964+ if (err)
1965+ break;
1966+
1967+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1968+ ldi2 == 0x3414015AU &&
1969+ bel == 0xE4008200U &&
1970+ nop == 0x08000240U)
1971+ {
1972+ regs->gr[25] = (ldi1 & 2) >> 1;
1973+ regs->gr[20] = __NR_rt_sigreturn;
1974+ regs->gr[31] = regs->iaoq[1] + 16;
1975+ regs->sr[0] = regs->iasq[1];
1976+ regs->iaoq[0] = 0x100UL;
1977+ regs->iaoq[1] = regs->iaoq[0] + 4;
1978+ regs->iasq[0] = regs->sr[2];
1979+ regs->iasq[1] = regs->sr[2];
1980+ return 2;
1981+ }
1982+ } while (0);
1983+#endif
1984+
1985+ return 1;
1986+}
1987+
1988+void pax_report_insns(void *pc, void *sp)
1989+{
1990+ unsigned long i;
1991+
1992+ printk(KERN_ERR "PAX: bytes at PC: ");
1993+ for (i = 0; i < 5; i++) {
1994+ unsigned int c;
1995+ if (get_user(c, (unsigned int *)pc+i))
1996+ printk(KERN_CONT "???????? ");
1997+ else
1998+ printk(KERN_CONT "%08x ", c);
1999+ }
2000+ printk("\n");
2001+}
2002+#endif
2003+
2004 int fixup_exception(struct pt_regs *regs)
2005 {
2006 const struct exception_table_entry *fix;
2007@@ -192,8 +303,33 @@ good_area:
2008
2009 acc_type = parisc_acctyp(code,regs->iir);
2010
2011- if ((vma->vm_flags & acc_type) != acc_type)
2012+ if ((vma->vm_flags & acc_type) != acc_type) {
2013+
2014+#ifdef CONFIG_PAX_PAGEEXEC
2015+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2016+ (address & ~3UL) == instruction_pointer(regs))
2017+ {
2018+ up_read(&mm->mmap_sem);
2019+ switch (pax_handle_fetch_fault(regs)) {
2020+
2021+#ifdef CONFIG_PAX_EMUPLT
2022+ case 3:
2023+ return;
2024+#endif
2025+
2026+#ifdef CONFIG_PAX_EMUTRAMP
2027+ case 2:
2028+ return;
2029+#endif
2030+
2031+ }
2032+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2033+ do_group_exit(SIGKILL);
2034+ }
2035+#endif
2036+
2037 goto bad_area;
2038+ }
2039
2040 /*
2041 * If for any reason at all we couldn't handle the fault, make
2042diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/device.h linux-2.6.32.45/arch/powerpc/include/asm/device.h
2043--- linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
2044+++ linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
2045@@ -14,7 +14,7 @@ struct dev_archdata {
2046 struct device_node *of_node;
2047
2048 /* DMA operations on that device */
2049- struct dma_map_ops *dma_ops;
2050+ const struct dma_map_ops *dma_ops;
2051
2052 /*
2053 * When an iommu is in use, dma_data is used as a ptr to the base of the
2054diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h
2055--- linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
2056+++ linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
2057@@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
2058 #ifdef CONFIG_PPC64
2059 extern struct dma_map_ops dma_iommu_ops;
2060 #endif
2061-extern struct dma_map_ops dma_direct_ops;
2062+extern const struct dma_map_ops dma_direct_ops;
2063
2064-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2065+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2066 {
2067 /* We don't handle the NULL dev case for ISA for now. We could
2068 * do it via an out of line call but it is not needed for now. The
2069@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2070 return dev->archdata.dma_ops;
2071 }
2072
2073-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2074+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2075 {
2076 dev->archdata.dma_ops = ops;
2077 }
2078@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2079
2080 static inline int dma_supported(struct device *dev, u64 mask)
2081 {
2082- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2083+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2084
2085 if (unlikely(dma_ops == NULL))
2086 return 0;
2087@@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2088
2089 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2090 {
2091- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2092+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2093
2094 if (unlikely(dma_ops == NULL))
2095 return -EIO;
2096@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2097 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2098 dma_addr_t *dma_handle, gfp_t flag)
2099 {
2100- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2101+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2102 void *cpu_addr;
2103
2104 BUG_ON(!dma_ops);
2105@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2106 static inline void dma_free_coherent(struct device *dev, size_t size,
2107 void *cpu_addr, dma_addr_t dma_handle)
2108 {
2109- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2110+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2111
2112 BUG_ON(!dma_ops);
2113
2114@@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2115
2116 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2117 {
2118- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2119+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2120
2121 if (dma_ops->mapping_error)
2122 return dma_ops->mapping_error(dev, dma_addr);
2123diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/elf.h linux-2.6.32.45/arch/powerpc/include/asm/elf.h
2124--- linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2125+++ linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2126@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2127 the loader. We need to make sure that it is out of the way of the program
2128 that it will "exec", and that there is sufficient room for the brk. */
2129
2130-extern unsigned long randomize_et_dyn(unsigned long base);
2131-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2132+#define ELF_ET_DYN_BASE (0x20000000)
2133+
2134+#ifdef CONFIG_PAX_ASLR
2135+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2136+
2137+#ifdef __powerpc64__
2138+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2139+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2140+#else
2141+#define PAX_DELTA_MMAP_LEN 15
2142+#define PAX_DELTA_STACK_LEN 15
2143+#endif
2144+#endif
2145
2146 /*
2147 * Our registers are always unsigned longs, whether we're a 32 bit
2148@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2149 (0x7ff >> (PAGE_SHIFT - 12)) : \
2150 (0x3ffff >> (PAGE_SHIFT - 12)))
2151
2152-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2153-#define arch_randomize_brk arch_randomize_brk
2154-
2155 #endif /* __KERNEL__ */
2156
2157 /*
2158diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/iommu.h linux-2.6.32.45/arch/powerpc/include/asm/iommu.h
2159--- linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2160+++ linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2161@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2162 extern void iommu_init_early_dart(void);
2163 extern void iommu_init_early_pasemi(void);
2164
2165+/* dma-iommu.c */
2166+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2167+
2168 #ifdef CONFIG_PCI
2169 extern void pci_iommu_init(void);
2170 extern void pci_direct_iommu_init(void);
2171diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h
2172--- linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2173+++ linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2174@@ -26,6 +26,7 @@ enum km_type {
2175 KM_SOFTIRQ1,
2176 KM_PPC_SYNC_PAGE,
2177 KM_PPC_SYNC_ICACHE,
2178+ KM_CLEARPAGE,
2179 KM_TYPE_NR
2180 };
2181
2182diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page_64.h linux-2.6.32.45/arch/powerpc/include/asm/page_64.h
2183--- linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2184+++ linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2185@@ -180,15 +180,18 @@ do { \
2186 * stack by default, so in the absense of a PT_GNU_STACK program header
2187 * we turn execute permission off.
2188 */
2189-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2190- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2191+#define VM_STACK_DEFAULT_FLAGS32 \
2192+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2193+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2194
2195 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2196 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2197
2198+#ifndef CONFIG_PAX_PAGEEXEC
2199 #define VM_STACK_DEFAULT_FLAGS \
2200 (test_thread_flag(TIF_32BIT) ? \
2201 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2202+#endif
2203
2204 #include <asm-generic/getorder.h>
2205
2206diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page.h linux-2.6.32.45/arch/powerpc/include/asm/page.h
2207--- linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2208+++ linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-08-21 16:07:39.000000000 -0400
2209@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2210 * and needs to be executable. This means the whole heap ends
2211 * up being executable.
2212 */
2213-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2214- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2215+#define VM_DATA_DEFAULT_FLAGS32 \
2216+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2217+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2218
2219 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2220 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2221@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2222 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2223 #endif
2224
2225+#define ktla_ktva(addr) (addr)
2226+#define ktva_ktla(addr) (addr)
2227+
2228 #ifndef __ASSEMBLY__
2229
2230 #undef STRICT_MM_TYPECHECKS
2231diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pci.h linux-2.6.32.45/arch/powerpc/include/asm/pci.h
2232--- linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2233+++ linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2234@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2235 }
2236
2237 #ifdef CONFIG_PCI
2238-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2239-extern struct dma_map_ops *get_pci_dma_ops(void);
2240+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2241+extern const struct dma_map_ops *get_pci_dma_ops(void);
2242 #else /* CONFIG_PCI */
2243 #define set_pci_dma_ops(d)
2244 #define get_pci_dma_ops() NULL
2245diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h
2246--- linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2247+++ linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2248@@ -2,6 +2,7 @@
2249 #define _ASM_POWERPC_PGTABLE_H
2250 #ifdef __KERNEL__
2251
2252+#include <linux/const.h>
2253 #ifndef __ASSEMBLY__
2254 #include <asm/processor.h> /* For TASK_SIZE */
2255 #include <asm/mmu.h>
2256diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h
2257--- linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2258+++ linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2259@@ -21,6 +21,7 @@
2260 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2261 #define _PAGE_USER 0x004 /* usermode access allowed */
2262 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2263+#define _PAGE_EXEC _PAGE_GUARDED
2264 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2265 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2266 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2267diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h
2268--- linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
2269+++ linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h 2011-08-21 15:53:58.000000000 -0400
2270@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct p
2271 } while(0)
2272
2273 struct task_struct;
2274-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2275+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2276 extern int ptrace_put_reg(struct task_struct *task, int regno,
2277 unsigned long data);
2278
2279diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/reg.h linux-2.6.32.45/arch/powerpc/include/asm/reg.h
2280--- linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2281+++ linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2282@@ -191,6 +191,7 @@
2283 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2284 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2285 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2286+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2287 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2288 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2289 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2290diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h
2291--- linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2292+++ linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2293@@ -13,7 +13,7 @@
2294
2295 #include <linux/swiotlb.h>
2296
2297-extern struct dma_map_ops swiotlb_dma_ops;
2298+extern const struct dma_map_ops swiotlb_dma_ops;
2299
2300 static inline void dma_mark_clean(void *addr, size_t size) {}
2301
2302diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/system.h linux-2.6.32.45/arch/powerpc/include/asm/system.h
2303--- linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2304+++ linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2305@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2306 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2307 #endif
2308
2309-extern unsigned long arch_align_stack(unsigned long sp);
2310+#define arch_align_stack(x) ((x) & ~0xfUL)
2311
2312 /* Used in very early kernel initialization. */
2313 extern unsigned long reloc_offset(void);
2314diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h
2315--- linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2316+++ linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2317@@ -13,6 +13,8 @@
2318 #define VERIFY_READ 0
2319 #define VERIFY_WRITE 1
2320
2321+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2322+
2323 /*
2324 * The fs value determines whether argument validity checking should be
2325 * performed or not. If get_fs() == USER_DS, checking is performed, with
2326@@ -327,52 +329,6 @@ do { \
2327 extern unsigned long __copy_tofrom_user(void __user *to,
2328 const void __user *from, unsigned long size);
2329
2330-#ifndef __powerpc64__
2331-
2332-static inline unsigned long copy_from_user(void *to,
2333- const void __user *from, unsigned long n)
2334-{
2335- unsigned long over;
2336-
2337- if (access_ok(VERIFY_READ, from, n))
2338- return __copy_tofrom_user((__force void __user *)to, from, n);
2339- if ((unsigned long)from < TASK_SIZE) {
2340- over = (unsigned long)from + n - TASK_SIZE;
2341- return __copy_tofrom_user((__force void __user *)to, from,
2342- n - over) + over;
2343- }
2344- return n;
2345-}
2346-
2347-static inline unsigned long copy_to_user(void __user *to,
2348- const void *from, unsigned long n)
2349-{
2350- unsigned long over;
2351-
2352- if (access_ok(VERIFY_WRITE, to, n))
2353- return __copy_tofrom_user(to, (__force void __user *)from, n);
2354- if ((unsigned long)to < TASK_SIZE) {
2355- over = (unsigned long)to + n - TASK_SIZE;
2356- return __copy_tofrom_user(to, (__force void __user *)from,
2357- n - over) + over;
2358- }
2359- return n;
2360-}
2361-
2362-#else /* __powerpc64__ */
2363-
2364-#define __copy_in_user(to, from, size) \
2365- __copy_tofrom_user((to), (from), (size))
2366-
2367-extern unsigned long copy_from_user(void *to, const void __user *from,
2368- unsigned long n);
2369-extern unsigned long copy_to_user(void __user *to, const void *from,
2370- unsigned long n);
2371-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2372- unsigned long n);
2373-
2374-#endif /* __powerpc64__ */
2375-
2376 static inline unsigned long __copy_from_user_inatomic(void *to,
2377 const void __user *from, unsigned long n)
2378 {
2379@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2380 if (ret == 0)
2381 return 0;
2382 }
2383+
2384+ if (!__builtin_constant_p(n))
2385+ check_object_size(to, n, false);
2386+
2387 return __copy_tofrom_user((__force void __user *)to, from, n);
2388 }
2389
2390@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2391 if (ret == 0)
2392 return 0;
2393 }
2394+
2395+ if (!__builtin_constant_p(n))
2396+ check_object_size(from, n, true);
2397+
2398 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2399 }
2400
2401@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2402 return __copy_to_user_inatomic(to, from, size);
2403 }
2404
2405+#ifndef __powerpc64__
2406+
2407+static inline unsigned long __must_check copy_from_user(void *to,
2408+ const void __user *from, unsigned long n)
2409+{
2410+ unsigned long over;
2411+
2412+ if ((long)n < 0)
2413+ return n;
2414+
2415+ if (access_ok(VERIFY_READ, from, n)) {
2416+ if (!__builtin_constant_p(n))
2417+ check_object_size(to, n, false);
2418+ return __copy_tofrom_user((__force void __user *)to, from, n);
2419+ }
2420+ if ((unsigned long)from < TASK_SIZE) {
2421+ over = (unsigned long)from + n - TASK_SIZE;
2422+ if (!__builtin_constant_p(n - over))
2423+ check_object_size(to, n - over, false);
2424+ return __copy_tofrom_user((__force void __user *)to, from,
2425+ n - over) + over;
2426+ }
2427+ return n;
2428+}
2429+
2430+static inline unsigned long __must_check copy_to_user(void __user *to,
2431+ const void *from, unsigned long n)
2432+{
2433+ unsigned long over;
2434+
2435+ if ((long)n < 0)
2436+ return n;
2437+
2438+ if (access_ok(VERIFY_WRITE, to, n)) {
2439+ if (!__builtin_constant_p(n))
2440+ check_object_size(from, n, true);
2441+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2442+ }
2443+ if ((unsigned long)to < TASK_SIZE) {
2444+ over = (unsigned long)to + n - TASK_SIZE;
2445+ if (!__builtin_constant_p(n))
2446+ check_object_size(from, n - over, true);
2447+ return __copy_tofrom_user(to, (__force void __user *)from,
2448+ n - over) + over;
2449+ }
2450+ return n;
2451+}
2452+
2453+#else /* __powerpc64__ */
2454+
2455+#define __copy_in_user(to, from, size) \
2456+ __copy_tofrom_user((to), (from), (size))
2457+
2458+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2459+{
2460+ if ((long)n < 0 || n > INT_MAX)
2461+ return n;
2462+
2463+ if (!__builtin_constant_p(n))
2464+ check_object_size(to, n, false);
2465+
2466+ if (likely(access_ok(VERIFY_READ, from, n)))
2467+ n = __copy_from_user(to, from, n);
2468+ else
2469+ memset(to, 0, n);
2470+ return n;
2471+}
2472+
2473+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2474+{
2475+ if ((long)n < 0 || n > INT_MAX)
2476+ return n;
2477+
2478+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2479+ if (!__builtin_constant_p(n))
2480+ check_object_size(from, n, true);
2481+ n = __copy_to_user(to, from, n);
2482+ }
2483+ return n;
2484+}
2485+
2486+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2487+ unsigned long n);
2488+
2489+#endif /* __powerpc64__ */
2490+
2491 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2492
2493 static inline unsigned long clear_user(void __user *addr, unsigned long size)
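The reworked powerpc copy_from_user()/copy_to_user() above route every non-constant length through check_object_size() — declared near the top of this header, defined elsewhere in the patch — while constant-size copies skip the runtime check, since those can be validated at compile time. A small userspace illustration of how __builtin_constant_p() makes that split; the check function here is a hypothetical stand-in, not the patch's implementation:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's runtime object-size check. */
static void check_object_size(const void *ptr, unsigned long n, int to_user)
{
	printf("runtime check: %p, %lu bytes, %s user\n",
	       ptr, n, to_user ? "to" : "from");
}

/* Sketch of the pattern used in the patched helpers
 * (memcpy stands in for __copy_tofrom_user). */
#define copy_to_user_sketch(dst, src, n)			\
do {								\
	if (!__builtin_constant_p(n))				\
		check_object_size((src), (n), 1);		\
	memcpy((dst), (src), (n));				\
} while (0)

int main(int argc, char **argv)
{
	char src[64] = "example", dst[64];
	unsigned long n = (unsigned long)argc * 8;	/* not a compile-time constant */

	(void)argv;
	if (n > sizeof(src))				/* keep the sketch in bounds */
		n = sizeof(src);

	copy_to_user_sketch(dst, src, 8);	/* constant length: no runtime call */
	copy_to_user_sketch(dst, src, n);	/* variable length: checked */
	return 0;
}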
2494diff -urNp linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c
2495--- linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2496+++ linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2497@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2498 &cache_assoc_attr,
2499 };
2500
2501-static struct sysfs_ops cache_index_ops = {
2502+static const struct sysfs_ops cache_index_ops = {
2503 .show = cache_index_show,
2504 };
2505
2506diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma.c linux-2.6.32.45/arch/powerpc/kernel/dma.c
2507--- linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2508+++ linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2509@@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2510 }
2511 #endif
2512
2513-struct dma_map_ops dma_direct_ops = {
2514+const struct dma_map_ops dma_direct_ops = {
2515 .alloc_coherent = dma_direct_alloc_coherent,
2516 .free_coherent = dma_direct_free_coherent,
2517 .map_sg = dma_direct_map_sg,
2518diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c
2519--- linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2520+++ linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2521@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2522 }
2523
2524 /* We support DMA to/from any memory page via the iommu */
2525-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2526+int dma_iommu_dma_supported(struct device *dev, u64 mask)
2527 {
2528 struct iommu_table *tbl = get_iommu_table_base(dev);
2529
2530diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c
2531--- linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2532+++ linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2533@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2534 * map_page, and unmap_page on highmem, use normal dma_ops
2535 * for everything else.
2536 */
2537-struct dma_map_ops swiotlb_dma_ops = {
2538+const struct dma_map_ops swiotlb_dma_ops = {
2539 .alloc_coherent = dma_direct_alloc_coherent,
2540 .free_coherent = dma_direct_free_coherent,
2541 .map_sg = swiotlb_map_sg_attrs,
2542diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S
2543--- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2544+++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2545@@ -455,6 +455,7 @@ storage_fault_common:
2546 std r14,_DAR(r1)
2547 std r15,_DSISR(r1)
2548 addi r3,r1,STACK_FRAME_OVERHEAD
2549+ bl .save_nvgprs
2550 mr r4,r14
2551 mr r5,r15
2552 ld r14,PACA_EXGEN+EX_R14(r13)
2553@@ -464,8 +465,7 @@ storage_fault_common:
2554 cmpdi r3,0
2555 bne- 1f
2556 b .ret_from_except_lite
2557-1: bl .save_nvgprs
2558- mr r5,r3
2559+1: mr r5,r3
2560 addi r3,r1,STACK_FRAME_OVERHEAD
2561 ld r4,_DAR(r1)
2562 bl .bad_page_fault
2563diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S
2564--- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2565+++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2566@@ -818,10 +818,10 @@ handle_page_fault:
2567 11: ld r4,_DAR(r1)
2568 ld r5,_DSISR(r1)
2569 addi r3,r1,STACK_FRAME_OVERHEAD
2570+ bl .save_nvgprs
2571 bl .do_page_fault
2572 cmpdi r3,0
2573 beq+ 13f
2574- bl .save_nvgprs
2575 mr r5,r3
2576 addi r3,r1,STACK_FRAME_OVERHEAD
2577 lwz r4,_DAR(r1)
2578diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c
2579--- linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2580+++ linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2581@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2582 return 1;
2583 }
2584
2585-static struct dma_map_ops ibmebus_dma_ops = {
2586+static const struct dma_map_ops ibmebus_dma_ops = {
2587 .alloc_coherent = ibmebus_alloc_coherent,
2588 .free_coherent = ibmebus_free_coherent,
2589 .map_sg = ibmebus_map_sg,
2590diff -urNp linux-2.6.32.45/arch/powerpc/kernel/kgdb.c linux-2.6.32.45/arch/powerpc/kernel/kgdb.c
2591--- linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2592+++ linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2593@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2594 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2595 return 0;
2596
2597- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2598+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2599 regs->nip += 4;
2600
2601 return 1;
2602@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2603 /*
2604 * Global data
2605 */
2606-struct kgdb_arch arch_kgdb_ops = {
2607+const struct kgdb_arch arch_kgdb_ops = {
2608 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2609 };
2610
2611diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module_32.c linux-2.6.32.45/arch/powerpc/kernel/module_32.c
2612--- linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2613+++ linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2614@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2615 me->arch.core_plt_section = i;
2616 }
2617 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2618- printk("Module doesn't contain .plt or .init.plt sections.\n");
2619+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2620 return -ENOEXEC;
2621 }
2622
2623@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2624
2625 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2626 /* Init, or core PLT? */
2627- if (location >= mod->module_core
2628- && location < mod->module_core + mod->core_size)
2629+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2630+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2631 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2632- else
2633+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2634+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2635 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2636+ else {
2637+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2638+ return ~0UL;
2639+ }
2640
2641 /* Find this entry, or if that fails, the next avail. entry */
2642 while (entry->jump[0]) {
2643diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module.c linux-2.6.32.45/arch/powerpc/kernel/module.c
2644--- linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2645+++ linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2646@@ -31,11 +31,24 @@
2647
2648 LIST_HEAD(module_bug_list);
2649
2650+#ifdef CONFIG_PAX_KERNEXEC
2651 void *module_alloc(unsigned long size)
2652 {
2653 if (size == 0)
2654 return NULL;
2655
2656+ return vmalloc(size);
2657+}
2658+
2659+void *module_alloc_exec(unsigned long size)
2660+#else
2661+void *module_alloc(unsigned long size)
2662+#endif
2663+
2664+{
2665+ if (size == 0)
2666+ return NULL;
2667+
2668 return vmalloc_exec(size);
2669 }
2670
2671@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2672 vfree(module_region);
2673 }
2674
2675+#ifdef CONFIG_PAX_KERNEXEC
2676+void module_free_exec(struct module *mod, void *module_region)
2677+{
2678+ module_free(mod, module_region);
2679+}
2680+#endif
2681+
2682 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2683 const Elf_Shdr *sechdrs,
2684 const char *name)
2685diff -urNp linux-2.6.32.45/arch/powerpc/kernel/pci-common.c linux-2.6.32.45/arch/powerpc/kernel/pci-common.c
2686--- linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2687+++ linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2688@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2689 unsigned int ppc_pci_flags = 0;
2690
2691
2692-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2693+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2694
2695-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2696+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2697 {
2698 pci_dma_ops = dma_ops;
2699 }
2700
2701-struct dma_map_ops *get_pci_dma_ops(void)
2702+const struct dma_map_ops *get_pci_dma_ops(void)
2703 {
2704 return pci_dma_ops;
2705 }
2706diff -urNp linux-2.6.32.45/arch/powerpc/kernel/process.c linux-2.6.32.45/arch/powerpc/kernel/process.c
2707--- linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2708+++ linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2709@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2710 * Lookup NIP late so we have the best change of getting the
2711 * above info out without failing
2712 */
2713- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2714- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2715+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2716+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2717 #endif
2718 show_stack(current, (unsigned long *) regs->gpr[1]);
2719 if (!user_mode(regs))
2720@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2721 newsp = stack[0];
2722 ip = stack[STACK_FRAME_LR_SAVE];
2723 if (!firstframe || ip != lr) {
2724- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2725+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2726 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2727 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2728- printk(" (%pS)",
2729+ printk(" (%pA)",
2730 (void *)current->ret_stack[curr_frame].ret);
2731 curr_frame--;
2732 }
2733@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2734 struct pt_regs *regs = (struct pt_regs *)
2735 (sp + STACK_FRAME_OVERHEAD);
2736 lr = regs->link;
2737- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2738+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2739 regs->trap, (void *)regs->nip, (void *)lr);
2740 firstframe = 1;
2741 }
2742@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2743 }
2744
2745 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2746-
2747-unsigned long arch_align_stack(unsigned long sp)
2748-{
2749- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2750- sp -= get_random_int() & ~PAGE_MASK;
2751- return sp & ~0xf;
2752-}
2753-
2754-static inline unsigned long brk_rnd(void)
2755-{
2756- unsigned long rnd = 0;
2757-
2758- /* 8MB for 32bit, 1GB for 64bit */
2759- if (is_32bit_task())
2760- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2761- else
2762- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2763-
2764- return rnd << PAGE_SHIFT;
2765-}
2766-
2767-unsigned long arch_randomize_brk(struct mm_struct *mm)
2768-{
2769- unsigned long base = mm->brk;
2770- unsigned long ret;
2771-
2772-#ifdef CONFIG_PPC_STD_MMU_64
2773- /*
2774- * If we are using 1TB segments and we are allowed to randomise
2775- * the heap, we can put it above 1TB so it is backed by a 1TB
2776- * segment. Otherwise the heap will be in the bottom 1TB
2777- * which always uses 256MB segments and this may result in a
2778- * performance penalty.
2779- */
2780- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2781- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2782-#endif
2783-
2784- ret = PAGE_ALIGN(base + brk_rnd());
2785-
2786- if (ret < mm->brk)
2787- return mm->brk;
2788-
2789- return ret;
2790-}
2791-
2792-unsigned long randomize_et_dyn(unsigned long base)
2793-{
2794- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2795-
2796- if (ret < base)
2797- return base;
2798-
2799- return ret;
2800-}
2801diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ptrace.c linux-2.6.32.45/arch/powerpc/kernel/ptrace.c
2802--- linux-2.6.32.45/arch/powerpc/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
2803+++ linux-2.6.32.45/arch/powerpc/kernel/ptrace.c 2011-08-21 15:53:39.000000000 -0400
2804@@ -86,7 +86,7 @@ static int set_user_trap(struct task_str
2805 /*
2806 * Get contents of register REGNO in task TASK.
2807 */
2808-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
2809+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
2810 {
2811 if (task->thread.regs == NULL)
2812 return -EIO;
2813@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *chi
2814
2815 CHECK_FULL_REGS(child->thread.regs);
2816 if (index < PT_FPR0) {
2817- tmp = ptrace_get_reg(child, (int) index);
2818+ tmp = ptrace_get_reg(child, index);
2819 } else {
2820 flush_fp_to_thread(child);
2821 tmp = ((unsigned long *)child->thread.fpr)
2822diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_32.c linux-2.6.32.45/arch/powerpc/kernel/signal_32.c
2823--- linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2824+++ linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2825@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2826 /* Save user registers on the stack */
2827 frame = &rt_sf->uc.uc_mcontext;
2828 addr = frame;
2829- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2830+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2831 if (save_user_regs(regs, frame, 0, 1))
2832 goto badframe;
2833 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2834diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_64.c linux-2.6.32.45/arch/powerpc/kernel/signal_64.c
2835--- linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2836+++ linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2837@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2838 current->thread.fpscr.val = 0;
2839
2840 /* Set up to return from userspace. */
2841- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2842+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2843 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2844 } else {
2845 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2846diff -urNp linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c
2847--- linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2848+++ linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2849@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2850 if (oldlenp) {
2851 if (!error) {
2852 if (get_user(oldlen, oldlenp) ||
2853- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2854+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2855+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2856 error = -EFAULT;
2857 }
2858- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2859 }
2860 return error;
2861 }
2862diff -urNp linux-2.6.32.45/arch/powerpc/kernel/traps.c linux-2.6.32.45/arch/powerpc/kernel/traps.c
2863--- linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2864+++ linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2865@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2866 static inline void pmac_backlight_unblank(void) { }
2867 #endif
2868
2869+extern void gr_handle_kernel_exploit(void);
2870+
2871 int die(const char *str, struct pt_regs *regs, long err)
2872 {
2873 static struct {
2874@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2875 if (panic_on_oops)
2876 panic("Fatal exception");
2877
2878+ gr_handle_kernel_exploit();
2879+
2880 oops_exit();
2881 do_exit(err);
2882
2883diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vdso.c linux-2.6.32.45/arch/powerpc/kernel/vdso.c
2884--- linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2885+++ linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2886@@ -36,6 +36,7 @@
2887 #include <asm/firmware.h>
2888 #include <asm/vdso.h>
2889 #include <asm/vdso_datapage.h>
2890+#include <asm/mman.h>
2891
2892 #include "setup.h"
2893
2894@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2895 vdso_base = VDSO32_MBASE;
2896 #endif
2897
2898- current->mm->context.vdso_base = 0;
2899+ current->mm->context.vdso_base = ~0UL;
2900
2901 /* vDSO has a problem and was disabled, just don't "enable" it for the
2902 * process
2903@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2904 vdso_base = get_unmapped_area(NULL, vdso_base,
2905 (vdso_pages << PAGE_SHIFT) +
2906 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2907- 0, 0);
2908+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2909 if (IS_ERR_VALUE(vdso_base)) {
2910 rc = vdso_base;
2911 goto fail_mmapsem;
2912diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vio.c linux-2.6.32.45/arch/powerpc/kernel/vio.c
2913--- linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2914+++ linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2915@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2916 vio_cmo_dealloc(viodev, alloc_size);
2917 }
2918
2919-struct dma_map_ops vio_dma_mapping_ops = {
2920+static const struct dma_map_ops vio_dma_mapping_ops = {
2921 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2922 .free_coherent = vio_dma_iommu_free_coherent,
2923 .map_sg = vio_dma_iommu_map_sg,
2924 .unmap_sg = vio_dma_iommu_unmap_sg,
2925+ .dma_supported = dma_iommu_dma_supported,
2926 .map_page = vio_dma_iommu_map_page,
2927 .unmap_page = vio_dma_iommu_unmap_page,
2928
2929@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2930
2931 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2932 {
2933- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2934 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2935 }
2936
2937diff -urNp linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c
2938--- linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2939+++ linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2940@@ -9,22 +9,6 @@
2941 #include <linux/module.h>
2942 #include <asm/uaccess.h>
2943
2944-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2945-{
2946- if (likely(access_ok(VERIFY_READ, from, n)))
2947- n = __copy_from_user(to, from, n);
2948- else
2949- memset(to, 0, n);
2950- return n;
2951-}
2952-
2953-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2954-{
2955- if (likely(access_ok(VERIFY_WRITE, to, n)))
2956- n = __copy_to_user(to, from, n);
2957- return n;
2958-}
2959-
2960 unsigned long copy_in_user(void __user *to, const void __user *from,
2961 unsigned long n)
2962 {
2963@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2964 return n;
2965 }
2966
2967-EXPORT_SYMBOL(copy_from_user);
2968-EXPORT_SYMBOL(copy_to_user);
2969 EXPORT_SYMBOL(copy_in_user);
2970
2971diff -urNp linux-2.6.32.45/arch/powerpc/Makefile linux-2.6.32.45/arch/powerpc/Makefile
2972--- linux-2.6.32.45/arch/powerpc/Makefile 2011-03-27 14:31:47.000000000 -0400
2973+++ linux-2.6.32.45/arch/powerpc/Makefile 2011-08-21 19:27:08.000000000 -0400
2974@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2975 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2976 CPP = $(CC) -E $(KBUILD_CFLAGS)
2977
2978+cflags-y += -Wno-sign-compare -Wno-extra
2979+
2980 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2981
2982 ifeq ($(CONFIG_PPC64),y)
2983diff -urNp linux-2.6.32.45/arch/powerpc/mm/fault.c linux-2.6.32.45/arch/powerpc/mm/fault.c
2984--- linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2985+++ linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2986@@ -30,6 +30,10 @@
2987 #include <linux/kprobes.h>
2988 #include <linux/kdebug.h>
2989 #include <linux/perf_event.h>
2990+#include <linux/slab.h>
2991+#include <linux/pagemap.h>
2992+#include <linux/compiler.h>
2993+#include <linux/unistd.h>
2994
2995 #include <asm/firmware.h>
2996 #include <asm/page.h>
2997@@ -40,6 +44,7 @@
2998 #include <asm/uaccess.h>
2999 #include <asm/tlbflush.h>
3000 #include <asm/siginfo.h>
3001+#include <asm/ptrace.h>
3002
3003
3004 #ifdef CONFIG_KPROBES
3005@@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
3006 }
3007 #endif
3008
3009+#ifdef CONFIG_PAX_PAGEEXEC
3010+/*
3011+ * PaX: decide what to do with offenders (regs->nip = fault address)
3012+ *
3013+ * returns 1 when task should be killed
3014+ */
3015+static int pax_handle_fetch_fault(struct pt_regs *regs)
3016+{
3017+ return 1;
3018+}
3019+
3020+void pax_report_insns(void *pc, void *sp)
3021+{
3022+ unsigned long i;
3023+
3024+ printk(KERN_ERR "PAX: bytes at PC: ");
3025+ for (i = 0; i < 5; i++) {
3026+ unsigned int c;
3027+ if (get_user(c, (unsigned int __user *)pc+i))
3028+ printk(KERN_CONT "???????? ");
3029+ else
3030+ printk(KERN_CONT "%08x ", c);
3031+ }
3032+ printk("\n");
3033+}
3034+#endif
3035+
3036 /*
3037 * Check whether the instruction at regs->nip is a store using
3038 * an update addressing form which will update r1.
3039@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
3040 * indicate errors in DSISR but can validly be set in SRR1.
3041 */
3042 if (trap == 0x400)
3043- error_code &= 0x48200000;
3044+ error_code &= 0x58200000;
3045 else
3046 is_write = error_code & DSISR_ISSTORE;
3047 #else
3048@@ -250,7 +282,7 @@ good_area:
3049 * "undefined". Of those that can be set, this is the only
3050 * one which seems bad.
3051 */
3052- if (error_code & 0x10000000)
3053+ if (error_code & DSISR_GUARDED)
3054 /* Guarded storage error. */
3055 goto bad_area;
3056 #endif /* CONFIG_8xx */
3057@@ -265,7 +297,7 @@ good_area:
3058 * processors use the same I/D cache coherency mechanism
3059 * as embedded.
3060 */
3061- if (error_code & DSISR_PROTFAULT)
3062+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3063 goto bad_area;
3064 #endif /* CONFIG_PPC_STD_MMU */
3065
3066@@ -335,6 +367,23 @@ bad_area:
3067 bad_area_nosemaphore:
3068 /* User mode accesses cause a SIGSEGV */
3069 if (user_mode(regs)) {
3070+
3071+#ifdef CONFIG_PAX_PAGEEXEC
3072+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3073+#ifdef CONFIG_PPC_STD_MMU
3074+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3075+#else
3076+ if (is_exec && regs->nip == address) {
3077+#endif
3078+ switch (pax_handle_fetch_fault(regs)) {
3079+ }
3080+
3081+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3082+ do_group_exit(SIGKILL);
3083+ }
3084+ }
3085+#endif
3086+
3087 _exception(SIGSEGV, regs, code, address);
3088 return 0;
3089 }
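The PAGEEXEC hunk above adds two PaX helpers to the powerpc fault path: pax_handle_fetch_fault() decides what to do with an instruction-fetch fault (here it always returns 1, i.e. kill the task), and pax_report_insns() logs the five 32-bit words at the faulting PC, going through get_user() because that address may be unmapped or unreadable. A user-space rendition of the dump loop over a canned buffer (the words are example opcodes resembling a PowerPC prologue; no fault handling is needed outside the kernel):

/*
 * User-space model of the pax_report_insns() loop added above: print the
 * five 32-bit words starting at a given code address.
 */
#include <stdio.h>
#include <inttypes.h>

static void report_insns(const void *pc)
{
	const uint32_t *insn = pc;

	printf("PAX: bytes at PC: ");
	for (int i = 0; i < 5; i++)
		printf("%08" PRIx32 " ", insn[i]);	/* the kernel reads each word with get_user() */
	printf("\n");
}

int main(void)
{
	/* example words only: mflr r0; stwu r1,-48(r1); stw r31,44(r1); nop; blr */
	static const uint32_t code[5] = {
		0x7c0802a6, 0x9421ffd0, 0x93e1002c, 0x60000000, 0x4e800020
	};

	report_insns(code);
	return 0;
}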
3090diff -urNp linux-2.6.32.45/arch/powerpc/mm/mem.c linux-2.6.32.45/arch/powerpc/mm/mem.c
3091--- linux-2.6.32.45/arch/powerpc/mm/mem.c 2011-03-27 14:31:47.000000000 -0400
3092+++ linux-2.6.32.45/arch/powerpc/mm/mem.c 2011-08-21 15:50:39.000000000 -0400
3093@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(voi
3094 {
3095 unsigned long lmb_next_region_start_pfn,
3096 lmb_region_max_pfn;
3097- int i;
3098+ unsigned int i;
3099
3100 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3101 lmb_region_max_pfn =
3102diff -urNp linux-2.6.32.45/arch/powerpc/mm/mmap_64.c linux-2.6.32.45/arch/powerpc/mm/mmap_64.c
3103--- linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
3104+++ linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
3105@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
3106 */
3107 if (mmap_is_legacy()) {
3108 mm->mmap_base = TASK_UNMAPPED_BASE;
3109+
3110+#ifdef CONFIG_PAX_RANDMMAP
3111+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3112+ mm->mmap_base += mm->delta_mmap;
3113+#endif
3114+
3115 mm->get_unmapped_area = arch_get_unmapped_area;
3116 mm->unmap_area = arch_unmap_area;
3117 } else {
3118 mm->mmap_base = mmap_base();
3119+
3120+#ifdef CONFIG_PAX_RANDMMAP
3121+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3122+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3123+#endif
3124+
3125 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3126 mm->unmap_area = arch_unmap_area_topdown;
3127 }
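arch_pick_mmap_layout() above applies the PaX randomization deltas to the mmap base: added to TASK_UNMAPPED_BASE in the legacy bottom-up layout, and subtracted (together with the stack delta) from mmap_base() in the top-down layout, presumably so top-down allocations also stay clear of the randomized stack region. A small sketch of that arithmetic; the way delta_mmap/delta_stack are derived from the PAX_DELTA_*_LEN widths is an assumption here, the real derivation lives elsewhere in the patch:

/*
 * Sketch of the PAX_RANDMMAP base adjustment shown above.  The deltas are
 * modelled as "random value masked to PAX_DELTA_*_LEN bits, in pages"; that
 * derivation is an assumption for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT		12UL
#define TASK_UNMAPPED_BASE	0x20000000UL	/* illustrative value */
#define PAX_DELTA_MMAP_LEN	16
#define PAX_DELTA_STACK_LEN	16

int main(void)
{
	unsigned long delta_mmap  = ((unsigned long)rand() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
	unsigned long delta_stack = ((unsigned long)rand() & ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
	unsigned long top = 0xc0000000UL;	/* stand-in for mmap_base() */

	printf("legacy (bottom-up) base: 0x%lx\n", TASK_UNMAPPED_BASE + delta_mmap);
	printf("top-down           base: 0x%lx\n", top - (delta_mmap + delta_stack));
	return 0;
}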
3128diff -urNp linux-2.6.32.45/arch/powerpc/mm/slice.c linux-2.6.32.45/arch/powerpc/mm/slice.c
3129--- linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3130+++ linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3131@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3132 if ((mm->task_size - len) < addr)
3133 return 0;
3134 vma = find_vma(mm, addr);
3135- return (!vma || (addr + len) <= vma->vm_start);
3136+ return check_heap_stack_gap(vma, addr, len);
3137 }
3138
3139 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3140@@ -256,7 +256,7 @@ full_search:
3141 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3142 continue;
3143 }
3144- if (!vma || addr + len <= vma->vm_start) {
3145+ if (check_heap_stack_gap(vma, addr, len)) {
3146 /*
3147 * Remember the place where we stopped the search:
3148 */
3149@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3150 }
3151 }
3152
3153- addr = mm->mmap_base;
3154- while (addr > len) {
3155+ if (mm->mmap_base < len)
3156+ addr = -ENOMEM;
3157+ else
3158+ addr = mm->mmap_base - len;
3159+
3160+ while (!IS_ERR_VALUE(addr)) {
3161 /* Go down by chunk size */
3162- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3163+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3164
3165 /* Check for hit with different page size */
3166 mask = slice_range_to_mask(addr, len);
3167@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3168 * return with success:
3169 */
3170 vma = find_vma(mm, addr);
3171- if (!vma || (addr + len) <= vma->vm_start) {
3172+ if (check_heap_stack_gap(vma, addr, len)) {
3173 /* remember the address as a hint for next time */
3174 if (use_cache)
3175 mm->free_area_cache = addr;
3176@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3177 mm->cached_hole_size = vma->vm_start - addr;
3178
3179 /* try just below the current vma->vm_start */
3180- addr = vma->vm_start;
3181+ addr = skip_heap_stack_gap(vma, len);
3182 }
3183
3184 /*
3185@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3186 if (fixed && addr > (mm->task_size - len))
3187 return -EINVAL;
3188
3189+#ifdef CONFIG_PAX_RANDMMAP
3190+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3191+ addr = 0;
3192+#endif
3193+
3194 /* If hint, make sure it matches our alignment restrictions */
3195 if (!fixed && addr) {
3196 addr = _ALIGN_UP(addr, 1ul << pshift);
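Throughout these mm hunks the open-coded "!vma || addr + len <= vma->vm_start" test is replaced by check_heap_stack_gap(), and the top-down searches step with skip_heap_stack_gap() until IS_ERR_VALUE() signals that the search ran out of address space. The helper itself is defined elsewhere in the patch; as the name suggests, it appears to additionally enforce a guard gap between a candidate mapping and a stack VMA growing down towards it. A simplified model of such a predicate, with the gap size and the VM_GROWSDOWN handling as stated assumptions:

/*
 * Simplified model of a check_heap_stack_gap()-style predicate.  The real
 * helper lives elsewhere in this patch; the fixed gap size and the
 * VM_GROWSDOWN-only handling here are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN	0x0100UL
#define HEAP_STACK_GAP	(64UL * 1024)	/* assumed gap; in the kernel this is tunable */

struct vma { unsigned long vm_start, vm_end, vm_flags; };

static bool check_heap_stack_gap(const struct vma *vma,
				 unsigned long addr, unsigned long len)
{
	if (!vma)				/* nothing above: the range fits */
		return true;
	if (addr + len > vma->vm_start)		/* overlaps the next mapping */
		return false;
	if (vma->vm_flags & VM_GROWSDOWN)	/* next mapping is a stack: keep a gap */
		return vma->vm_start - (addr + len) >= HEAP_STACK_GAP;
	return true;
}

int main(void)
{
	struct vma stack = { 0x7f0000000000UL, 0x7f0000100000UL, VM_GROWSDOWN };

	printf("%d\n", check_heap_stack_gap(&stack, 0x7effffff0000UL, 0x10000));	/* 0: no gap left */
	printf("%d\n", check_heap_stack_gap(&stack, 0x7effffe00000UL, 0x10000));	/* 1: gap preserved */
	return 0;
}

This model only captures the common down-growing-stack case; the plain overlap check is the part that is visibly shared with the code being replaced.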
3197diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c
3198--- linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3199+++ linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3200@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3201 lite5200_pm_target_state = PM_SUSPEND_ON;
3202 }
3203
3204-static struct platform_suspend_ops lite5200_pm_ops = {
3205+static const struct platform_suspend_ops lite5200_pm_ops = {
3206 .valid = lite5200_pm_valid,
3207 .begin = lite5200_pm_begin,
3208 .prepare = lite5200_pm_prepare,
3209diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3210--- linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3211+++ linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3212@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3213 iounmap(mbar);
3214 }
3215
3216-static struct platform_suspend_ops mpc52xx_pm_ops = {
3217+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3218 .valid = mpc52xx_pm_valid,
3219 .prepare = mpc52xx_pm_prepare,
3220 .enter = mpc52xx_pm_enter,
3221diff -urNp linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c
3222--- linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3223+++ linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3224@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3225 return ret;
3226 }
3227
3228-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3229+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3230 .valid = mpc83xx_suspend_valid,
3231 .begin = mpc83xx_suspend_begin,
3232 .enter = mpc83xx_suspend_enter,
3233diff -urNp linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c
3234--- linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3235+++ linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3236@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3237
3238 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3239
3240-struct dma_map_ops dma_iommu_fixed_ops = {
3241+const struct dma_map_ops dma_iommu_fixed_ops = {
3242 .alloc_coherent = dma_fixed_alloc_coherent,
3243 .free_coherent = dma_fixed_free_coherent,
3244 .map_sg = dma_fixed_map_sg,
3245diff -urNp linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c
3246--- linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3247+++ linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3248@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3249 return mask >= DMA_BIT_MASK(32);
3250 }
3251
3252-static struct dma_map_ops ps3_sb_dma_ops = {
3253+static const struct dma_map_ops ps3_sb_dma_ops = {
3254 .alloc_coherent = ps3_alloc_coherent,
3255 .free_coherent = ps3_free_coherent,
3256 .map_sg = ps3_sb_map_sg,
3257@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3258 .unmap_page = ps3_unmap_page,
3259 };
3260
3261-static struct dma_map_ops ps3_ioc0_dma_ops = {
3262+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3263 .alloc_coherent = ps3_alloc_coherent,
3264 .free_coherent = ps3_free_coherent,
3265 .map_sg = ps3_ioc0_map_sg,
3266diff -urNp linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig
3267--- linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3268+++ linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3269@@ -2,6 +2,8 @@ config PPC_PSERIES
3270 depends on PPC64 && PPC_BOOK3S
3271 bool "IBM pSeries & new (POWER5-based) iSeries"
3272 select MPIC
3273+ select PCI_MSI
3274+ select XICS
3275 select PPC_I8259
3276 select PPC_RTAS
3277 select RTAS_ERROR_LOGGING
3278diff -urNp linux-2.6.32.45/arch/s390/include/asm/elf.h linux-2.6.32.45/arch/s390/include/asm/elf.h
3279--- linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3280+++ linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3281@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3282 that it will "exec", and that there is sufficient room for the brk. */
3283 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3284
3285+#ifdef CONFIG_PAX_ASLR
3286+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3287+
3288+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3289+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3290+#endif
3291+
3292 /* This yields a mask that user programs can use to figure out what
3293 instruction set this CPU supports. */
3294
3295diff -urNp linux-2.6.32.45/arch/s390/include/asm/setup.h linux-2.6.32.45/arch/s390/include/asm/setup.h
3296--- linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3297+++ linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3298@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3299 void detect_memory_layout(struct mem_chunk chunk[]);
3300
3301 #ifdef CONFIG_S390_SWITCH_AMODE
3302-extern unsigned int switch_amode;
3303+#define switch_amode (1)
3304 #else
3305 #define switch_amode (0)
3306 #endif
3307
3308 #ifdef CONFIG_S390_EXEC_PROTECT
3309-extern unsigned int s390_noexec;
3310+#define s390_noexec (1)
3311 #else
3312 #define s390_noexec (0)
3313 #endif
3314diff -urNp linux-2.6.32.45/arch/s390/include/asm/uaccess.h linux-2.6.32.45/arch/s390/include/asm/uaccess.h
3315--- linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3316+++ linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3317@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3318 copy_to_user(void __user *to, const void *from, unsigned long n)
3319 {
3320 might_fault();
3321+
3322+ if ((long)n < 0)
3323+ return n;
3324+
3325 if (access_ok(VERIFY_WRITE, to, n))
3326 n = __copy_to_user(to, from, n);
3327 return n;
3328@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3329 static inline unsigned long __must_check
3330 __copy_from_user(void *to, const void __user *from, unsigned long n)
3331 {
3332+ if ((long)n < 0)
3333+ return n;
3334+
3335 if (__builtin_constant_p(n) && (n <= 256))
3336 return uaccess.copy_from_user_small(n, from, to);
3337 else
3338@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3339 copy_from_user(void *to, const void __user *from, unsigned long n)
3340 {
3341 might_fault();
3342+
3343+ if ((long)n < 0)
3344+ return n;
3345+
3346 if (access_ok(VERIFY_READ, from, n))
3347 n = __copy_from_user(to, from, n);
3348 else
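The s390 copy_to_user()/copy_from_user()/__copy_from_user() changes above reject any length whose signed interpretation is negative before the access_ok() check and the copy. The point is that a length which went negative in signed arithmetic becomes an enormous value once it reaches the unsigned long parameter, and returning it unchanged reports the whole request as not copied, the usual failure convention for these helpers. A minimal user-space illustration of the conversion being defended against:

/*
 * Illustration of the failure mode behind the (long)n < 0 checks: a signed
 * length that underflows to -1 turns into ULONG_MAX once it reaches an
 * unsigned long parameter.  Purely a model, not kernel code.
 */
#include <stdio.h>
#include <string.h>

static unsigned long checked_copy(void *to, const void *from, unsigned long n)
{
	if ((long)n < 0)	/* reject lengths that were negative as signed */
		return n;	/* "n bytes not copied", matching the kernel helpers */
	memcpy(to, from, n);
	return 0;
}

int main(void)
{
	char src[16] = "payload", dst[16];
	long user_len = 7 - 8;			/* some buggy length computation */

	unsigned long not_copied = checked_copy(dst, src, (unsigned long)user_len);
	printf("len=%ld becomes %lu (0x%lx) as unsigned; bytes not copied: %lu\n",
	       user_len, (unsigned long)user_len, (unsigned long)user_len, not_copied);
	return 0;
}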
3349diff -urNp linux-2.6.32.45/arch/s390/Kconfig linux-2.6.32.45/arch/s390/Kconfig
3350--- linux-2.6.32.45/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3351+++ linux-2.6.32.45/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3352@@ -194,28 +194,26 @@ config AUDIT_ARCH
3353
3354 config S390_SWITCH_AMODE
3355 bool "Switch kernel/user addressing modes"
3356+ default y
3357 help
3358 This option allows to switch the addressing modes of kernel and user
3359- space. The kernel parameter switch_amode=on will enable this feature,
3360- default is disabled. Enabling this (via kernel parameter) on machines
3361- earlier than IBM System z9-109 EC/BC will reduce system performance.
3362+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3363+ will reduce system performance.
3364
3365 Note that this option will also be selected by selecting the execute
3366- protection option below. Enabling the execute protection via the
3367- noexec kernel parameter will also switch the addressing modes,
3368- independent of the switch_amode kernel parameter.
3369+ protection option below. Enabling the execute protection will also
3370+ switch the addressing modes, independent of this option.
3371
3372
3373 config S390_EXEC_PROTECT
3374 bool "Data execute protection"
3375+ default y
3376 select S390_SWITCH_AMODE
3377 help
3378 This option allows to enable a buffer overflow protection for user
3379 space programs and it also selects the addressing mode option above.
3380- The kernel parameter noexec=on will enable this feature and also
3381- switch the addressing modes, default is disabled. Enabling this (via
3382- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3383- will reduce system performance.
3384+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3385+ reduce system performance.
3386
3387 comment "Code generation options"
3388
3389diff -urNp linux-2.6.32.45/arch/s390/kernel/module.c linux-2.6.32.45/arch/s390/kernel/module.c
3390--- linux-2.6.32.45/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3391+++ linux-2.6.32.45/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3392@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3393
3394 /* Increase core size by size of got & plt and set start
3395 offsets for got and plt. */
3396- me->core_size = ALIGN(me->core_size, 4);
3397- me->arch.got_offset = me->core_size;
3398- me->core_size += me->arch.got_size;
3399- me->arch.plt_offset = me->core_size;
3400- me->core_size += me->arch.plt_size;
3401+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3402+ me->arch.got_offset = me->core_size_rw;
3403+ me->core_size_rw += me->arch.got_size;
3404+ me->arch.plt_offset = me->core_size_rx;
3405+ me->core_size_rx += me->arch.plt_size;
3406 return 0;
3407 }
3408
3409@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3410 if (info->got_initialized == 0) {
3411 Elf_Addr *gotent;
3412
3413- gotent = me->module_core + me->arch.got_offset +
3414+ gotent = me->module_core_rw + me->arch.got_offset +
3415 info->got_offset;
3416 *gotent = val;
3417 info->got_initialized = 1;
3418@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3419 else if (r_type == R_390_GOTENT ||
3420 r_type == R_390_GOTPLTENT)
3421 *(unsigned int *) loc =
3422- (val + (Elf_Addr) me->module_core - loc) >> 1;
3423+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3424 else if (r_type == R_390_GOT64 ||
3425 r_type == R_390_GOTPLT64)
3426 *(unsigned long *) loc = val;
3427@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3428 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3429 if (info->plt_initialized == 0) {
3430 unsigned int *ip;
3431- ip = me->module_core + me->arch.plt_offset +
3432+ ip = me->module_core_rx + me->arch.plt_offset +
3433 info->plt_offset;
3434 #ifndef CONFIG_64BIT
3435 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3436@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3437 val - loc + 0xffffUL < 0x1ffffeUL) ||
3438 (r_type == R_390_PLT32DBL &&
3439 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3440- val = (Elf_Addr) me->module_core +
3441+ val = (Elf_Addr) me->module_core_rx +
3442 me->arch.plt_offset +
3443 info->plt_offset;
3444 val += rela->r_addend - loc;
3445@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3446 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3447 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3448 val = val + rela->r_addend -
3449- ((Elf_Addr) me->module_core + me->arch.got_offset);
3450+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3451 if (r_type == R_390_GOTOFF16)
3452 *(unsigned short *) loc = val;
3453 else if (r_type == R_390_GOTOFF32)
3454@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3455 break;
3456 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3457 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3458- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3459+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3460 rela->r_addend - loc;
3461 if (r_type == R_390_GOTPC)
3462 *(unsigned int *) loc = val;
3463diff -urNp linux-2.6.32.45/arch/s390/kernel/setup.c linux-2.6.32.45/arch/s390/kernel/setup.c
3464--- linux-2.6.32.45/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3465+++ linux-2.6.32.45/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3466@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3467 early_param("mem", early_parse_mem);
3468
3469 #ifdef CONFIG_S390_SWITCH_AMODE
3470-unsigned int switch_amode = 0;
3471-EXPORT_SYMBOL_GPL(switch_amode);
3472-
3473 static int set_amode_and_uaccess(unsigned long user_amode,
3474 unsigned long user32_amode)
3475 {
3476@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3477 return 0;
3478 }
3479 }
3480-
3481-/*
3482- * Switch kernel/user addressing modes?
3483- */
3484-static int __init early_parse_switch_amode(char *p)
3485-{
3486- switch_amode = 1;
3487- return 0;
3488-}
3489-early_param("switch_amode", early_parse_switch_amode);
3490-
3491 #else /* CONFIG_S390_SWITCH_AMODE */
3492 static inline int set_amode_and_uaccess(unsigned long user_amode,
3493 unsigned long user32_amode)
3494@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3495 }
3496 #endif /* CONFIG_S390_SWITCH_AMODE */
3497
3498-#ifdef CONFIG_S390_EXEC_PROTECT
3499-unsigned int s390_noexec = 0;
3500-EXPORT_SYMBOL_GPL(s390_noexec);
3501-
3502-/*
3503- * Enable execute protection?
3504- */
3505-static int __init early_parse_noexec(char *p)
3506-{
3507- if (!strncmp(p, "off", 3))
3508- return 0;
3509- switch_amode = 1;
3510- s390_noexec = 1;
3511- return 0;
3512-}
3513-early_param("noexec", early_parse_noexec);
3514-#endif /* CONFIG_S390_EXEC_PROTECT */
3515-
3516 static void setup_addressing_mode(void)
3517 {
3518 if (s390_noexec) {
3519diff -urNp linux-2.6.32.45/arch/s390/mm/mmap.c linux-2.6.32.45/arch/s390/mm/mmap.c
3520--- linux-2.6.32.45/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3521+++ linux-2.6.32.45/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3522@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3523 */
3524 if (mmap_is_legacy()) {
3525 mm->mmap_base = TASK_UNMAPPED_BASE;
3526+
3527+#ifdef CONFIG_PAX_RANDMMAP
3528+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3529+ mm->mmap_base += mm->delta_mmap;
3530+#endif
3531+
3532 mm->get_unmapped_area = arch_get_unmapped_area;
3533 mm->unmap_area = arch_unmap_area;
3534 } else {
3535 mm->mmap_base = mmap_base();
3536+
3537+#ifdef CONFIG_PAX_RANDMMAP
3538+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3539+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3540+#endif
3541+
3542 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3543 mm->unmap_area = arch_unmap_area_topdown;
3544 }
3545@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3546 */
3547 if (mmap_is_legacy()) {
3548 mm->mmap_base = TASK_UNMAPPED_BASE;
3549+
3550+#ifdef CONFIG_PAX_RANDMMAP
3551+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3552+ mm->mmap_base += mm->delta_mmap;
3553+#endif
3554+
3555 mm->get_unmapped_area = s390_get_unmapped_area;
3556 mm->unmap_area = arch_unmap_area;
3557 } else {
3558 mm->mmap_base = mmap_base();
3559+
3560+#ifdef CONFIG_PAX_RANDMMAP
3561+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3562+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3563+#endif
3564+
3565 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3566 mm->unmap_area = arch_unmap_area_topdown;
3567 }
3568diff -urNp linux-2.6.32.45/arch/score/include/asm/system.h linux-2.6.32.45/arch/score/include/asm/system.h
3569--- linux-2.6.32.45/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3570+++ linux-2.6.32.45/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3571@@ -17,7 +17,7 @@ do { \
3572 #define finish_arch_switch(prev) do {} while (0)
3573
3574 typedef void (*vi_handler_t)(void);
3575-extern unsigned long arch_align_stack(unsigned long sp);
3576+#define arch_align_stack(x) (x)
3577
3578 #define mb() barrier()
3579 #define rmb() barrier()
3580diff -urNp linux-2.6.32.45/arch/score/kernel/process.c linux-2.6.32.45/arch/score/kernel/process.c
3581--- linux-2.6.32.45/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3582+++ linux-2.6.32.45/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3583@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3584
3585 return task_pt_regs(task)->cp0_epc;
3586 }
3587-
3588-unsigned long arch_align_stack(unsigned long sp)
3589-{
3590- return sp;
3591-}
3592diff -urNp linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c
3593--- linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3594+++ linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3595@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3596 return 0;
3597 }
3598
3599-static struct platform_suspend_ops hp6x0_pm_ops = {
3600+static const struct platform_suspend_ops hp6x0_pm_ops = {
3601 .enter = hp6x0_pm_enter,
3602 .valid = suspend_valid_only_mem,
3603 };
3604diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c
3605--- linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3606+++ linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3607@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3608 NULL,
3609 };
3610
3611-static struct sysfs_ops sq_sysfs_ops = {
3612+static const struct sysfs_ops sq_sysfs_ops = {
3613 .show = sq_sysfs_show,
3614 .store = sq_sysfs_store,
3615 };
3616diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c
3617--- linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3618+++ linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3619@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3620 return 0;
3621 }
3622
3623-static struct platform_suspend_ops sh_pm_ops = {
3624+static const struct platform_suspend_ops sh_pm_ops = {
3625 .enter = sh_pm_enter,
3626 .valid = suspend_valid_only_mem,
3627 };
3628diff -urNp linux-2.6.32.45/arch/sh/kernel/kgdb.c linux-2.6.32.45/arch/sh/kernel/kgdb.c
3629--- linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3630+++ linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3631@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3632 {
3633 }
3634
3635-struct kgdb_arch arch_kgdb_ops = {
3636+const struct kgdb_arch arch_kgdb_ops = {
3637 /* Breakpoint instruction: trapa #0x3c */
3638 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3639 .gdb_bpt_instr = { 0x3c, 0xc3 },
3640diff -urNp linux-2.6.32.45/arch/sh/mm/mmap.c linux-2.6.32.45/arch/sh/mm/mmap.c
3641--- linux-2.6.32.45/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3642+++ linux-2.6.32.45/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3643@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3644 addr = PAGE_ALIGN(addr);
3645
3646 vma = find_vma(mm, addr);
3647- if (TASK_SIZE - len >= addr &&
3648- (!vma || addr + len <= vma->vm_start))
3649+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3650 return addr;
3651 }
3652
3653@@ -106,7 +105,7 @@ full_search:
3654 }
3655 return -ENOMEM;
3656 }
3657- if (likely(!vma || addr + len <= vma->vm_start)) {
3658+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3659 /*
3660 * Remember the place where we stopped the search:
3661 */
3662@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3663 addr = PAGE_ALIGN(addr);
3664
3665 vma = find_vma(mm, addr);
3666- if (TASK_SIZE - len >= addr &&
3667- (!vma || addr + len <= vma->vm_start))
3668+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3669 return addr;
3670 }
3671
3672@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3673 /* make sure it can fit in the remaining address space */
3674 if (likely(addr > len)) {
3675 vma = find_vma(mm, addr-len);
3676- if (!vma || addr <= vma->vm_start) {
3677+ if (check_heap_stack_gap(vma, addr - len, len)) {
3678 /* remember the address as a hint for next time */
3679 return (mm->free_area_cache = addr-len);
3680 }
3681@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3682 if (unlikely(mm->mmap_base < len))
3683 goto bottomup;
3684
3685- addr = mm->mmap_base-len;
3686- if (do_colour_align)
3687- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3688+ addr = mm->mmap_base - len;
3689
3690 do {
3691+ if (do_colour_align)
3692+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3693 /*
3694 * Lookup failure means no vma is above this address,
3695 * else if new region fits below vma->vm_start,
3696 * return with success:
3697 */
3698 vma = find_vma(mm, addr);
3699- if (likely(!vma || addr+len <= vma->vm_start)) {
3700+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3701 /* remember the address as a hint for next time */
3702 return (mm->free_area_cache = addr);
3703 }
3704@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3705 mm->cached_hole_size = vma->vm_start - addr;
3706
3707 /* try just below the current vma->vm_start */
3708- addr = vma->vm_start-len;
3709- if (do_colour_align)
3710- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3711- } while (likely(len < vma->vm_start));
3712+ addr = skip_heap_stack_gap(vma, len);
3713+ } while (!IS_ERR_VALUE(addr));
3714
3715 bottomup:
3716 /*
3717diff -urNp linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h
3718--- linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3719+++ linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-08-18 23:11:34.000000000 -0400
3720@@ -14,18 +14,40 @@
3721 #define ATOMIC64_INIT(i) { (i) }
3722
3723 #define atomic_read(v) ((v)->counter)
3724+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3725+{
3726+ return v->counter;
3727+}
3728 #define atomic64_read(v) ((v)->counter)
3729+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3730+{
3731+ return v->counter;
3732+}
3733
3734 #define atomic_set(v, i) (((v)->counter) = i)
3735+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3736+{
3737+ v->counter = i;
3738+}
3739 #define atomic64_set(v, i) (((v)->counter) = i)
3740+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3741+{
3742+ v->counter = i;
3743+}
3744
3745 extern void atomic_add(int, atomic_t *);
3746+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3747 extern void atomic64_add(long, atomic64_t *);
3748+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3749 extern void atomic_sub(int, atomic_t *);
3750+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3751 extern void atomic64_sub(long, atomic64_t *);
3752+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3753
3754 extern int atomic_add_ret(int, atomic_t *);
3755+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3756 extern long atomic64_add_ret(long, atomic64_t *);
3757+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3758 extern int atomic_sub_ret(int, atomic_t *);
3759 extern long atomic64_sub_ret(long, atomic64_t *);
3760
3761@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3762 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3763
3764 #define atomic_inc_return(v) atomic_add_ret(1, v)
3765+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3766+{
3767+ return atomic_add_ret_unchecked(1, v);
3768+}
3769 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3770+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3771+{
3772+ return atomic64_add_ret_unchecked(1, v);
3773+}
3774
3775 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3776 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3777
3778 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3779+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3780+{
3781+ return atomic_add_ret_unchecked(i, v);
3782+}
3783 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3784+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3785+{
3786+ return atomic64_add_ret_unchecked(i, v);
3787+}
3788
3789 /*
3790 * atomic_inc_and_test - increment and test
3791@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
3792 * other cases.
3793 */
3794 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3795+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3796+{
3797+ return atomic_inc_return_unchecked(v) == 0;
3798+}
3799 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3800
3801 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3802@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
3803 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3804
3805 #define atomic_inc(v) atomic_add(1, v)
3806+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3807+{
3808+ atomic_add_unchecked(1, v);
3809+}
3810 #define atomic64_inc(v) atomic64_add(1, v)
3811+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3812+{
3813+ atomic64_add_unchecked(1, v);
3814+}
3815
3816 #define atomic_dec(v) atomic_sub(1, v)
3817+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3818+{
3819+ atomic_sub_unchecked(1, v);
3820+}
3821 #define atomic64_dec(v) atomic64_sub(1, v)
3822+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3823+{
3824+ atomic64_sub_unchecked(1, v);
3825+}
3826
3827 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3828 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3829
3830 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3831+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3832+{
3833+ return cmpxchg(&v->counter, old, new);
3834+}
3835 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3836+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3837+{
3838+ return xchg(&v->counter, new);
3839+}
3840
3841 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3842 {
3843- int c, old;
3844+ int c, old, new;
3845 c = atomic_read(v);
3846 for (;;) {
3847- if (unlikely(c == (u)))
3848+ if (unlikely(c == u))
3849 break;
3850- old = atomic_cmpxchg((v), c, c + (a));
3851+
3852+ asm volatile("addcc %2, %0, %0\n"
3853+
3854+#ifdef CONFIG_PAX_REFCOUNT
3855+ "tvs %%icc, 6\n"
3856+#endif
3857+
3858+ : "=r" (new)
3859+ : "0" (c), "ir" (a)
3860+ : "cc");
3861+
3862+ old = atomic_cmpxchg(v, c, new);
3863 if (likely(old == c))
3864 break;
3865 c = old;
3866 }
3867- return c != (u);
3868+ return c != u;
3869 }
3870
3871 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3872@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
3873 #define atomic64_cmpxchg(v, o, n) \
3874 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3875 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3876+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3877+{
3878+ return xchg(&v->counter, new);
3879+}
3880
3881 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3882 {
3883- long c, old;
3884+ long c, old, new;
3885 c = atomic64_read(v);
3886 for (;;) {
3887- if (unlikely(c == (u)))
3888+ if (unlikely(c == u))
3889 break;
3890- old = atomic64_cmpxchg((v), c, c + (a));
3891+
3892+ asm volatile("addcc %2, %0, %0\n"
3893+
3894+#ifdef CONFIG_PAX_REFCOUNT
3895+ "tvs %%xcc, 6\n"
3896+#endif
3897+
3898+ : "=r" (new)
3899+ : "0" (c), "ir" (a)
3900+ : "cc");
3901+
3902+ old = atomic64_cmpxchg(v, c, new);
3903 if (likely(old == c))
3904 break;
3905 c = old;
3906 }
3907- return c != (u);
3908+ return c != u;
3909 }
3910
3911 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
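The sparc64 atomic changes above split the API in two: the regular atomic_t/atomic64_t operations gain an addcc plus "tvs %icc, 6" (or %xcc) sequence, so a signed overflow traps when PAX_REFCOUNT is configured instead of silently wrapping a reference count, while the new *_unchecked variants on atomic_unchecked_t keep plain wrap-around for counters where that is intended. A portable C sketch of the checked-versus-unchecked distinction, using the GCC/Clang __builtin_add_overflow() intrinsic in place of the sparc trap instruction (atomicity itself is not modelled here):

/*
 * Model of the checked vs. unchecked atomic split above.  The real patch
 * detects overflow with addcc/tvs on sparc; this sketch uses
 * __builtin_add_overflow() and abort() as the "trap".  Not atomic.
 */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_add(int i, atomic_t *v)
{
	int new;

	if (__builtin_add_overflow(v->counter, i, &new))
		abort();		/* refcount overflow: kill instead of wrapping */
	v->counter = new;
}

static void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
	/* wrap-around is intentional here, so do the add in unsigned arithmetic */
	v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
}

int main(void)
{
	atomic_unchecked_t stat = { INT_MAX };
	atomic_t ref = { INT_MAX };

	atomic_add_unchecked(1, &stat);		/* wraps, by design */
	printf("unchecked counter wrapped to %d\n", stat.counter);

	atomic_add(1, &ref);			/* overflow detected: abort()s */
	printf("never reached\n");
	return 0;
}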
3912diff -urNp linux-2.6.32.45/arch/sparc/include/asm/cache.h linux-2.6.32.45/arch/sparc/include/asm/cache.h
3913--- linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3914+++ linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3915@@ -8,7 +8,7 @@
3916 #define _SPARC_CACHE_H
3917
3918 #define L1_CACHE_SHIFT 5
3919-#define L1_CACHE_BYTES 32
3920+#define L1_CACHE_BYTES 32UL
3921 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3922
3923 #ifdef CONFIG_SPARC32
3924diff -urNp linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h
3925--- linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3926+++ linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3927@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3928 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3929 #define dma_is_consistent(d, h) (1)
3930
3931-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3932+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3933 extern struct bus_type pci_bus_type;
3934
3935-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3936+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3937 {
3938 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3939 if (dev->bus == &pci_bus_type)
3940@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3941 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3942 dma_addr_t *dma_handle, gfp_t flag)
3943 {
3944- struct dma_map_ops *ops = get_dma_ops(dev);
3945+ const struct dma_map_ops *ops = get_dma_ops(dev);
3946 void *cpu_addr;
3947
3948 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3949@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3950 static inline void dma_free_coherent(struct device *dev, size_t size,
3951 void *cpu_addr, dma_addr_t dma_handle)
3952 {
3953- struct dma_map_ops *ops = get_dma_ops(dev);
3954+ const struct dma_map_ops *ops = get_dma_ops(dev);
3955
3956 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3957 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3958diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_32.h linux-2.6.32.45/arch/sparc/include/asm/elf_32.h
3959--- linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3960+++ linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3961@@ -116,6 +116,13 @@ typedef struct {
3962
3963 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3964
3965+#ifdef CONFIG_PAX_ASLR
3966+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3967+
3968+#define PAX_DELTA_MMAP_LEN 16
3969+#define PAX_DELTA_STACK_LEN 16
3970+#endif
3971+
3972 /* This yields a mask that user programs can use to figure out what
3973 instruction set this cpu supports. This can NOT be done in userspace
3974 on Sparc. */
3975diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_64.h linux-2.6.32.45/arch/sparc/include/asm/elf_64.h
3976--- linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3977+++ linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3978@@ -163,6 +163,12 @@ typedef struct {
3979 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3980 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3981
3982+#ifdef CONFIG_PAX_ASLR
3983+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3984+
3985+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3986+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3987+#endif
3988
3989 /* This yields a mask that user programs can use to figure out what
3990 instruction set this cpu supports. */
3991diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h
3992--- linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3993+++ linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3994@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3995 BTFIXUPDEF_INT(page_none)
3996 BTFIXUPDEF_INT(page_copy)
3997 BTFIXUPDEF_INT(page_readonly)
3998+
3999+#ifdef CONFIG_PAX_PAGEEXEC
4000+BTFIXUPDEF_INT(page_shared_noexec)
4001+BTFIXUPDEF_INT(page_copy_noexec)
4002+BTFIXUPDEF_INT(page_readonly_noexec)
4003+#endif
4004+
4005 BTFIXUPDEF_INT(page_kernel)
4006
4007 #define PMD_SHIFT SUN4C_PMD_SHIFT
4008@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4009 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4010 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4011
4012+#ifdef CONFIG_PAX_PAGEEXEC
4013+extern pgprot_t PAGE_SHARED_NOEXEC;
4014+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4015+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4016+#else
4017+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4018+# define PAGE_COPY_NOEXEC PAGE_COPY
4019+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4020+#endif
4021+
4022 extern unsigned long page_kernel;
4023
4024 #ifdef MODULE
4025diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h
4026--- linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
4027+++ linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
4028@@ -115,6 +115,13 @@
4029 SRMMU_EXEC | SRMMU_REF)
4030 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4031 SRMMU_EXEC | SRMMU_REF)
4032+
4033+#ifdef CONFIG_PAX_PAGEEXEC
4034+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4035+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4036+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4037+#endif
4038+
4039 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4040 SRMMU_DIRTY | SRMMU_REF)
4041
4042diff -urNp linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h
4043--- linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
4044+++ linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-08-18 23:19:30.000000000 -0400
4045@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
4046
4047 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4048
4049-static void inline arch_read_lock(raw_rwlock_t *lock)
4050+static inline void arch_read_lock(raw_rwlock_t *lock)
4051 {
4052 unsigned long tmp1, tmp2;
4053
4054 __asm__ __volatile__ (
4055 "1: ldsw [%2], %0\n"
4056 " brlz,pn %0, 2f\n"
4057-"4: add %0, 1, %1\n"
4058+"4: addcc %0, 1, %1\n"
4059+
4060+#ifdef CONFIG_PAX_REFCOUNT
4061+" tvs %%icc, 6\n"
4062+#endif
4063+
4064 " cas [%2], %0, %1\n"
4065 " cmp %0, %1\n"
4066 " bne,pn %%icc, 1b\n"
4067@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rw
4068 " .previous"
4069 : "=&r" (tmp1), "=&r" (tmp2)
4070 : "r" (lock)
4071- : "memory");
4072+ : "memory", "cc");
4073 }
4074
4075-static int inline arch_read_trylock(raw_rwlock_t *lock)
4076+static inline int arch_read_trylock(raw_rwlock_t *lock)
4077 {
4078 int tmp1, tmp2;
4079
4080@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
4081 "1: ldsw [%2], %0\n"
4082 " brlz,a,pn %0, 2f\n"
4083 " mov 0, %0\n"
4084-" add %0, 1, %1\n"
4085+" addcc %0, 1, %1\n"
4086+
4087+#ifdef CONFIG_PAX_REFCOUNT
4088+" tvs %%icc, 6\n"
4089+#endif
4090+
4091 " cas [%2], %0, %1\n"
4092 " cmp %0, %1\n"
4093 " bne,pn %%icc, 1b\n"
4094@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
4095 return tmp1;
4096 }
4097
4098-static void inline arch_read_unlock(raw_rwlock_t *lock)
4099+static inline void arch_read_unlock(raw_rwlock_t *lock)
4100 {
4101 unsigned long tmp1, tmp2;
4102
4103 __asm__ __volatile__(
4104 "1: lduw [%2], %0\n"
4105-" sub %0, 1, %1\n"
4106+" subcc %0, 1, %1\n"
4107+
4108+#ifdef CONFIG_PAX_REFCOUNT
4109+" tvs %%icc, 6\n"
4110+#endif
4111+
4112 " cas [%2], %0, %1\n"
4113 " cmp %0, %1\n"
4114 " bne,pn %%xcc, 1b\n"
4115@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4116 : "memory");
4117 }
4118
4119-static void inline arch_write_lock(raw_rwlock_t *lock)
4120+static inline void arch_write_lock(raw_rwlock_t *lock)
4121 {
4122 unsigned long mask, tmp1, tmp2;
4123
4124@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4125 : "memory");
4126 }
4127
4128-static void inline arch_write_unlock(raw_rwlock_t *lock)
4129+static inline void arch_write_unlock(raw_rwlock_t *lock)
4130 {
4131 __asm__ __volatile__(
4132 " stw %%g0, [%0]"
4133@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw
4134 : "memory");
4135 }
4136
4137-static int inline arch_write_trylock(raw_rwlock_t *lock)
4138+static inline int arch_write_trylock(raw_rwlock_t *lock)
4139 {
4140 unsigned long mask, tmp1, tmp2, result;
4141
4142diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h
4143--- linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4144+++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4145@@ -50,6 +50,8 @@ struct thread_info {
4146 unsigned long w_saved;
4147
4148 struct restart_block restart_block;
4149+
4150+ unsigned long lowest_stack;
4151 };
4152
4153 /*
4154diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h
4155--- linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4156+++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4157@@ -68,6 +68,8 @@ struct thread_info {
4158 struct pt_regs *kern_una_regs;
4159 unsigned int kern_una_insn;
4160
4161+ unsigned long lowest_stack;
4162+
4163 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4164 };
4165
4166diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h
4167--- linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4168+++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4169@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4170
4171 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4172 {
4173- if (n && __access_ok((unsigned long) to, n))
4174+ if ((long)n < 0)
4175+ return n;
4176+
4177+ if (n && __access_ok((unsigned long) to, n)) {
4178+ if (!__builtin_constant_p(n))
4179+ check_object_size(from, n, true);
4180 return __copy_user(to, (__force void __user *) from, n);
4181- else
4182+ } else
4183 return n;
4184 }
4185
4186 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4187 {
4188+ if ((long)n < 0)
4189+ return n;
4190+
4191+ if (!__builtin_constant_p(n))
4192+ check_object_size(from, n, true);
4193+
4194 return __copy_user(to, (__force void __user *) from, n);
4195 }
4196
4197 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4198 {
4199- if (n && __access_ok((unsigned long) from, n))
4200+ if ((long)n < 0)
4201+ return n;
4202+
4203+ if (n && __access_ok((unsigned long) from, n)) {
4204+ if (!__builtin_constant_p(n))
4205+ check_object_size(to, n, false);
4206 return __copy_user((__force void __user *) to, from, n);
4207- else
4208+ } else
4209 return n;
4210 }
4211
4212 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4213 {
4214+ if ((long)n < 0)
4215+ return n;
4216+
4217 return __copy_user((__force void __user *) to, from, n);
4218 }
4219
4220diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h
4221--- linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4222+++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4223@@ -9,6 +9,7 @@
4224 #include <linux/compiler.h>
4225 #include <linux/string.h>
4226 #include <linux/thread_info.h>
4227+#include <linux/kernel.h>
4228 #include <asm/asi.h>
4229 #include <asm/system.h>
4230 #include <asm/spitfire.h>
4231@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4232 static inline unsigned long __must_check
4233 copy_from_user(void *to, const void __user *from, unsigned long size)
4234 {
4235- unsigned long ret = ___copy_from_user(to, from, size);
4236+ unsigned long ret;
4237
4238+ if ((long)size < 0 || size > INT_MAX)
4239+ return size;
4240+
4241+ if (!__builtin_constant_p(size))
4242+ check_object_size(to, size, false);
4243+
4244+ ret = ___copy_from_user(to, from, size);
4245 if (unlikely(ret))
4246 ret = copy_from_user_fixup(to, from, size);
4247 return ret;
4248@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4249 static inline unsigned long __must_check
4250 copy_to_user(void __user *to, const void *from, unsigned long size)
4251 {
4252- unsigned long ret = ___copy_to_user(to, from, size);
4253+ unsigned long ret;
4254+
4255+ if ((long)size < 0 || size > INT_MAX)
4256+ return size;
4257+
4258+ if (!__builtin_constant_p(size))
4259+ check_object_size(from, size, true);
4260
4261+ ret = ___copy_to_user(to, from, size);
4262 if (unlikely(ret))
4263 ret = copy_to_user_fixup(to, from, size);
4264 return ret;
4265diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess.h linux-2.6.32.45/arch/sparc/include/asm/uaccess.h
4266--- linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4267+++ linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4268@@ -1,5 +1,13 @@
4269 #ifndef ___ASM_SPARC_UACCESS_H
4270 #define ___ASM_SPARC_UACCESS_H
4271+
4272+#ifdef __KERNEL__
4273+#ifndef __ASSEMBLY__
4274+#include <linux/types.h>
4275+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4276+#endif
4277+#endif
4278+
4279 #if defined(__sparc__) && defined(__arch64__)
4280 #include <asm/uaccess_64.h>
4281 #else
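The declaration above is the hook that the non-constant-size paths in the uaccess_32.h/uaccess_64.h hunks call before copying: check_object_size() is PaX's user-copy sanity check, which verifies that the kernel-side buffer really spans the requested number of bytes (the real implementation checks the object's slab or stack bounds). A much-simplified model of such a bounds check against an explicitly tracked allocation; the extra "tracked" parameter and the abort-on-violation policy are simplifications for illustration, not the kernel interface:

/*
 * Minimal model of a check_object_size()-style hook: verify that ptr..ptr+n
 * stays inside one known object before allowing the copy.  The real PaX code
 * derives the object bounds from allocator metadata; here they are tracked
 * explicitly, purely as an assumption for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct tracked { void *base; unsigned long size; };

static void check_object_size(const struct tracked *obj,
			      const void *ptr, unsigned long n, bool to_user)
{
	unsigned long off = (unsigned long)((const char *)ptr - (const char *)obj->base);

	if (off > obj->size || n > obj->size - off) {
		fprintf(stderr, "usercopy: %s overflows object (off %lu, len %lu, size %lu)\n",
			to_user ? "copy_to_user" : "copy_from_user", off, n, obj->size);
		exit(1);
	}
}

int main(void)
{
	struct tracked obj = { malloc(64), 64 };

	if (!obj.base)
		return 1;

	check_object_size(&obj, obj.base, 64, true);			/* fine: whole object */
	check_object_size(&obj, (char *)obj.base + 32, 64, false);	/* caught: runs past the end */

	free(obj.base);
	return 0;
}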
4282diff -urNp linux-2.6.32.45/arch/sparc/kernel/iommu.c linux-2.6.32.45/arch/sparc/kernel/iommu.c
4283--- linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4284+++ linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4285@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4286 spin_unlock_irqrestore(&iommu->lock, flags);
4287 }
4288
4289-static struct dma_map_ops sun4u_dma_ops = {
4290+static const struct dma_map_ops sun4u_dma_ops = {
4291 .alloc_coherent = dma_4u_alloc_coherent,
4292 .free_coherent = dma_4u_free_coherent,
4293 .map_page = dma_4u_map_page,
4294@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4295 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4296 };
4297
4298-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4299+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4300 EXPORT_SYMBOL(dma_ops);
4301
4302 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4303diff -urNp linux-2.6.32.45/arch/sparc/kernel/ioport.c linux-2.6.32.45/arch/sparc/kernel/ioport.c
4304--- linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4305+++ linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4306@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4307 BUG();
4308 }
4309
4310-struct dma_map_ops sbus_dma_ops = {
4311+const struct dma_map_ops sbus_dma_ops = {
4312 .alloc_coherent = sbus_alloc_coherent,
4313 .free_coherent = sbus_free_coherent,
4314 .map_page = sbus_map_page,
4315@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4316 .sync_sg_for_device = sbus_sync_sg_for_device,
4317 };
4318
4319-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4320+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4321 EXPORT_SYMBOL(dma_ops);
4322
4323 static int __init sparc_register_ioport(void)
4324@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4325 }
4326 }
4327
4328-struct dma_map_ops pci32_dma_ops = {
4329+const struct dma_map_ops pci32_dma_ops = {
4330 .alloc_coherent = pci32_alloc_coherent,
4331 .free_coherent = pci32_free_coherent,
4332 .map_page = pci32_map_page,
4333diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c
4334--- linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4335+++ linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4336@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4337 {
4338 }
4339
4340-struct kgdb_arch arch_kgdb_ops = {
4341+const struct kgdb_arch arch_kgdb_ops = {
4342 /* Breakpoint instruction: ta 0x7d */
4343 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4344 };
4345diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c
4346--- linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4347+++ linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4348@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4349 {
4350 }
4351
4352-struct kgdb_arch arch_kgdb_ops = {
4353+const struct kgdb_arch arch_kgdb_ops = {
4354 /* Breakpoint instruction: ta 0x72 */
4355 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4356 };
4357diff -urNp linux-2.6.32.45/arch/sparc/kernel/Makefile linux-2.6.32.45/arch/sparc/kernel/Makefile
4358--- linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4359+++ linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4360@@ -3,7 +3,7 @@
4361 #
4362
4363 asflags-y := -ansi
4364-ccflags-y := -Werror
4365+#ccflags-y := -Werror
4366
4367 extra-y := head_$(BITS).o
4368 extra-y += init_task.o
4369diff -urNp linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c
4370--- linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4371+++ linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4372@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4373 spin_unlock_irqrestore(&iommu->lock, flags);
4374 }
4375
4376-static struct dma_map_ops sun4v_dma_ops = {
4377+static const struct dma_map_ops sun4v_dma_ops = {
4378 .alloc_coherent = dma_4v_alloc_coherent,
4379 .free_coherent = dma_4v_free_coherent,
4380 .map_page = dma_4v_map_page,
4381diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_32.c linux-2.6.32.45/arch/sparc/kernel/process_32.c
4382--- linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4383+++ linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4384@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4385 rw->ins[4], rw->ins[5],
4386 rw->ins[6],
4387 rw->ins[7]);
4388- printk("%pS\n", (void *) rw->ins[7]);
4389+ printk("%pA\n", (void *) rw->ins[7]);
4390 rw = (struct reg_window32 *) rw->ins[6];
4391 }
4392 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4393@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4394
4395 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4396 r->psr, r->pc, r->npc, r->y, print_tainted());
4397- printk("PC: <%pS>\n", (void *) r->pc);
4398+ printk("PC: <%pA>\n", (void *) r->pc);
4399 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4400 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4401 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4402 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4403 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4404 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4405- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4406+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4407
4408 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4409 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4410@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4411 rw = (struct reg_window32 *) fp;
4412 pc = rw->ins[7];
4413 printk("[%08lx : ", pc);
4414- printk("%pS ] ", (void *) pc);
4415+ printk("%pA ] ", (void *) pc);
4416 fp = rw->ins[6];
4417 } while (++count < 16);
4418 printk("\n");
4419diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_64.c linux-2.6.32.45/arch/sparc/kernel/process_64.c
4420--- linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4421+++ linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4422@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4423 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4424 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4425 if (regs->tstate & TSTATE_PRIV)
4426- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4427+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4428 }
4429
4430 void show_regs(struct pt_regs *regs)
4431 {
4432 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4433 regs->tpc, regs->tnpc, regs->y, print_tainted());
4434- printk("TPC: <%pS>\n", (void *) regs->tpc);
4435+ printk("TPC: <%pA>\n", (void *) regs->tpc);
4436 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4437 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4438 regs->u_regs[3]);
4439@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4440 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4441 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4442 regs->u_regs[15]);
4443- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4444+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4445 show_regwindow(regs);
4446 }
4447
4448@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4449 ((tp && tp->task) ? tp->task->pid : -1));
4450
4451 if (gp->tstate & TSTATE_PRIV) {
4452- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4453+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4454 (void *) gp->tpc,
4455 (void *) gp->o7,
4456 (void *) gp->i7,
4457diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c
4458--- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4459+++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4460@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4461 if (ARCH_SUN4C && len > 0x20000000)
4462 return -ENOMEM;
4463 if (!addr)
4464- addr = TASK_UNMAPPED_BASE;
4465+ addr = current->mm->mmap_base;
4466
4467 if (flags & MAP_SHARED)
4468 addr = COLOUR_ALIGN(addr);
4469@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4470 }
4471 if (TASK_SIZE - PAGE_SIZE - len < addr)
4472 return -ENOMEM;
4473- if (!vmm || addr + len <= vmm->vm_start)
4474+ if (check_heap_stack_gap(vmm, addr, len))
4475 return addr;
4476 addr = vmm->vm_end;
4477 if (flags & MAP_SHARED)
4478diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c
4479--- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4480+++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4481@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4482 /* We do not accept a shared mapping if it would violate
4483 * cache aliasing constraints.
4484 */
4485- if ((flags & MAP_SHARED) &&
4486+ if ((filp || (flags & MAP_SHARED)) &&
4487 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4488 return -EINVAL;
4489 return addr;
4490@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4491 if (filp || (flags & MAP_SHARED))
4492 do_color_align = 1;
4493
4494+#ifdef CONFIG_PAX_RANDMMAP
4495+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4496+#endif
4497+
4498 if (addr) {
4499 if (do_color_align)
4500 addr = COLOUR_ALIGN(addr, pgoff);
4501@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4502 addr = PAGE_ALIGN(addr);
4503
4504 vma = find_vma(mm, addr);
4505- if (task_size - len >= addr &&
4506- (!vma || addr + len <= vma->vm_start))
4507+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4508 return addr;
4509 }
4510
4511 if (len > mm->cached_hole_size) {
4512- start_addr = addr = mm->free_area_cache;
4513+ start_addr = addr = mm->free_area_cache;
4514 } else {
4515- start_addr = addr = TASK_UNMAPPED_BASE;
4516+ start_addr = addr = mm->mmap_base;
4517 mm->cached_hole_size = 0;
4518 }
4519
4520@@ -175,14 +178,14 @@ full_search:
4521 vma = find_vma(mm, VA_EXCLUDE_END);
4522 }
4523 if (unlikely(task_size < addr)) {
4524- if (start_addr != TASK_UNMAPPED_BASE) {
4525- start_addr = addr = TASK_UNMAPPED_BASE;
4526+ if (start_addr != mm->mmap_base) {
4527+ start_addr = addr = mm->mmap_base;
4528 mm->cached_hole_size = 0;
4529 goto full_search;
4530 }
4531 return -ENOMEM;
4532 }
4533- if (likely(!vma || addr + len <= vma->vm_start)) {
4534+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4535 /*
4536 * Remember the place where we stopped the search:
4537 */
4538@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4539 /* We do not accept a shared mapping if it would violate
4540 * cache aliasing constraints.
4541 */
4542- if ((flags & MAP_SHARED) &&
4543+ if ((filp || (flags & MAP_SHARED)) &&
4544 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4545 return -EINVAL;
4546 return addr;
4547@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4548 addr = PAGE_ALIGN(addr);
4549
4550 vma = find_vma(mm, addr);
4551- if (task_size - len >= addr &&
4552- (!vma || addr + len <= vma->vm_start))
4553+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4554 return addr;
4555 }
4556
4557@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4558 /* make sure it can fit in the remaining address space */
4559 if (likely(addr > len)) {
4560 vma = find_vma(mm, addr-len);
4561- if (!vma || addr <= vma->vm_start) {
4562+ if (check_heap_stack_gap(vma, addr - len, len)) {
4563 /* remember the address as a hint for next time */
4564 return (mm->free_area_cache = addr-len);
4565 }
4566@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4567 if (unlikely(mm->mmap_base < len))
4568 goto bottomup;
4569
4570- addr = mm->mmap_base-len;
4571- if (do_color_align)
4572- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4573+ addr = mm->mmap_base - len;
4574
4575 do {
4576+ if (do_color_align)
4577+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4578 /*
4579 * Lookup failure means no vma is above this address,
4580 * else if new region fits below vma->vm_start,
4581 * return with success:
4582 */
4583 vma = find_vma(mm, addr);
4584- if (likely(!vma || addr+len <= vma->vm_start)) {
4585+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4586 /* remember the address as a hint for next time */
4587 return (mm->free_area_cache = addr);
4588 }
4589@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4590 mm->cached_hole_size = vma->vm_start - addr;
4591
4592 /* try just below the current vma->vm_start */
4593- addr = vma->vm_start-len;
4594- if (do_color_align)
4595- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4596- } while (likely(len < vma->vm_start));
4597+ addr = skip_heap_stack_gap(vma, len);
4598+ } while (!IS_ERR_VALUE(addr));
4599
4600 bottomup:
4601 /*
4602@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4603 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4604 sysctl_legacy_va_layout) {
4605 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4606+
4607+#ifdef CONFIG_PAX_RANDMMAP
4608+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4609+ mm->mmap_base += mm->delta_mmap;
4610+#endif
4611+
4612 mm->get_unmapped_area = arch_get_unmapped_area;
4613 mm->unmap_area = arch_unmap_area;
4614 } else {
4615@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4616 gap = (task_size / 6 * 5);
4617
4618 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4619+
4620+#ifdef CONFIG_PAX_RANDMMAP
4621+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4622+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4623+#endif
4624+
4625 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4626 mm->unmap_area = arch_unmap_area_topdown;
4627 }
4628diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_32.c linux-2.6.32.45/arch/sparc/kernel/traps_32.c
4629--- linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4630+++ linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4631@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4632 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4633 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4634
4635+extern void gr_handle_kernel_exploit(void);
4636+
4637 void die_if_kernel(char *str, struct pt_regs *regs)
4638 {
4639 static int die_counter;
4640@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4641 count++ < 30 &&
4642 (((unsigned long) rw) >= PAGE_OFFSET) &&
4643 !(((unsigned long) rw) & 0x7)) {
4644- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4645+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4646 (void *) rw->ins[7]);
4647 rw = (struct reg_window32 *)rw->ins[6];
4648 }
4649 }
4650 printk("Instruction DUMP:");
4651 instruction_dump ((unsigned long *) regs->pc);
4652- if(regs->psr & PSR_PS)
4653+ if(regs->psr & PSR_PS) {
4654+ gr_handle_kernel_exploit();
4655 do_exit(SIGKILL);
4656+ }
4657 do_exit(SIGSEGV);
4658 }
4659
4660diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_64.c linux-2.6.32.45/arch/sparc/kernel/traps_64.c
4661--- linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4662+++ linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4663@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4664 i + 1,
4665 p->trapstack[i].tstate, p->trapstack[i].tpc,
4666 p->trapstack[i].tnpc, p->trapstack[i].tt);
4667- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4668+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4669 }
4670 }
4671
4672@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4673
4674 lvl -= 0x100;
4675 if (regs->tstate & TSTATE_PRIV) {
4676+
4677+#ifdef CONFIG_PAX_REFCOUNT
4678+ if (lvl == 6)
4679+ pax_report_refcount_overflow(regs);
4680+#endif
4681+
4682 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4683 die_if_kernel(buffer, regs);
4684 }
4685@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4686 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4687 {
4688 char buffer[32];
4689-
4690+
4691 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4692 0, lvl, SIGTRAP) == NOTIFY_STOP)
4693 return;
4694
4695+#ifdef CONFIG_PAX_REFCOUNT
4696+ if (lvl == 6)
4697+ pax_report_refcount_overflow(regs);
4698+#endif
4699+
4700 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4701
4702 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4703@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4704 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4705 printk("%s" "ERROR(%d): ",
4706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4707- printk("TPC<%pS>\n", (void *) regs->tpc);
4708+ printk("TPC<%pA>\n", (void *) regs->tpc);
4709 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4710 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4711 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4712@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4713 smp_processor_id(),
4714 (type & 0x1) ? 'I' : 'D',
4715 regs->tpc);
4716- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4717+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4718 panic("Irrecoverable Cheetah+ parity error.");
4719 }
4720
4721@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4722 smp_processor_id(),
4723 (type & 0x1) ? 'I' : 'D',
4724 regs->tpc);
4725- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4726+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4727 }
4728
4729 struct sun4v_error_entry {
4730@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4731
4732 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4733 regs->tpc, tl);
4734- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4735+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4736 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4737- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4738+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4739 (void *) regs->u_regs[UREG_I7]);
4740 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4741 "pte[%lx] error[%lx]\n",
4742@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4743
4744 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4745 regs->tpc, tl);
4746- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4747+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4748 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4749- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4750+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4751 (void *) regs->u_regs[UREG_I7]);
4752 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4753 "pte[%lx] error[%lx]\n",
4754@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4755 fp = (unsigned long)sf->fp + STACK_BIAS;
4756 }
4757
4758- printk(" [%016lx] %pS\n", pc, (void *) pc);
4759+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4760 } while (++count < 16);
4761 }
4762
4763@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4764 return (struct reg_window *) (fp + STACK_BIAS);
4765 }
4766
4767+extern void gr_handle_kernel_exploit(void);
4768+
4769 void die_if_kernel(char *str, struct pt_regs *regs)
4770 {
4771 static int die_counter;
4772@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4773 while (rw &&
4774 count++ < 30&&
4775 is_kernel_stack(current, rw)) {
4776- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4777+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4778 (void *) rw->ins[7]);
4779
4780 rw = kernel_stack_up(rw);
4781@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4782 }
4783 user_instruction_dump ((unsigned int __user *) regs->tpc);
4784 }
4785- if (regs->tstate & TSTATE_PRIV)
4786+ if (regs->tstate & TSTATE_PRIV) {
4787+ gr_handle_kernel_exploit();
4788 do_exit(SIGKILL);
4789+ }
4790+
4791 do_exit(SIGSEGV);
4792 }
4793 EXPORT_SYMBOL(die_if_kernel);
4794diff -urNp linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S
4795--- linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4796+++ linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4797@@ -127,7 +127,7 @@ do_int_load:
4798 wr %o5, 0x0, %asi
4799 retl
4800 mov 0, %o0
4801- .size __do_int_load, .-__do_int_load
4802+ .size do_int_load, .-do_int_load
4803
4804 .section __ex_table,"a"
4805 .word 4b, __retl_efault
4806diff -urNp linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c
4807--- linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4808+++ linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4809@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4810 if (count < 5) {
4811 last_time = jiffies;
4812 count++;
4813- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4814+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4815 regs->tpc, (void *) regs->tpc);
4816 }
4817 }
4818diff -urNp linux-2.6.32.45/arch/sparc/lib/atomic_64.S linux-2.6.32.45/arch/sparc/lib/atomic_64.S
4819--- linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4820+++ linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4821@@ -18,7 +18,12 @@
4822 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4823 BACKOFF_SETUP(%o2)
4824 1: lduw [%o1], %g1
4825- add %g1, %o0, %g7
4826+ addcc %g1, %o0, %g7
4827+
4828+#ifdef CONFIG_PAX_REFCOUNT
4829+ tvs %icc, 6
4830+#endif
4831+
4832 cas [%o1], %g1, %g7
4833 cmp %g1, %g7
4834 bne,pn %icc, 2f
4835@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4836 2: BACKOFF_SPIN(%o2, %o3, 1b)
4837 .size atomic_add, .-atomic_add
4838
4839+ .globl atomic_add_unchecked
4840+ .type atomic_add_unchecked,#function
4841+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4842+ BACKOFF_SETUP(%o2)
4843+1: lduw [%o1], %g1
4844+ add %g1, %o0, %g7
4845+ cas [%o1], %g1, %g7
4846+ cmp %g1, %g7
4847+ bne,pn %icc, 2f
4848+ nop
4849+ retl
4850+ nop
4851+2: BACKOFF_SPIN(%o2, %o3, 1b)
4852+ .size atomic_add_unchecked, .-atomic_add_unchecked
4853+
4854 .globl atomic_sub
4855 .type atomic_sub,#function
4856 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4857 BACKOFF_SETUP(%o2)
4858 1: lduw [%o1], %g1
4859- sub %g1, %o0, %g7
4860+ subcc %g1, %o0, %g7
4861+
4862+#ifdef CONFIG_PAX_REFCOUNT
4863+ tvs %icc, 6
4864+#endif
4865+
4866 cas [%o1], %g1, %g7
4867 cmp %g1, %g7
4868 bne,pn %icc, 2f
4869@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4870 2: BACKOFF_SPIN(%o2, %o3, 1b)
4871 .size atomic_sub, .-atomic_sub
4872
4873+ .globl atomic_sub_unchecked
4874+ .type atomic_sub_unchecked,#function
4875+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4876+ BACKOFF_SETUP(%o2)
4877+1: lduw [%o1], %g1
4878+ sub %g1, %o0, %g7
4879+ cas [%o1], %g1, %g7
4880+ cmp %g1, %g7
4881+ bne,pn %icc, 2f
4882+ nop
4883+ retl
4884+ nop
4885+2: BACKOFF_SPIN(%o2, %o3, 1b)
4886+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4887+
4888 .globl atomic_add_ret
4889 .type atomic_add_ret,#function
4890 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4891 BACKOFF_SETUP(%o2)
4892 1: lduw [%o1], %g1
4893- add %g1, %o0, %g7
4894+ addcc %g1, %o0, %g7
4895+
4896+#ifdef CONFIG_PAX_REFCOUNT
4897+ tvs %icc, 6
4898+#endif
4899+
4900 cas [%o1], %g1, %g7
4901 cmp %g1, %g7
4902 bne,pn %icc, 2f
4903@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4904 2: BACKOFF_SPIN(%o2, %o3, 1b)
4905 .size atomic_add_ret, .-atomic_add_ret
4906
4907+ .globl atomic_add_ret_unchecked
4908+ .type atomic_add_ret_unchecked,#function
4909+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4910+ BACKOFF_SETUP(%o2)
4911+1: lduw [%o1], %g1
4912+ addcc %g1, %o0, %g7
4913+ cas [%o1], %g1, %g7
4914+ cmp %g1, %g7
4915+ bne,pn %icc, 2f
4916+ add %g7, %o0, %g7
4917+ sra %g7, 0, %o0
4918+ retl
4919+ nop
4920+2: BACKOFF_SPIN(%o2, %o3, 1b)
4921+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4922+
4923 .globl atomic_sub_ret
4924 .type atomic_sub_ret,#function
4925 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4926 BACKOFF_SETUP(%o2)
4927 1: lduw [%o1], %g1
4928- sub %g1, %o0, %g7
4929+ subcc %g1, %o0, %g7
4930+
4931+#ifdef CONFIG_PAX_REFCOUNT
4932+ tvs %icc, 6
4933+#endif
4934+
4935 cas [%o1], %g1, %g7
4936 cmp %g1, %g7
4937 bne,pn %icc, 2f
4938@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4939 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4940 BACKOFF_SETUP(%o2)
4941 1: ldx [%o1], %g1
4942- add %g1, %o0, %g7
4943+ addcc %g1, %o0, %g7
4944+
4945+#ifdef CONFIG_PAX_REFCOUNT
4946+ tvs %xcc, 6
4947+#endif
4948+
4949 casx [%o1], %g1, %g7
4950 cmp %g1, %g7
4951 bne,pn %xcc, 2f
4952@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4953 2: BACKOFF_SPIN(%o2, %o3, 1b)
4954 .size atomic64_add, .-atomic64_add
4955
4956+ .globl atomic64_add_unchecked
4957+ .type atomic64_add_unchecked,#function
4958+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4959+ BACKOFF_SETUP(%o2)
4960+1: ldx [%o1], %g1
4961+ addcc %g1, %o0, %g7
4962+ casx [%o1], %g1, %g7
4963+ cmp %g1, %g7
4964+ bne,pn %xcc, 2f
4965+ nop
4966+ retl
4967+ nop
4968+2: BACKOFF_SPIN(%o2, %o3, 1b)
4969+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4970+
4971 .globl atomic64_sub
4972 .type atomic64_sub,#function
4973 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4974 BACKOFF_SETUP(%o2)
4975 1: ldx [%o1], %g1
4976- sub %g1, %o0, %g7
4977+ subcc %g1, %o0, %g7
4978+
4979+#ifdef CONFIG_PAX_REFCOUNT
4980+ tvs %xcc, 6
4981+#endif
4982+
4983 casx [%o1], %g1, %g7
4984 cmp %g1, %g7
4985 bne,pn %xcc, 2f
4986@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4987 2: BACKOFF_SPIN(%o2, %o3, 1b)
4988 .size atomic64_sub, .-atomic64_sub
4989
4990+ .globl atomic64_sub_unchecked
4991+ .type atomic64_sub_unchecked,#function
4992+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4993+ BACKOFF_SETUP(%o2)
4994+1: ldx [%o1], %g1
4995+ subcc %g1, %o0, %g7
4996+ casx [%o1], %g1, %g7
4997+ cmp %g1, %g7
4998+ bne,pn %xcc, 2f
4999+ nop
5000+ retl
5001+ nop
5002+2: BACKOFF_SPIN(%o2, %o3, 1b)
5003+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5004+
5005 .globl atomic64_add_ret
5006 .type atomic64_add_ret,#function
5007 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5008 BACKOFF_SETUP(%o2)
5009 1: ldx [%o1], %g1
5010- add %g1, %o0, %g7
5011+ addcc %g1, %o0, %g7
5012+
5013+#ifdef CONFIG_PAX_REFCOUNT
5014+ tvs %xcc, 6
5015+#endif
5016+
5017 casx [%o1], %g1, %g7
5018 cmp %g1, %g7
5019 bne,pn %xcc, 2f
5020@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
5021 2: BACKOFF_SPIN(%o2, %o3, 1b)
5022 .size atomic64_add_ret, .-atomic64_add_ret
5023
5024+ .globl atomic64_add_ret_unchecked
5025+ .type atomic64_add_ret_unchecked,#function
5026+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5027+ BACKOFF_SETUP(%o2)
5028+1: ldx [%o1], %g1
5029+ addcc %g1, %o0, %g7
5030+ casx [%o1], %g1, %g7
5031+ cmp %g1, %g7
5032+ bne,pn %xcc, 2f
5033+ add %g7, %o0, %g7
5034+ mov %g7, %o0
5035+ retl
5036+ nop
5037+2: BACKOFF_SPIN(%o2, %o3, 1b)
5038+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5039+
5040 .globl atomic64_sub_ret
5041 .type atomic64_sub_ret,#function
5042 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5043 BACKOFF_SETUP(%o2)
5044 1: ldx [%o1], %g1
5045- sub %g1, %o0, %g7
5046+ subcc %g1, %o0, %g7
5047+
5048+#ifdef CONFIG_PAX_REFCOUNT
5049+ tvs %xcc, 6
5050+#endif
5051+
5052 casx [%o1], %g1, %g7
5053 cmp %g1, %g7
5054 bne,pn %xcc, 2f
5055diff -urNp linux-2.6.32.45/arch/sparc/lib/ksyms.c linux-2.6.32.45/arch/sparc/lib/ksyms.c
5056--- linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
5057+++ linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-08-19 23:05:14.000000000 -0400
5058@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5059
5060 /* Atomic counter implementation. */
5061 EXPORT_SYMBOL(atomic_add);
5062+EXPORT_SYMBOL(atomic_add_unchecked);
5063 EXPORT_SYMBOL(atomic_add_ret);
5064+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5065 EXPORT_SYMBOL(atomic_sub);
5066+EXPORT_SYMBOL(atomic_sub_unchecked);
5067 EXPORT_SYMBOL(atomic_sub_ret);
5068 EXPORT_SYMBOL(atomic64_add);
5069+EXPORT_SYMBOL(atomic64_add_unchecked);
5070 EXPORT_SYMBOL(atomic64_add_ret);
5071+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5072 EXPORT_SYMBOL(atomic64_sub);
5073+EXPORT_SYMBOL(atomic64_sub_unchecked);
5074 EXPORT_SYMBOL(atomic64_sub_ret);
5075
5076 /* Atomic bit operations. */
5077diff -urNp linux-2.6.32.45/arch/sparc/lib/Makefile linux-2.6.32.45/arch/sparc/lib/Makefile
5078--- linux-2.6.32.45/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
5079+++ linux-2.6.32.45/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
5080@@ -2,7 +2,7 @@
5081 #
5082
5083 asflags-y := -ansi -DST_DIV0=0x02
5084-ccflags-y := -Werror
5085+#ccflags-y := -Werror
5086
5087 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5088 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5089diff -urNp linux-2.6.32.45/arch/sparc/lib/rwsem_64.S linux-2.6.32.45/arch/sparc/lib/rwsem_64.S
5090--- linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
5091+++ linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
5092@@ -11,7 +11,12 @@
5093 .globl __down_read
5094 __down_read:
5095 1: lduw [%o0], %g1
5096- add %g1, 1, %g7
5097+ addcc %g1, 1, %g7
5098+
5099+#ifdef CONFIG_PAX_REFCOUNT
5100+ tvs %icc, 6
5101+#endif
5102+
5103 cas [%o0], %g1, %g7
5104 cmp %g1, %g7
5105 bne,pn %icc, 1b
5106@@ -33,7 +38,12 @@ __down_read:
5107 .globl __down_read_trylock
5108 __down_read_trylock:
5109 1: lduw [%o0], %g1
5110- add %g1, 1, %g7
5111+ addcc %g1, 1, %g7
5112+
5113+#ifdef CONFIG_PAX_REFCOUNT
5114+ tvs %icc, 6
5115+#endif
5116+
5117 cmp %g7, 0
5118 bl,pn %icc, 2f
5119 mov 0, %o1
5120@@ -51,7 +61,12 @@ __down_write:
5121 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5122 1:
5123 lduw [%o0], %g3
5124- add %g3, %g1, %g7
5125+ addcc %g3, %g1, %g7
5126+
5127+#ifdef CONFIG_PAX_REFCOUNT
5128+ tvs %icc, 6
5129+#endif
5130+
5131 cas [%o0], %g3, %g7
5132 cmp %g3, %g7
5133 bne,pn %icc, 1b
5134@@ -77,7 +92,12 @@ __down_write_trylock:
5135 cmp %g3, 0
5136 bne,pn %icc, 2f
5137 mov 0, %o1
5138- add %g3, %g1, %g7
5139+ addcc %g3, %g1, %g7
5140+
5141+#ifdef CONFIG_PAX_REFCOUNT
5142+ tvs %icc, 6
5143+#endif
5144+
5145 cas [%o0], %g3, %g7
5146 cmp %g3, %g7
5147 bne,pn %icc, 1b
5148@@ -90,7 +110,12 @@ __down_write_trylock:
5149 __up_read:
5150 1:
5151 lduw [%o0], %g1
5152- sub %g1, 1, %g7
5153+ subcc %g1, 1, %g7
5154+
5155+#ifdef CONFIG_PAX_REFCOUNT
5156+ tvs %icc, 6
5157+#endif
5158+
5159 cas [%o0], %g1, %g7
5160 cmp %g1, %g7
5161 bne,pn %icc, 1b
5162@@ -118,7 +143,12 @@ __up_write:
5163 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5164 1:
5165 lduw [%o0], %g3
5166- sub %g3, %g1, %g7
5167+ subcc %g3, %g1, %g7
5168+
5169+#ifdef CONFIG_PAX_REFCOUNT
5170+ tvs %icc, 6
5171+#endif
5172+
5173 cas [%o0], %g3, %g7
5174 cmp %g3, %g7
5175 bne,pn %icc, 1b
5176@@ -143,7 +173,12 @@ __downgrade_write:
5177 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5178 1:
5179 lduw [%o0], %g3
5180- sub %g3, %g1, %g7
5181+ subcc %g3, %g1, %g7
5182+
5183+#ifdef CONFIG_PAX_REFCOUNT
5184+ tvs %icc, 6
5185+#endif
5186+
5187 cas [%o0], %g3, %g7
5188 cmp %g3, %g7
5189 bne,pn %icc, 1b
5190diff -urNp linux-2.6.32.45/arch/sparc/Makefile linux-2.6.32.45/arch/sparc/Makefile
5191--- linux-2.6.32.45/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5192+++ linux-2.6.32.45/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5193@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5194 # Export what is needed by arch/sparc/boot/Makefile
5195 export VMLINUX_INIT VMLINUX_MAIN
5196 VMLINUX_INIT := $(head-y) $(init-y)
5197-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5198+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5199 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5200 VMLINUX_MAIN += $(drivers-y) $(net-y)
5201
5202diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_32.c linux-2.6.32.45/arch/sparc/mm/fault_32.c
5203--- linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5204+++ linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5205@@ -21,6 +21,9 @@
5206 #include <linux/interrupt.h>
5207 #include <linux/module.h>
5208 #include <linux/kdebug.h>
5209+#include <linux/slab.h>
5210+#include <linux/pagemap.h>
5211+#include <linux/compiler.h>
5212
5213 #include <asm/system.h>
5214 #include <asm/page.h>
5215@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5216 return safe_compute_effective_address(regs, insn);
5217 }
5218
5219+#ifdef CONFIG_PAX_PAGEEXEC
5220+#ifdef CONFIG_PAX_DLRESOLVE
5221+static void pax_emuplt_close(struct vm_area_struct *vma)
5222+{
5223+ vma->vm_mm->call_dl_resolve = 0UL;
5224+}
5225+
5226+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5227+{
5228+ unsigned int *kaddr;
5229+
5230+ vmf->page = alloc_page(GFP_HIGHUSER);
5231+ if (!vmf->page)
5232+ return VM_FAULT_OOM;
5233+
5234+ kaddr = kmap(vmf->page);
5235+ memset(kaddr, 0, PAGE_SIZE);
5236+ kaddr[0] = 0x9DE3BFA8U; /* save */
5237+ flush_dcache_page(vmf->page);
5238+ kunmap(vmf->page);
5239+ return VM_FAULT_MAJOR;
5240+}
5241+
5242+static const struct vm_operations_struct pax_vm_ops = {
5243+ .close = pax_emuplt_close,
5244+ .fault = pax_emuplt_fault
5245+};
5246+
5247+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5248+{
5249+ int ret;
5250+
5251+ vma->vm_mm = current->mm;
5252+ vma->vm_start = addr;
5253+ vma->vm_end = addr + PAGE_SIZE;
5254+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5255+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5256+ vma->vm_ops = &pax_vm_ops;
5257+
5258+ ret = insert_vm_struct(current->mm, vma);
5259+ if (ret)
5260+ return ret;
5261+
5262+ ++current->mm->total_vm;
5263+ return 0;
5264+}
5265+#endif
5266+
5267+/*
5268+ * PaX: decide what to do with offenders (regs->pc = fault address)
5269+ *
5270+ * returns 1 when task should be killed
5271+ * 2 when patched PLT trampoline was detected
5272+ * 3 when unpatched PLT trampoline was detected
5273+ */
5274+static int pax_handle_fetch_fault(struct pt_regs *regs)
5275+{
5276+
5277+#ifdef CONFIG_PAX_EMUPLT
5278+ int err;
5279+
5280+ do { /* PaX: patched PLT emulation #1 */
5281+ unsigned int sethi1, sethi2, jmpl;
5282+
5283+ err = get_user(sethi1, (unsigned int *)regs->pc);
5284+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5285+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5286+
5287+ if (err)
5288+ break;
5289+
5290+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5291+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5292+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5293+ {
5294+ unsigned int addr;
5295+
5296+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5297+ addr = regs->u_regs[UREG_G1];
5298+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5299+ regs->pc = addr;
5300+ regs->npc = addr+4;
5301+ return 2;
5302+ }
5303+ } while (0);
5304+
5305+ { /* PaX: patched PLT emulation #2 */
5306+ unsigned int ba;
5307+
5308+ err = get_user(ba, (unsigned int *)regs->pc);
5309+
5310+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5311+ unsigned int addr;
5312+
5313+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5314+ regs->pc = addr;
5315+ regs->npc = addr+4;
5316+ return 2;
5317+ }
5318+ }
5319+
5320+ do { /* PaX: patched PLT emulation #3 */
5321+ unsigned int sethi, jmpl, nop;
5322+
5323+ err = get_user(sethi, (unsigned int *)regs->pc);
5324+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5325+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5326+
5327+ if (err)
5328+ break;
5329+
5330+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5331+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5332+ nop == 0x01000000U)
5333+ {
5334+ unsigned int addr;
5335+
5336+ addr = (sethi & 0x003FFFFFU) << 10;
5337+ regs->u_regs[UREG_G1] = addr;
5338+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5339+ regs->pc = addr;
5340+ regs->npc = addr+4;
5341+ return 2;
5342+ }
5343+ } while (0);
5344+
5345+ do { /* PaX: unpatched PLT emulation step 1 */
5346+ unsigned int sethi, ba, nop;
5347+
5348+ err = get_user(sethi, (unsigned int *)regs->pc);
5349+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5350+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5351+
5352+ if (err)
5353+ break;
5354+
5355+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5356+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5357+ nop == 0x01000000U)
5358+ {
5359+ unsigned int addr, save, call;
5360+
5361+ if ((ba & 0xFFC00000U) == 0x30800000U)
5362+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5363+ else
5364+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5365+
5366+ err = get_user(save, (unsigned int *)addr);
5367+ err |= get_user(call, (unsigned int *)(addr+4));
5368+ err |= get_user(nop, (unsigned int *)(addr+8));
5369+ if (err)
5370+ break;
5371+
5372+#ifdef CONFIG_PAX_DLRESOLVE
5373+ if (save == 0x9DE3BFA8U &&
5374+ (call & 0xC0000000U) == 0x40000000U &&
5375+ nop == 0x01000000U)
5376+ {
5377+ struct vm_area_struct *vma;
5378+ unsigned long call_dl_resolve;
5379+
5380+ down_read(&current->mm->mmap_sem);
5381+ call_dl_resolve = current->mm->call_dl_resolve;
5382+ up_read(&current->mm->mmap_sem);
5383+ if (likely(call_dl_resolve))
5384+ goto emulate;
5385+
5386+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5387+
5388+ down_write(&current->mm->mmap_sem);
5389+ if (current->mm->call_dl_resolve) {
5390+ call_dl_resolve = current->mm->call_dl_resolve;
5391+ up_write(&current->mm->mmap_sem);
5392+ if (vma)
5393+ kmem_cache_free(vm_area_cachep, vma);
5394+ goto emulate;
5395+ }
5396+
5397+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5398+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5399+ up_write(&current->mm->mmap_sem);
5400+ if (vma)
5401+ kmem_cache_free(vm_area_cachep, vma);
5402+ return 1;
5403+ }
5404+
5405+ if (pax_insert_vma(vma, call_dl_resolve)) {
5406+ up_write(&current->mm->mmap_sem);
5407+ kmem_cache_free(vm_area_cachep, vma);
5408+ return 1;
5409+ }
5410+
5411+ current->mm->call_dl_resolve = call_dl_resolve;
5412+ up_write(&current->mm->mmap_sem);
5413+
5414+emulate:
5415+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5416+ regs->pc = call_dl_resolve;
5417+ regs->npc = addr+4;
5418+ return 3;
5419+ }
5420+#endif
5421+
5422+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5423+ if ((save & 0xFFC00000U) == 0x05000000U &&
5424+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5425+ nop == 0x01000000U)
5426+ {
5427+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5428+ regs->u_regs[UREG_G2] = addr + 4;
5429+ addr = (save & 0x003FFFFFU) << 10;
5430+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5431+ regs->pc = addr;
5432+ regs->npc = addr+4;
5433+ return 3;
5434+ }
5435+ }
5436+ } while (0);
5437+
5438+ do { /* PaX: unpatched PLT emulation step 2 */
5439+ unsigned int save, call, nop;
5440+
5441+ err = get_user(save, (unsigned int *)(regs->pc-4));
5442+ err |= get_user(call, (unsigned int *)regs->pc);
5443+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
5444+ if (err)
5445+ break;
5446+
5447+ if (save == 0x9DE3BFA8U &&
5448+ (call & 0xC0000000U) == 0x40000000U &&
5449+ nop == 0x01000000U)
5450+ {
5451+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5452+
5453+ regs->u_regs[UREG_RETPC] = regs->pc;
5454+ regs->pc = dl_resolve;
5455+ regs->npc = dl_resolve+4;
5456+ return 3;
5457+ }
5458+ } while (0);
5459+#endif
5460+
5461+ return 1;
5462+}
5463+
5464+void pax_report_insns(void *pc, void *sp)
5465+{
5466+ unsigned long i;
5467+
5468+ printk(KERN_ERR "PAX: bytes at PC: ");
5469+ for (i = 0; i < 8; i++) {
5470+ unsigned int c;
5471+ if (get_user(c, (unsigned int *)pc+i))
5472+ printk(KERN_CONT "???????? ");
5473+ else
5474+ printk(KERN_CONT "%08x ", c);
5475+ }
5476+ printk("\n");
5477+}
5478+#endif
5479+
5480 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5481 unsigned long address)
5482 {
5483@@ -231,6 +495,24 @@ good_area:
5484 if(!(vma->vm_flags & VM_WRITE))
5485 goto bad_area;
5486 } else {
5487+
5488+#ifdef CONFIG_PAX_PAGEEXEC
5489+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5490+ up_read(&mm->mmap_sem);
5491+ switch (pax_handle_fetch_fault(regs)) {
5492+
5493+#ifdef CONFIG_PAX_EMUPLT
5494+ case 2:
5495+ case 3:
5496+ return;
5497+#endif
5498+
5499+ }
5500+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5501+ do_group_exit(SIGKILL);
5502+ }
5503+#endif
5504+
5505 /* Allow reads even for write-only mappings */
5506 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5507 goto bad_area;
5508diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_64.c linux-2.6.32.45/arch/sparc/mm/fault_64.c
5509--- linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5510+++ linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5511@@ -20,6 +20,9 @@
5512 #include <linux/kprobes.h>
5513 #include <linux/kdebug.h>
5514 #include <linux/percpu.h>
5515+#include <linux/slab.h>
5516+#include <linux/pagemap.h>
5517+#include <linux/compiler.h>
5518
5519 #include <asm/page.h>
5520 #include <asm/pgtable.h>
5521@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5522 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5523 regs->tpc);
5524 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5525- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5526+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5527 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5528 dump_stack();
5529 unhandled_fault(regs->tpc, current, regs);
5530@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5531 show_regs(regs);
5532 }
5533
5534+#ifdef CONFIG_PAX_PAGEEXEC
5535+#ifdef CONFIG_PAX_DLRESOLVE
5536+static void pax_emuplt_close(struct vm_area_struct *vma)
5537+{
5538+ vma->vm_mm->call_dl_resolve = 0UL;
5539+}
5540+
5541+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5542+{
5543+ unsigned int *kaddr;
5544+
5545+ vmf->page = alloc_page(GFP_HIGHUSER);
5546+ if (!vmf->page)
5547+ return VM_FAULT_OOM;
5548+
5549+ kaddr = kmap(vmf->page);
5550+ memset(kaddr, 0, PAGE_SIZE);
5551+ kaddr[0] = 0x9DE3BFA8U; /* save */
5552+ flush_dcache_page(vmf->page);
5553+ kunmap(vmf->page);
5554+ return VM_FAULT_MAJOR;
5555+}
5556+
5557+static const struct vm_operations_struct pax_vm_ops = {
5558+ .close = pax_emuplt_close,
5559+ .fault = pax_emuplt_fault
5560+};
5561+
5562+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5563+{
5564+ int ret;
5565+
5566+ vma->vm_mm = current->mm;
5567+ vma->vm_start = addr;
5568+ vma->vm_end = addr + PAGE_SIZE;
5569+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5570+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5571+ vma->vm_ops = &pax_vm_ops;
5572+
5573+ ret = insert_vm_struct(current->mm, vma);
5574+ if (ret)
5575+ return ret;
5576+
5577+ ++current->mm->total_vm;
5578+ return 0;
5579+}
5580+#endif
5581+
5582+/*
5583+ * PaX: decide what to do with offenders (regs->tpc = fault address)
5584+ *
5585+ * returns 1 when task should be killed
5586+ * 2 when patched PLT trampoline was detected
5587+ * 3 when unpatched PLT trampoline was detected
5588+ */
5589+static int pax_handle_fetch_fault(struct pt_regs *regs)
5590+{
5591+
5592+#ifdef CONFIG_PAX_EMUPLT
5593+ int err;
5594+
5595+ do { /* PaX: patched PLT emulation #1 */
5596+ unsigned int sethi1, sethi2, jmpl;
5597+
5598+ err = get_user(sethi1, (unsigned int *)regs->tpc);
5599+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5600+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5601+
5602+ if (err)
5603+ break;
5604+
5605+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5606+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5607+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5608+ {
5609+ unsigned long addr;
5610+
5611+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5612+ addr = regs->u_regs[UREG_G1];
5613+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5614+
5615+ if (test_thread_flag(TIF_32BIT))
5616+ addr &= 0xFFFFFFFFUL;
5617+
5618+ regs->tpc = addr;
5619+ regs->tnpc = addr+4;
5620+ return 2;
5621+ }
5622+ } while (0);
5623+
5624+ { /* PaX: patched PLT emulation #2 */
5625+ unsigned int ba;
5626+
5627+ err = get_user(ba, (unsigned int *)regs->tpc);
5628+
5629+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5630+ unsigned long addr;
5631+
5632+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5633+
5634+ if (test_thread_flag(TIF_32BIT))
5635+ addr &= 0xFFFFFFFFUL;
5636+
5637+ regs->tpc = addr;
5638+ regs->tnpc = addr+4;
5639+ return 2;
5640+ }
5641+ }
5642+
5643+ do { /* PaX: patched PLT emulation #3 */
5644+ unsigned int sethi, jmpl, nop;
5645+
5646+ err = get_user(sethi, (unsigned int *)regs->tpc);
5647+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5648+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5649+
5650+ if (err)
5651+ break;
5652+
5653+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5654+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5655+ nop == 0x01000000U)
5656+ {
5657+ unsigned long addr;
5658+
5659+ addr = (sethi & 0x003FFFFFU) << 10;
5660+ regs->u_regs[UREG_G1] = addr;
5661+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5662+
5663+ if (test_thread_flag(TIF_32BIT))
5664+ addr &= 0xFFFFFFFFUL;
5665+
5666+ regs->tpc = addr;
5667+ regs->tnpc = addr+4;
5668+ return 2;
5669+ }
5670+ } while (0);
5671+
5672+ do { /* PaX: patched PLT emulation #4 */
5673+ unsigned int sethi, mov1, call, mov2;
5674+
5675+ err = get_user(sethi, (unsigned int *)regs->tpc);
5676+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5677+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5678+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5679+
5680+ if (err)
5681+ break;
5682+
5683+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5684+ mov1 == 0x8210000FU &&
5685+ (call & 0xC0000000U) == 0x40000000U &&
5686+ mov2 == 0x9E100001U)
5687+ {
5688+ unsigned long addr;
5689+
5690+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5691+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5692+
5693+ if (test_thread_flag(TIF_32BIT))
5694+ addr &= 0xFFFFFFFFUL;
5695+
5696+ regs->tpc = addr;
5697+ regs->tnpc = addr+4;
5698+ return 2;
5699+ }
5700+ } while (0);
5701+
5702+ do { /* PaX: patched PLT emulation #5 */
5703+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5704+
5705+ err = get_user(sethi, (unsigned int *)regs->tpc);
5706+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5707+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5708+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5709+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5710+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5711+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5712+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5713+
5714+ if (err)
5715+ break;
5716+
5717+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5718+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5719+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5720+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5721+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5722+ sllx == 0x83287020U &&
5723+ jmpl == 0x81C04005U &&
5724+ nop == 0x01000000U)
5725+ {
5726+ unsigned long addr;
5727+
5728+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5729+ regs->u_regs[UREG_G1] <<= 32;
5730+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5731+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5732+ regs->tpc = addr;
5733+ regs->tnpc = addr+4;
5734+ return 2;
5735+ }
5736+ } while (0);
5737+
5738+ do { /* PaX: patched PLT emulation #6 */
5739+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5740+
5741+ err = get_user(sethi, (unsigned int *)regs->tpc);
5742+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5743+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5744+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5745+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5746+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5747+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5748+
5749+ if (err)
5750+ break;
5751+
5752+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5753+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5754+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5755+ sllx == 0x83287020U &&
5756+ (or & 0xFFFFE000U) == 0x8A116000U &&
5757+ jmpl == 0x81C04005U &&
5758+ nop == 0x01000000U)
5759+ {
5760+ unsigned long addr;
5761+
5762+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5763+ regs->u_regs[UREG_G1] <<= 32;
5764+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5765+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5766+ regs->tpc = addr;
5767+ regs->tnpc = addr+4;
5768+ return 2;
5769+ }
5770+ } while (0);
5771+
5772+ do { /* PaX: unpatched PLT emulation step 1 */
5773+ unsigned int sethi, ba, nop;
5774+
5775+ err = get_user(sethi, (unsigned int *)regs->tpc);
5776+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5777+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5778+
5779+ if (err)
5780+ break;
5781+
5782+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5783+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5784+ nop == 0x01000000U)
5785+ {
5786+ unsigned long addr;
5787+ unsigned int save, call;
5788+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5789+
5790+ if ((ba & 0xFFC00000U) == 0x30800000U)
5791+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5792+ else
5793+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5794+
5795+ if (test_thread_flag(TIF_32BIT))
5796+ addr &= 0xFFFFFFFFUL;
5797+
5798+ err = get_user(save, (unsigned int *)addr);
5799+ err |= get_user(call, (unsigned int *)(addr+4));
5800+ err |= get_user(nop, (unsigned int *)(addr+8));
5801+ if (err)
5802+ break;
5803+
5804+#ifdef CONFIG_PAX_DLRESOLVE
5805+ if (save == 0x9DE3BFA8U &&
5806+ (call & 0xC0000000U) == 0x40000000U &&
5807+ nop == 0x01000000U)
5808+ {
5809+ struct vm_area_struct *vma;
5810+ unsigned long call_dl_resolve;
5811+
5812+ down_read(&current->mm->mmap_sem);
5813+ call_dl_resolve = current->mm->call_dl_resolve;
5814+ up_read(&current->mm->mmap_sem);
5815+ if (likely(call_dl_resolve))
5816+ goto emulate;
5817+
5818+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5819+
5820+ down_write(&current->mm->mmap_sem);
5821+ if (current->mm->call_dl_resolve) {
5822+ call_dl_resolve = current->mm->call_dl_resolve;
5823+ up_write(&current->mm->mmap_sem);
5824+ if (vma)
5825+ kmem_cache_free(vm_area_cachep, vma);
5826+ goto emulate;
5827+ }
5828+
5829+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5830+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5831+ up_write(&current->mm->mmap_sem);
5832+ if (vma)
5833+ kmem_cache_free(vm_area_cachep, vma);
5834+ return 1;
5835+ }
5836+
5837+ if (pax_insert_vma(vma, call_dl_resolve)) {
5838+ up_write(&current->mm->mmap_sem);
5839+ kmem_cache_free(vm_area_cachep, vma);
5840+ return 1;
5841+ }
5842+
5843+ current->mm->call_dl_resolve = call_dl_resolve;
5844+ up_write(&current->mm->mmap_sem);
5845+
5846+emulate:
5847+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5848+ regs->tpc = call_dl_resolve;
5849+ regs->tnpc = addr+4;
5850+ return 3;
5851+ }
5852+#endif
5853+
5854+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5855+ if ((save & 0xFFC00000U) == 0x05000000U &&
5856+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5857+ nop == 0x01000000U)
5858+ {
5859+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5860+ regs->u_regs[UREG_G2] = addr + 4;
5861+ addr = (save & 0x003FFFFFU) << 10;
5862+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5863+
5864+ if (test_thread_flag(TIF_32BIT))
5865+ addr &= 0xFFFFFFFFUL;
5866+
5867+ regs->tpc = addr;
5868+ regs->tnpc = addr+4;
5869+ return 3;
5870+ }
5871+
5872+ /* PaX: 64-bit PLT stub */
5873+ err = get_user(sethi1, (unsigned int *)addr);
5874+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5875+ err |= get_user(or1, (unsigned int *)(addr+8));
5876+ err |= get_user(or2, (unsigned int *)(addr+12));
5877+ err |= get_user(sllx, (unsigned int *)(addr+16));
5878+ err |= get_user(add, (unsigned int *)(addr+20));
5879+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5880+ err |= get_user(nop, (unsigned int *)(addr+28));
5881+ if (err)
5882+ break;
5883+
5884+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5885+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5886+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5887+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5888+ sllx == 0x89293020U &&
5889+ add == 0x8A010005U &&
5890+ jmpl == 0x89C14000U &&
5891+ nop == 0x01000000U)
5892+ {
5893+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5894+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5895+ regs->u_regs[UREG_G4] <<= 32;
5896+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5897+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5898+ regs->u_regs[UREG_G4] = addr + 24;
5899+ addr = regs->u_regs[UREG_G5];
5900+ regs->tpc = addr;
5901+ regs->tnpc = addr+4;
5902+ return 3;
5903+ }
5904+ }
5905+ } while (0);
5906+
5907+#ifdef CONFIG_PAX_DLRESOLVE
5908+ do { /* PaX: unpatched PLT emulation step 2 */
5909+ unsigned int save, call, nop;
5910+
5911+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5912+ err |= get_user(call, (unsigned int *)regs->tpc);
5913+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5914+ if (err)
5915+ break;
5916+
5917+ if (save == 0x9DE3BFA8U &&
5918+ (call & 0xC0000000U) == 0x40000000U &&
5919+ nop == 0x01000000U)
5920+ {
5921+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5922+
5923+ if (test_thread_flag(TIF_32BIT))
5924+ dl_resolve &= 0xFFFFFFFFUL;
5925+
5926+ regs->u_regs[UREG_RETPC] = regs->tpc;
5927+ regs->tpc = dl_resolve;
5928+ regs->tnpc = dl_resolve+4;
5929+ return 3;
5930+ }
5931+ } while (0);
5932+#endif
5933+
5934+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5935+ unsigned int sethi, ba, nop;
5936+
5937+ err = get_user(sethi, (unsigned int *)regs->tpc);
5938+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5939+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5940+
5941+ if (err)
5942+ break;
5943+
5944+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5945+ (ba & 0xFFF00000U) == 0x30600000U &&
5946+ nop == 0x01000000U)
5947+ {
5948+ unsigned long addr;
5949+
5950+ addr = (sethi & 0x003FFFFFU) << 10;
5951+ regs->u_regs[UREG_G1] = addr;
5952+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5953+
5954+ if (test_thread_flag(TIF_32BIT))
5955+ addr &= 0xFFFFFFFFUL;
5956+
5957+ regs->tpc = addr;
5958+ regs->tnpc = addr+4;
5959+ return 2;
5960+ }
5961+ } while (0);
5962+
5963+#endif
5964+
5965+ return 1;
5966+}
5967+
5968+void pax_report_insns(void *pc, void *sp)
5969+{
5970+ unsigned long i;
5971+
5972+ printk(KERN_ERR "PAX: bytes at PC: ");
5973+ for (i = 0; i < 8; i++) {
5974+ unsigned int c;
5975+ if (get_user(c, (unsigned int *)pc+i))
5976+ printk(KERN_CONT "???????? ");
5977+ else
5978+ printk(KERN_CONT "%08x ", c);
5979+ }
5980+ printk("\n");
5981+}
5982+#endif
5983+
5984 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5985 {
5986 struct mm_struct *mm = current->mm;
5987@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5988 if (!vma)
5989 goto bad_area;
5990
5991+#ifdef CONFIG_PAX_PAGEEXEC
5992+ /* PaX: detect ITLB misses on non-exec pages */
5993+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5994+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5995+ {
5996+ if (address != regs->tpc)
5997+ goto good_area;
5998+
5999+ up_read(&mm->mmap_sem);
6000+ switch (pax_handle_fetch_fault(regs)) {
6001+
6002+#ifdef CONFIG_PAX_EMUPLT
6003+ case 2:
6004+ case 3:
6005+ return;
6006+#endif
6007+
6008+ }
6009+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6010+ do_group_exit(SIGKILL);
6011+ }
6012+#endif
6013+
6014 /* Pure DTLB misses do not tell us whether the fault causing
6015 * load/store/atomic was a write or not, it only says that there
6016 * was no match. So in such a case we (carefully) read the
6017diff -urNp linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c
6018--- linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
6019+++ linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
6020@@ -69,7 +69,7 @@ full_search:
6021 }
6022 return -ENOMEM;
6023 }
6024- if (likely(!vma || addr + len <= vma->vm_start)) {
6025+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6026 /*
6027 * Remember the place where we stopped the search:
6028 */
6029@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
6030 /* make sure it can fit in the remaining address space */
6031 if (likely(addr > len)) {
6032 vma = find_vma(mm, addr-len);
6033- if (!vma || addr <= vma->vm_start) {
6034+ if (check_heap_stack_gap(vma, addr - len, len)) {
6035 /* remember the address as a hint for next time */
6036 return (mm->free_area_cache = addr-len);
6037 }
6038@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
6039 if (unlikely(mm->mmap_base < len))
6040 goto bottomup;
6041
6042- addr = (mm->mmap_base-len) & HPAGE_MASK;
6043+ addr = mm->mmap_base - len;
6044
6045 do {
6046+ addr &= HPAGE_MASK;
6047 /*
6048 * Lookup failure means no vma is above this address,
6049 * else if new region fits below vma->vm_start,
6050 * return with success:
6051 */
6052 vma = find_vma(mm, addr);
6053- if (likely(!vma || addr+len <= vma->vm_start)) {
6054+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6055 /* remember the address as a hint for next time */
6056 return (mm->free_area_cache = addr);
6057 }
6058@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
6059 mm->cached_hole_size = vma->vm_start - addr;
6060
6061 /* try just below the current vma->vm_start */
6062- addr = (vma->vm_start-len) & HPAGE_MASK;
6063- } while (likely(len < vma->vm_start));
6064+ addr = skip_heap_stack_gap(vma, len);
6065+ } while (!IS_ERR_VALUE(addr));
6066
6067 bottomup:
6068 /*
6069@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
6070 if (addr) {
6071 addr = ALIGN(addr, HPAGE_SIZE);
6072 vma = find_vma(mm, addr);
6073- if (task_size - len >= addr &&
6074- (!vma || addr + len <= vma->vm_start))
6075+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6076 return addr;
6077 }
6078 if (mm->get_unmapped_area == arch_get_unmapped_area)
6079diff -urNp linux-2.6.32.45/arch/sparc/mm/init_32.c linux-2.6.32.45/arch/sparc/mm/init_32.c
6080--- linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
6081+++ linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
6082@@ -317,6 +317,9 @@ extern void device_scan(void);
6083 pgprot_t PAGE_SHARED __read_mostly;
6084 EXPORT_SYMBOL(PAGE_SHARED);
6085
6086+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6087+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6088+
6089 void __init paging_init(void)
6090 {
6091 switch(sparc_cpu_model) {
6092@@ -345,17 +348,17 @@ void __init paging_init(void)
6093
6094 /* Initialize the protection map with non-constant, MMU dependent values. */
6095 protection_map[0] = PAGE_NONE;
6096- protection_map[1] = PAGE_READONLY;
6097- protection_map[2] = PAGE_COPY;
6098- protection_map[3] = PAGE_COPY;
6099+ protection_map[1] = PAGE_READONLY_NOEXEC;
6100+ protection_map[2] = PAGE_COPY_NOEXEC;
6101+ protection_map[3] = PAGE_COPY_NOEXEC;
6102 protection_map[4] = PAGE_READONLY;
6103 protection_map[5] = PAGE_READONLY;
6104 protection_map[6] = PAGE_COPY;
6105 protection_map[7] = PAGE_COPY;
6106 protection_map[8] = PAGE_NONE;
6107- protection_map[9] = PAGE_READONLY;
6108- protection_map[10] = PAGE_SHARED;
6109- protection_map[11] = PAGE_SHARED;
6110+ protection_map[9] = PAGE_READONLY_NOEXEC;
6111+ protection_map[10] = PAGE_SHARED_NOEXEC;
6112+ protection_map[11] = PAGE_SHARED_NOEXEC;
6113 protection_map[12] = PAGE_READONLY;
6114 protection_map[13] = PAGE_READONLY;
6115 protection_map[14] = PAGE_SHARED;
6116diff -urNp linux-2.6.32.45/arch/sparc/mm/Makefile linux-2.6.32.45/arch/sparc/mm/Makefile
6117--- linux-2.6.32.45/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
6118+++ linux-2.6.32.45/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
6119@@ -2,7 +2,7 @@
6120 #
6121
6122 asflags-y := -ansi
6123-ccflags-y := -Werror
6124+#ccflags-y := -Werror
6125
6126 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6127 obj-y += fault_$(BITS).o
6128diff -urNp linux-2.6.32.45/arch/sparc/mm/srmmu.c linux-2.6.32.45/arch/sparc/mm/srmmu.c
6129--- linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
6130+++ linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
6131@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6132 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6133 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6134 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6135+
6136+#ifdef CONFIG_PAX_PAGEEXEC
6137+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6138+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6139+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6140+#endif
6141+
6142 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6143 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6144
6145diff -urNp linux-2.6.32.45/arch/um/include/asm/kmap_types.h linux-2.6.32.45/arch/um/include/asm/kmap_types.h
6146--- linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6147+++ linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6148@@ -23,6 +23,7 @@ enum km_type {
6149 KM_IRQ1,
6150 KM_SOFTIRQ0,
6151 KM_SOFTIRQ1,
6152+ KM_CLEARPAGE,
6153 KM_TYPE_NR
6154 };
6155
6156diff -urNp linux-2.6.32.45/arch/um/include/asm/page.h linux-2.6.32.45/arch/um/include/asm/page.h
6157--- linux-2.6.32.45/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6158+++ linux-2.6.32.45/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6159@@ -14,6 +14,9 @@
6160 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6161 #define PAGE_MASK (~(PAGE_SIZE-1))
6162
6163+#define ktla_ktva(addr) (addr)
6164+#define ktva_ktla(addr) (addr)
6165+
6166 #ifndef __ASSEMBLY__
6167
6168 struct page;
6169diff -urNp linux-2.6.32.45/arch/um/kernel/process.c linux-2.6.32.45/arch/um/kernel/process.c
6170--- linux-2.6.32.45/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6171+++ linux-2.6.32.45/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6172@@ -393,22 +393,6 @@ int singlestepping(void * t)
6173 return 2;
6174 }
6175
6176-/*
6177- * Only x86 and x86_64 have an arch_align_stack().
6178- * All other arches have "#define arch_align_stack(x) (x)"
6179- * in their asm/system.h
6180- * As this is included in UML from asm-um/system-generic.h,
6181- * we can use it to behave as the subarch does.
6182- */
6183-#ifndef arch_align_stack
6184-unsigned long arch_align_stack(unsigned long sp)
6185-{
6186- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6187- sp -= get_random_int() % 8192;
6188- return sp & ~0xf;
6189-}
6190-#endif
6191-
6192 unsigned long get_wchan(struct task_struct *p)
6193 {
6194 unsigned long stack_page, sp, ip;
6195diff -urNp linux-2.6.32.45/arch/um/sys-i386/syscalls.c linux-2.6.32.45/arch/um/sys-i386/syscalls.c
6196--- linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6197+++ linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6198@@ -11,6 +11,21 @@
6199 #include "asm/uaccess.h"
6200 #include "asm/unistd.h"
6201
6202+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6203+{
6204+ unsigned long pax_task_size = TASK_SIZE;
6205+
6206+#ifdef CONFIG_PAX_SEGMEXEC
6207+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6208+ pax_task_size = SEGMEXEC_TASK_SIZE;
6209+#endif
6210+
6211+ if (len > pax_task_size || addr > pax_task_size - len)
6212+ return -EINVAL;
6213+
6214+ return 0;
6215+}
6216+
6217 /*
6218 * Perform the select(nd, in, out, ex, tv) and mmap() system
6219 * calls. Linux/i386 didn't use to be able to handle more than
6220diff -urNp linux-2.6.32.45/arch/x86/boot/bitops.h linux-2.6.32.45/arch/x86/boot/bitops.h
6221--- linux-2.6.32.45/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6222+++ linux-2.6.32.45/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6223@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6224 u8 v;
6225 const u32 *p = (const u32 *)addr;
6226
6227- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6228+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6229 return v;
6230 }
6231
6232@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6233
6234 static inline void set_bit(int nr, void *addr)
6235 {
6236- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6237+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6238 }
6239
6240 #endif /* BOOT_BITOPS_H */
6241diff -urNp linux-2.6.32.45/arch/x86/boot/boot.h linux-2.6.32.45/arch/x86/boot/boot.h
6242--- linux-2.6.32.45/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6243+++ linux-2.6.32.45/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6244@@ -82,7 +82,7 @@ static inline void io_delay(void)
6245 static inline u16 ds(void)
6246 {
6247 u16 seg;
6248- asm("movw %%ds,%0" : "=rm" (seg));
6249+ asm volatile("movw %%ds,%0" : "=rm" (seg));
6250 return seg;
6251 }
6252
6253@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6254 static inline int memcmp(const void *s1, const void *s2, size_t len)
6255 {
6256 u8 diff;
6257- asm("repe; cmpsb; setnz %0"
6258+ asm volatile("repe; cmpsb; setnz %0"
6259 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6260 return diff;
6261 }
6262diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_32.S linux-2.6.32.45/arch/x86/boot/compressed/head_32.S
6263--- linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6264+++ linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6265@@ -76,7 +76,7 @@ ENTRY(startup_32)
6266 notl %eax
6267 andl %eax, %ebx
6268 #else
6269- movl $LOAD_PHYSICAL_ADDR, %ebx
6270+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6271 #endif
6272
6273 /* Target address to relocate to for decompression */
6274@@ -149,7 +149,7 @@ relocated:
6275 * and where it was actually loaded.
6276 */
6277 movl %ebp, %ebx
6278- subl $LOAD_PHYSICAL_ADDR, %ebx
6279+ subl $____LOAD_PHYSICAL_ADDR, %ebx
6280 jz 2f /* Nothing to be done if loaded at compiled addr. */
6281 /*
6282 * Process relocations.
6283@@ -157,8 +157,7 @@ relocated:
6284
6285 1: subl $4, %edi
6286 movl (%edi), %ecx
6287- testl %ecx, %ecx
6288- jz 2f
6289+ jecxz 2f
6290 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6291 jmp 1b
6292 2:
6293diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_64.S linux-2.6.32.45/arch/x86/boot/compressed/head_64.S
6294--- linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6295+++ linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6296@@ -91,7 +91,7 @@ ENTRY(startup_32)
6297 notl %eax
6298 andl %eax, %ebx
6299 #else
6300- movl $LOAD_PHYSICAL_ADDR, %ebx
6301+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6302 #endif
6303
6304 /* Target address to relocate to for decompression */
6305@@ -183,7 +183,7 @@ no_longmode:
6306 hlt
6307 jmp 1b
6308
6309-#include "../../kernel/verify_cpu_64.S"
6310+#include "../../kernel/verify_cpu.S"
6311
6312 /*
6313 * Be careful here startup_64 needs to be at a predictable
6314@@ -234,7 +234,7 @@ ENTRY(startup_64)
6315 notq %rax
6316 andq %rax, %rbp
6317 #else
6318- movq $LOAD_PHYSICAL_ADDR, %rbp
6319+ movq $____LOAD_PHYSICAL_ADDR, %rbp
6320 #endif
6321
6322 /* Target address to relocate to for decompression */
6323diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/Makefile linux-2.6.32.45/arch/x86/boot/compressed/Makefile
6324--- linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-03-27 14:31:47.000000000 -0400
6325+++ linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-08-07 14:38:34.000000000 -0400
6326@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
6327 KBUILD_CFLAGS += $(cflags-y)
6328 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6329 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6330+ifdef CONSTIFY_PLUGIN
6331+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6332+endif
6333
6334 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6335 GCOV_PROFILE := n
6336diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/misc.c linux-2.6.32.45/arch/x86/boot/compressed/misc.c
6337--- linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6338+++ linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6339@@ -288,7 +288,7 @@ static void parse_elf(void *output)
6340 case PT_LOAD:
6341 #ifdef CONFIG_RELOCATABLE
6342 dest = output;
6343- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6344+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6345 #else
6346 dest = (void *)(phdr->p_paddr);
6347 #endif
6348@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6349 error("Destination address too large");
6350 #endif
6351 #ifndef CONFIG_RELOCATABLE
6352- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6353+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6354 error("Wrong destination address");
6355 #endif
6356
6357diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c
6358--- linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6359+++ linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6360@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6361
6362 offs = (olen > ilen) ? olen - ilen : 0;
6363 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6364- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6365+ offs += 64*1024; /* Add 64K bytes slack */
6366 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6367
6368 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6369diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/relocs.c linux-2.6.32.45/arch/x86/boot/compressed/relocs.c
6370--- linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6371+++ linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6372@@ -10,8 +10,11 @@
6373 #define USE_BSD
6374 #include <endian.h>
6375
6376+#include "../../../../include/linux/autoconf.h"
6377+
6378 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6379 static Elf32_Ehdr ehdr;
6380+static Elf32_Phdr *phdr;
6381 static unsigned long reloc_count, reloc_idx;
6382 static unsigned long *relocs;
6383
6384@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6385
6386 static int is_safe_abs_reloc(const char* sym_name)
6387 {
6388- int i;
6389+ unsigned int i;
6390
6391 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6392 if (!strcmp(sym_name, safe_abs_relocs[i]))
6393@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6394 }
6395 }
6396
6397+static void read_phdrs(FILE *fp)
6398+{
6399+ unsigned int i;
6400+
6401+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6402+ if (!phdr) {
6403+ die("Unable to allocate %d program headers\n",
6404+ ehdr.e_phnum);
6405+ }
6406+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6407+ die("Seek to %d failed: %s\n",
6408+ ehdr.e_phoff, strerror(errno));
6409+ }
6410+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6411+ die("Cannot read ELF program headers: %s\n",
6412+ strerror(errno));
6413+ }
6414+ for(i = 0; i < ehdr.e_phnum; i++) {
6415+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6416+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6417+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6418+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6419+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6420+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6421+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6422+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6423+ }
6424+
6425+}
6426+
6427 static void read_shdrs(FILE *fp)
6428 {
6429- int i;
6430+ unsigned int i;
6431 Elf32_Shdr shdr;
6432
6433 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6434@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6435
6436 static void read_strtabs(FILE *fp)
6437 {
6438- int i;
6439+ unsigned int i;
6440 for (i = 0; i < ehdr.e_shnum; i++) {
6441 struct section *sec = &secs[i];
6442 if (sec->shdr.sh_type != SHT_STRTAB) {
6443@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6444
6445 static void read_symtabs(FILE *fp)
6446 {
6447- int i,j;
6448+ unsigned int i,j;
6449 for (i = 0; i < ehdr.e_shnum; i++) {
6450 struct section *sec = &secs[i];
6451 if (sec->shdr.sh_type != SHT_SYMTAB) {
6452@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6453
6454 static void read_relocs(FILE *fp)
6455 {
6456- int i,j;
6457+ unsigned int i,j;
6458+ uint32_t base;
6459+
6460 for (i = 0; i < ehdr.e_shnum; i++) {
6461 struct section *sec = &secs[i];
6462 if (sec->shdr.sh_type != SHT_REL) {
6463@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6464 die("Cannot read symbol table: %s\n",
6465 strerror(errno));
6466 }
6467+ base = 0;
6468+ for (j = 0; j < ehdr.e_phnum; j++) {
6469+ if (phdr[j].p_type != PT_LOAD )
6470+ continue;
6471+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6472+ continue;
6473+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6474+ break;
6475+ }
6476 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6477 Elf32_Rel *rel = &sec->reltab[j];
6478- rel->r_offset = elf32_to_cpu(rel->r_offset);
6479+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6480 rel->r_info = elf32_to_cpu(rel->r_info);
6481 }
6482 }
6483@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6484
6485 static void print_absolute_symbols(void)
6486 {
6487- int i;
6488+ unsigned int i;
6489 printf("Absolute symbols\n");
6490 printf(" Num: Value Size Type Bind Visibility Name\n");
6491 for (i = 0; i < ehdr.e_shnum; i++) {
6492 struct section *sec = &secs[i];
6493 char *sym_strtab;
6494 Elf32_Sym *sh_symtab;
6495- int j;
6496+ unsigned int j;
6497
6498 if (sec->shdr.sh_type != SHT_SYMTAB) {
6499 continue;
6500@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6501
6502 static void print_absolute_relocs(void)
6503 {
6504- int i, printed = 0;
6505+ unsigned int i, printed = 0;
6506
6507 for (i = 0; i < ehdr.e_shnum; i++) {
6508 struct section *sec = &secs[i];
6509 struct section *sec_applies, *sec_symtab;
6510 char *sym_strtab;
6511 Elf32_Sym *sh_symtab;
6512- int j;
6513+ unsigned int j;
6514 if (sec->shdr.sh_type != SHT_REL) {
6515 continue;
6516 }
6517@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6518
6519 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6520 {
6521- int i;
6522+ unsigned int i;
6523 /* Walk through the relocations */
6524 for (i = 0; i < ehdr.e_shnum; i++) {
6525 char *sym_strtab;
6526 Elf32_Sym *sh_symtab;
6527 struct section *sec_applies, *sec_symtab;
6528- int j;
6529+ unsigned int j;
6530 struct section *sec = &secs[i];
6531
6532 if (sec->shdr.sh_type != SHT_REL) {
6533@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6534 if (sym->st_shndx == SHN_ABS) {
6535 continue;
6536 }
6537+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6538+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6539+ continue;
6540+
6541+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6542+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6543+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6544+ continue;
6545+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6546+ continue;
6547+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6548+ continue;
6549+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6550+ continue;
6551+#endif
6552 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6553 /*
6554 * NONE can be ignored and and PC relative
6555@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6556
6557 static void emit_relocs(int as_text)
6558 {
6559- int i;
6560+ unsigned int i;
6561 /* Count how many relocations I have and allocate space for them. */
6562 reloc_count = 0;
6563 walk_relocs(count_reloc);
6564@@ -634,6 +693,7 @@ int main(int argc, char **argv)
6565 fname, strerror(errno));
6566 }
6567 read_ehdr(fp);
6568+ read_phdrs(fp);
6569 read_shdrs(fp);
6570 read_strtabs(fp);
6571 read_symtabs(fp);
6572diff -urNp linux-2.6.32.45/arch/x86/boot/cpucheck.c linux-2.6.32.45/arch/x86/boot/cpucheck.c
6573--- linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6574+++ linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6575@@ -74,7 +74,7 @@ static int has_fpu(void)
6576 u16 fcw = -1, fsw = -1;
6577 u32 cr0;
6578
6579- asm("movl %%cr0,%0" : "=r" (cr0));
6580+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6581 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6582 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6583 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6584@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6585 {
6586 u32 f0, f1;
6587
6588- asm("pushfl ; "
6589+ asm volatile("pushfl ; "
6590 "pushfl ; "
6591 "popl %0 ; "
6592 "movl %0,%1 ; "
6593@@ -115,7 +115,7 @@ static void get_flags(void)
6594 set_bit(X86_FEATURE_FPU, cpu.flags);
6595
6596 if (has_eflag(X86_EFLAGS_ID)) {
6597- asm("cpuid"
6598+ asm volatile("cpuid"
6599 : "=a" (max_intel_level),
6600 "=b" (cpu_vendor[0]),
6601 "=d" (cpu_vendor[1]),
6602@@ -124,7 +124,7 @@ static void get_flags(void)
6603
6604 if (max_intel_level >= 0x00000001 &&
6605 max_intel_level <= 0x0000ffff) {
6606- asm("cpuid"
6607+ asm volatile("cpuid"
6608 : "=a" (tfms),
6609 "=c" (cpu.flags[4]),
6610 "=d" (cpu.flags[0])
6611@@ -136,7 +136,7 @@ static void get_flags(void)
6612 cpu.model += ((tfms >> 16) & 0xf) << 4;
6613 }
6614
6615- asm("cpuid"
6616+ asm volatile("cpuid"
6617 : "=a" (max_amd_level)
6618 : "a" (0x80000000)
6619 : "ebx", "ecx", "edx");
6620@@ -144,7 +144,7 @@ static void get_flags(void)
6621 if (max_amd_level >= 0x80000001 &&
6622 max_amd_level <= 0x8000ffff) {
6623 u32 eax = 0x80000001;
6624- asm("cpuid"
6625+ asm volatile("cpuid"
6626 : "+a" (eax),
6627 "=c" (cpu.flags[6]),
6628 "=d" (cpu.flags[1])
6629@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6630 u32 ecx = MSR_K7_HWCR;
6631 u32 eax, edx;
6632
6633- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6634+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6635 eax &= ~(1 << 15);
6636- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6637+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6638
6639 get_flags(); /* Make sure it really did something */
6640 err = check_flags();
6641@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6642 u32 ecx = MSR_VIA_FCR;
6643 u32 eax, edx;
6644
6645- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6646+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6647 eax |= (1<<1)|(1<<7);
6648- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6649+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6650
6651 set_bit(X86_FEATURE_CX8, cpu.flags);
6652 err = check_flags();
6653@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6654 u32 eax, edx;
6655 u32 level = 1;
6656
6657- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6658- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6659- asm("cpuid"
6660+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6661+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6662+ asm volatile("cpuid"
6663 : "+a" (level), "=d" (cpu.flags[0])
6664 : : "ecx", "ebx");
6665- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6666+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6667
6668 err = check_flags();
6669 }
6670diff -urNp linux-2.6.32.45/arch/x86/boot/header.S linux-2.6.32.45/arch/x86/boot/header.S
6671--- linux-2.6.32.45/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6672+++ linux-2.6.32.45/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6673@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6674 # single linked list of
6675 # struct setup_data
6676
6677-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6678+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6679
6680 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6681 #define VO_INIT_SIZE (VO__end - VO__text)
6682diff -urNp linux-2.6.32.45/arch/x86/boot/Makefile linux-2.6.32.45/arch/x86/boot/Makefile
6683--- linux-2.6.32.45/arch/x86/boot/Makefile 2011-03-27 14:31:47.000000000 -0400
6684+++ linux-2.6.32.45/arch/x86/boot/Makefile 2011-08-07 14:38:13.000000000 -0400
6685@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
6686 $(call cc-option, -fno-stack-protector) \
6687 $(call cc-option, -mpreferred-stack-boundary=2)
6688 KBUILD_CFLAGS += $(call cc-option, -m32)
6689+ifdef CONSTIFY_PLUGIN
6690+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6691+endif
6692 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6693 GCOV_PROFILE := n
6694
6695diff -urNp linux-2.6.32.45/arch/x86/boot/memory.c linux-2.6.32.45/arch/x86/boot/memory.c
6696--- linux-2.6.32.45/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6697+++ linux-2.6.32.45/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6698@@ -19,7 +19,7 @@
6699
6700 static int detect_memory_e820(void)
6701 {
6702- int count = 0;
6703+ unsigned int count = 0;
6704 struct biosregs ireg, oreg;
6705 struct e820entry *desc = boot_params.e820_map;
6706 static struct e820entry buf; /* static so it is zeroed */
6707diff -urNp linux-2.6.32.45/arch/x86/boot/video.c linux-2.6.32.45/arch/x86/boot/video.c
6708--- linux-2.6.32.45/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6709+++ linux-2.6.32.45/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6710@@ -90,7 +90,7 @@ static void store_mode_params(void)
6711 static unsigned int get_entry(void)
6712 {
6713 char entry_buf[4];
6714- int i, len = 0;
6715+ unsigned int i, len = 0;
6716 int key;
6717 unsigned int v;
6718
6719diff -urNp linux-2.6.32.45/arch/x86/boot/video-vesa.c linux-2.6.32.45/arch/x86/boot/video-vesa.c
6720--- linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6721+++ linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6722@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6723
6724 boot_params.screen_info.vesapm_seg = oreg.es;
6725 boot_params.screen_info.vesapm_off = oreg.di;
6726+ boot_params.screen_info.vesapm_size = oreg.cx;
6727 }
6728
6729 /*
6730diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_aout.c linux-2.6.32.45/arch/x86/ia32/ia32_aout.c
6731--- linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6732+++ linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6733@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6734 unsigned long dump_start, dump_size;
6735 struct user32 dump;
6736
6737+ memset(&dump, 0, sizeof(dump));
6738+
6739 fs = get_fs();
6740 set_fs(KERNEL_DS);
6741 has_dumped = 1;
6742@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6743 dump_size = dump.u_ssize << PAGE_SHIFT;
6744 DUMP_WRITE(dump_start, dump_size);
6745 }
6746- /*
6747- * Finally dump the task struct. Not be used by gdb, but
6748- * could be useful
6749- */
6750- set_fs(KERNEL_DS);
6751- DUMP_WRITE(current, sizeof(*current));
6752 end_coredump:
6753 set_fs(fs);
6754 return has_dumped;
6755diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32entry.S linux-2.6.32.45/arch/x86/ia32/ia32entry.S
6756--- linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6757+++ linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-08-25 17:42:18.000000000 -0400
6758@@ -13,6 +13,7 @@
6759 #include <asm/thread_info.h>
6760 #include <asm/segment.h>
6761 #include <asm/irqflags.h>
6762+#include <asm/pgtable.h>
6763 #include <linux/linkage.h>
6764
6765 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6766@@ -93,6 +94,29 @@ ENTRY(native_irq_enable_sysexit)
6767 ENDPROC(native_irq_enable_sysexit)
6768 #endif
6769
6770+ .macro pax_enter_kernel_user
6771+#ifdef CONFIG_PAX_MEMORY_UDEREF
6772+ call pax_enter_kernel_user
6773+#endif
6774+ .endm
6775+
6776+ .macro pax_exit_kernel_user
6777+#ifdef CONFIG_PAX_MEMORY_UDEREF
6778+ call pax_exit_kernel_user
6779+#endif
6780+#ifdef CONFIG_PAX_RANDKSTACK
6781+ pushq %rax
6782+ call pax_randomize_kstack
6783+ popq %rax
6784+#endif
6785+ .endm
6786+
6787+.macro pax_erase_kstack
6788+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6789+ call pax_erase_kstack
6790+#endif
6791+.endm
6792+
6793 /*
6794 * 32bit SYSENTER instruction entry.
6795 *
6796@@ -119,7 +143,7 @@ ENTRY(ia32_sysenter_target)
6797 CFI_REGISTER rsp,rbp
6798 SWAPGS_UNSAFE_STACK
6799 movq PER_CPU_VAR(kernel_stack), %rsp
6800- addq $(KERNEL_STACK_OFFSET),%rsp
6801+ pax_enter_kernel_user
6802 /*
6803 * No need to follow this irqs on/off section: the syscall
6804 * disabled irqs, here we enable it straight after entry:
6805@@ -135,7 +159,8 @@ ENTRY(ia32_sysenter_target)
6806 pushfq
6807 CFI_ADJUST_CFA_OFFSET 8
6808 /*CFI_REL_OFFSET rflags,0*/
6809- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6810+ GET_THREAD_INFO(%r10)
6811+ movl TI_sysenter_return(%r10), %r10d
6812 CFI_REGISTER rip,r10
6813 pushq $__USER32_CS
6814 CFI_ADJUST_CFA_OFFSET 8
6815@@ -150,6 +175,12 @@ ENTRY(ia32_sysenter_target)
6816 SAVE_ARGS 0,0,1
6817 /* no need to do an access_ok check here because rbp has been
6818 32bit zero extended */
6819+
6820+#ifdef CONFIG_PAX_MEMORY_UDEREF
6821+ mov $PAX_USER_SHADOW_BASE,%r10
6822+ add %r10,%rbp
6823+#endif
6824+
6825 1: movl (%rbp),%ebp
6826 .section __ex_table,"a"
6827 .quad 1b,ia32_badarg
6828@@ -172,6 +203,8 @@ sysenter_dispatch:
6829 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6830 jnz sysexit_audit
6831 sysexit_from_sys_call:
6832+ pax_exit_kernel_user
6833+ pax_erase_kstack
6834 andl $~TS_COMPAT,TI_status(%r10)
6835 /* clear IF, that popfq doesn't enable interrupts early */
6836 andl $~0x200,EFLAGS-R11(%rsp)
6837@@ -200,6 +233,9 @@ sysexit_from_sys_call:
6838 movl %eax,%esi /* 2nd arg: syscall number */
6839 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6840 call audit_syscall_entry
6841+
6842+ pax_erase_kstack
6843+
6844 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6845 cmpq $(IA32_NR_syscalls-1),%rax
6846 ja ia32_badsys
6847@@ -252,6 +288,9 @@ sysenter_tracesys:
6848 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6849 movq %rsp,%rdi /* &pt_regs -> arg1 */
6850 call syscall_trace_enter
6851+
6852+ pax_erase_kstack
6853+
6854 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6855 RESTORE_REST
6856 cmpq $(IA32_NR_syscalls-1),%rax
6857@@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6858 ENTRY(ia32_cstar_target)
6859 CFI_STARTPROC32 simple
6860 CFI_SIGNAL_FRAME
6861- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6862+ CFI_DEF_CFA rsp,0
6863 CFI_REGISTER rip,rcx
6864 /*CFI_REGISTER rflags,r11*/
6865 SWAPGS_UNSAFE_STACK
6866 movl %esp,%r8d
6867 CFI_REGISTER rsp,r8
6868 movq PER_CPU_VAR(kernel_stack),%rsp
6869+
6870+#ifdef CONFIG_PAX_MEMORY_UDEREF
6871+ pax_enter_kernel_user
6872+#endif
6873+
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879- SAVE_ARGS 8,1,1
6880+ SAVE_ARGS 8*6,1,1
6881 movl %eax,%eax /* zero extension */
6882 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6883 movq %rcx,RIP-ARGOFFSET(%rsp)
6884@@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6885 /* no need to do an access_ok check here because r8 has been
6886 32bit zero extended */
6887 /* hardware stack frame is complete now */
6888+
6889+#ifdef CONFIG_PAX_MEMORY_UDEREF
6890+ mov $PAX_USER_SHADOW_BASE,%r10
6891+ add %r10,%r8
6892+#endif
6893+
6894 1: movl (%r8),%r9d
6895 .section __ex_table,"a"
6896 .quad 1b,ia32_badarg
6897@@ -333,6 +383,8 @@ cstar_dispatch:
6898 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6899 jnz sysretl_audit
6900 sysretl_from_sys_call:
6901+ pax_exit_kernel_user
6902+ pax_erase_kstack
6903 andl $~TS_COMPAT,TI_status(%r10)
6904 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6905 movl RIP-ARGOFFSET(%rsp),%ecx
6906@@ -370,6 +422,9 @@ cstar_tracesys:
6907 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6908 movq %rsp,%rdi /* &pt_regs -> arg1 */
6909 call syscall_trace_enter
6910+
6911+ pax_erase_kstack
6912+
6913 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6914 RESTORE_REST
6915 xchgl %ebp,%r9d
6916@@ -415,6 +470,7 @@ ENTRY(ia32_syscall)
6917 CFI_REL_OFFSET rip,RIP-RIP
6918 PARAVIRT_ADJUST_EXCEPTION_FRAME
6919 SWAPGS
6920+ pax_enter_kernel_user
6921 /*
6922 * No need to follow this irqs on/off section: the syscall
6923 * disabled irqs and here we enable it straight after entry:
6924@@ -448,6 +504,9 @@ ia32_tracesys:
6925 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6926 movq %rsp,%rdi /* &pt_regs -> arg1 */
6927 call syscall_trace_enter
6928+
6929+ pax_erase_kstack
6930+
6931 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6932 RESTORE_REST
6933 cmpq $(IA32_NR_syscalls-1),%rax
6934diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_signal.c linux-2.6.32.45/arch/x86/ia32/ia32_signal.c
6935--- linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6936+++ linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6937@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6938 sp -= frame_size;
6939 /* Align the stack pointer according to the i386 ABI,
6940 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6941- sp = ((sp + 4) & -16ul) - 4;
6942+ sp = ((sp - 12) & -16ul) - 4;
6943 return (void __user *) sp;
6944 }
6945
6946@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6947 * These are actually not used anymore, but left because some
6948 * gdb versions depend on them as a marker.
6949 */
6950- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6951+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6952 } put_user_catch(err);
6953
6954 if (err)
6955@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6956 0xb8,
6957 __NR_ia32_rt_sigreturn,
6958 0x80cd,
6959- 0,
6960+ 0
6961 };
6962
6963 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6964@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6965
6966 if (ka->sa.sa_flags & SA_RESTORER)
6967 restorer = ka->sa.sa_restorer;
6968+ else if (current->mm->context.vdso)
6969+ /* Return stub is in 32bit vsyscall page */
6970+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6971 else
6972- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6973- rt_sigreturn);
6974+ restorer = &frame->retcode;
6975 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6976
6977 /*
6978 * Not actually used anymore, but left because some gdb
6979 * versions need it.
6980 */
6981- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6982+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6983 } put_user_catch(err);
6984
6985 if (err)
6986diff -urNp linux-2.6.32.45/arch/x86/include/asm/alternative.h linux-2.6.32.45/arch/x86/include/asm/alternative.h
6987--- linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6988+++ linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6989@@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6990 " .byte 662b-661b\n" /* sourcelen */ \
6991 " .byte 664f-663f\n" /* replacementlen */ \
6992 ".previous\n" \
6993- ".section .altinstr_replacement, \"ax\"\n" \
6994+ ".section .altinstr_replacement, \"a\"\n" \
6995 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6996 ".previous"
6997
6998diff -urNp linux-2.6.32.45/arch/x86/include/asm/apic.h linux-2.6.32.45/arch/x86/include/asm/apic.h
6999--- linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-03-27 14:31:47.000000000 -0400
7000+++ linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-08-17 20:01:15.000000000 -0400
7001@@ -46,7 +46,7 @@ static inline void generic_apic_probe(vo
7002
7003 #ifdef CONFIG_X86_LOCAL_APIC
7004
7005-extern unsigned int apic_verbosity;
7006+extern int apic_verbosity;
7007 extern int local_apic_timer_c2_ok;
7008
7009 extern int disable_apic;
7010diff -urNp linux-2.6.32.45/arch/x86/include/asm/apm.h linux-2.6.32.45/arch/x86/include/asm/apm.h
7011--- linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
7012+++ linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
7013@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
7014 __asm__ __volatile__(APM_DO_ZERO_SEGS
7015 "pushl %%edi\n\t"
7016 "pushl %%ebp\n\t"
7017- "lcall *%%cs:apm_bios_entry\n\t"
7018+ "lcall *%%ss:apm_bios_entry\n\t"
7019 "setc %%al\n\t"
7020 "popl %%ebp\n\t"
7021 "popl %%edi\n\t"
7022@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
7023 __asm__ __volatile__(APM_DO_ZERO_SEGS
7024 "pushl %%edi\n\t"
7025 "pushl %%ebp\n\t"
7026- "lcall *%%cs:apm_bios_entry\n\t"
7027+ "lcall *%%ss:apm_bios_entry\n\t"
7028 "setc %%bl\n\t"
7029 "popl %%ebp\n\t"
7030 "popl %%edi\n\t"
7031diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_32.h linux-2.6.32.45/arch/x86/include/asm/atomic_32.h
7032--- linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
7033+++ linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
7034@@ -25,6 +25,17 @@ static inline int atomic_read(const atom
7035 }
7036
7037 /**
7038+ * atomic_read_unchecked - read atomic variable
7039+ * @v: pointer of type atomic_unchecked_t
7040+ *
7041+ * Atomically reads the value of @v.
7042+ */
7043+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7044+{
7045+ return v->counter;
7046+}
7047+
7048+/**
7049 * atomic_set - set atomic variable
7050 * @v: pointer of type atomic_t
7051 * @i: required value
7052@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
7053 }
7054
7055 /**
7056+ * atomic_set_unchecked - set atomic variable
7057+ * @v: pointer of type atomic_unchecked_t
7058+ * @i: required value
7059+ *
7060+ * Atomically sets the value of @v to @i.
7061+ */
7062+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7063+{
7064+ v->counter = i;
7065+}
7066+
7067+/**
7068 * atomic_add - add integer to atomic variable
7069 * @i: integer value to add
7070 * @v: pointer of type atomic_t
7071@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
7072 */
7073 static inline void atomic_add(int i, atomic_t *v)
7074 {
7075- asm volatile(LOCK_PREFIX "addl %1,%0"
7076+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7077+
7078+#ifdef CONFIG_PAX_REFCOUNT
7079+ "jno 0f\n"
7080+ LOCK_PREFIX "subl %1,%0\n"
7081+ "int $4\n0:\n"
7082+ _ASM_EXTABLE(0b, 0b)
7083+#endif
7084+
7085+ : "+m" (v->counter)
7086+ : "ir" (i));
7087+}
7088+
7089+/**
7090+ * atomic_add_unchecked - add integer to atomic variable
7091+ * @i: integer value to add
7092+ * @v: pointer of type atomic_unchecked_t
7093+ *
7094+ * Atomically adds @i to @v.
7095+ */
7096+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7097+{
7098+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7099 : "+m" (v->counter)
7100 : "ir" (i));
7101 }
7102@@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
7103 */
7104 static inline void atomic_sub(int i, atomic_t *v)
7105 {
7106- asm volatile(LOCK_PREFIX "subl %1,%0"
7107+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7108+
7109+#ifdef CONFIG_PAX_REFCOUNT
7110+ "jno 0f\n"
7111+ LOCK_PREFIX "addl %1,%0\n"
7112+ "int $4\n0:\n"
7113+ _ASM_EXTABLE(0b, 0b)
7114+#endif
7115+
7116+ : "+m" (v->counter)
7117+ : "ir" (i));
7118+}
7119+
7120+/**
7121+ * atomic_sub_unchecked - subtract integer from atomic variable
7122+ * @i: integer value to subtract
7123+ * @v: pointer of type atomic_unchecked_t
7124+ *
7125+ * Atomically subtracts @i from @v.
7126+ */
7127+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7128+{
7129+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7130 : "+m" (v->counter)
7131 : "ir" (i));
7132 }
7133@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
7134 {
7135 unsigned char c;
7136
7137- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7138+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7139+
7140+#ifdef CONFIG_PAX_REFCOUNT
7141+ "jno 0f\n"
7142+ LOCK_PREFIX "addl %2,%0\n"
7143+ "int $4\n0:\n"
7144+ _ASM_EXTABLE(0b, 0b)
7145+#endif
7146+
7147+ "sete %1\n"
7148 : "+m" (v->counter), "=qm" (c)
7149 : "ir" (i) : "memory");
7150 return c;
7151@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
7152 */
7153 static inline void atomic_inc(atomic_t *v)
7154 {
7155- asm volatile(LOCK_PREFIX "incl %0"
7156+ asm volatile(LOCK_PREFIX "incl %0\n"
7157+
7158+#ifdef CONFIG_PAX_REFCOUNT
7159+ "jno 0f\n"
7160+ LOCK_PREFIX "decl %0\n"
7161+ "int $4\n0:\n"
7162+ _ASM_EXTABLE(0b, 0b)
7163+#endif
7164+
7165+ : "+m" (v->counter));
7166+}
7167+
7168+/**
7169+ * atomic_inc_unchecked - increment atomic variable
7170+ * @v: pointer of type atomic_unchecked_t
7171+ *
7172+ * Atomically increments @v by 1.
7173+ */
7174+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7175+{
7176+ asm volatile(LOCK_PREFIX "incl %0\n"
7177 : "+m" (v->counter));
7178 }
7179
7180@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7181 */
7182 static inline void atomic_dec(atomic_t *v)
7183 {
7184- asm volatile(LOCK_PREFIX "decl %0"
7185+ asm volatile(LOCK_PREFIX "decl %0\n"
7186+
7187+#ifdef CONFIG_PAX_REFCOUNT
7188+ "jno 0f\n"
7189+ LOCK_PREFIX "incl %0\n"
7190+ "int $4\n0:\n"
7191+ _ASM_EXTABLE(0b, 0b)
7192+#endif
7193+
7194+ : "+m" (v->counter));
7195+}
7196+
7197+/**
7198+ * atomic_dec_unchecked - decrement atomic variable
7199+ * @v: pointer of type atomic_unchecked_t
7200+ *
7201+ * Atomically decrements @v by 1.
7202+ */
7203+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7204+{
7205+ asm volatile(LOCK_PREFIX "decl %0\n"
7206 : "+m" (v->counter));
7207 }
7208
7209@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7210 {
7211 unsigned char c;
7212
7213- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7214+ asm volatile(LOCK_PREFIX "decl %0\n"
7215+
7216+#ifdef CONFIG_PAX_REFCOUNT
7217+ "jno 0f\n"
7218+ LOCK_PREFIX "incl %0\n"
7219+ "int $4\n0:\n"
7220+ _ASM_EXTABLE(0b, 0b)
7221+#endif
7222+
7223+ "sete %1\n"
7224 : "+m" (v->counter), "=qm" (c)
7225 : : "memory");
7226 return c != 0;
7227@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7228 {
7229 unsigned char c;
7230
7231- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7232+ asm volatile(LOCK_PREFIX "incl %0\n"
7233+
7234+#ifdef CONFIG_PAX_REFCOUNT
7235+ "jno 0f\n"
7236+ LOCK_PREFIX "decl %0\n"
7237+ "into\n0:\n"
7238+ _ASM_EXTABLE(0b, 0b)
7239+#endif
7240+
7241+ "sete %1\n"
7242+ : "+m" (v->counter), "=qm" (c)
7243+ : : "memory");
7244+ return c != 0;
7245+}
7246+
7247+/**
7248+ * atomic_inc_and_test_unchecked - increment and test
7249+ * @v: pointer of type atomic_unchecked_t
7250+ *
7251+ * Atomically increments @v by 1
7252+ * and returns true if the result is zero, or false for all
7253+ * other cases.
7254+ */
7255+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7256+{
7257+ unsigned char c;
7258+
7259+ asm volatile(LOCK_PREFIX "incl %0\n"
7260+ "sete %1\n"
7261 : "+m" (v->counter), "=qm" (c)
7262 : : "memory");
7263 return c != 0;
7264@@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7265 {
7266 unsigned char c;
7267
7268- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7269+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7270+
7271+#ifdef CONFIG_PAX_REFCOUNT
7272+ "jno 0f\n"
7273+ LOCK_PREFIX "subl %2,%0\n"
7274+ "int $4\n0:\n"
7275+ _ASM_EXTABLE(0b, 0b)
7276+#endif
7277+
7278+ "sets %1\n"
7279 : "+m" (v->counter), "=qm" (c)
7280 : "ir" (i) : "memory");
7281 return c;
7282@@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7283 #endif
7284 /* Modern 486+ processor */
7285 __i = i;
7286+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7287+
7288+#ifdef CONFIG_PAX_REFCOUNT
7289+ "jno 0f\n"
7290+ "movl %0, %1\n"
7291+ "int $4\n0:\n"
7292+ _ASM_EXTABLE(0b, 0b)
7293+#endif
7294+
7295+ : "+r" (i), "+m" (v->counter)
7296+ : : "memory");
7297+ return i + __i;
7298+
7299+#ifdef CONFIG_M386
7300+no_xadd: /* Legacy 386 processor */
7301+ local_irq_save(flags);
7302+ __i = atomic_read(v);
7303+ atomic_set(v, i + __i);
7304+ local_irq_restore(flags);
7305+ return i + __i;
7306+#endif
7307+}
7308+
7309+/**
7310+ * atomic_add_return_unchecked - add integer and return
7311+ * @v: pointer of type atomic_unchecked_t
7312+ * @i: integer value to add
7313+ *
7314+ * Atomically adds @i to @v and returns @i + @v
7315+ */
7316+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7317+{
7318+ int __i;
7319+#ifdef CONFIG_M386
7320+ unsigned long flags;
7321+ if (unlikely(boot_cpu_data.x86 <= 3))
7322+ goto no_xadd;
7323+#endif
7324+ /* Modern 486+ processor */
7325+ __i = i;
7326 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7327 : "+r" (i), "+m" (v->counter)
7328 : : "memory");
7329@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7330 return cmpxchg(&v->counter, old, new);
7331 }
7332
7333+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7334+{
7335+ return cmpxchg(&v->counter, old, new);
7336+}
7337+
7338 static inline int atomic_xchg(atomic_t *v, int new)
7339 {
7340 return xchg(&v->counter, new);
7341 }
7342
7343+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7344+{
7345+ return xchg(&v->counter, new);
7346+}
7347+
7348 /**
7349 * atomic_add_unless - add unless the number is already a given value
7350 * @v: pointer of type atomic_t
7351@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7352 */
7353 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7354 {
7355- int c, old;
7356+ int c, old, new;
7357 c = atomic_read(v);
7358 for (;;) {
7359- if (unlikely(c == (u)))
7360+ if (unlikely(c == u))
7361 break;
7362- old = atomic_cmpxchg((v), c, c + (a));
7363+
7364+ asm volatile("addl %2,%0\n"
7365+
7366+#ifdef CONFIG_PAX_REFCOUNT
7367+ "jno 0f\n"
7368+ "subl %2,%0\n"
7369+ "int $4\n0:\n"
7370+ _ASM_EXTABLE(0b, 0b)
7371+#endif
7372+
7373+ : "=r" (new)
7374+ : "0" (c), "ir" (a));
7375+
7376+ old = atomic_cmpxchg(v, c, new);
7377 if (likely(old == c))
7378 break;
7379 c = old;
7380 }
7381- return c != (u);
7382+ return c != u;
7383 }
7384
7385 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7386
7387 #define atomic_inc_return(v) (atomic_add_return(1, v))
7388+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7389+{
7390+ return atomic_add_return_unchecked(1, v);
7391+}
7392 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7393
7394 /* These are x86-specific, used by some header files */
7395@@ -266,9 +495,18 @@ typedef struct {
7396 u64 __aligned(8) counter;
7397 } atomic64_t;
7398
7399+#ifdef CONFIG_PAX_REFCOUNT
7400+typedef struct {
7401+ u64 __aligned(8) counter;
7402+} atomic64_unchecked_t;
7403+#else
7404+typedef atomic64_t atomic64_unchecked_t;
7405+#endif
7406+
7407 #define ATOMIC64_INIT(val) { (val) }
7408
7409 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7410+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7411
7412 /**
7413 * atomic64_xchg - xchg atomic64 variable
7414@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7415 * the old value.
7416 */
7417 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7418+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7419
7420 /**
7421 * atomic64_set - set atomic64 variable
7422@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7423 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7424
7425 /**
7426+ * atomic64_unchecked_set - set atomic64 variable
7427+ * @ptr: pointer to type atomic64_unchecked_t
7428+ * @new_val: value to assign
7429+ *
7430+ * Atomically sets the value of @ptr to @new_val.
7431+ */
7432+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7433+
7434+/**
7435 * atomic64_read - read atomic64 variable
7436 * @ptr: pointer to type atomic64_t
7437 *
7438@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7439 return res;
7440 }
7441
7442-extern u64 atomic64_read(atomic64_t *ptr);
7443+/**
7444+ * atomic64_read_unchecked - read atomic64 variable
7445+ * @ptr: pointer to type atomic64_unchecked_t
7446+ *
7447+ * Atomically reads the value of @ptr and returns it.
7448+ */
7449+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7450+{
7451+ u64 res;
7452+
7453+ /*
7454+ * Note, we inline this atomic64_unchecked_t primitive because
7455+ * it only clobbers EAX/EDX and leaves the others
7456+ * untouched. We also (somewhat subtly) rely on the
7457+ * fact that cmpxchg8b returns the current 64-bit value
7458+ * of the memory location we are touching:
7459+ */
7460+ asm volatile(
7461+ "mov %%ebx, %%eax\n\t"
7462+ "mov %%ecx, %%edx\n\t"
7463+ LOCK_PREFIX "cmpxchg8b %1\n"
7464+ : "=&A" (res)
7465+ : "m" (*ptr)
7466+ );
7467+
7468+ return res;
7469+}
7470
7471 /**
7472 * atomic64_add_return - add and return
7473@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7474 * Other variants with different arithmetic operators:
7475 */
7476 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7477+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7478 extern u64 atomic64_inc_return(atomic64_t *ptr);
7479+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7480 extern u64 atomic64_dec_return(atomic64_t *ptr);
7481+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7482
7483 /**
7484 * atomic64_add - add integer to atomic64 variable
7485@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7486 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7487
7488 /**
7489+ * atomic64_add_unchecked - add integer to atomic64 variable
7490+ * @delta: integer value to add
7491+ * @ptr: pointer to type atomic64_unchecked_t
7492+ *
7493+ * Atomically adds @delta to @ptr.
7494+ */
7495+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7496+
7497+/**
7498 * atomic64_sub - subtract the atomic64 variable
7499 * @delta: integer value to subtract
7500 * @ptr: pointer to type atomic64_t
7501@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7502 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7503
7504 /**
7505+ * atomic64_sub_unchecked - subtract the atomic64 variable
7506+ * @delta: integer value to subtract
7507+ * @ptr: pointer to type atomic64_unchecked_t
7508+ *
7509+ * Atomically subtracts @delta from @ptr.
7510+ */
7511+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7512+
7513+/**
7514 * atomic64_sub_and_test - subtract value from variable and test result
7515 * @delta: integer value to subtract
7516 * @ptr: pointer to type atomic64_t
7517@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7518 extern void atomic64_inc(atomic64_t *ptr);
7519
7520 /**
7521+ * atomic64_inc_unchecked - increment atomic64 variable
7522+ * @ptr: pointer to type atomic64_unchecked_t
7523+ *
7524+ * Atomically increments @ptr by 1.
7525+ */
7526+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7527+
7528+/**
7529 * atomic64_dec - decrement atomic64 variable
7530 * @ptr: pointer to type atomic64_t
7531 *
7532@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7533 extern void atomic64_dec(atomic64_t *ptr);
7534
7535 /**
7536+ * atomic64_dec_unchecked - decrement atomic64 variable
7537+ * @ptr: pointer to type atomic64_unchecked_t
7538+ *
7539+ * Atomically decrements @ptr by 1.
7540+ */
7541+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7542+
7543+/**
7544 * atomic64_dec_and_test - decrement and test
7545 * @ptr: pointer to type atomic64_t
7546 *
7547diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_64.h linux-2.6.32.45/arch/x86/include/asm/atomic_64.h
7548--- linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7549+++ linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7550@@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7551 }
7552
7553 /**
7554+ * atomic_read_unchecked - read atomic variable
7555+ * @v: pointer of type atomic_unchecked_t
7556+ *
7557+ * Atomically reads the value of @v.
7558+ */
7559+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7560+{
7561+ return v->counter;
7562+}
7563+
7564+/**
7565 * atomic_set - set atomic variable
7566 * @v: pointer of type atomic_t
7567 * @i: required value
7568@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7569 }
7570
7571 /**
7572+ * atomic_set_unchecked - set atomic variable
7573+ * @v: pointer of type atomic_unchecked_t
7574+ * @i: required value
7575+ *
7576+ * Atomically sets the value of @v to @i.
7577+ */
7578+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7579+{
7580+ v->counter = i;
7581+}
7582+
7583+/**
7584 * atomic_add - add integer to atomic variable
7585 * @i: integer value to add
7586 * @v: pointer of type atomic_t
7587@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7588 */
7589 static inline void atomic_add(int i, atomic_t *v)
7590 {
7591- asm volatile(LOCK_PREFIX "addl %1,%0"
7592+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7593+
7594+#ifdef CONFIG_PAX_REFCOUNT
7595+ "jno 0f\n"
7596+ LOCK_PREFIX "subl %1,%0\n"
7597+ "int $4\n0:\n"
7598+ _ASM_EXTABLE(0b, 0b)
7599+#endif
7600+
7601+ : "=m" (v->counter)
7602+ : "ir" (i), "m" (v->counter));
7603+}
7604+
7605+/**
7606+ * atomic_add_unchecked - add integer to atomic variable
7607+ * @i: integer value to add
7608+ * @v: pointer of type atomic_unchecked_t
7609+ *
7610+ * Atomically adds @i to @v.
7611+ */
7612+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7613+{
7614+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7615 : "=m" (v->counter)
7616 : "ir" (i), "m" (v->counter));
7617 }
7618@@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7619 */
7620 static inline void atomic_sub(int i, atomic_t *v)
7621 {
7622- asm volatile(LOCK_PREFIX "subl %1,%0"
7623+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7624+
7625+#ifdef CONFIG_PAX_REFCOUNT
7626+ "jno 0f\n"
7627+ LOCK_PREFIX "addl %1,%0\n"
7628+ "int $4\n0:\n"
7629+ _ASM_EXTABLE(0b, 0b)
7630+#endif
7631+
7632+ : "=m" (v->counter)
7633+ : "ir" (i), "m" (v->counter));
7634+}
7635+
7636+/**
7637+ * atomic_sub_unchecked - subtract the atomic variable
7638+ * @i: integer value to subtract
7639+ * @v: pointer of type atomic_unchecked_t
7640+ *
7641+ * Atomically subtracts @i from @v.
7642+ */
7643+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7644+{
7645+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7646 : "=m" (v->counter)
7647 : "ir" (i), "m" (v->counter));
7648 }
7649@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7650 {
7651 unsigned char c;
7652
7653- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7654+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7655+
7656+#ifdef CONFIG_PAX_REFCOUNT
7657+ "jno 0f\n"
7658+ LOCK_PREFIX "addl %2,%0\n"
7659+ "int $4\n0:\n"
7660+ _ASM_EXTABLE(0b, 0b)
7661+#endif
7662+
7663+ "sete %1\n"
7664 : "=m" (v->counter), "=qm" (c)
7665 : "ir" (i), "m" (v->counter) : "memory");
7666 return c;
7667@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7668 */
7669 static inline void atomic_inc(atomic_t *v)
7670 {
7671- asm volatile(LOCK_PREFIX "incl %0"
7672+ asm volatile(LOCK_PREFIX "incl %0\n"
7673+
7674+#ifdef CONFIG_PAX_REFCOUNT
7675+ "jno 0f\n"
7676+ LOCK_PREFIX "decl %0\n"
7677+ "int $4\n0:\n"
7678+ _ASM_EXTABLE(0b, 0b)
7679+#endif
7680+
7681+ : "=m" (v->counter)
7682+ : "m" (v->counter));
7683+}
7684+
7685+/**
7686+ * atomic_inc_unchecked - increment atomic variable
7687+ * @v: pointer of type atomic_unchecked_t
7688+ *
7689+ * Atomically increments @v by 1.
7690+ */
7691+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7692+{
7693+ asm volatile(LOCK_PREFIX "incl %0\n"
7694 : "=m" (v->counter)
7695 : "m" (v->counter));
7696 }
7697@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7698 */
7699 static inline void atomic_dec(atomic_t *v)
7700 {
7701- asm volatile(LOCK_PREFIX "decl %0"
7702+ asm volatile(LOCK_PREFIX "decl %0\n"
7703+
7704+#ifdef CONFIG_PAX_REFCOUNT
7705+ "jno 0f\n"
7706+ LOCK_PREFIX "incl %0\n"
7707+ "int $4\n0:\n"
7708+ _ASM_EXTABLE(0b, 0b)
7709+#endif
7710+
7711+ : "=m" (v->counter)
7712+ : "m" (v->counter));
7713+}
7714+
7715+/**
7716+ * atomic_dec_unchecked - decrement atomic variable
7717+ * @v: pointer of type atomic_unchecked_t
7718+ *
7719+ * Atomically decrements @v by 1.
7720+ */
7721+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7722+{
7723+ asm volatile(LOCK_PREFIX "decl %0\n"
7724 : "=m" (v->counter)
7725 : "m" (v->counter));
7726 }
7727@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7728 {
7729 unsigned char c;
7730
7731- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7732+ asm volatile(LOCK_PREFIX "decl %0\n"
7733+
7734+#ifdef CONFIG_PAX_REFCOUNT
7735+ "jno 0f\n"
7736+ LOCK_PREFIX "incl %0\n"
7737+ "int $4\n0:\n"
7738+ _ASM_EXTABLE(0b, 0b)
7739+#endif
7740+
7741+ "sete %1\n"
7742 : "=m" (v->counter), "=qm" (c)
7743 : "m" (v->counter) : "memory");
7744 return c != 0;
7745@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7746 {
7747 unsigned char c;
7748
7749- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7750+ asm volatile(LOCK_PREFIX "incl %0\n"
7751+
7752+#ifdef CONFIG_PAX_REFCOUNT
7753+ "jno 0f\n"
7754+ LOCK_PREFIX "decl %0\n"
7755+ "int $4\n0:\n"
7756+ _ASM_EXTABLE(0b, 0b)
7757+#endif
7758+
7759+ "sete %1\n"
7760+ : "=m" (v->counter), "=qm" (c)
7761+ : "m" (v->counter) : "memory");
7762+ return c != 0;
7763+}
7764+
7765+/**
7766+ * atomic_inc_and_test_unchecked - increment and test
7767+ * @v: pointer of type atomic_unchecked_t
7768+ *
7769+ * Atomically increments @v by 1
7770+ * and returns true if the result is zero, or false for all
7771+ * other cases.
7772+ */
7773+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7774+{
7775+ unsigned char c;
7776+
7777+ asm volatile(LOCK_PREFIX "incl %0\n"
7778+ "sete %1\n"
7779 : "=m" (v->counter), "=qm" (c)
7780 : "m" (v->counter) : "memory");
7781 return c != 0;
7782@@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7783 {
7784 unsigned char c;
7785
7786- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7787+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7788+
7789+#ifdef CONFIG_PAX_REFCOUNT
7790+ "jno 0f\n"
7791+ LOCK_PREFIX "subl %2,%0\n"
7792+ "int $4\n0:\n"
7793+ _ASM_EXTABLE(0b, 0b)
7794+#endif
7795+
7796+ "sets %1\n"
7797 : "=m" (v->counter), "=qm" (c)
7798 : "ir" (i), "m" (v->counter) : "memory");
7799 return c;
7800@@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7801 static inline int atomic_add_return(int i, atomic_t *v)
7802 {
7803 int __i = i;
7804- asm volatile(LOCK_PREFIX "xaddl %0, %1"
7805+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7806+
7807+#ifdef CONFIG_PAX_REFCOUNT
7808+ "jno 0f\n"
7809+ "movl %0, %1\n"
7810+ "int $4\n0:\n"
7811+ _ASM_EXTABLE(0b, 0b)
7812+#endif
7813+
7814+ : "+r" (i), "+m" (v->counter)
7815+ : : "memory");
7816+ return i + __i;
7817+}
7818+
7819+/**
7820+ * atomic_add_return_unchecked - add and return
7821+ * @i: integer value to add
7822+ * @v: pointer of type atomic_unchecked_t
7823+ *
7824+ * Atomically adds @i to @v and returns @i + @v
7825+ */
7826+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7827+{
7828+ int __i = i;
7829+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7830 : "+r" (i), "+m" (v->counter)
7831 : : "memory");
7832 return i + __i;
7833@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7834 }
7835
7836 #define atomic_inc_return(v) (atomic_add_return(1, v))
7837+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7838+{
7839+ return atomic_add_return_unchecked(1, v);
7840+}
7841 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7842
7843 /* The 64-bit atomic type */
7844@@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7845 }
7846
7847 /**
7848+ * atomic64_read_unchecked - read atomic64 variable
7849+ * @v: pointer of type atomic64_unchecked_t
7850+ *
7851+ * Atomically reads the value of @v.
7852+ * Doesn't imply a read memory barrier.
7853+ */
7854+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7855+{
7856+ return v->counter;
7857+}
7858+
7859+/**
7860 * atomic64_set - set atomic64 variable
7861 * @v: pointer to type atomic64_t
7862 * @i: required value
7863@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7864 }
7865
7866 /**
7867+ * atomic64_set_unchecked - set atomic64 variable
7868+ * @v: pointer to type atomic64_unchecked_t
7869+ * @i: required value
7870+ *
7871+ * Atomically sets the value of @v to @i.
7872+ */
7873+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7874+{
7875+ v->counter = i;
7876+}
7877+
7878+/**
7879 * atomic64_add - add integer to atomic64 variable
7880 * @i: integer value to add
7881 * @v: pointer to type atomic64_t
7882@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7883 */
7884 static inline void atomic64_add(long i, atomic64_t *v)
7885 {
7886+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7887+
7888+#ifdef CONFIG_PAX_REFCOUNT
7889+ "jno 0f\n"
7890+ LOCK_PREFIX "subq %1,%0\n"
7891+ "int $4\n0:\n"
7892+ _ASM_EXTABLE(0b, 0b)
7893+#endif
7894+
7895+ : "=m" (v->counter)
7896+ : "er" (i), "m" (v->counter));
7897+}
7898+
7899+/**
7900+ * atomic64_add_unchecked - add integer to atomic64 variable
7901+ * @i: integer value to add
7902+ * @v: pointer to type atomic64_unchecked_t
7903+ *
7904+ * Atomically adds @i to @v.
7905+ */
7906+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7907+{
7908 asm volatile(LOCK_PREFIX "addq %1,%0"
7909 : "=m" (v->counter)
7910 : "er" (i), "m" (v->counter));
7911@@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7912 */
7913 static inline void atomic64_sub(long i, atomic64_t *v)
7914 {
7915- asm volatile(LOCK_PREFIX "subq %1,%0"
7916+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7917+
7918+#ifdef CONFIG_PAX_REFCOUNT
7919+ "jno 0f\n"
7920+ LOCK_PREFIX "addq %1,%0\n"
7921+ "int $4\n0:\n"
7922+ _ASM_EXTABLE(0b, 0b)
7923+#endif
7924+
7925 : "=m" (v->counter)
7926 : "er" (i), "m" (v->counter));
7927 }
7928@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7929 {
7930 unsigned char c;
7931
7932- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7933+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7934+
7935+#ifdef CONFIG_PAX_REFCOUNT
7936+ "jno 0f\n"
7937+ LOCK_PREFIX "addq %2,%0\n"
7938+ "int $4\n0:\n"
7939+ _ASM_EXTABLE(0b, 0b)
7940+#endif
7941+
7942+ "sete %1\n"
7943 : "=m" (v->counter), "=qm" (c)
7944 : "er" (i), "m" (v->counter) : "memory");
7945 return c;
7946@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7947 */
7948 static inline void atomic64_inc(atomic64_t *v)
7949 {
7950+ asm volatile(LOCK_PREFIX "incq %0\n"
7951+
7952+#ifdef CONFIG_PAX_REFCOUNT
7953+ "jno 0f\n"
7954+ LOCK_PREFIX "decq %0\n"
7955+ "int $4\n0:\n"
7956+ _ASM_EXTABLE(0b, 0b)
7957+#endif
7958+
7959+ : "=m" (v->counter)
7960+ : "m" (v->counter));
7961+}
7962+
7963+/**
7964+ * atomic64_inc_unchecked - increment atomic64 variable
7965+ * @v: pointer to type atomic64_unchecked_t
7966+ *
7967+ * Atomically increments @v by 1.
7968+ */
7969+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7970+{
7971 asm volatile(LOCK_PREFIX "incq %0"
7972 : "=m" (v->counter)
7973 : "m" (v->counter));
7974@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7975 */
7976 static inline void atomic64_dec(atomic64_t *v)
7977 {
7978- asm volatile(LOCK_PREFIX "decq %0"
7979+ asm volatile(LOCK_PREFIX "decq %0\n"
7980+
7981+#ifdef CONFIG_PAX_REFCOUNT
7982+ "jno 0f\n"
7983+ LOCK_PREFIX "incq %0\n"
7984+ "int $4\n0:\n"
7985+ _ASM_EXTABLE(0b, 0b)
7986+#endif
7987+
7988+ : "=m" (v->counter)
7989+ : "m" (v->counter));
7990+}
7991+
7992+/**
7993+ * atomic64_dec_unchecked - decrement atomic64 variable
7994+ * @v: pointer to type atomic64_t
7995+ *
7996+ * Atomically decrements @v by 1.
7997+ */
7998+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7999+{
8000+ asm volatile(LOCK_PREFIX "decq %0\n"
8001 : "=m" (v->counter)
8002 : "m" (v->counter));
8003 }
8004@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
8005 {
8006 unsigned char c;
8007
8008- asm volatile(LOCK_PREFIX "decq %0; sete %1"
8009+ asm volatile(LOCK_PREFIX "decq %0\n"
8010+
8011+#ifdef CONFIG_PAX_REFCOUNT
8012+ "jno 0f\n"
8013+ LOCK_PREFIX "incq %0\n"
8014+ "int $4\n0:\n"
8015+ _ASM_EXTABLE(0b, 0b)
8016+#endif
8017+
8018+ "sete %1\n"
8019 : "=m" (v->counter), "=qm" (c)
8020 : "m" (v->counter) : "memory");
8021 return c != 0;
8022@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
8023 {
8024 unsigned char c;
8025
8026- asm volatile(LOCK_PREFIX "incq %0; sete %1"
8027+ asm volatile(LOCK_PREFIX "incq %0\n"
8028+
8029+#ifdef CONFIG_PAX_REFCOUNT
8030+ "jno 0f\n"
8031+ LOCK_PREFIX "decq %0\n"
8032+ "int $4\n0:\n"
8033+ _ASM_EXTABLE(0b, 0b)
8034+#endif
8035+
8036+ "sete %1\n"
8037 : "=m" (v->counter), "=qm" (c)
8038 : "m" (v->counter) : "memory");
8039 return c != 0;
8040@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
8041 {
8042 unsigned char c;
8043
8044- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
8045+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
8046+
8047+#ifdef CONFIG_PAX_REFCOUNT
8048+ "jno 0f\n"
8049+ LOCK_PREFIX "subq %2,%0\n"
8050+ "int $4\n0:\n"
8051+ _ASM_EXTABLE(0b, 0b)
8052+#endif
8053+
8054+ "sets %1\n"
8055 : "=m" (v->counter), "=qm" (c)
8056 : "er" (i), "m" (v->counter) : "memory");
8057 return c;
8058@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
8059 static inline long atomic64_add_return(long i, atomic64_t *v)
8060 {
8061 long __i = i;
8062- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
8063+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
8064+
8065+#ifdef CONFIG_PAX_REFCOUNT
8066+ "jno 0f\n"
8067+ "movq %0, %1\n"
8068+ "int $4\n0:\n"
8069+ _ASM_EXTABLE(0b, 0b)
8070+#endif
8071+
8072+ : "+r" (i), "+m" (v->counter)
8073+ : : "memory");
8074+ return i + __i;
8075+}
8076+
8077+/**
8078+ * atomic64_add_return_unchecked - add and return
8079+ * @i: integer value to add
8080+ * @v: pointer to type atomic64_unchecked_t
8081+ *
8082+ * Atomically adds @i to @v and returns @i + @v
8083+ */
8084+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8085+{
8086+ long __i = i;
8087+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
8088 : "+r" (i), "+m" (v->counter)
8089 : : "memory");
8090 return i + __i;
8091@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
8092 }
8093
8094 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
8095+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8096+{
8097+ return atomic64_add_return_unchecked(1, v);
8098+}
8099 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
8100
8101 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8102@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
8103 return cmpxchg(&v->counter, old, new);
8104 }
8105
8106+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8107+{
8108+ return cmpxchg(&v->counter, old, new);
8109+}
8110+
8111 static inline long atomic64_xchg(atomic64_t *v, long new)
8112 {
8113 return xchg(&v->counter, new);
8114 }
8115
8116+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8117+{
8118+ return xchg(&v->counter, new);
8119+}
8120+
8121 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
8122 {
8123 return cmpxchg(&v->counter, old, new);
8124 }
8125
8126+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8127+{
8128+ return cmpxchg(&v->counter, old, new);
8129+}
8130+
8131 static inline long atomic_xchg(atomic_t *v, int new)
8132 {
8133 return xchg(&v->counter, new);
8134 }
8135
8136+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8137+{
8138+ return xchg(&v->counter, new);
8139+}
8140+
8141 /**
8142 * atomic_add_unless - add unless the number is a given value
8143 * @v: pointer of type atomic_t
8144@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
8145 */
8146 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8147 {
8148- int c, old;
8149+ int c, old, new;
8150 c = atomic_read(v);
8151 for (;;) {
8152- if (unlikely(c == (u)))
8153+ if (unlikely(c == u))
8154 break;
8155- old = atomic_cmpxchg((v), c, c + (a));
8156+
8157+ asm volatile("addl %2,%0\n"
8158+
8159+#ifdef CONFIG_PAX_REFCOUNT
8160+ "jno 0f\n"
8161+ "subl %2,%0\n"
8162+ "int $4\n0:\n"
8163+ _ASM_EXTABLE(0b, 0b)
8164+#endif
8165+
8166+ : "=r" (new)
8167+ : "0" (c), "ir" (a));
8168+
8169+ old = atomic_cmpxchg(v, c, new);
8170 if (likely(old == c))
8171 break;
8172 c = old;
8173 }
8174- return c != (u);
8175+ return c != u;
8176 }
8177
8178 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8179@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8180 */
8181 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8182 {
8183- long c, old;
8184+ long c, old, new;
8185 c = atomic64_read(v);
8186 for (;;) {
8187- if (unlikely(c == (u)))
8188+ if (unlikely(c == u))
8189 break;
8190- old = atomic64_cmpxchg((v), c, c + (a));
8191+
8192+ asm volatile("addq %2,%0\n"
8193+
8194+#ifdef CONFIG_PAX_REFCOUNT
8195+ "jno 0f\n"
8196+ "subq %2,%0\n"
8197+ "int $4\n0:\n"
8198+ _ASM_EXTABLE(0b, 0b)
8199+#endif
8200+
8201+ : "=r" (new)
8202+ : "0" (c), "er" (a));
8203+
8204+ old = atomic64_cmpxchg(v, c, new);
8205 if (likely(old == c))
8206 break;
8207 c = old;
8208 }
8209- return c != (u);
8210+ return c != u;
8211 }
8212
8213 /**
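
The hunks above are the core of the CONFIG_PAX_REFCOUNT instrumentation: after each LOCK-prefixed arithmetic instruction, jno falls through when no signed overflow occurred; otherwise the operation is undone, int $4 raises the overflow trap for the kernel to handle, and the _ASM_EXTABLE entry resumes execution at the same label. The new *_unchecked variants on atomic_unchecked_t keep the original wrapping behaviour for counters (statistics, sequence numbers) that may legitimately overflow. As a rough user-space sketch of that checked-versus-unchecked split, not the kernel's inline-asm mechanism, the following uses the GCC/Clang __builtin_add_overflow builtin:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Checked counter: refuses to wrap past INT_MAX, loosely mirroring what
 * the jno / int $4 sequence enforces for atomic_t under PAX_REFCOUNT.
 * Illustrative only: no atomicity and no exception table here. */
static int checked_add(int *counter, int i)
{
    int newval;

    if (__builtin_add_overflow(*counter, i, &newval)) {
        fprintf(stderr, "refcount overflow caught\n");
        abort();                /* the kernel raises the overflow trap instead */
    }
    *counter = newval;
    return newval;
}

/* Unchecked counter: wraps silently, like atomic_add_unchecked(). */
static int unchecked_add(int *counter, int i)
{
    *counter = (int)((unsigned int)*counter + (unsigned int)i);
    return *counter;
}

int main(void)
{
    int checked = INT_MAX - 1, unchecked = INT_MAX - 1;

    printf("unchecked wraps to %d\n", unchecked_add(&unchecked, 2));
    checked_add(&checked, 2);   /* aborts with the overflow message */
    return 0;
}
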
8214diff -urNp linux-2.6.32.45/arch/x86/include/asm/bitops.h linux-2.6.32.45/arch/x86/include/asm/bitops.h
8215--- linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8216+++ linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8217@@ -38,7 +38,7 @@
8218 * a mask operation on a byte.
8219 */
8220 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8221-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8222+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8223 #define CONST_MASK(nr) (1 << ((nr) & 7))
8224
8225 /**
8226diff -urNp linux-2.6.32.45/arch/x86/include/asm/boot.h linux-2.6.32.45/arch/x86/include/asm/boot.h
8227--- linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8228+++ linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8229@@ -11,10 +11,15 @@
8230 #include <asm/pgtable_types.h>
8231
8232 /* Physical address where kernel should be loaded. */
8233-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8234+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8235 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8236 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8237
8238+#ifndef __ASSEMBLY__
8239+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8240+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8241+#endif
8242+
8243 /* Minimum kernel alignment, as a power of two */
8244 #ifdef CONFIG_X86_64
8245 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
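
With this change LOAD_PHYSICAL_ADDR is no longer a compile-time constant but the address of __LOAD_PHYSICAL_ADDR, a symbol expected to be provided at link time, so C code picks the value up from the linker rather than from Kconfig arithmetic (the ____LOAD_PHYSICAL_ADDR form stays available for assembly and the linker script). The extern char sym[] idiom it relies on is the usual way to read a linker-defined address from C; a small stand-alone illustration, using the classic linker-provided symbols etext, edata and end rather than the patch's own symbol:

#include <stdio.h>

/* etext, edata and end are provided by the default linker script on
 * Linux; their addresses, not their contents, carry the information,
 * in the same way __LOAD_PHYSICAL_ADDR is used in the hunk above. */
extern char etext[], edata[], end[];

int main(void)
{
    printf("etext = %p\n", (void *)etext);
    printf("edata = %p\n", (void *)edata);
    printf("end   = %p\n", (void *)end);
    return 0;
}
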
8246diff -urNp linux-2.6.32.45/arch/x86/include/asm/cacheflush.h linux-2.6.32.45/arch/x86/include/asm/cacheflush.h
8247--- linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8248+++ linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8249@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8250 static inline unsigned long get_page_memtype(struct page *pg)
8251 {
8252 if (!PageUncached(pg) && !PageWC(pg))
8253- return -1;
8254+ return ~0UL;
8255 else if (!PageUncached(pg) && PageWC(pg))
8256 return _PAGE_CACHE_WC;
8257 else if (PageUncached(pg) && !PageWC(pg))
8258@@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8259 SetPageWC(pg);
8260 break;
8261 default:
8262- case -1:
8263+ case ~0UL:
8264 ClearPageUncached(pg);
8265 ClearPageWC(pg);
8266 break;
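
get_page_memtype() returns an unsigned long, so the old return -1 and case -1 already compared equal to ULONG_MAX after the implicit conversion; writing the sentinel as ~0UL just makes that explicit and keeps the switch on an unsigned value free of signed constants. A two-line check of the equivalence:

#include <stdio.h>

int main(void)
{
    unsigned long sentinel = -1;    /* implicit conversion yields ULONG_MAX */

    printf("%s\n", sentinel == ~0UL ? "same value" : "different");
    return 0;
}
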
8267diff -urNp linux-2.6.32.45/arch/x86/include/asm/cache.h linux-2.6.32.45/arch/x86/include/asm/cache.h
8268--- linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8269+++ linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8270@@ -5,9 +5,10 @@
8271
8272 /* L1 cache line size */
8273 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8274-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8275+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8276
8277 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8278+#define __read_only __attribute__((__section__(".data.read_only")))
8279
8280 #ifdef CONFIG_X86_VSMP
8281 /* vSMP Internode cacheline shift */
8282diff -urNp linux-2.6.32.45/arch/x86/include/asm/checksum_32.h linux-2.6.32.45/arch/x86/include/asm/checksum_32.h
8283--- linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8284+++ linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8285@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8286 int len, __wsum sum,
8287 int *src_err_ptr, int *dst_err_ptr);
8288
8289+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8290+ int len, __wsum sum,
8291+ int *src_err_ptr, int *dst_err_ptr);
8292+
8293+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8294+ int len, __wsum sum,
8295+ int *src_err_ptr, int *dst_err_ptr);
8296+
8297 /*
8298 * Note: when you get a NULL pointer exception here this means someone
8299 * passed in an incorrect kernel address to one of these functions.
8300@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8301 int *err_ptr)
8302 {
8303 might_sleep();
8304- return csum_partial_copy_generic((__force void *)src, dst,
8305+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8306 len, sum, err_ptr, NULL);
8307 }
8308
8309@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8310 {
8311 might_sleep();
8312 if (access_ok(VERIFY_WRITE, dst, len))
8313- return csum_partial_copy_generic(src, (__force void *)dst,
8314+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8315 len, sum, NULL, err_ptr);
8316
8317 if (len)
8318diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc_defs.h linux-2.6.32.45/arch/x86/include/asm/desc_defs.h
8319--- linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8320+++ linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8321@@ -31,6 +31,12 @@ struct desc_struct {
8322 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8323 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8324 };
8325+ struct {
8326+ u16 offset_low;
8327+ u16 seg;
8328+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8329+ unsigned offset_high: 16;
8330+ } gate;
8331 };
8332 } __attribute__((packed));
8333
8334diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc.h linux-2.6.32.45/arch/x86/include/asm/desc.h
8335--- linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8336+++ linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8337@@ -4,6 +4,7 @@
8338 #include <asm/desc_defs.h>
8339 #include <asm/ldt.h>
8340 #include <asm/mmu.h>
8341+#include <asm/pgtable.h>
8342 #include <linux/smp.h>
8343
8344 static inline void fill_ldt(struct desc_struct *desc,
8345@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8346 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8347 desc->type = (info->read_exec_only ^ 1) << 1;
8348 desc->type |= info->contents << 2;
8349+ desc->type |= info->seg_not_present ^ 1;
8350 desc->s = 1;
8351 desc->dpl = 0x3;
8352 desc->p = info->seg_not_present ^ 1;
8353@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8354 }
8355
8356 extern struct desc_ptr idt_descr;
8357-extern gate_desc idt_table[];
8358-
8359-struct gdt_page {
8360- struct desc_struct gdt[GDT_ENTRIES];
8361-} __attribute__((aligned(PAGE_SIZE)));
8362-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8363+extern gate_desc idt_table[256];
8364
8365+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8366 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8367 {
8368- return per_cpu(gdt_page, cpu).gdt;
8369+ return cpu_gdt_table[cpu];
8370 }
8371
8372 #ifdef CONFIG_X86_64
8373@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8374 unsigned long base, unsigned dpl, unsigned flags,
8375 unsigned short seg)
8376 {
8377- gate->a = (seg << 16) | (base & 0xffff);
8378- gate->b = (base & 0xffff0000) |
8379- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8380+ gate->gate.offset_low = base;
8381+ gate->gate.seg = seg;
8382+ gate->gate.reserved = 0;
8383+ gate->gate.type = type;
8384+ gate->gate.s = 0;
8385+ gate->gate.dpl = dpl;
8386+ gate->gate.p = 1;
8387+ gate->gate.offset_high = base >> 16;
8388 }
8389
8390 #endif
8391@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8392 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8393 const gate_desc *gate)
8394 {
8395+ pax_open_kernel();
8396 memcpy(&idt[entry], gate, sizeof(*gate));
8397+ pax_close_kernel();
8398 }
8399
8400 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8401 const void *desc)
8402 {
8403+ pax_open_kernel();
8404 memcpy(&ldt[entry], desc, 8);
8405+ pax_close_kernel();
8406 }
8407
8408 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8409@@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8410 size = sizeof(struct desc_struct);
8411 break;
8412 }
8413+
8414+ pax_open_kernel();
8415 memcpy(&gdt[entry], desc, size);
8416+ pax_close_kernel();
8417 }
8418
8419 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8420@@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8421
8422 static inline void native_load_tr_desc(void)
8423 {
8424+ pax_open_kernel();
8425 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8426+ pax_close_kernel();
8427 }
8428
8429 static inline void native_load_gdt(const struct desc_ptr *dtr)
8430@@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8431 unsigned int i;
8432 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8433
8434+ pax_open_kernel();
8435 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8436 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8437+ pax_close_kernel();
8438 }
8439
8440 #define _LDT_empty(info) \
8441@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8442 desc->limit = (limit >> 16) & 0xf;
8443 }
8444
8445-static inline void _set_gate(int gate, unsigned type, void *addr,
8446+static inline void _set_gate(int gate, unsigned type, const void *addr,
8447 unsigned dpl, unsigned ist, unsigned seg)
8448 {
8449 gate_desc s;
8450@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8451 * Pentium F0 0F bugfix can have resulted in the mapped
8452 * IDT being write-protected.
8453 */
8454-static inline void set_intr_gate(unsigned int n, void *addr)
8455+static inline void set_intr_gate(unsigned int n, const void *addr)
8456 {
8457 BUG_ON((unsigned)n > 0xFF);
8458 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8459@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8460 /*
8461 * This routine sets up an interrupt gate at directory privilege level 3.
8462 */
8463-static inline void set_system_intr_gate(unsigned int n, void *addr)
8464+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8465 {
8466 BUG_ON((unsigned)n > 0xFF);
8467 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8468 }
8469
8470-static inline void set_system_trap_gate(unsigned int n, void *addr)
8471+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8472 {
8473 BUG_ON((unsigned)n > 0xFF);
8474 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8475 }
8476
8477-static inline void set_trap_gate(unsigned int n, void *addr)
8478+static inline void set_trap_gate(unsigned int n, const void *addr)
8479 {
8480 BUG_ON((unsigned)n > 0xFF);
8481 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8482@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8483 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8484 {
8485 BUG_ON((unsigned)n > 0xFF);
8486- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8487+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8488 }
8489
8490-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8491+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8492 {
8493 BUG_ON((unsigned)n > 0xFF);
8494 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8495 }
8496
8497-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8498+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8499 {
8500 BUG_ON((unsigned)n > 0xFF);
8501 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8502 }
8503
8504+#ifdef CONFIG_X86_32
8505+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8506+{
8507+ struct desc_struct d;
8508+
8509+ if (likely(limit))
8510+ limit = (limit - 1UL) >> PAGE_SHIFT;
8511+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8512+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8513+}
8514+#endif
8515+
8516 #endif /* _ASM_X86_DESC_H */
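
Throughout this header the descriptor-table updates (IDT, LDT, GDT, TR, TLS) are bracketed with pax_open_kernel()/pax_close_kernel(), because under KERNEXEC those tables sit in read-only memory except during an intentional write; set_user_cs() is added so the SEGMEXEC/PAGEEXEC code can repack the user code segment with a per-mm limit. A loose user-space analog of the open/write/close pattern, using mprotect() on an anonymous mapping (the kernel flips page-table permissions rather than calling mprotect, and the helper names below are made up):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static long *table;
static size_t table_bytes;

/* hypothetical names, standing in for pax_open_kernel()/pax_close_kernel() */
static void table_open(void)  { mprotect(table, table_bytes, PROT_READ | PROT_WRITE); }
static void table_close(void) { mprotect(table, table_bytes, PROT_READ); }

int main(void)
{
    table_bytes = (size_t)sysconf(_SC_PAGESIZE);
    table = mmap(NULL, table_bytes, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (table == MAP_FAILED)
        return 1;

    memset(table, 0, table_bytes);
    table_close();              /* steady state: the table is read-only */

    table_open();               /* open the window ... */
    table[0] = 0x42;            /* ... perform the one intended write ... */
    table_close();              /* ... and close it again */

    printf("table[0] = %ld\n", table[0]);
    return 0;
}
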
8517diff -urNp linux-2.6.32.45/arch/x86/include/asm/device.h linux-2.6.32.45/arch/x86/include/asm/device.h
8518--- linux-2.6.32.45/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8519+++ linux-2.6.32.45/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8520@@ -6,7 +6,7 @@ struct dev_archdata {
8521 void *acpi_handle;
8522 #endif
8523 #ifdef CONFIG_X86_64
8524-struct dma_map_ops *dma_ops;
8525+ const struct dma_map_ops *dma_ops;
8526 #endif
8527 #ifdef CONFIG_DMAR
8528 void *iommu; /* hook for IOMMU specific extension */
8529diff -urNp linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h
8530--- linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8531+++ linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8532@@ -25,9 +25,9 @@ extern int iommu_merge;
8533 extern struct device x86_dma_fallback_dev;
8534 extern int panic_on_overflow;
8535
8536-extern struct dma_map_ops *dma_ops;
8537+extern const struct dma_map_ops *dma_ops;
8538
8539-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8540+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8541 {
8542 #ifdef CONFIG_X86_32
8543 return dma_ops;
8544@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8545 /* Make sure we keep the same behaviour */
8546 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8547 {
8548- struct dma_map_ops *ops = get_dma_ops(dev);
8549+ const struct dma_map_ops *ops = get_dma_ops(dev);
8550 if (ops->mapping_error)
8551 return ops->mapping_error(dev, dma_addr);
8552
8553@@ -122,7 +122,7 @@ static inline void *
8554 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8555 gfp_t gfp)
8556 {
8557- struct dma_map_ops *ops = get_dma_ops(dev);
8558+ const struct dma_map_ops *ops = get_dma_ops(dev);
8559 void *memory;
8560
8561 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8562@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8563 static inline void dma_free_coherent(struct device *dev, size_t size,
8564 void *vaddr, dma_addr_t bus)
8565 {
8566- struct dma_map_ops *ops = get_dma_ops(dev);
8567+ const struct dma_map_ops *ops = get_dma_ops(dev);
8568
8569 WARN_ON(irqs_disabled()); /* for portability */
8570
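
Here, and in the later microcode_ops, pci_raw_ops and kvm_x86_ops hunks, the patch const-qualifies structures that are pure tables of function pointers, so they can live in read-only data and cannot be retargeted at run time; the cost is that every consumer must accept a const pointer, which is the ripple effect visible in get_dma_ops() and its callers. A minimal sketch of the pattern with invented names:

#include <stdio.h>

struct ops {
    int (*read)(int reg);
    int (*write)(int reg, int val);
};

static int dummy_read(int reg)           { return reg; }
static int dummy_write(int reg, int val) { return reg + val; }

/* const instance: emitted into .rodata, so the pointers cannot be
 * redirected at run time the way a writable ops table could be. */
static const struct ops dummy_ops = {
    .read  = dummy_read,
    .write = dummy_write,
};

/* every consumer has to accept "const struct ops *", mirroring the
 * signature changes made to get_dma_ops() and dma_mapping_error() */
static int probe(const struct ops *o)
{
    return o->read(0);
}

int main(void)
{
    printf("%d\n", probe(&dummy_ops));
    return 0;
}
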
8571diff -urNp linux-2.6.32.45/arch/x86/include/asm/e820.h linux-2.6.32.45/arch/x86/include/asm/e820.h
8572--- linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8573+++ linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8574@@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8575 #define ISA_END_ADDRESS 0x100000
8576 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8577
8578-#define BIOS_BEGIN 0x000a0000
8579+#define BIOS_BEGIN 0x000c0000
8580 #define BIOS_END 0x00100000
8581
8582 #ifdef __KERNEL__
8583diff -urNp linux-2.6.32.45/arch/x86/include/asm/elf.h linux-2.6.32.45/arch/x86/include/asm/elf.h
8584--- linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8585+++ linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-08-23 20:24:19.000000000 -0400
8586@@ -257,7 +257,25 @@ extern int force_personality32;
8587 the loader. We need to make sure that it is out of the way of the program
8588 that it will "exec", and that there is sufficient room for the brk. */
8589
8590+#ifdef CONFIG_PAX_SEGMEXEC
8591+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8592+#else
8593 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8594+#endif
8595+
8596+#ifdef CONFIG_PAX_ASLR
8597+#ifdef CONFIG_X86_32
8598+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8599+
8600+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8601+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8602+#else
8603+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8604+
8605+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8606+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8607+#endif
8608+#endif
8609
8610 /* This yields a mask that user programs can use to figure out what
8611 instruction set this CPU supports. This could be done in user space,
8612@@ -310,9 +328,7 @@ do { \
8613
8614 #define ARCH_DLINFO \
8615 do { \
8616- if (vdso_enabled) \
8617- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8618- (unsigned long)current->mm->context.vdso); \
8619+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8620 } while (0)
8621
8622 #define AT_SYSINFO 32
8623@@ -323,7 +339,7 @@ do { \
8624
8625 #endif /* !CONFIG_X86_32 */
8626
8627-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8628+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8629
8630 #define VDSO_ENTRY \
8631 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8632@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(s
8633 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8634 #define compat_arch_setup_additional_pages syscall32_setup_pages
8635
8636-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8637-#define arch_randomize_brk arch_randomize_brk
8638-
8639 #endif /* _ASM_X86_ELF_H */
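
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are counts of randomised page-offset bits, so the resulting span is 2^len pages. For the 32-bit values in this hunk that is 128 MiB (15 bits under SEGMEXEC) or 256 MiB (16 bits); on x86_64 the expression works out to 32 bits if TASK_SIZE_MAX_SHIFT is 47, which is an assumption of the sketch below rather than something stated in the hunk:

#include <stdio.h>

int main(void)
{
    const unsigned int page_shift = 12;             /* 4 KiB pages */
    const unsigned int lens[] = { 15, 16, 32 };     /* 32 assumes TASK_SIZE_MAX_SHIFT == 47 */
    unsigned int i;

    for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
        printf("%2u bits -> %llu MiB of randomisation\n", lens[i],
               (1ULL << (lens[i] + page_shift)) >> 20);
    return 0;
}
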
8640diff -urNp linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h
8641--- linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8642+++ linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8643@@ -15,6 +15,6 @@ enum reboot_type {
8644
8645 extern enum reboot_type reboot_type;
8646
8647-extern void machine_emergency_restart(void);
8648+extern void machine_emergency_restart(void) __noreturn;
8649
8650 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8651diff -urNp linux-2.6.32.45/arch/x86/include/asm/futex.h linux-2.6.32.45/arch/x86/include/asm/futex.h
8652--- linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8653+++ linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8654@@ -12,16 +12,18 @@
8655 #include <asm/system.h>
8656
8657 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8658+ typecheck(u32 *, uaddr); \
8659 asm volatile("1:\t" insn "\n" \
8660 "2:\t.section .fixup,\"ax\"\n" \
8661 "3:\tmov\t%3, %1\n" \
8662 "\tjmp\t2b\n" \
8663 "\t.previous\n" \
8664 _ASM_EXTABLE(1b, 3b) \
8665- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8666+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8667 : "i" (-EFAULT), "0" (oparg), "1" (0))
8668
8669 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8670+ typecheck(u32 *, uaddr); \
8671 asm volatile("1:\tmovl %2, %0\n" \
8672 "\tmovl\t%0, %3\n" \
8673 "\t" insn "\n" \
8674@@ -34,10 +36,10 @@
8675 _ASM_EXTABLE(1b, 4b) \
8676 _ASM_EXTABLE(2b, 4b) \
8677 : "=&a" (oldval), "=&r" (ret), \
8678- "+m" (*uaddr), "=&r" (tem) \
8679+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8680 : "r" (oparg), "i" (-EFAULT), "1" (0))
8681
8682-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8683+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8684 {
8685 int op = (encoded_op >> 28) & 7;
8686 int cmp = (encoded_op >> 24) & 15;
8687@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8688
8689 switch (op) {
8690 case FUTEX_OP_SET:
8691- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8692+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8693 break;
8694 case FUTEX_OP_ADD:
8695- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8696+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8697 uaddr, oparg);
8698 break;
8699 case FUTEX_OP_OR:
8700@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8701 return ret;
8702 }
8703
8704-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8705+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8706 int newval)
8707 {
8708
8709@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8710 return -ENOSYS;
8711 #endif
8712
8713- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8714+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8715 return -EFAULT;
8716
8717- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8718+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8719 "2:\t.section .fixup, \"ax\"\n"
8720 "3:\tmov %2, %0\n"
8721 "\tjmp 2b\n"
8722 "\t.previous\n"
8723 _ASM_EXTABLE(1b, 3b)
8724- : "=a" (oldval), "+m" (*uaddr)
8725+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8726 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8727 : "memory"
8728 );
8729diff -urNp linux-2.6.32.45/arch/x86/include/asm/hw_irq.h linux-2.6.32.45/arch/x86/include/asm/hw_irq.h
8730--- linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8731+++ linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8732@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8733 extern void enable_IO_APIC(void);
8734
8735 /* Statistics */
8736-extern atomic_t irq_err_count;
8737-extern atomic_t irq_mis_count;
8738+extern atomic_unchecked_t irq_err_count;
8739+extern atomic_unchecked_t irq_mis_count;
8740
8741 /* EISA */
8742 extern void eisa_set_level_irq(unsigned int irq);
8743diff -urNp linux-2.6.32.45/arch/x86/include/asm/i387.h linux-2.6.32.45/arch/x86/include/asm/i387.h
8744--- linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8745+++ linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8746@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8747 {
8748 int err;
8749
8750+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8751+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8752+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8753+#endif
8754+
8755 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8756 "2:\n"
8757 ".section .fixup,\"ax\"\n"
8758@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8759 {
8760 int err;
8761
8762+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8763+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8764+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8765+#endif
8766+
8767 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8768 "2:\n"
8769 ".section .fixup,\"ax\"\n"
8770@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8771 }
8772
8773 /* We need a safe address that is cheap to find and that is already
8774- in L1 during context switch. The best choices are unfortunately
8775- different for UP and SMP */
8776-#ifdef CONFIG_SMP
8777-#define safe_address (__per_cpu_offset[0])
8778-#else
8779-#define safe_address (kstat_cpu(0).cpustat.user)
8780-#endif
8781+ in L1 during context switch. */
8782+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8783
8784 /*
8785 * These must be called with preempt disabled
8786@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8787 struct thread_info *me = current_thread_info();
8788 preempt_disable();
8789 if (me->status & TS_USEDFPU)
8790- __save_init_fpu(me->task);
8791+ __save_init_fpu(current);
8792 else
8793 clts();
8794 }
8795diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_32.h linux-2.6.32.45/arch/x86/include/asm/io_32.h
8796--- linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8797+++ linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8798@@ -3,6 +3,7 @@
8799
8800 #include <linux/string.h>
8801 #include <linux/compiler.h>
8802+#include <asm/processor.h>
8803
8804 /*
8805 * This file contains the definitions for the x86 IO instructions
8806@@ -42,6 +43,17 @@
8807
8808 #ifdef __KERNEL__
8809
8810+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8811+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8812+{
8813+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8814+}
8815+
8816+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8817+{
8818+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8819+}
8820+
8821 #include <asm-generic/iomap.h>
8822
8823 #include <linux/vmalloc.h>
8824diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_64.h linux-2.6.32.45/arch/x86/include/asm/io_64.h
8825--- linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8826+++ linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8827@@ -140,6 +140,17 @@ __OUTS(l)
8828
8829 #include <linux/vmalloc.h>
8830
8831+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8832+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8833+{
8834+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8835+}
8836+
8837+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8838+{
8839+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8840+}
8841+
8842 #include <asm-generic/iomap.h>
8843
8844 void __memcpy_fromio(void *, unsigned long, unsigned);
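
Both io_32.h and io_64.h gain the same ARCH_HAS_VALID_PHYS_ADDR_RANGE helpers: a /dev/mem style access is accepted only if the last page it touches still falls below 2^(x86_phys_bits - PAGE_SHIFT) page frames, i.e. within the CPU's physical address width. A stand-alone sketch of the same check, with the width passed in explicitly instead of read from boot_cpu_data:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* mirrors the valid_phys_addr_range() test added above */
static int valid_phys_range(unsigned long long addr, size_t count, unsigned int phys_bits)
{
    unsigned long long last_pfn = (addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;

    return last_pfn < (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
    /* with 36 physical address bits, anything reaching 64 GiB is rejected */
    printf("%d\n", valid_phys_range(0xFFFFF000ULL, 4096, 36));   /* 1: well below the limit */
    printf("%d\n", valid_phys_range(1ULL << 36, 4096, 36));      /* 0: beyond the limit */
    return 0;
}
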
8845diff -urNp linux-2.6.32.45/arch/x86/include/asm/iommu.h linux-2.6.32.45/arch/x86/include/asm/iommu.h
8846--- linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8847+++ linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8848@@ -3,7 +3,7 @@
8849
8850 extern void pci_iommu_shutdown(void);
8851 extern void no_iommu_init(void);
8852-extern struct dma_map_ops nommu_dma_ops;
8853+extern const struct dma_map_ops nommu_dma_ops;
8854 extern int force_iommu, no_iommu;
8855 extern int iommu_detected;
8856 extern int iommu_pass_through;
8857diff -urNp linux-2.6.32.45/arch/x86/include/asm/irqflags.h linux-2.6.32.45/arch/x86/include/asm/irqflags.h
8858--- linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8859+++ linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8860@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8861 sti; \
8862 sysexit
8863
8864+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8865+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8866+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8867+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8868+
8869 #else
8870 #define INTERRUPT_RETURN iret
8871 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8872diff -urNp linux-2.6.32.45/arch/x86/include/asm/kprobes.h linux-2.6.32.45/arch/x86/include/asm/kprobes.h
8873--- linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8874+++ linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8875@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8876 #define BREAKPOINT_INSTRUCTION 0xcc
8877 #define RELATIVEJUMP_INSTRUCTION 0xe9
8878 #define MAX_INSN_SIZE 16
8879-#define MAX_STACK_SIZE 64
8880-#define MIN_STACK_SIZE(ADDR) \
8881- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8882- THREAD_SIZE - (unsigned long)(ADDR))) \
8883- ? (MAX_STACK_SIZE) \
8884- : (((unsigned long)current_thread_info()) + \
8885- THREAD_SIZE - (unsigned long)(ADDR)))
8886+#define MAX_STACK_SIZE 64UL
8887+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8888
8889 #define flush_insn_slot(p) do { } while (0)
8890
8891diff -urNp linux-2.6.32.45/arch/x86/include/asm/kvm_host.h linux-2.6.32.45/arch/x86/include/asm/kvm_host.h
8892--- linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8893+++ linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8894@@ -536,7 +536,7 @@ struct kvm_x86_ops {
8895 const struct trace_print_flags *exit_reasons_str;
8896 };
8897
8898-extern struct kvm_x86_ops *kvm_x86_ops;
8899+extern const struct kvm_x86_ops *kvm_x86_ops;
8900
8901 int kvm_mmu_module_init(void);
8902 void kvm_mmu_module_exit(void);
8903diff -urNp linux-2.6.32.45/arch/x86/include/asm/local.h linux-2.6.32.45/arch/x86/include/asm/local.h
8904--- linux-2.6.32.45/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8905+++ linux-2.6.32.45/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8906@@ -18,26 +18,58 @@ typedef struct {
8907
8908 static inline void local_inc(local_t *l)
8909 {
8910- asm volatile(_ASM_INC "%0"
8911+ asm volatile(_ASM_INC "%0\n"
8912+
8913+#ifdef CONFIG_PAX_REFCOUNT
8914+ "jno 0f\n"
8915+ _ASM_DEC "%0\n"
8916+ "int $4\n0:\n"
8917+ _ASM_EXTABLE(0b, 0b)
8918+#endif
8919+
8920 : "+m" (l->a.counter));
8921 }
8922
8923 static inline void local_dec(local_t *l)
8924 {
8925- asm volatile(_ASM_DEC "%0"
8926+ asm volatile(_ASM_DEC "%0\n"
8927+
8928+#ifdef CONFIG_PAX_REFCOUNT
8929+ "jno 0f\n"
8930+ _ASM_INC "%0\n"
8931+ "int $4\n0:\n"
8932+ _ASM_EXTABLE(0b, 0b)
8933+#endif
8934+
8935 : "+m" (l->a.counter));
8936 }
8937
8938 static inline void local_add(long i, local_t *l)
8939 {
8940- asm volatile(_ASM_ADD "%1,%0"
8941+ asm volatile(_ASM_ADD "%1,%0\n"
8942+
8943+#ifdef CONFIG_PAX_REFCOUNT
8944+ "jno 0f\n"
8945+ _ASM_SUB "%1,%0\n"
8946+ "int $4\n0:\n"
8947+ _ASM_EXTABLE(0b, 0b)
8948+#endif
8949+
8950 : "+m" (l->a.counter)
8951 : "ir" (i));
8952 }
8953
8954 static inline void local_sub(long i, local_t *l)
8955 {
8956- asm volatile(_ASM_SUB "%1,%0"
8957+ asm volatile(_ASM_SUB "%1,%0\n"
8958+
8959+#ifdef CONFIG_PAX_REFCOUNT
8960+ "jno 0f\n"
8961+ _ASM_ADD "%1,%0\n"
8962+ "int $4\n0:\n"
8963+ _ASM_EXTABLE(0b, 0b)
8964+#endif
8965+
8966 : "+m" (l->a.counter)
8967 : "ir" (i));
8968 }
8969@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8970 {
8971 unsigned char c;
8972
8973- asm volatile(_ASM_SUB "%2,%0; sete %1"
8974+ asm volatile(_ASM_SUB "%2,%0\n"
8975+
8976+#ifdef CONFIG_PAX_REFCOUNT
8977+ "jno 0f\n"
8978+ _ASM_ADD "%2,%0\n"
8979+ "int $4\n0:\n"
8980+ _ASM_EXTABLE(0b, 0b)
8981+#endif
8982+
8983+ "sete %1\n"
8984 : "+m" (l->a.counter), "=qm" (c)
8985 : "ir" (i) : "memory");
8986 return c;
8987@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8988 {
8989 unsigned char c;
8990
8991- asm volatile(_ASM_DEC "%0; sete %1"
8992+ asm volatile(_ASM_DEC "%0\n"
8993+
8994+#ifdef CONFIG_PAX_REFCOUNT
8995+ "jno 0f\n"
8996+ _ASM_INC "%0\n"
8997+ "int $4\n0:\n"
8998+ _ASM_EXTABLE(0b, 0b)
8999+#endif
9000+
9001+ "sete %1\n"
9002 : "+m" (l->a.counter), "=qm" (c)
9003 : : "memory");
9004 return c != 0;
9005@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
9006 {
9007 unsigned char c;
9008
9009- asm volatile(_ASM_INC "%0; sete %1"
9010+ asm volatile(_ASM_INC "%0\n"
9011+
9012+#ifdef CONFIG_PAX_REFCOUNT
9013+ "jno 0f\n"
9014+ _ASM_DEC "%0\n"
9015+ "int $4\n0:\n"
9016+ _ASM_EXTABLE(0b, 0b)
9017+#endif
9018+
9019+ "sete %1\n"
9020 : "+m" (l->a.counter), "=qm" (c)
9021 : : "memory");
9022 return c != 0;
9023@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
9024 {
9025 unsigned char c;
9026
9027- asm volatile(_ASM_ADD "%2,%0; sets %1"
9028+ asm volatile(_ASM_ADD "%2,%0\n"
9029+
9030+#ifdef CONFIG_PAX_REFCOUNT
9031+ "jno 0f\n"
9032+ _ASM_SUB "%2,%0\n"
9033+ "int $4\n0:\n"
9034+ _ASM_EXTABLE(0b, 0b)
9035+#endif
9036+
9037+ "sets %1\n"
9038 : "+m" (l->a.counter), "=qm" (c)
9039 : "ir" (i) : "memory");
9040 return c;
9041@@ -133,7 +201,15 @@ static inline long local_add_return(long
9042 #endif
9043 /* Modern 486+ processor */
9044 __i = i;
9045- asm volatile(_ASM_XADD "%0, %1;"
9046+ asm volatile(_ASM_XADD "%0, %1\n"
9047+
9048+#ifdef CONFIG_PAX_REFCOUNT
9049+ "jno 0f\n"
9050+ _ASM_MOV "%0,%1\n"
9051+ "int $4\n0:\n"
9052+ _ASM_EXTABLE(0b, 0b)
9053+#endif
9054+
9055 : "+r" (i), "+m" (l->a.counter)
9056 : : "memory");
9057 return i + __i;
9058diff -urNp linux-2.6.32.45/arch/x86/include/asm/microcode.h linux-2.6.32.45/arch/x86/include/asm/microcode.h
9059--- linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
9060+++ linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
9061@@ -12,13 +12,13 @@ struct device;
9062 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
9063
9064 struct microcode_ops {
9065- enum ucode_state (*request_microcode_user) (int cpu,
9066+ enum ucode_state (* const request_microcode_user) (int cpu,
9067 const void __user *buf, size_t size);
9068
9069- enum ucode_state (*request_microcode_fw) (int cpu,
9070+ enum ucode_state (* const request_microcode_fw) (int cpu,
9071 struct device *device);
9072
9073- void (*microcode_fini_cpu) (int cpu);
9074+ void (* const microcode_fini_cpu) (int cpu);
9075
9076 /*
9077 * The generic 'microcode_core' part guarantees that
9078@@ -38,18 +38,18 @@ struct ucode_cpu_info {
9079 extern struct ucode_cpu_info ucode_cpu_info[];
9080
9081 #ifdef CONFIG_MICROCODE_INTEL
9082-extern struct microcode_ops * __init init_intel_microcode(void);
9083+extern const struct microcode_ops * __init init_intel_microcode(void);
9084 #else
9085-static inline struct microcode_ops * __init init_intel_microcode(void)
9086+static inline const struct microcode_ops * __init init_intel_microcode(void)
9087 {
9088 return NULL;
9089 }
9090 #endif /* CONFIG_MICROCODE_INTEL */
9091
9092 #ifdef CONFIG_MICROCODE_AMD
9093-extern struct microcode_ops * __init init_amd_microcode(void);
9094+extern const struct microcode_ops * __init init_amd_microcode(void);
9095 #else
9096-static inline struct microcode_ops * __init init_amd_microcode(void)
9097+static inline const struct microcode_ops * __init init_amd_microcode(void)
9098 {
9099 return NULL;
9100 }
9101diff -urNp linux-2.6.32.45/arch/x86/include/asm/mman.h linux-2.6.32.45/arch/x86/include/asm/mman.h
9102--- linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
9103+++ linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
9104@@ -5,4 +5,14 @@
9105
9106 #include <asm-generic/mman.h>
9107
9108+#ifdef __KERNEL__
9109+#ifndef __ASSEMBLY__
9110+#ifdef CONFIG_X86_32
9111+#define arch_mmap_check i386_mmap_check
9112+int i386_mmap_check(unsigned long addr, unsigned long len,
9113+ unsigned long flags);
9114+#endif
9115+#endif
9116+#endif
9117+
9118 #endif /* _ASM_X86_MMAN_H */
9119diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu_context.h linux-2.6.32.45/arch/x86/include/asm/mmu_context.h
9120--- linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
9121+++ linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-08-23 20:24:19.000000000 -0400
9122@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
9123
9124 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9125 {
9126+
9127+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9128+ unsigned int i;
9129+ pgd_t *pgd;
9130+
9131+ pax_open_kernel();
9132+ pgd = get_cpu_pgd(smp_processor_id());
9133+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9134+ set_pgd_batched(pgd+i, native_make_pgd(0));
9135+ pax_close_kernel();
9136+#endif
9137+
9138 #ifdef CONFIG_SMP
9139 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9140 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9141@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
9142 struct task_struct *tsk)
9143 {
9144 unsigned cpu = smp_processor_id();
9145+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
9146+ int tlbstate = TLBSTATE_OK;
9147+#endif
9148
9149 if (likely(prev != next)) {
9150 #ifdef CONFIG_SMP
9151+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9152+ tlbstate = percpu_read(cpu_tlbstate.state);
9153+#endif
9154 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9155 percpu_write(cpu_tlbstate.active_mm, next);
9156 #endif
9157 cpumask_set_cpu(cpu, mm_cpumask(next));
9158
9159 /* Re-load page tables */
9160+#ifdef CONFIG_PAX_PER_CPU_PGD
9161+ pax_open_kernel();
9162+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9163+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9164+ pax_close_kernel();
9165+ load_cr3(get_cpu_pgd(cpu));
9166+#else
9167 load_cr3(next->pgd);
9168+#endif
9169
9170 /* stop flush ipis for the previous mm */
9171 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9172@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
9173 */
9174 if (unlikely(prev->context.ldt != next->context.ldt))
9175 load_LDT_nolock(&next->context);
9176- }
9177+
9178+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9179+ if (!nx_enabled) {
9180+ smp_mb__before_clear_bit();
9181+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9182+ smp_mb__after_clear_bit();
9183+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9184+ }
9185+#endif
9186+
9187+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9188+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9189+ prev->context.user_cs_limit != next->context.user_cs_limit))
9190+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9191 #ifdef CONFIG_SMP
9192+ else if (unlikely(tlbstate != TLBSTATE_OK))
9193+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9194+#endif
9195+#endif
9196+
9197+ }
9198 else {
9199+
9200+#ifdef CONFIG_PAX_PER_CPU_PGD
9201+ pax_open_kernel();
9202+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9203+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9204+ pax_close_kernel();
9205+ load_cr3(get_cpu_pgd(cpu));
9206+#endif
9207+
9208+#ifdef CONFIG_SMP
9209 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9210 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9211
9212@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
9213 * tlb flush IPI delivery. We must reload CR3
9214 * to make sure to use no freed page tables.
9215 */
9216+
9217+#ifndef CONFIG_PAX_PER_CPU_PGD
9218 load_cr3(next->pgd);
9219+#endif
9220+
9221 load_LDT_nolock(&next->context);
9222+
9223+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9224+ if (!nx_enabled)
9225+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9226+#endif
9227+
9228+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9229+#ifdef CONFIG_PAX_PAGEEXEC
9230+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9231+#endif
9232+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9233+#endif
9234+
9235 }
9236- }
9237 #endif
9238+ }
9239 }
9240
9241 #define activate_mm(prev, next) \
9242diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu.h linux-2.6.32.45/arch/x86/include/asm/mmu.h
9243--- linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9244+++ linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9245@@ -9,10 +9,23 @@
9246 * we put the segment information here.
9247 */
9248 typedef struct {
9249- void *ldt;
9250+ struct desc_struct *ldt;
9251 int size;
9252 struct mutex lock;
9253- void *vdso;
9254+ unsigned long vdso;
9255+
9256+#ifdef CONFIG_X86_32
9257+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9258+ unsigned long user_cs_base;
9259+ unsigned long user_cs_limit;
9260+
9261+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9262+ cpumask_t cpu_user_cs_mask;
9263+#endif
9264+
9265+#endif
9266+#endif
9267+
9268 } mm_context_t;
9269
9270 #ifdef CONFIG_SMP
9271diff -urNp linux-2.6.32.45/arch/x86/include/asm/module.h linux-2.6.32.45/arch/x86/include/asm/module.h
9272--- linux-2.6.32.45/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9273+++ linux-2.6.32.45/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9274@@ -5,6 +5,7 @@
9275
9276 #ifdef CONFIG_X86_64
9277 /* X86_64 does not define MODULE_PROC_FAMILY */
9278+#define MODULE_PROC_FAMILY ""
9279 #elif defined CONFIG_M386
9280 #define MODULE_PROC_FAMILY "386 "
9281 #elif defined CONFIG_M486
9282@@ -59,13 +60,36 @@
9283 #error unknown processor family
9284 #endif
9285
9286-#ifdef CONFIG_X86_32
9287-# ifdef CONFIG_4KSTACKS
9288-# define MODULE_STACKSIZE "4KSTACKS "
9289-# else
9290-# define MODULE_STACKSIZE ""
9291-# endif
9292-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9293+#ifdef CONFIG_PAX_MEMORY_UDEREF
9294+#define MODULE_PAX_UDEREF "UDEREF "
9295+#else
9296+#define MODULE_PAX_UDEREF ""
9297+#endif
9298+
9299+#ifdef CONFIG_PAX_KERNEXEC
9300+#define MODULE_PAX_KERNEXEC "KERNEXEC "
9301+#else
9302+#define MODULE_PAX_KERNEXEC ""
9303+#endif
9304+
9305+#ifdef CONFIG_PAX_REFCOUNT
9306+#define MODULE_PAX_REFCOUNT "REFCOUNT "
9307+#else
9308+#define MODULE_PAX_REFCOUNT ""
9309 #endif
9310
9311+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9312+#define MODULE_STACKSIZE "4KSTACKS "
9313+#else
9314+#define MODULE_STACKSIZE ""
9315+#endif
9316+
9317+#ifdef CONFIG_GRKERNSEC
9318+#define MODULE_GRSEC "GRSECURITY "
9319+#else
9320+#define MODULE_GRSEC ""
9321+#endif
9322+
9323+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9324+
9325 #endif /* _ASM_X86_MODULE_H */
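
MODULE_ARCH_VERMAGIC now also encodes the grsecurity/PaX options, so a module built without matching GRKERNSEC/KERNEXEC/UDEREF/REFCOUNT settings fails the vermagic comparison at load time instead of misbehaving later. The macros are plain string literals that concatenate left to right; under one assumed configuration (x86_64, no 4KSTACKS, all four features enabled) the fragment expands as follows:

#include <stdio.h>

/* assumed configuration, mimicking the macro definitions in the hunk */
#define MODULE_PROC_FAMILY   ""
#define MODULE_STACKSIZE     ""
#define MODULE_GRSEC         "GRSECURITY "
#define MODULE_PAX_KERNEXEC  "KERNEXEC "
#define MODULE_PAX_UDEREF    "UDEREF "
#define MODULE_PAX_REFCOUNT  "REFCOUNT "

#define MODULE_ARCH_VERMAGIC \
    MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC \
    MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
    /* prints "GRSECURITY KERNEXEC UDEREF REFCOUNT " (note the trailing space) */
    puts(MODULE_ARCH_VERMAGIC);
    return 0;
}
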
9326diff -urNp linux-2.6.32.45/arch/x86/include/asm/page_64_types.h linux-2.6.32.45/arch/x86/include/asm/page_64_types.h
9327--- linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9328+++ linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9329@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9330
9331 /* duplicated to the one in bootmem.h */
9332 extern unsigned long max_pfn;
9333-extern unsigned long phys_base;
9334+extern const unsigned long phys_base;
9335
9336 extern unsigned long __phys_addr(unsigned long);
9337 #define __phys_reloc_hide(x) (x)
9338diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt.h linux-2.6.32.45/arch/x86/include/asm/paravirt.h
9339--- linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9340+++ linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-08-23 21:36:48.000000000 -0400
9341@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp,
9342 val);
9343 }
9344
9345+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9346+{
9347+ pgdval_t val = native_pgd_val(pgd);
9348+
9349+ if (sizeof(pgdval_t) > sizeof(long))
9350+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9351+ val, (u64)val >> 32);
9352+ else
9353+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9354+ val);
9355+}
9356+
9357 static inline void pgd_clear(pgd_t *pgdp)
9358 {
9359 set_pgd(pgdp, __pgd(0));
9360@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned
9361 pv_mmu_ops.set_fixmap(idx, phys, flags);
9362 }
9363
9364+#ifdef CONFIG_PAX_KERNEXEC
9365+static inline unsigned long pax_open_kernel(void)
9366+{
9367+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9368+}
9369+
9370+static inline unsigned long pax_close_kernel(void)
9371+{
9372+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9373+}
9374+#else
9375+static inline unsigned long pax_open_kernel(void) { return 0; }
9376+static inline unsigned long pax_close_kernel(void) { return 0; }
9377+#endif
9378+
9379 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9380
9381 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9382@@ -945,7 +972,7 @@ extern void default_banner(void);
9383
9384 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9385 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9386-#define PARA_INDIRECT(addr) *%cs:addr
9387+#define PARA_INDIRECT(addr) *%ss:addr
9388 #endif
9389
9390 #define INTERRUPT_RETURN \
9391@@ -1022,6 +1049,21 @@ extern void default_banner(void);
9392 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9393 CLBR_NONE, \
9394 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9395+
9396+#define GET_CR0_INTO_RDI \
9397+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9398+ mov %rax,%rdi
9399+
9400+#define SET_RDI_INTO_CR0 \
9401+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9402+
9403+#define GET_CR3_INTO_RDI \
9404+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9405+ mov %rax,%rdi
9406+
9407+#define SET_RDI_INTO_CR3 \
9408+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9409+
9410 #endif /* CONFIG_X86_32 */
9411
9412 #endif /* __ASSEMBLY__ */
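The paravirt hunk above routes pax_open_kernel()/pax_close_kernel() through pv_mmu_ops so a hypervisor backend can supply its own implementation; the real code dispatches via PVOP_CALL0. A rough userspace sketch of that indirection, with the struct and function names used here being stand-ins and the actual CR0 handling only hinted at:

#include <stdio.h>

struct mock_pv_mmu_ops {
    unsigned long (*pax_open_kernel)(void);
    unsigned long (*pax_close_kernel)(void);
};

static unsigned long native_open(void)  { puts("CR0.WP cleared");  return 0; }
static unsigned long native_close(void) { puts("CR0.WP restored"); return 0; }

/* Filled in at boot with either the native or a hypervisor-provided pair. */
static struct mock_pv_mmu_ops pv_mmu_ops = {
    .pax_open_kernel  = native_open,
    .pax_close_kernel = native_close,
};

static unsigned long pax_open_kernel(void)  { return pv_mmu_ops.pax_open_kernel(); }
static unsigned long pax_close_kernel(void) { return pv_mmu_ops.pax_close_kernel(); }

int main(void)
{
    pax_open_kernel();   /* temporarily allow writes to read-only kernel pages */
    /* ... update a page table entry or patch kernel text here ... */
    pax_close_kernel();  /* re-enable write protection */
    return 0;
}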
9413diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h
9414--- linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9415+++ linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-08-23 20:24:19.000000000 -0400
9416@@ -78,19 +78,19 @@ struct pv_init_ops {
9417 */
9418 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9419 unsigned long addr, unsigned len);
9420-};
9421+} __no_const;
9422
9423
9424 struct pv_lazy_ops {
9425 /* Set deferred update mode, used for batching operations. */
9426 void (*enter)(void);
9427 void (*leave)(void);
9428-};
9429+} __no_const;
9430
9431 struct pv_time_ops {
9432 unsigned long long (*sched_clock)(void);
9433 unsigned long (*get_tsc_khz)(void);
9434-};
9435+} __no_const;
9436
9437 struct pv_cpu_ops {
9438 /* hooks for various privileged instructions */
9439@@ -186,7 +186,7 @@ struct pv_cpu_ops {
9440
9441 void (*start_context_switch)(struct task_struct *prev);
9442 void (*end_context_switch)(struct task_struct *next);
9443-};
9444+} __no_const;
9445
9446 struct pv_irq_ops {
9447 /*
9448@@ -217,7 +217,7 @@ struct pv_apic_ops {
9449 unsigned long start_eip,
9450 unsigned long start_esp);
9451 #endif
9452-};
9453+} __no_const;
9454
9455 struct pv_mmu_ops {
9456 unsigned long (*read_cr2)(void);
9457@@ -301,6 +301,7 @@ struct pv_mmu_ops {
9458 struct paravirt_callee_save make_pud;
9459
9460 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9461+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9462 #endif /* PAGETABLE_LEVELS == 4 */
9463 #endif /* PAGETABLE_LEVELS >= 3 */
9464
9465@@ -316,6 +317,12 @@ struct pv_mmu_ops {
9466 an mfn. We can tell which is which from the index. */
9467 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9468 phys_addr_t phys, pgprot_t flags);
9469+
9470+#ifdef CONFIG_PAX_KERNEXEC
9471+ unsigned long (*pax_open_kernel)(void);
9472+ unsigned long (*pax_close_kernel)(void);
9473+#endif
9474+
9475 };
9476
9477 struct raw_spinlock;
9478@@ -326,7 +333,7 @@ struct pv_lock_ops {
9479 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
9480 int (*spin_trylock)(struct raw_spinlock *lock);
9481 void (*spin_unlock)(struct raw_spinlock *lock);
9482-};
9483+} __no_const;
9484
9485 /* This contains all the paravirt structures: we get a convenient
9486 * number for each function using the offset which we use to indicate
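Ops structures in this patch are treated one of two ways: tables that are never modified after init are constified (as in the pci_x86.h hunk that follows), while structures that really are rewritten at runtime, such as the paravirt ops above, are marked __no_const so the constification plugin leaves them alone. A small illustration of why the const form is preferred where possible; the names are invented for the example:

#include <stdio.h>

struct raw_ops {
    int (*read)(int reg);
    int (*write)(int reg, int val);
};

static int do_read(int reg)           { return reg; }
static int do_write(int reg, int val) { (void)reg; return val; }

/* A const-qualified ops table is emitted into read-only memory, so an
 * attacker with an arbitrary write primitive cannot redirect these
 * function pointers. */
static const struct raw_ops pci_ops_example = {
    .read  = do_read,
    .write = do_write,
};

int main(void)
{
    printf("%d\n", pci_ops_example.read(42));
    /* pci_ops_example.read = NULL; -- would not compile: assignment of
     * member of a read-only object */
    return 0;
}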
9487diff -urNp linux-2.6.32.45/arch/x86/include/asm/pci_x86.h linux-2.6.32.45/arch/x86/include/asm/pci_x86.h
9488--- linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9489+++ linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9490@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9491 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9492
9493 struct pci_raw_ops {
9494- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9495+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9496 int reg, int len, u32 *val);
9497- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9498+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9499 int reg, int len, u32 val);
9500 };
9501
9502-extern struct pci_raw_ops *raw_pci_ops;
9503-extern struct pci_raw_ops *raw_pci_ext_ops;
9504+extern const struct pci_raw_ops *raw_pci_ops;
9505+extern const struct pci_raw_ops *raw_pci_ext_ops;
9506
9507-extern struct pci_raw_ops pci_direct_conf1;
9508+extern const struct pci_raw_ops pci_direct_conf1;
9509 extern bool port_cf9_safe;
9510
9511 /* arch_initcall level */
9512diff -urNp linux-2.6.32.45/arch/x86/include/asm/percpu.h linux-2.6.32.45/arch/x86/include/asm/percpu.h
9513--- linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-03-27 14:31:47.000000000 -0400
9514+++ linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-08-17 19:33:59.000000000 -0400
9515@@ -78,6 +78,7 @@ do { \
9516 if (0) { \
9517 T__ tmp__; \
9518 tmp__ = (val); \
9519+ (void)tmp__; \
9520 } \
9521 switch (sizeof(var)) { \
9522 case 1: \
9523diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgalloc.h linux-2.6.32.45/arch/x86/include/asm/pgalloc.h
9524--- linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9525+++ linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9526@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9527 pmd_t *pmd, pte_t *pte)
9528 {
9529 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9530+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9531+}
9532+
9533+static inline void pmd_populate_user(struct mm_struct *mm,
9534+ pmd_t *pmd, pte_t *pte)
9535+{
9536+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9537 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9538 }
9539
9540diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h
9541--- linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9542+++ linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9543@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9544
9545 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9546 {
9547+ pax_open_kernel();
9548 *pmdp = pmd;
9549+ pax_close_kernel();
9550 }
9551
9552 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9553diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h
9554--- linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9555+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9556@@ -26,9 +26,6 @@
9557 struct mm_struct;
9558 struct vm_area_struct;
9559
9560-extern pgd_t swapper_pg_dir[1024];
9561-extern pgd_t trampoline_pg_dir[1024];
9562-
9563 static inline void pgtable_cache_init(void) { }
9564 static inline void check_pgt_cache(void) { }
9565 void paging_init(void);
9566@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9567 # include <asm/pgtable-2level.h>
9568 #endif
9569
9570+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9571+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9572+#ifdef CONFIG_X86_PAE
9573+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9574+#endif
9575+
9576 #if defined(CONFIG_HIGHPTE)
9577 #define __KM_PTE \
9578 (in_nmi() ? KM_NMI_PTE : \
9579@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9580 /* Clear a kernel PTE and flush it from the TLB */
9581 #define kpte_clear_flush(ptep, vaddr) \
9582 do { \
9583+ pax_open_kernel(); \
9584 pte_clear(&init_mm, (vaddr), (ptep)); \
9585+ pax_close_kernel(); \
9586 __flush_tlb_one((vaddr)); \
9587 } while (0)
9588
9589@@ -85,6 +90,9 @@ do { \
9590
9591 #endif /* !__ASSEMBLY__ */
9592
9593+#define HAVE_ARCH_UNMAPPED_AREA
9594+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9595+
9596 /*
9597 * kern_addr_valid() is (1) for FLATMEM and (0) for
9598 * SPARSEMEM and DISCONTIGMEM
9599diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h
9600--- linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9601+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9602@@ -8,7 +8,7 @@
9603 */
9604 #ifdef CONFIG_X86_PAE
9605 # include <asm/pgtable-3level_types.h>
9606-# define PMD_SIZE (1UL << PMD_SHIFT)
9607+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9608 # define PMD_MASK (~(PMD_SIZE - 1))
9609 #else
9610 # include <asm/pgtable-2level_types.h>
9611@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9612 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9613 #endif
9614
9615+#ifdef CONFIG_PAX_KERNEXEC
9616+#ifndef __ASSEMBLY__
9617+extern unsigned char MODULES_EXEC_VADDR[];
9618+extern unsigned char MODULES_EXEC_END[];
9619+#endif
9620+#include <asm/boot.h>
9621+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9622+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9623+#else
9624+#define ktla_ktva(addr) (addr)
9625+#define ktva_ktla(addr) (addr)
9626+#endif
9627+
9628 #define MODULES_VADDR VMALLOC_START
9629 #define MODULES_END VMALLOC_END
9630 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
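The ktla_ktva()/ktva_ktla() helpers introduced above translate a kernel text address between the two views KERNEXEC keeps of it (the names suggest "kernel text linked address" and "kernel text virtual address"); on configurations without KERNEXEC they collapse to identity macros. A small sketch of the arithmetic with assumed example values for PAGE_OFFSET and LOAD_PHYSICAL_ADDR (the real values come from the kernel configuration):

#include <stdio.h>

/* Assumed example values (CONFIG_PAGE_OFFSET, CONFIG_PHYSICAL_START). */
#define PAGE_OFFSET        0xc0000000UL
#define LOAD_PHYSICAL_ADDR 0x01000000UL

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
    unsigned long linked = 0x00100000UL;   /* example linked-image address */
    unsigned long virt   = ktla_ktva(linked);

    /* The two macros are exact inverses, so translating back recovers the
     * original address. */
    printf("linked %#lx -> virtual %#lx -> back %#lx\n",
           linked, virt, ktva_ktla(virt));
    return 0;
}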
9631diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h
9632--- linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9633+++ linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9634@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9635
9636 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9637 {
9638+ pax_open_kernel();
9639 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9640+ pax_close_kernel();
9641 }
9642
9643 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9644 {
9645+ pax_open_kernel();
9646 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9647+ pax_close_kernel();
9648 }
9649
9650 /*
9651diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h
9652--- linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9653+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-08-23 20:24:19.000000000 -0400
9654@@ -16,10 +16,13 @@
9655
9656 extern pud_t level3_kernel_pgt[512];
9657 extern pud_t level3_ident_pgt[512];
9658+extern pud_t level3_vmalloc_pgt[512];
9659+extern pud_t level3_vmemmap_pgt[512];
9660+extern pud_t level2_vmemmap_pgt[512];
9661 extern pmd_t level2_kernel_pgt[512];
9662 extern pmd_t level2_fixmap_pgt[512];
9663-extern pmd_t level2_ident_pgt[512];
9664-extern pgd_t init_level4_pgt[];
9665+extern pmd_t level2_ident_pgt[512*2];
9666+extern pgd_t init_level4_pgt[512];
9667
9668 #define swapper_pg_dir init_level4_pgt
9669
9670@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9671
9672 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9673 {
9674+ pax_open_kernel();
9675 *pmdp = pmd;
9676+ pax_close_kernel();
9677 }
9678
9679 static inline void native_pmd_clear(pmd_t *pmd)
9680@@ -94,6 +99,13 @@ static inline void native_pud_clear(pud_
9681
9682 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9683 {
9684+ pax_open_kernel();
9685+ *pgdp = pgd;
9686+ pax_close_kernel();
9687+}
9688+
9689+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9690+{
9691 *pgdp = pgd;
9692 }
9693
9694diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h
9695--- linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9696+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9697@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9698 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9699 #define MODULES_END _AC(0xffffffffff000000, UL)
9700 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9701+#define MODULES_EXEC_VADDR MODULES_VADDR
9702+#define MODULES_EXEC_END MODULES_END
9703+
9704+#define ktla_ktva(addr) (addr)
9705+#define ktva_ktla(addr) (addr)
9706
9707 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9708diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable.h linux-2.6.32.45/arch/x86/include/asm/pgtable.h
9709--- linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9710+++ linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-08-23 20:24:19.000000000 -0400
9711@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
9712
9713 #ifndef __PAGETABLE_PUD_FOLDED
9714 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9715+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9716 #define pgd_clear(pgd) native_pgd_clear(pgd)
9717 #endif
9718
9719@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
9720
9721 #define arch_end_context_switch(prev) do {} while(0)
9722
9723+#define pax_open_kernel() native_pax_open_kernel()
9724+#define pax_close_kernel() native_pax_close_kernel()
9725 #endif /* CONFIG_PARAVIRT */
9726
9727+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9728+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9729+
9730+#ifdef CONFIG_PAX_KERNEXEC
9731+static inline unsigned long native_pax_open_kernel(void)
9732+{
9733+ unsigned long cr0;
9734+
9735+ preempt_disable();
9736+ barrier();
9737+ cr0 = read_cr0() ^ X86_CR0_WP;
9738+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9739+ write_cr0(cr0);
9740+ return cr0 ^ X86_CR0_WP;
9741+}
9742+
9743+static inline unsigned long native_pax_close_kernel(void)
9744+{
9745+ unsigned long cr0;
9746+
9747+ cr0 = read_cr0() ^ X86_CR0_WP;
9748+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9749+ write_cr0(cr0);
9750+ barrier();
9751+ preempt_enable_no_resched();
9752+ return cr0 ^ X86_CR0_WP;
9753+}
9754+#else
9755+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9756+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9757+#endif
9758+
9759 /*
9760 * The following only work if pte_present() is true.
9761 * Undefined behaviour if not..
9762 */
9763+static inline int pte_user(pte_t pte)
9764+{
9765+ return pte_val(pte) & _PAGE_USER;
9766+}
9767+
9768 static inline int pte_dirty(pte_t pte)
9769 {
9770 return pte_flags(pte) & _PAGE_DIRTY;
9771@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t
9772 return pte_clear_flags(pte, _PAGE_RW);
9773 }
9774
9775+static inline pte_t pte_mkread(pte_t pte)
9776+{
9777+ return __pte(pte_val(pte) | _PAGE_USER);
9778+}
9779+
9780 static inline pte_t pte_mkexec(pte_t pte)
9781 {
9782- return pte_clear_flags(pte, _PAGE_NX);
9783+#ifdef CONFIG_X86_PAE
9784+ if (__supported_pte_mask & _PAGE_NX)
9785+ return pte_clear_flags(pte, _PAGE_NX);
9786+ else
9787+#endif
9788+ return pte_set_flags(pte, _PAGE_USER);
9789+}
9790+
9791+static inline pte_t pte_exprotect(pte_t pte)
9792+{
9793+#ifdef CONFIG_X86_PAE
9794+ if (__supported_pte_mask & _PAGE_NX)
9795+ return pte_set_flags(pte, _PAGE_NX);
9796+ else
9797+#endif
9798+ return pte_clear_flags(pte, _PAGE_USER);
9799 }
9800
9801 static inline pte_t pte_mkdirty(pte_t pte)
9802@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long
9803 #endif
9804
9805 #ifndef __ASSEMBLY__
9806+
9807+#ifdef CONFIG_PAX_PER_CPU_PGD
9808+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9809+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9810+{
9811+ return cpu_pgd[cpu];
9812+}
9813+#endif
9814+
9815 #include <linux/mm_types.h>
9816
9817 static inline int pte_none(pte_t pte)
9818@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *p
9819
9820 static inline int pgd_bad(pgd_t pgd)
9821 {
9822- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9823+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9824 }
9825
9826 static inline int pgd_none(pgd_t pgd)
9827@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
9828 * pgd_offset() returns a (pgd_t *)
9829 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9830 */
9831-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9832+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9833+
9834+#ifdef CONFIG_PAX_PER_CPU_PGD
9835+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9836+#endif
9837+
9838 /*
9839 * a shortcut which implies the use of the kernel's pgd, instead
9840 * of a process's
9841@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
9842 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9843 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9844
9845+#ifdef CONFIG_X86_32
9846+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9847+#else
9848+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9849+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9850+
9851+#ifdef CONFIG_PAX_MEMORY_UDEREF
9852+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9853+#else
9854+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9855+#endif
9856+
9857+#endif
9858+
9859 #ifndef __ASSEMBLY__
9860
9861 extern int direct_gbpages;
9862@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(st
9863 * dst and src can be on the same page, but the range must not overlap,
9864 * and must not cross a page boundary.
9865 */
9866-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9867+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9868 {
9869- memcpy(dst, src, count * sizeof(pgd_t));
9870+ pax_open_kernel();
9871+ while (count--)
9872+ *dst++ = *src++;
9873+ pax_close_kernel();
9874 }
9875
9876+#ifdef CONFIG_PAX_PER_CPU_PGD
9877+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9878+#endif
9879+
9880+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9881+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9882+#else
9883+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9884+#endif
9885
9886 #include <asm-generic/pgtable.h>
9887 #endif /* __ASSEMBLY__ */
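native_pax_open_kernel()/native_pax_close_kernel() above briefly clear CR0.WP so that routines such as clone_pgd_range() can update otherwise read-only page tables, then restore it immediately. A userspace sketch of that toggle; read_cr0()/write_cr0() are mocked, and the preempt_disable()/preempt_enable_no_resched() pairing and barriers are reduced to comments:

#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

/* Stand-ins for the privileged CR0 accessors. */
static unsigned long fake_cr0 = X86_CR0_WP;
static unsigned long read_cr0(void)             { return fake_cr0; }
static void          write_cr0(unsigned long v) { fake_cr0 = v; }

static unsigned long pax_open_kernel(void)
{
    /* preempt_disable(); barrier(); in the real code */
    unsigned long cr0 = read_cr0() ^ X86_CR0_WP; /* clear WP */
    assert(!(cr0 & X86_CR0_WP));                 /* must not already be open */
    write_cr0(cr0);
    return cr0 ^ X86_CR0_WP;
}

static unsigned long pax_close_kernel(void)
{
    unsigned long cr0 = read_cr0() ^ X86_CR0_WP; /* set WP again */
    assert(cr0 & X86_CR0_WP);                    /* must have been open */
    write_cr0(cr0);
    /* barrier(); preempt_enable_no_resched(); in the real code */
    return cr0 ^ X86_CR0_WP;
}

int main(void)
{
    pax_open_kernel();
    puts("page tables writable: copy PGD entries here");
    pax_close_kernel();
    return 0;
}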
9888diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h
9889--- linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9890+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9891@@ -16,12 +16,11 @@
9892 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9893 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9894 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9895-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9896+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9897 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9898 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9899 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9900-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9901-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9902+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9903 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9904
9905 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9906@@ -39,7 +38,6 @@
9907 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9908 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9909 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9910-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9911 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9912 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9913 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9914@@ -55,8 +53,10 @@
9915
9916 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9917 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9918-#else
9919+#elif defined(CONFIG_KMEMCHECK)
9920 #define _PAGE_NX (_AT(pteval_t, 0))
9921+#else
9922+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9923 #endif
9924
9925 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9926@@ -93,6 +93,9 @@
9927 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9928 _PAGE_ACCESSED)
9929
9930+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9931+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9932+
9933 #define __PAGE_KERNEL_EXEC \
9934 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9935 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9936@@ -103,8 +106,8 @@
9937 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9938 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9939 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9940-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9941-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9942+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9943+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9944 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9945 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9946 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9947@@ -163,8 +166,8 @@
9948 * bits are combined, this will alow user to access the high address mapped
9949 * VDSO in the presence of CONFIG_COMPAT_VDSO
9950 */
9951-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9952-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9953+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9954+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9955 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9956 #endif
9957
9958@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9959 {
9960 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9961 }
9962+#endif
9963
9964+#if PAGETABLE_LEVELS == 3
9965+#include <asm-generic/pgtable-nopud.h>
9966+#endif
9967+
9968+#if PAGETABLE_LEVELS == 2
9969+#include <asm-generic/pgtable-nopmd.h>
9970+#endif
9971+
9972+#ifndef __ASSEMBLY__
9973 #if PAGETABLE_LEVELS > 3
9974 typedef struct { pudval_t pud; } pud_t;
9975
9976@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9977 return pud.pud;
9978 }
9979 #else
9980-#include <asm-generic/pgtable-nopud.h>
9981-
9982 static inline pudval_t native_pud_val(pud_t pud)
9983 {
9984 return native_pgd_val(pud.pgd);
9985@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9986 return pmd.pmd;
9987 }
9988 #else
9989-#include <asm-generic/pgtable-nopmd.h>
9990-
9991 static inline pmdval_t native_pmd_val(pmd_t pmd)
9992 {
9993 return native_pgd_val(pmd.pud.pgd);
9994@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9995
9996 extern pteval_t __supported_pte_mask;
9997 extern void set_nx(void);
9998+
9999+#ifdef CONFIG_X86_32
10000+#ifdef CONFIG_X86_PAE
10001 extern int nx_enabled;
10002+#else
10003+#define nx_enabled (0)
10004+#endif
10005+#else
10006+#define nx_enabled (1)
10007+#endif
10008
10009 #define pgprot_writecombine pgprot_writecombine
10010 extern pgprot_t pgprot_writecombine(pgprot_t prot);
10011diff -urNp linux-2.6.32.45/arch/x86/include/asm/processor.h linux-2.6.32.45/arch/x86/include/asm/processor.h
10012--- linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
10013+++ linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
10014@@ -272,7 +272,7 @@ struct tss_struct {
10015
10016 } ____cacheline_aligned;
10017
10018-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
10019+extern struct tss_struct init_tss[NR_CPUS];
10020
10021 /*
10022 * Save the original ist values for checking stack pointers during debugging
10023@@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
10024 */
10025 #define TASK_SIZE PAGE_OFFSET
10026 #define TASK_SIZE_MAX TASK_SIZE
10027+
10028+#ifdef CONFIG_PAX_SEGMEXEC
10029+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
10030+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
10031+#else
10032 #define STACK_TOP TASK_SIZE
10033-#define STACK_TOP_MAX STACK_TOP
10034+#endif
10035+
10036+#define STACK_TOP_MAX TASK_SIZE
10037
10038 #define INIT_THREAD { \
10039- .sp0 = sizeof(init_stack) + (long)&init_stack, \
10040+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10041 .vm86_info = NULL, \
10042 .sysenter_cs = __KERNEL_CS, \
10043 .io_bitmap_ptr = NULL, \
10044@@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
10045 */
10046 #define INIT_TSS { \
10047 .x86_tss = { \
10048- .sp0 = sizeof(init_stack) + (long)&init_stack, \
10049+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10050 .ss0 = __KERNEL_DS, \
10051 .ss1 = __KERNEL_CS, \
10052 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
10053@@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
10054 extern unsigned long thread_saved_pc(struct task_struct *tsk);
10055
10056 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
10057-#define KSTK_TOP(info) \
10058-({ \
10059- unsigned long *__ptr = (unsigned long *)(info); \
10060- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
10061-})
10062+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
10063
10064 /*
10065 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
10066@@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
10067 #define task_pt_regs(task) \
10068 ({ \
10069 struct pt_regs *__regs__; \
10070- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
10071+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
10072 __regs__ - 1; \
10073 })
10074
10075@@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
10076 /*
10077 * User space process size. 47bits minus one guard page.
10078 */
10079-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
10080+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
10081
10082 /* This decides where the kernel will search for a free chunk of vm
10083 * space during mmap's.
10084 */
10085 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
10086- 0xc0000000 : 0xFFFFe000)
10087+ 0xc0000000 : 0xFFFFf000)
10088
10089 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
10090 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
10091@@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
10092 #define STACK_TOP_MAX TASK_SIZE_MAX
10093
10094 #define INIT_THREAD { \
10095- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10096+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10097 }
10098
10099 #define INIT_TSS { \
10100- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10101+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10102 }
10103
10104 /*
10105@@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
10106 */
10107 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
10108
10109+#ifdef CONFIG_PAX_SEGMEXEC
10110+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
10111+#endif
10112+
10113 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
10114
10115 /* Get/set a process' ability to use the timestamp counter instruction */
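With thread_info no longer kept at the bottom of the kernel stack, the processor.h hunk above derives task_pt_regs() directly from thread.sp0, which now reserves a few bytes at the very top of the stack. A compilable sketch of that layout; the structures are heavily trimmed stand-ins for the real ones:

#include <stdio.h>

struct pt_regs       { unsigned long ip, sp; /* ... */ };
struct thread_struct { unsigned long sp0; };

/* Mirrors the patched macro: the saved user register frame sits
 * immediately below the stack top recorded in thread.sp0. */
static struct pt_regs *task_pt_regs(struct thread_struct *t)
{
    return (struct pt_regs *)t->sp0 - 1;
}

int main(void)
{
    static unsigned char kstack[8192];
    struct thread_struct thread = {
        /* the "- 8" mirrors the bytes reserved at the stack top in
         * the patched INIT_THREAD/INIT_TSS */
        .sp0 = (unsigned long)(kstack + sizeof(kstack)) - 8,
    };

    printf("pt_regs at %p, sp0 = %#lx\n",
           (void *)task_pt_regs(&thread), thread.sp0);
    return 0;
}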
10116diff -urNp linux-2.6.32.45/arch/x86/include/asm/ptrace.h linux-2.6.32.45/arch/x86/include/asm/ptrace.h
10117--- linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
10118+++ linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
10119@@ -151,28 +151,29 @@ static inline unsigned long regs_return_
10120 }
10121
10122 /*
10123- * user_mode_vm(regs) determines whether a register set came from user mode.
10124+ * user_mode(regs) determines whether a register set came from user mode.
10125 * This is true if V8086 mode was enabled OR if the register set was from
10126 * protected mode with RPL-3 CS value. This tricky test checks that with
10127 * one comparison. Many places in the kernel can bypass this full check
10128- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
10129+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
10130+ * be used.
10131 */
10132-static inline int user_mode(struct pt_regs *regs)
10133+static inline int user_mode_novm(struct pt_regs *regs)
10134 {
10135 #ifdef CONFIG_X86_32
10136 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
10137 #else
10138- return !!(regs->cs & 3);
10139+ return !!(regs->cs & SEGMENT_RPL_MASK);
10140 #endif
10141 }
10142
10143-static inline int user_mode_vm(struct pt_regs *regs)
10144+static inline int user_mode(struct pt_regs *regs)
10145 {
10146 #ifdef CONFIG_X86_32
10147 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
10148 USER_RPL;
10149 #else
10150- return user_mode(regs);
10151+ return user_mode_novm(regs);
10152 #endif
10153 }
10154
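The ptrace.h hunk renames the helpers so that user_mode() now performs the full check formerly done by user_mode_vm() (ring 3 or virtual-8086 mode), and the cheaper RPL-only test becomes user_mode_novm(). A small standalone version of both tests for the 32-bit case; the constants are the standard x86 ones:

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3
#define X86_VM_MASK      0x00020000UL /* EFLAGS.VM (virtual-8086 mode) */

struct regs { unsigned long cs, flags; };

/* RPL-only test: valid only once V8086 mode has been ruled out. */
static int user_mode_novm(const struct regs *r)
{
    return (r->cs & SEGMENT_RPL_MASK) == USER_RPL;
}

/* Full test: true for RPL-3 code or for V8086 mode, in one comparison. */
static int user_mode(const struct regs *r)
{
    return ((r->cs & SEGMENT_RPL_MASK) | (r->flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
    struct regs vm86   = { .cs = 0x00, .flags = X86_VM_MASK };
    struct regs kernel = { .cs = 0x10, .flags = 0 };

    printf("vm86:   user_mode=%d novm=%d\n", user_mode(&vm86),   user_mode_novm(&vm86));
    printf("kernel: user_mode=%d novm=%d\n", user_mode(&kernel), user_mode_novm(&kernel));
    return 0;
}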
10155diff -urNp linux-2.6.32.45/arch/x86/include/asm/reboot.h linux-2.6.32.45/arch/x86/include/asm/reboot.h
10156--- linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
10157+++ linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-08-05 20:33:55.000000000 -0400
10158@@ -6,19 +6,19 @@
10159 struct pt_regs;
10160
10161 struct machine_ops {
10162- void (*restart)(char *cmd);
10163- void (*halt)(void);
10164- void (*power_off)(void);
10165+ void (* __noreturn restart)(char *cmd);
10166+ void (* __noreturn halt)(void);
10167+ void (* __noreturn power_off)(void);
10168 void (*shutdown)(void);
10169 void (*crash_shutdown)(struct pt_regs *);
10170- void (*emergency_restart)(void);
10171-};
10172+ void (* __noreturn emergency_restart)(void);
10173+} __no_const;
10174
10175 extern struct machine_ops machine_ops;
10176
10177 void native_machine_crash_shutdown(struct pt_regs *regs);
10178 void native_machine_shutdown(void);
10179-void machine_real_restart(const unsigned char *code, int length);
10180+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
10181
10182 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
10183 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
10184diff -urNp linux-2.6.32.45/arch/x86/include/asm/rwsem.h linux-2.6.32.45/arch/x86/include/asm/rwsem.h
10185--- linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
10186+++ linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
10187@@ -118,6 +118,14 @@ static inline void __down_read(struct rw
10188 {
10189 asm volatile("# beginning down_read\n\t"
10190 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10191+
10192+#ifdef CONFIG_PAX_REFCOUNT
10193+ "jno 0f\n"
10194+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
10195+ "int $4\n0:\n"
10196+ _ASM_EXTABLE(0b, 0b)
10197+#endif
10198+
10199 /* adds 0x00000001, returns the old value */
10200 " jns 1f\n"
10201 " call call_rwsem_down_read_failed\n"
10202@@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
10203 "1:\n\t"
10204 " mov %1,%2\n\t"
10205 " add %3,%2\n\t"
10206+
10207+#ifdef CONFIG_PAX_REFCOUNT
10208+ "jno 0f\n"
10209+ "sub %3,%2\n"
10210+ "int $4\n0:\n"
10211+ _ASM_EXTABLE(0b, 0b)
10212+#endif
10213+
10214 " jle 2f\n\t"
10215 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10216 " jnz 1b\n\t"
10217@@ -160,6 +176,14 @@ static inline void __down_write_nested(s
10218 tmp = RWSEM_ACTIVE_WRITE_BIAS;
10219 asm volatile("# beginning down_write\n\t"
10220 LOCK_PREFIX " xadd %1,(%2)\n\t"
10221+
10222+#ifdef CONFIG_PAX_REFCOUNT
10223+ "jno 0f\n"
10224+ "mov %1,(%2)\n"
10225+ "int $4\n0:\n"
10226+ _ASM_EXTABLE(0b, 0b)
10227+#endif
10228+
10229 /* subtract 0x0000ffff, returns the old value */
10230 " test %1,%1\n\t"
10231 /* was the count 0 before? */
10232@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10233 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10234 asm volatile("# beginning __up_read\n\t"
10235 LOCK_PREFIX " xadd %1,(%2)\n\t"
10236+
10237+#ifdef CONFIG_PAX_REFCOUNT
10238+ "jno 0f\n"
10239+ "mov %1,(%2)\n"
10240+ "int $4\n0:\n"
10241+ _ASM_EXTABLE(0b, 0b)
10242+#endif
10243+
10244 /* subtracts 1, returns the old value */
10245 " jns 1f\n\t"
10246 " call call_rwsem_wake\n"
10247@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10248 rwsem_count_t tmp;
10249 asm volatile("# beginning __up_write\n\t"
10250 LOCK_PREFIX " xadd %1,(%2)\n\t"
10251+
10252+#ifdef CONFIG_PAX_REFCOUNT
10253+ "jno 0f\n"
10254+ "mov %1,(%2)\n"
10255+ "int $4\n0:\n"
10256+ _ASM_EXTABLE(0b, 0b)
10257+#endif
10258+
10259 /* tries to transition
10260 0xffff0001 -> 0x00000000 */
10261 " jz 1f\n"
10262@@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10263 {
10264 asm volatile("# beginning __downgrade_write\n\t"
10265 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10266+
10267+#ifdef CONFIG_PAX_REFCOUNT
10268+ "jno 0f\n"
10269+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10270+ "int $4\n0:\n"
10271+ _ASM_EXTABLE(0b, 0b)
10272+#endif
10273+
10274 /*
10275 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10276 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10277@@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10278 static inline void rwsem_atomic_add(rwsem_count_t delta,
10279 struct rw_semaphore *sem)
10280 {
10281- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10282+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10283+
10284+#ifdef CONFIG_PAX_REFCOUNT
10285+ "jno 0f\n"
10286+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10287+ "int $4\n0:\n"
10288+ _ASM_EXTABLE(0b, 0b)
10289+#endif
10290+
10291 : "+m" (sem->count)
10292 : "er" (delta));
10293 }
10294@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10295 {
10296 rwsem_count_t tmp = delta;
10297
10298- asm volatile(LOCK_PREFIX "xadd %0,%1"
10299+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10300+
10301+#ifdef CONFIG_PAX_REFCOUNT
10302+ "jno 0f\n"
10303+ "mov %0,%1\n"
10304+ "int $4\n0:\n"
10305+ _ASM_EXTABLE(0b, 0b)
10306+#endif
10307+
10308 : "+r" (tmp), "+m" (sem->count)
10309 : : "memory");
10310
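The jno/int $4 sequences added throughout rwsem.h implement the PAX_REFCOUNT idea: perform the locked arithmetic, and if the signed result overflowed, undo it and raise a trap instead of letting the count silently wrap. A plain-C analogue of that pattern (the kernel does it inside the asm itself; __builtin_add_overflow stands in here for the overflow-flag check, and the add is simply not committed rather than added and undone):

#include <limits.h>
#include <stdio.h>

static int counter = INT_MAX; /* start at the edge to force the overflow path */

static void refcount_add(int delta)
{
    int newval;

    if (__builtin_add_overflow(counter, delta, &newval)) {
        /* Overflow: keep the counter saturated and report it, instead of
         * wrapping to a small value that could later reach zero and cause
         * a premature free / use-after-free. */
        fprintf(stderr, "refcount overflow detected\n");
        return;
    }
    counter = newval;
}

int main(void)
{
    refcount_add(1);                 /* triggers the overflow path */
    printf("counter = %d\n", counter);
    return 0;
}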
10311diff -urNp linux-2.6.32.45/arch/x86/include/asm/segment.h linux-2.6.32.45/arch/x86/include/asm/segment.h
10312--- linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10313+++ linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10314@@ -62,8 +62,8 @@
10315 * 26 - ESPFIX small SS
10316 * 27 - per-cpu [ offset to per-cpu data area ]
10317 * 28 - stack_canary-20 [ for stack protector ]
10318- * 29 - unused
10319- * 30 - unused
10320+ * 29 - PCI BIOS CS
10321+ * 30 - PCI BIOS DS
10322 * 31 - TSS for double fault handler
10323 */
10324 #define GDT_ENTRY_TLS_MIN 6
10325@@ -77,6 +77,8 @@
10326
10327 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10328
10329+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10330+
10331 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10332
10333 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10334@@ -88,7 +90,7 @@
10335 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10336 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10337
10338-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10339+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10340 #ifdef CONFIG_SMP
10341 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10342 #else
10343@@ -102,6 +104,12 @@
10344 #define __KERNEL_STACK_CANARY 0
10345 #endif
10346
10347+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10348+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10349+
10350+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10351+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10352+
10353 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10354
10355 /*
10356@@ -139,7 +147,7 @@
10357 */
10358
10359 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10360-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10361+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10362
10363
10364 #else
10365@@ -163,6 +171,8 @@
10366 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10367 #define __USER32_DS __USER_DS
10368
10369+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10370+
10371 #define GDT_ENTRY_TSS 8 /* needs two entries */
10372 #define GDT_ENTRY_LDT 10 /* needs two entries */
10373 #define GDT_ENTRY_TLS_MIN 12
10374@@ -183,6 +193,7 @@
10375 #endif
10376
10377 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10378+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10379 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10380 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10381 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10382diff -urNp linux-2.6.32.45/arch/x86/include/asm/smp.h linux-2.6.32.45/arch/x86/include/asm/smp.h
10383--- linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10384+++ linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-08-05 20:33:55.000000000 -0400
10385@@ -24,7 +24,7 @@ extern unsigned int num_processors;
10386 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10387 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10388 DECLARE_PER_CPU(u16, cpu_llc_id);
10389-DECLARE_PER_CPU(int, cpu_number);
10390+DECLARE_PER_CPU(unsigned int, cpu_number);
10391
10392 static inline struct cpumask *cpu_sibling_mask(int cpu)
10393 {
10394@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10395 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10396
10397 /* Static state in head.S used to set up a CPU */
10398-extern struct {
10399- void *sp;
10400- unsigned short ss;
10401-} stack_start;
10402+extern unsigned long stack_start; /* Initial stack pointer address */
10403
10404 struct smp_ops {
10405 void (*smp_prepare_boot_cpu)(void);
10406@@ -60,7 +57,7 @@ struct smp_ops {
10407
10408 void (*send_call_func_ipi)(const struct cpumask *mask);
10409 void (*send_call_func_single_ipi)(int cpu);
10410-};
10411+} __no_const;
10412
10413 /* Globals due to paravirt */
10414 extern void set_cpu_sibling_map(int cpu);
10415@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10416 extern int safe_smp_processor_id(void);
10417
10418 #elif defined(CONFIG_X86_64_SMP)
10419-#define raw_smp_processor_id() (percpu_read(cpu_number))
10420-
10421-#define stack_smp_processor_id() \
10422-({ \
10423- struct thread_info *ti; \
10424- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10425- ti->cpu; \
10426-})
10427+#define raw_smp_processor_id() (percpu_read(cpu_number))
10428+#define stack_smp_processor_id() raw_smp_processor_id()
10429 #define safe_smp_processor_id() smp_processor_id()
10430
10431 #endif
10432diff -urNp linux-2.6.32.45/arch/x86/include/asm/spinlock.h linux-2.6.32.45/arch/x86/include/asm/spinlock.h
10433--- linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10434+++ linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10435@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10436 static inline void __raw_read_lock(raw_rwlock_t *rw)
10437 {
10438 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10439+
10440+#ifdef CONFIG_PAX_REFCOUNT
10441+ "jno 0f\n"
10442+ LOCK_PREFIX " addl $1,(%0)\n"
10443+ "int $4\n0:\n"
10444+ _ASM_EXTABLE(0b, 0b)
10445+#endif
10446+
10447 "jns 1f\n"
10448 "call __read_lock_failed\n\t"
10449 "1:\n"
10450@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10451 static inline void __raw_write_lock(raw_rwlock_t *rw)
10452 {
10453 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10454+
10455+#ifdef CONFIG_PAX_REFCOUNT
10456+ "jno 0f\n"
10457+ LOCK_PREFIX " addl %1,(%0)\n"
10458+ "int $4\n0:\n"
10459+ _ASM_EXTABLE(0b, 0b)
10460+#endif
10461+
10462 "jz 1f\n"
10463 "call __write_lock_failed\n\t"
10464 "1:\n"
10465@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10466
10467 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10468 {
10469- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10470+ asm volatile(LOCK_PREFIX "incl %0\n"
10471+
10472+#ifdef CONFIG_PAX_REFCOUNT
10473+ "jno 0f\n"
10474+ LOCK_PREFIX "decl %0\n"
10475+ "int $4\n0:\n"
10476+ _ASM_EXTABLE(0b, 0b)
10477+#endif
10478+
10479+ :"+m" (rw->lock) : : "memory");
10480 }
10481
10482 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10483 {
10484- asm volatile(LOCK_PREFIX "addl %1, %0"
10485+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
10486+
10487+#ifdef CONFIG_PAX_REFCOUNT
10488+ "jno 0f\n"
10489+ LOCK_PREFIX "subl %1, %0\n"
10490+ "int $4\n0:\n"
10491+ _ASM_EXTABLE(0b, 0b)
10492+#endif
10493+
10494 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10495 }
10496
10497diff -urNp linux-2.6.32.45/arch/x86/include/asm/stackprotector.h linux-2.6.32.45/arch/x86/include/asm/stackprotector.h
10498--- linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10499+++ linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10500@@ -48,7 +48,7 @@
10501 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10502 */
10503 #define GDT_STACK_CANARY_INIT \
10504- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10505+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10506
10507 /*
10508 * Initialize the stackprotector canary value.
10509@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10510
10511 static inline void load_stack_canary_segment(void)
10512 {
10513-#ifdef CONFIG_X86_32
10514+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10515 asm volatile ("mov %0, %%gs" : : "r" (0));
10516 #endif
10517 }
10518diff -urNp linux-2.6.32.45/arch/x86/include/asm/system.h linux-2.6.32.45/arch/x86/include/asm/system.h
10519--- linux-2.6.32.45/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10520+++ linux-2.6.32.45/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10521@@ -132,7 +132,7 @@ do { \
10522 "thread_return:\n\t" \
10523 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10524 __switch_canary \
10525- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10526+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10527 "movq %%rax,%%rdi\n\t" \
10528 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10529 "jnz ret_from_fork\n\t" \
10530@@ -143,7 +143,7 @@ do { \
10531 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10532 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10533 [_tif_fork] "i" (_TIF_FORK), \
10534- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10535+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
10536 [current_task] "m" (per_cpu_var(current_task)) \
10537 __switch_canary_iparam \
10538 : "memory", "cc" __EXTRA_CLOBBER)
10539@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10540 {
10541 unsigned long __limit;
10542 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10543- return __limit + 1;
10544+ return __limit;
10545 }
10546
10547 static inline void native_clts(void)
10548@@ -340,12 +340,12 @@ void enable_hlt(void);
10549
10550 void cpu_idle_wait(void);
10551
10552-extern unsigned long arch_align_stack(unsigned long sp);
10553+#define arch_align_stack(x) ((x) & ~0xfUL)
10554 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10555
10556 void default_idle(void);
10557
10558-void stop_this_cpu(void *dummy);
10559+void stop_this_cpu(void *dummy) __noreturn;
10560
10561 /*
10562 * Force strict CPU ordering.
10563diff -urNp linux-2.6.32.45/arch/x86/include/asm/thread_info.h linux-2.6.32.45/arch/x86/include/asm/thread_info.h
10564--- linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10565+++ linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10566@@ -10,6 +10,7 @@
10567 #include <linux/compiler.h>
10568 #include <asm/page.h>
10569 #include <asm/types.h>
10570+#include <asm/percpu.h>
10571
10572 /*
10573 * low level task data that entry.S needs immediate access to
10574@@ -24,7 +25,6 @@ struct exec_domain;
10575 #include <asm/atomic.h>
10576
10577 struct thread_info {
10578- struct task_struct *task; /* main task structure */
10579 struct exec_domain *exec_domain; /* execution domain */
10580 __u32 flags; /* low level flags */
10581 __u32 status; /* thread synchronous flags */
10582@@ -34,18 +34,12 @@ struct thread_info {
10583 mm_segment_t addr_limit;
10584 struct restart_block restart_block;
10585 void __user *sysenter_return;
10586-#ifdef CONFIG_X86_32
10587- unsigned long previous_esp; /* ESP of the previous stack in
10588- case of nested (IRQ) stacks
10589- */
10590- __u8 supervisor_stack[0];
10591-#endif
10592+ unsigned long lowest_stack;
10593 int uaccess_err;
10594 };
10595
10596-#define INIT_THREAD_INFO(tsk) \
10597+#define INIT_THREAD_INFO \
10598 { \
10599- .task = &tsk, \
10600 .exec_domain = &default_exec_domain, \
10601 .flags = 0, \
10602 .cpu = 0, \
10603@@ -56,7 +50,7 @@ struct thread_info {
10604 }, \
10605 }
10606
10607-#define init_thread_info (init_thread_union.thread_info)
10608+#define init_thread_info (init_thread_union.stack)
10609 #define init_stack (init_thread_union.stack)
10610
10611 #else /* !__ASSEMBLY__ */
10612@@ -163,6 +157,23 @@ struct thread_info {
10613 #define alloc_thread_info(tsk) \
10614 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10615
10616+#ifdef __ASSEMBLY__
10617+/* how to get the thread information struct from ASM */
10618+#define GET_THREAD_INFO(reg) \
10619+ mov PER_CPU_VAR(current_tinfo), reg
10620+
10621+/* use this one if reg already contains %esp */
10622+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10623+#else
10624+/* how to get the thread information struct from C */
10625+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10626+
10627+static __always_inline struct thread_info *current_thread_info(void)
10628+{
10629+ return percpu_read_stable(current_tinfo);
10630+}
10631+#endif
10632+
10633 #ifdef CONFIG_X86_32
10634
10635 #define STACK_WARN (THREAD_SIZE/8)
10636@@ -173,35 +184,13 @@ struct thread_info {
10637 */
10638 #ifndef __ASSEMBLY__
10639
10640-
10641 /* how to get the current stack pointer from C */
10642 register unsigned long current_stack_pointer asm("esp") __used;
10643
10644-/* how to get the thread information struct from C */
10645-static inline struct thread_info *current_thread_info(void)
10646-{
10647- return (struct thread_info *)
10648- (current_stack_pointer & ~(THREAD_SIZE - 1));
10649-}
10650-
10651-#else /* !__ASSEMBLY__ */
10652-
10653-/* how to get the thread information struct from ASM */
10654-#define GET_THREAD_INFO(reg) \
10655- movl $-THREAD_SIZE, reg; \
10656- andl %esp, reg
10657-
10658-/* use this one if reg already contains %esp */
10659-#define GET_THREAD_INFO_WITH_ESP(reg) \
10660- andl $-THREAD_SIZE, reg
10661-
10662 #endif
10663
10664 #else /* X86_32 */
10665
10666-#include <asm/percpu.h>
10667-#define KERNEL_STACK_OFFSET (5*8)
10668-
10669 /*
10670 * macros/functions for gaining access to the thread information structure
10671 * preempt_count needs to be 1 initially, until the scheduler is functional.
10672@@ -209,21 +198,8 @@ static inline struct thread_info *curren
10673 #ifndef __ASSEMBLY__
10674 DECLARE_PER_CPU(unsigned long, kernel_stack);
10675
10676-static inline struct thread_info *current_thread_info(void)
10677-{
10678- struct thread_info *ti;
10679- ti = (void *)(percpu_read_stable(kernel_stack) +
10680- KERNEL_STACK_OFFSET - THREAD_SIZE);
10681- return ti;
10682-}
10683-
10684-#else /* !__ASSEMBLY__ */
10685-
10686-/* how to get the thread information struct from ASM */
10687-#define GET_THREAD_INFO(reg) \
10688- movq PER_CPU_VAR(kernel_stack),reg ; \
10689- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10690-
10691+/* how to get the current stack pointer from C */
10692+register unsigned long current_stack_pointer asm("rsp") __used;
10693 #endif
10694
10695 #endif /* !X86_32 */
10696@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10697 extern void free_thread_info(struct thread_info *ti);
10698 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10699 #define arch_task_cache_init arch_task_cache_init
10700+
10701+#define __HAVE_THREAD_FUNCTIONS
10702+#define task_thread_info(task) (&(task)->tinfo)
10703+#define task_stack_page(task) ((task)->stack)
10704+#define setup_thread_stack(p, org) do {} while (0)
10705+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10706+
10707+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10708+extern struct task_struct *alloc_task_struct(void);
10709+extern void free_task_struct(struct task_struct *);
10710+
10711 #endif
10712 #endif /* _ASM_X86_THREAD_INFO_H */
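The thread_info.h rework above moves struct thread_info off the kernel stack and finds it through a per-CPU pointer (current_tinfo) instead of by masking the stack pointer, removing an easy target for stack-overflow attacks. A toy comparison of the two lookups; the per-CPU variable is modelled as a plain global here:

#include <stdio.h>

#define THREAD_SIZE 8192UL

struct thread_info { int cpu; /* ... */ };

/* Old scheme: thread_info lives at the bottom of the kernel stack and is
 * located by masking the stack pointer, so a deep enough overflow can
 * corrupt it. */
static struct thread_info *ti_from_stack(unsigned long sp)
{
    return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}

/* New scheme sketched above: the pointer is kept in per-CPU storage. */
static struct thread_info  boot_tinfo;
static struct thread_info *current_tinfo = &boot_tinfo;

static struct thread_info *current_thread_info(void)
{
    return current_tinfo;
}

int main(void)
{
    unsigned long sp = 0xc1234567UL; /* example kernel stack pointer */

    printf("masked: %p, per-cpu: %p\n",
           (void *)ti_from_stack(sp), (void *)current_thread_info());
    return 0;
}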
10713diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h
10714--- linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10715+++ linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10716@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10717 static __always_inline unsigned long __must_check
10718 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10719 {
10720+ pax_track_stack();
10721+
10722+ if ((long)n < 0)
10723+ return n;
10724+
10725 if (__builtin_constant_p(n)) {
10726 unsigned long ret;
10727
10728@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10729 return ret;
10730 }
10731 }
10732+ if (!__builtin_constant_p(n))
10733+ check_object_size(from, n, true);
10734 return __copy_to_user_ll(to, from, n);
10735 }
10736
10737@@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10738 __copy_to_user(void __user *to, const void *from, unsigned long n)
10739 {
10740 might_fault();
10741+
10742 return __copy_to_user_inatomic(to, from, n);
10743 }
10744
10745 static __always_inline unsigned long
10746 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10747 {
10748+ if ((long)n < 0)
10749+ return n;
10750+
10751 /* Avoid zeroing the tail if the copy fails..
10752 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10753 * but as the zeroing behaviour is only significant when n is not
10754@@ -138,6 +149,12 @@ static __always_inline unsigned long
10755 __copy_from_user(void *to, const void __user *from, unsigned long n)
10756 {
10757 might_fault();
10758+
10759+ pax_track_stack();
10760+
10761+ if ((long)n < 0)
10762+ return n;
10763+
10764 if (__builtin_constant_p(n)) {
10765 unsigned long ret;
10766
10767@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10768 return ret;
10769 }
10770 }
10771+ if (!__builtin_constant_p(n))
10772+ check_object_size(to, n, false);
10773 return __copy_from_user_ll(to, from, n);
10774 }
10775
10776@@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10777 const void __user *from, unsigned long n)
10778 {
10779 might_fault();
10780+
10781+ if ((long)n < 0)
10782+ return n;
10783+
10784 if (__builtin_constant_p(n)) {
10785 unsigned long ret;
10786
10787@@ -182,14 +205,62 @@ static __always_inline unsigned long
10788 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10789 unsigned long n)
10790 {
10791- return __copy_from_user_ll_nocache_nozero(to, from, n);
10792+ if ((long)n < 0)
10793+ return n;
10794+
10795+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10796+}
10797+
10798+/**
10799+ * copy_to_user: - Copy a block of data into user space.
10800+ * @to: Destination address, in user space.
10801+ * @from: Source address, in kernel space.
10802+ * @n: Number of bytes to copy.
10803+ *
10804+ * Context: User context only. This function may sleep.
10805+ *
10806+ * Copy data from kernel space to user space.
10807+ *
10808+ * Returns number of bytes that could not be copied.
10809+ * On success, this will be zero.
10810+ */
10811+static __always_inline unsigned long __must_check
10812+copy_to_user(void __user *to, const void *from, unsigned long n)
10813+{
10814+ if (access_ok(VERIFY_WRITE, to, n))
10815+ n = __copy_to_user(to, from, n);
10816+ return n;
10817+}
10818+
10819+/**
10820+ * copy_from_user: - Copy a block of data from user space.
10821+ * @to: Destination address, in kernel space.
10822+ * @from: Source address, in user space.
10823+ * @n: Number of bytes to copy.
10824+ *
10825+ * Context: User context only. This function may sleep.
10826+ *
10827+ * Copy data from user space to kernel space.
10828+ *
10829+ * Returns number of bytes that could not be copied.
10830+ * On success, this will be zero.
10831+ *
10832+ * If some data could not be copied, this function will pad the copied
10833+ * data to the requested size using zero bytes.
10834+ */
10835+static __always_inline unsigned long __must_check
10836+copy_from_user(void *to, const void __user *from, unsigned long n)
10837+{
10838+ if (access_ok(VERIFY_READ, from, n))
10839+ n = __copy_from_user(to, from, n);
10840+ else if ((long)n > 0) {
10841+ if (!__builtin_constant_p(n))
10842+ check_object_size(to, n, false);
10843+ memset(to, 0, n);
10844+ }
10845+ return n;
10846 }
10847
10848-unsigned long __must_check copy_to_user(void __user *to,
10849- const void *from, unsigned long n);
10850-unsigned long __must_check copy_from_user(void *to,
10851- const void __user *from,
10852- unsigned long n);
10853 long __must_check strncpy_from_user(char *dst, const char __user *src,
10854 long count);
10855 long __must_check __strncpy_from_user(char *dst,
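The inlined copy_from_user() above adds defensive steps before the raw copy: reject sizes that look like negative numbers, verify the user range with access_ok(), and zero the destination whenever the copy cannot be performed so no uninitialised kernel memory is later handed back to user space. A userspace mock of that control flow; access_ok() and raw_copy() are stubs for the real primitives, and the check_object_size() call is omitted:

#include <stdio.h>
#include <string.h>

/* Stub: always report the user range as inaccessible so the example
 * exercises the zero-fill path. */
static int access_ok(const void *from, unsigned long n)
{
    (void)from; (void)n;
    return 0;
}

/* Stub for the architecture copy routine; returns bytes NOT copied. */
static unsigned long raw_copy(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
    return 0;
}

static unsigned long copy_from_user(void *to, const void *from, unsigned long n)
{
    if ((long)n < 0)          /* a "negative" length is a likely overflow */
        return n;
    if (access_ok(from, n))
        n = raw_copy(to, from, n);
    else
        memset(to, 0, n);     /* never leave the kernel buffer uninitialised */
    return n;
}

int main(void)
{
    char buf[16];
    unsigned long left = copy_from_user(buf, "data", sizeof(buf));

    printf("bytes not copied: %lu, buf[0] = %d\n", left, buf[0]);
    return 0;
}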
10856diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h
10857--- linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10858+++ linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10859@@ -9,6 +9,9 @@
10860 #include <linux/prefetch.h>
10861 #include <linux/lockdep.h>
10862 #include <asm/page.h>
10863+#include <asm/pgtable.h>
10864+
10865+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10866
10867 /*
10868 * Copy To/From Userspace
10869@@ -19,113 +22,203 @@ __must_check unsigned long
10870 copy_user_generic(void *to, const void *from, unsigned len);
10871
10872 __must_check unsigned long
10873-copy_to_user(void __user *to, const void *from, unsigned len);
10874-__must_check unsigned long
10875-copy_from_user(void *to, const void __user *from, unsigned len);
10876-__must_check unsigned long
10877 copy_in_user(void __user *to, const void __user *from, unsigned len);
10878
10879 static __always_inline __must_check
10880-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10881+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10882 {
10883- int ret = 0;
10884+ unsigned ret = 0;
10885
10886 might_fault();
10887- if (!__builtin_constant_p(size))
10888- return copy_user_generic(dst, (__force void *)src, size);
10889+
10890+ if ((int)size < 0)
10891+ return size;
10892+
10893+#ifdef CONFIG_PAX_MEMORY_UDEREF
10894+ if (!__access_ok(VERIFY_READ, src, size))
10895+ return size;
10896+#endif
10897+
10898+ if (!__builtin_constant_p(size)) {
10899+ check_object_size(dst, size, false);
10900+
10901+#ifdef CONFIG_PAX_MEMORY_UDEREF
10902+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10903+ src += PAX_USER_SHADOW_BASE;
10904+#endif
10905+
10906+ return copy_user_generic(dst, (__force const void *)src, size);
10907+ }
10908 switch (size) {
10909- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10910+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10911 ret, "b", "b", "=q", 1);
10912 return ret;
10913- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10914+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10915 ret, "w", "w", "=r", 2);
10916 return ret;
10917- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10918+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10919 ret, "l", "k", "=r", 4);
10920 return ret;
10921- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10922+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10923 ret, "q", "", "=r", 8);
10924 return ret;
10925 case 10:
10926- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10927+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10928 ret, "q", "", "=r", 10);
10929 if (unlikely(ret))
10930 return ret;
10931 __get_user_asm(*(u16 *)(8 + (char *)dst),
10932- (u16 __user *)(8 + (char __user *)src),
10933+ (const u16 __user *)(8 + (const char __user *)src),
10934 ret, "w", "w", "=r", 2);
10935 return ret;
10936 case 16:
10937- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10938+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10939 ret, "q", "", "=r", 16);
10940 if (unlikely(ret))
10941 return ret;
10942 __get_user_asm(*(u64 *)(8 + (char *)dst),
10943- (u64 __user *)(8 + (char __user *)src),
10944+ (const u64 __user *)(8 + (const char __user *)src),
10945 ret, "q", "", "=r", 8);
10946 return ret;
10947 default:
10948- return copy_user_generic(dst, (__force void *)src, size);
10949+
10950+#ifdef CONFIG_PAX_MEMORY_UDEREF
10951+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10952+ src += PAX_USER_SHADOW_BASE;
10953+#endif
10954+
10955+ return copy_user_generic(dst, (__force const void *)src, size);
10956 }
10957 }
10958
10959 static __always_inline __must_check
10960-int __copy_to_user(void __user *dst, const void *src, unsigned size)
10961+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10962 {
10963- int ret = 0;
10964+ unsigned ret = 0;
10965
10966 might_fault();
10967- if (!__builtin_constant_p(size))
10968+
10969+ pax_track_stack();
10970+
10971+ if ((int)size < 0)
10972+ return size;
10973+
10974+#ifdef CONFIG_PAX_MEMORY_UDEREF
10975+ if (!__access_ok(VERIFY_WRITE, dst, size))
10976+ return size;
10977+#endif
10978+
10979+ if (!__builtin_constant_p(size)) {
10980+ check_object_size(src, size, true);
10981+
10982+#ifdef CONFIG_PAX_MEMORY_UDEREF
10983+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10984+ dst += PAX_USER_SHADOW_BASE;
10985+#endif
10986+
10987 return copy_user_generic((__force void *)dst, src, size);
10988+ }
10989 switch (size) {
10990- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10991+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10992 ret, "b", "b", "iq", 1);
10993 return ret;
10994- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10995+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10996 ret, "w", "w", "ir", 2);
10997 return ret;
10998- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10999+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11000 ret, "l", "k", "ir", 4);
11001 return ret;
11002- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11003+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11004 ret, "q", "", "er", 8);
11005 return ret;
11006 case 10:
11007- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11008+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11009 ret, "q", "", "er", 10);
11010 if (unlikely(ret))
11011 return ret;
11012 asm("":::"memory");
11013- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11014+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11015 ret, "w", "w", "ir", 2);
11016 return ret;
11017 case 16:
11018- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11019+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11020 ret, "q", "", "er", 16);
11021 if (unlikely(ret))
11022 return ret;
11023 asm("":::"memory");
11024- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11025+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11026 ret, "q", "", "er", 8);
11027 return ret;
11028 default:
11029+
11030+#ifdef CONFIG_PAX_MEMORY_UDEREF
11031+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11032+ dst += PAX_USER_SHADOW_BASE;
11033+#endif
11034+
11035 return copy_user_generic((__force void *)dst, src, size);
11036 }
11037 }
11038
11039 static __always_inline __must_check
11040-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11041+unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
11042+{
11043+ if (access_ok(VERIFY_WRITE, to, len))
11044+ len = __copy_to_user(to, from, len);
11045+ return len;
11046+}
11047+
11048+static __always_inline __must_check
11049+unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
11050+{
11051+ if ((int)len < 0)
11052+ return len;
11053+
11054+ if (access_ok(VERIFY_READ, from, len))
11055+ len = __copy_from_user(to, from, len);
11056+ else if ((int)len > 0) {
11057+ if (!__builtin_constant_p(len))
11058+ check_object_size(to, len, false);
11059+ memset(to, 0, len);
11060+ }
11061+ return len;
11062+}
11063+
11064+static __always_inline __must_check
11065+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11066 {
11067- int ret = 0;
11068+ unsigned ret = 0;
11069
11070 might_fault();
11071- if (!__builtin_constant_p(size))
11072+
11073+ pax_track_stack();
11074+
11075+ if ((int)size < 0)
11076+ return size;
11077+
11078+#ifdef CONFIG_PAX_MEMORY_UDEREF
11079+ if (!__access_ok(VERIFY_READ, src, size))
11080+ return size;
11081+ if (!__access_ok(VERIFY_WRITE, dst, size))
11082+ return size;
11083+#endif
11084+
11085+ if (!__builtin_constant_p(size)) {
11086+
11087+#ifdef CONFIG_PAX_MEMORY_UDEREF
11088+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11089+ src += PAX_USER_SHADOW_BASE;
11090+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11091+ dst += PAX_USER_SHADOW_BASE;
11092+#endif
11093+
11094 return copy_user_generic((__force void *)dst,
11095- (__force void *)src, size);
11096+ (__force const void *)src, size);
11097+ }
11098 switch (size) {
11099 case 1: {
11100 u8 tmp;
11101- __get_user_asm(tmp, (u8 __user *)src,
11102+ __get_user_asm(tmp, (const u8 __user *)src,
11103 ret, "b", "b", "=q", 1);
11104 if (likely(!ret))
11105 __put_user_asm(tmp, (u8 __user *)dst,
11106@@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
11107 }
11108 case 2: {
11109 u16 tmp;
11110- __get_user_asm(tmp, (u16 __user *)src,
11111+ __get_user_asm(tmp, (const u16 __user *)src,
11112 ret, "w", "w", "=r", 2);
11113 if (likely(!ret))
11114 __put_user_asm(tmp, (u16 __user *)dst,
11115@@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
11116
11117 case 4: {
11118 u32 tmp;
11119- __get_user_asm(tmp, (u32 __user *)src,
11120+ __get_user_asm(tmp, (const u32 __user *)src,
11121 ret, "l", "k", "=r", 4);
11122 if (likely(!ret))
11123 __put_user_asm(tmp, (u32 __user *)dst,
11124@@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
11125 }
11126 case 8: {
11127 u64 tmp;
11128- __get_user_asm(tmp, (u64 __user *)src,
11129+ __get_user_asm(tmp, (const u64 __user *)src,
11130 ret, "q", "", "=r", 8);
11131 if (likely(!ret))
11132 __put_user_asm(tmp, (u64 __user *)dst,
11133@@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
11134 return ret;
11135 }
11136 default:
11137+
11138+#ifdef CONFIG_PAX_MEMORY_UDEREF
11139+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11140+ src += PAX_USER_SHADOW_BASE;
11141+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11142+ dst += PAX_USER_SHADOW_BASE;
11143+#endif
11144+
11145 return copy_user_generic((__force void *)dst,
11146- (__force void *)src, size);
11147+ (__force const void *)src, size);
11148 }
11149 }
11150
11151@@ -176,33 +277,75 @@ __must_check long strlen_user(const char
11152 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11153 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11154
11155-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
11156- unsigned size);
11157+static __must_check __always_inline unsigned long
11158+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11159+{
11160+ pax_track_stack();
11161+
11162+ if ((int)size < 0)
11163+ return size;
11164
11165-static __must_check __always_inline int
11166+#ifdef CONFIG_PAX_MEMORY_UDEREF
11167+ if (!__access_ok(VERIFY_READ, src, size))
11168+ return size;
11169+
11170+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11171+ src += PAX_USER_SHADOW_BASE;
11172+#endif
11173+
11174+ return copy_user_generic(dst, (__force const void *)src, size);
11175+}
11176+
11177+static __must_check __always_inline unsigned long
11178 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11179 {
11180+ if ((int)size < 0)
11181+ return size;
11182+
11183+#ifdef CONFIG_PAX_MEMORY_UDEREF
11184+ if (!__access_ok(VERIFY_WRITE, dst, size))
11185+ return size;
11186+
11187+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11188+ dst += PAX_USER_SHADOW_BASE;
11189+#endif
11190+
11191 return copy_user_generic((__force void *)dst, src, size);
11192 }
11193
11194-extern long __copy_user_nocache(void *dst, const void __user *src,
11195+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11196 unsigned size, int zerorest);
11197
11198-static inline int
11199-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11200+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11201 {
11202 might_sleep();
11203+
11204+ if ((int)size < 0)
11205+ return size;
11206+
11207+#ifdef CONFIG_PAX_MEMORY_UDEREF
11208+ if (!__access_ok(VERIFY_READ, src, size))
11209+ return size;
11210+#endif
11211+
11212 return __copy_user_nocache(dst, src, size, 1);
11213 }
11214
11215-static inline int
11216-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11217+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11218 unsigned size)
11219 {
11220+ if ((int)size < 0)
11221+ return size;
11222+
11223+#ifdef CONFIG_PAX_MEMORY_UDEREF
11224+ if (!__access_ok(VERIFY_READ, src, size))
11225+ return size;
11226+#endif
11227+
11228 return __copy_user_nocache(dst, src, size, 0);
11229 }
11230
11231-unsigned long
11232+extern unsigned long
11233 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11234
11235 #endif /* _ASM_X86_UACCESS_64_H */
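Editor's note: the __copy_{from,to,in}_user() changes above all follow one pattern: reject sizes whose sign bit is set, optionally range-check the user pointer, and shift it up by PAX_USER_SHADOW_BASE before the real copy. The standalone C sketch below only models that control flow in userspace; PAX_USER_SHADOW_BASE_DEMO, demo_copy_check() and the sample addresses are invented for illustration and are not the kernel's real values.

#include <stdio.h>

#define PAX_USER_SHADOW_BASE_DEMO 0x100000000ULL   /* illustrative only */

static unsigned long long demo_copy_check(unsigned long long uaddr, unsigned size)
{
        /* A "negative" size is almost certainly an underflowed length
           computation; report the whole request as uncopied instead of
           attempting a multi-gigabyte copy. */
        if ((int)size < 0)
                return size;

        /* Model of the UDEREF adjustment: pointers below the shadow base
           are redirected to the user-only alias mapping. */
        if (uaddr < PAX_USER_SHADOW_BASE_DEMO)
                uaddr += PAX_USER_SHADOW_BASE_DEMO;

        printf("would copy %u bytes via %#llx\n", size, uaddr);
        return 0;       /* 0 bytes left uncopied */
}

int main(void)
{
        demo_copy_check(0x7fff0000ULL, 64);
        demo_copy_check(0x7fff0000ULL, (unsigned)-16);  /* rejected */
        return 0;
}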
11236diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess.h linux-2.6.32.45/arch/x86/include/asm/uaccess.h
11237--- linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
11238+++ linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
11239@@ -8,12 +8,15 @@
11240 #include <linux/thread_info.h>
11241 #include <linux/prefetch.h>
11242 #include <linux/string.h>
11243+#include <linux/sched.h>
11244 #include <asm/asm.h>
11245 #include <asm/page.h>
11246
11247 #define VERIFY_READ 0
11248 #define VERIFY_WRITE 1
11249
11250+extern void check_object_size(const void *ptr, unsigned long n, bool to);
11251+
11252 /*
11253 * The fs value determines whether argument validity checking should be
11254 * performed or not. If get_fs() == USER_DS, checking is performed, with
11255@@ -29,7 +32,12 @@
11256
11257 #define get_ds() (KERNEL_DS)
11258 #define get_fs() (current_thread_info()->addr_limit)
11259+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11260+void __set_fs(mm_segment_t x);
11261+void set_fs(mm_segment_t x);
11262+#else
11263 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11264+#endif
11265
11266 #define segment_eq(a, b) ((a).seg == (b).seg)
11267
11268@@ -77,7 +85,33 @@
11269 * checks that the pointer is in the user space range - after calling
11270 * this function, memory access functions may still return -EFAULT.
11271 */
11272-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11273+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11274+#define access_ok(type, addr, size) \
11275+({ \
11276+ long __size = size; \
11277+ unsigned long __addr = (unsigned long)addr; \
11278+ unsigned long __addr_ao = __addr & PAGE_MASK; \
11279+ unsigned long __end_ao = __addr + __size - 1; \
11280+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11281+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11282+ while(__addr_ao <= __end_ao) { \
11283+ char __c_ao; \
11284+ __addr_ao += PAGE_SIZE; \
11285+ if (__size > PAGE_SIZE) \
11286+ cond_resched(); \
11287+ if (__get_user(__c_ao, (char __user *)__addr)) \
11288+ break; \
11289+ if (type != VERIFY_WRITE) { \
11290+ __addr = __addr_ao; \
11291+ continue; \
11292+ } \
11293+ if (__put_user(__c_ao, (char __user *)__addr)) \
11294+ break; \
11295+ __addr = __addr_ao; \
11296+ } \
11297+ } \
11298+ __ret_ao; \
11299+})
11300
11301 /*
11302 * The exception table consists of pairs of addresses: the first is the
11303@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11304 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11305 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11306
11307-
11308+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11309+#define __copyuser_seg "gs;"
11310+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11311+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11312+#else
11313+#define __copyuser_seg
11314+#define __COPYUSER_SET_ES
11315+#define __COPYUSER_RESTORE_ES
11316+#endif
11317
11318 #ifdef CONFIG_X86_32
11319 #define __put_user_asm_u64(x, addr, err, errret) \
11320- asm volatile("1: movl %%eax,0(%2)\n" \
11321- "2: movl %%edx,4(%2)\n" \
11322+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11323+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11324 "3:\n" \
11325 ".section .fixup,\"ax\"\n" \
11326 "4: movl %3,%0\n" \
11327@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11328 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11329
11330 #define __put_user_asm_ex_u64(x, addr) \
11331- asm volatile("1: movl %%eax,0(%1)\n" \
11332- "2: movl %%edx,4(%1)\n" \
11333+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11334+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11335 "3:\n" \
11336 _ASM_EXTABLE(1b, 2b - 1b) \
11337 _ASM_EXTABLE(2b, 3b - 2b) \
11338@@ -374,7 +416,7 @@ do { \
11339 } while (0)
11340
11341 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11342- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11343+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11344 "2:\n" \
11345 ".section .fixup,\"ax\"\n" \
11346 "3: mov %3,%0\n" \
11347@@ -382,7 +424,7 @@ do { \
11348 " jmp 2b\n" \
11349 ".previous\n" \
11350 _ASM_EXTABLE(1b, 3b) \
11351- : "=r" (err), ltype(x) \
11352+ : "=r" (err), ltype (x) \
11353 : "m" (__m(addr)), "i" (errret), "0" (err))
11354
11355 #define __get_user_size_ex(x, ptr, size) \
11356@@ -407,7 +449,7 @@ do { \
11357 } while (0)
11358
11359 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11360- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11361+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11362 "2:\n" \
11363 _ASM_EXTABLE(1b, 2b - 1b) \
11364 : ltype(x) : "m" (__m(addr)))
11365@@ -424,13 +466,24 @@ do { \
11366 int __gu_err; \
11367 unsigned long __gu_val; \
11368 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11369- (x) = (__force __typeof__(*(ptr)))__gu_val; \
11370+ (x) = (__typeof__(*(ptr)))__gu_val; \
11371 __gu_err; \
11372 })
11373
11374 /* FIXME: this hack is definitely wrong -AK */
11375 struct __large_struct { unsigned long buf[100]; };
11376-#define __m(x) (*(struct __large_struct __user *)(x))
11377+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11378+#define ____m(x) \
11379+({ \
11380+ unsigned long ____x = (unsigned long)(x); \
11381+ if (____x < PAX_USER_SHADOW_BASE) \
11382+ ____x += PAX_USER_SHADOW_BASE; \
11383+ (void __user *)____x; \
11384+})
11385+#else
11386+#define ____m(x) (x)
11387+#endif
11388+#define __m(x) (*(struct __large_struct __user *)____m(x))
11389
11390 /*
11391 * Tell gcc we read from memory instead of writing: this is because
11392@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11393 * aliasing issues.
11394 */
11395 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11396- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11397+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11398 "2:\n" \
11399 ".section .fixup,\"ax\"\n" \
11400 "3: mov %3,%0\n" \
11401@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11402 ".previous\n" \
11403 _ASM_EXTABLE(1b, 3b) \
11404 : "=r"(err) \
11405- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11406+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11407
11408 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11409- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11410+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11411 "2:\n" \
11412 _ASM_EXTABLE(1b, 2b - 1b) \
11413 : : ltype(x), "m" (__m(addr)))
11414@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11415 * On error, the variable @x is set to zero.
11416 */
11417
11418+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11419+#define __get_user(x, ptr) get_user((x), (ptr))
11420+#else
11421 #define __get_user(x, ptr) \
11422 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11423+#endif
11424
11425 /**
11426 * __put_user: - Write a simple value into user space, with less checking.
11427@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11428 * Returns zero on success, or -EFAULT on error.
11429 */
11430
11431+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11432+#define __put_user(x, ptr) put_user((x), (ptr))
11433+#else
11434 #define __put_user(x, ptr) \
11435 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11436+#endif
11437
11438 #define __get_user_unaligned __get_user
11439 #define __put_user_unaligned __put_user
11440@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11441 #define get_user_ex(x, ptr) do { \
11442 unsigned long __gue_val; \
11443 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11444- (x) = (__force __typeof__(*(ptr)))__gue_val; \
11445+ (x) = (__typeof__(*(ptr)))__gue_val; \
11446 } while (0)
11447
11448 #ifdef CONFIG_X86_WP_WORKS_OK
11449@@ -567,6 +628,7 @@ extern struct movsl_mask {
11450
11451 #define ARCH_HAS_NOCACHE_UACCESS 1
11452
11453+#define ARCH_HAS_SORT_EXTABLE
11454 #ifdef CONFIG_X86_32
11455 # include "uaccess_32.h"
11456 #else
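Editor's note: the reworked access_ok() above walks every page that a user range overlaps and touches one byte per page (writing it back for VERIFY_WRITE) so that faults are taken before the copy starts. The short userspace sketch below reproduces only the page-walk arithmetic (PAGE_MASK base, addr + size - 1 end, PAGE_SIZE steps); walk_pages() and the sample addresses are assumptions made for the example.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Print every page base that [addr, addr + size) overlaps, stepping one
   PAGE_SIZE at a time, the same way the macro advances __addr_ao. */
static void walk_pages(unsigned long addr, long size)
{
        unsigned long page = addr & PAGE_MASK;
        unsigned long end  = addr + size - 1;

        while (page <= end) {
                printf("touch page %#lx\n", page);
                page += PAGE_SIZE;
        }
}

int main(void)
{
        walk_pages(0x1000ff0UL, 64);    /* crosses a page boundary: two pages */
        walk_pages(0x2000000UL, 16);    /* stays within one page */
        return 0;
}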
11457diff -urNp linux-2.6.32.45/arch/x86/include/asm/vgtod.h linux-2.6.32.45/arch/x86/include/asm/vgtod.h
11458--- linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11459+++ linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11460@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11461 int sysctl_enabled;
11462 struct timezone sys_tz;
11463 struct { /* extract of a clocksource struct */
11464+ char name[8];
11465 cycle_t (*vread)(void);
11466 cycle_t cycle_last;
11467 cycle_t mask;
11468diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi.h linux-2.6.32.45/arch/x86/include/asm/vmi.h
11469--- linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11470+++ linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11471@@ -191,6 +191,7 @@ struct vrom_header {
11472 u8 reserved[96]; /* Reserved for headers */
11473 char vmi_init[8]; /* VMI_Init jump point */
11474 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11475+ char rom_data[8048]; /* rest of the option ROM */
11476 } __attribute__((packed));
11477
11478 struct pnp_header {
11479diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi_time.h linux-2.6.32.45/arch/x86/include/asm/vmi_time.h
11480--- linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-03-27 14:31:47.000000000 -0400
11481+++ linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-08-05 20:33:55.000000000 -0400
11482@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
11483 int (*wallclock_updated)(void);
11484 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
11485 void (*cancel_alarm)(u32 flags);
11486-} vmi_timer_ops;
11487+} __no_const vmi_timer_ops;
11488
11489 /* Prototypes */
11490 extern void __init vmi_time_init(void);
11491diff -urNp linux-2.6.32.45/arch/x86/include/asm/vsyscall.h linux-2.6.32.45/arch/x86/include/asm/vsyscall.h
11492--- linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11493+++ linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11494@@ -15,9 +15,10 @@ enum vsyscall_num {
11495
11496 #ifdef __KERNEL__
11497 #include <linux/seqlock.h>
11498+#include <linux/getcpu.h>
11499+#include <linux/time.h>
11500
11501 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11502-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11503
11504 /* Definitions for CONFIG_GENERIC_TIME definitions */
11505 #define __section_vsyscall_gtod_data __attribute__ \
11506@@ -31,7 +32,6 @@ enum vsyscall_num {
11507 #define VGETCPU_LSL 2
11508
11509 extern int __vgetcpu_mode;
11510-extern volatile unsigned long __jiffies;
11511
11512 /* kernel space (writeable) */
11513 extern int vgetcpu_mode;
11514@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11515
11516 extern void map_vsyscall(void);
11517
11518+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11519+extern time_t vtime(time_t *t);
11520+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11521 #endif /* __KERNEL__ */
11522
11523 #endif /* _ASM_X86_VSYSCALL_H */
11524diff -urNp linux-2.6.32.45/arch/x86/include/asm/x86_init.h linux-2.6.32.45/arch/x86/include/asm/x86_init.h
11525--- linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-03-27 14:31:47.000000000 -0400
11526+++ linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-08-05 20:33:55.000000000 -0400
11527@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11528 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11529 void (*find_smp_config)(unsigned int reserve);
11530 void (*get_smp_config)(unsigned int early);
11531-};
11532+} __no_const;
11533
11534 /**
11535 * struct x86_init_resources - platform specific resource related ops
11536@@ -42,7 +42,7 @@ struct x86_init_resources {
11537 void (*probe_roms)(void);
11538 void (*reserve_resources)(void);
11539 char *(*memory_setup)(void);
11540-};
11541+} __no_const;
11542
11543 /**
11544 * struct x86_init_irqs - platform specific interrupt setup
11545@@ -55,7 +55,7 @@ struct x86_init_irqs {
11546 void (*pre_vector_init)(void);
11547 void (*intr_init)(void);
11548 void (*trap_init)(void);
11549-};
11550+} __no_const;
11551
11552 /**
11553 * struct x86_init_oem - oem platform specific customizing functions
11554@@ -65,7 +65,7 @@ struct x86_init_irqs {
11555 struct x86_init_oem {
11556 void (*arch_setup)(void);
11557 void (*banner)(void);
11558-};
11559+} __no_const;
11560
11561 /**
11562 * struct x86_init_paging - platform specific paging functions
11563@@ -75,7 +75,7 @@ struct x86_init_oem {
11564 struct x86_init_paging {
11565 void (*pagetable_setup_start)(pgd_t *base);
11566 void (*pagetable_setup_done)(pgd_t *base);
11567-};
11568+} __no_const;
11569
11570 /**
11571 * struct x86_init_timers - platform specific timer setup
11572@@ -88,7 +88,7 @@ struct x86_init_timers {
11573 void (*setup_percpu_clockev)(void);
11574 void (*tsc_pre_init)(void);
11575 void (*timer_init)(void);
11576-};
11577+} __no_const;
11578
11579 /**
11580 * struct x86_init_ops - functions for platform specific setup
11581@@ -101,7 +101,7 @@ struct x86_init_ops {
11582 struct x86_init_oem oem;
11583 struct x86_init_paging paging;
11584 struct x86_init_timers timers;
11585-};
11586+} __no_const;
11587
11588 /**
11589 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11590@@ -109,7 +109,7 @@ struct x86_init_ops {
11591 */
11592 struct x86_cpuinit_ops {
11593 void (*setup_percpu_clockev)(void);
11594-};
11595+} __no_const;
11596
11597 /**
11598 * struct x86_platform_ops - platform specific runtime functions
11599@@ -121,7 +121,7 @@ struct x86_platform_ops {
11600 unsigned long (*calibrate_tsc)(void);
11601 unsigned long (*get_wallclock)(void);
11602 int (*set_wallclock)(unsigned long nowtime);
11603-};
11604+} __no_const;
11605
11606 extern struct x86_init_ops x86_init;
11607 extern struct x86_cpuinit_ops x86_cpuinit;
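Editor's note: a recurring change in this patch is tagging function-pointer ops structures either const (via the constify plugin) or __no_const when they must stay writable, as in the x86_init hunks above. The toy program below shows the underlying idea with plain C const: once the ops object is read-only, its function pointers cannot be retargeted after initialization. The struct and function names are invented for the example.

#include <stdio.h>

struct demo_timer_ops {
        void (*timer_init)(void);
};

static void real_timer_init(void) { puts("timer_init"); }

/* With the constify approach the whole ops object lives in read-only data,
   so a stray or malicious write cannot redirect the function pointers at
   runtime.  Structures that are legitimately patched after boot are the
   ones marked __no_const in the patch. */
static const struct demo_timer_ops demo_ops = {
        .timer_init = real_timer_init,
};

int main(void)
{
        demo_ops.timer_init();
        /* demo_ops.timer_init = NULL;  would not compile: object is const */
        return 0;
}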
11608diff -urNp linux-2.6.32.45/arch/x86/include/asm/xsave.h linux-2.6.32.45/arch/x86/include/asm/xsave.h
11609--- linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11610+++ linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11611@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11612 static inline int xsave_user(struct xsave_struct __user *buf)
11613 {
11614 int err;
11615+
11616+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11617+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11618+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11619+#endif
11620+
11621 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11622 "2:\n"
11623 ".section .fixup,\"ax\"\n"
11624@@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11625 u32 lmask = mask;
11626 u32 hmask = mask >> 32;
11627
11628+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11629+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11630+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11631+#endif
11632+
11633 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11634 "2:\n"
11635 ".section .fixup,\"ax\"\n"
11636diff -urNp linux-2.6.32.45/arch/x86/Kconfig linux-2.6.32.45/arch/x86/Kconfig
11637--- linux-2.6.32.45/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11638+++ linux-2.6.32.45/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11639@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11640
11641 config X86_32_LAZY_GS
11642 def_bool y
11643- depends on X86_32 && !CC_STACKPROTECTOR
11644+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11645
11646 config KTIME_SCALAR
11647 def_bool X86_32
11648@@ -1008,7 +1008,7 @@ choice
11649
11650 config NOHIGHMEM
11651 bool "off"
11652- depends on !X86_NUMAQ
11653+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11654 ---help---
11655 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11656 However, the address space of 32-bit x86 processors is only 4
11657@@ -1045,7 +1045,7 @@ config NOHIGHMEM
11658
11659 config HIGHMEM4G
11660 bool "4GB"
11661- depends on !X86_NUMAQ
11662+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11663 ---help---
11664 Select this if you have a 32-bit processor and between 1 and 4
11665 gigabytes of physical RAM.
11666@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11667 hex
11668 default 0xB0000000 if VMSPLIT_3G_OPT
11669 default 0x80000000 if VMSPLIT_2G
11670- default 0x78000000 if VMSPLIT_2G_OPT
11671+ default 0x70000000 if VMSPLIT_2G_OPT
11672 default 0x40000000 if VMSPLIT_1G
11673 default 0xC0000000
11674 depends on X86_32
11675@@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11676
11677 config EFI
11678 bool "EFI runtime service support"
11679- depends on ACPI
11680+ depends on ACPI && !PAX_KERNEXEC
11681 ---help---
11682 This enables the kernel to use EFI runtime services that are
11683 available (such as the EFI variable services).
11684@@ -1460,6 +1460,7 @@ config SECCOMP
11685
11686 config CC_STACKPROTECTOR
11687 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11688+ depends on X86_64 || !PAX_MEMORY_UDEREF
11689 ---help---
11690 This option turns on the -fstack-protector GCC feature. This
11691 feature puts, at the beginning of functions, a canary value on
11692@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11693 config PHYSICAL_START
11694 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11695 default "0x1000000"
11696+ range 0x400000 0x40000000
11697 ---help---
11698 This gives the physical address where the kernel is loaded.
11699
11700@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11701 hex
11702 prompt "Alignment value to which kernel should be aligned" if X86_32
11703 default "0x1000000"
11704+ range 0x400000 0x1000000 if PAX_KERNEXEC
11705 range 0x2000 0x1000000
11706 ---help---
11707 This value puts the alignment restrictions on physical address
11708@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11709 Say N if you want to disable CPU hotplug.
11710
11711 config COMPAT_VDSO
11712- def_bool y
11713+ def_bool n
11714 prompt "Compat VDSO support"
11715 depends on X86_32 || IA32_EMULATION
11716+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11717 ---help---
11718 Map the 32-bit VDSO to the predictable old-style address too.
11719
11720diff -urNp linux-2.6.32.45/arch/x86/Kconfig.cpu linux-2.6.32.45/arch/x86/Kconfig.cpu
11721--- linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11722+++ linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11723@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11724
11725 config X86_F00F_BUG
11726 def_bool y
11727- depends on M586MMX || M586TSC || M586 || M486 || M386
11728+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11729
11730 config X86_WP_WORKS_OK
11731 def_bool y
11732@@ -360,7 +360,7 @@ config X86_POPAD_OK
11733
11734 config X86_ALIGNMENT_16
11735 def_bool y
11736- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11737+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11738
11739 config X86_INTEL_USERCOPY
11740 def_bool y
11741@@ -406,7 +406,7 @@ config X86_CMPXCHG64
11742 # generates cmov.
11743 config X86_CMOV
11744 def_bool y
11745- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11746+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11747
11748 config X86_MINIMUM_CPU_FAMILY
11749 int
11750diff -urNp linux-2.6.32.45/arch/x86/Kconfig.debug linux-2.6.32.45/arch/x86/Kconfig.debug
11751--- linux-2.6.32.45/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11752+++ linux-2.6.32.45/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11753@@ -99,7 +99,7 @@ config X86_PTDUMP
11754 config DEBUG_RODATA
11755 bool "Write protect kernel read-only data structures"
11756 default y
11757- depends on DEBUG_KERNEL
11758+ depends on DEBUG_KERNEL && BROKEN
11759 ---help---
11760 Mark the kernel read-only data as write-protected in the pagetables,
11761 in order to catch accidental (and incorrect) writes to such const
11762diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile
11763--- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-03-27 14:31:47.000000000 -0400
11764+++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-08-07 14:38:58.000000000 -0400
11765@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
11766 $(call cc-option, -fno-stack-protector) \
11767 $(call cc-option, -mpreferred-stack-boundary=2)
11768 KBUILD_CFLAGS += $(call cc-option, -m32)
11769+ifdef CONSTIFY_PLUGIN
11770+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11771+endif
11772 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11773 GCOV_PROFILE := n
11774
11775diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S
11776--- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11777+++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11778@@ -91,6 +91,9 @@ _start:
11779 /* Do any other stuff... */
11780
11781 #ifndef CONFIG_64BIT
11782+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
11783+ call verify_cpu
11784+
11785 /* This could also be done in C code... */
11786 movl pmode_cr3, %eax
11787 movl %eax, %cr3
11788@@ -104,7 +107,7 @@ _start:
11789 movl %eax, %ecx
11790 orl %edx, %ecx
11791 jz 1f
11792- movl $0xc0000080, %ecx
11793+ mov $MSR_EFER, %ecx
11794 wrmsr
11795 1:
11796
11797@@ -114,6 +117,7 @@ _start:
11798 movl pmode_cr0, %eax
11799 movl %eax, %cr0
11800 jmp pmode_return
11801+# include "../../verify_cpu.S"
11802 #else
11803 pushw $0
11804 pushw trampoline_segment
11805diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c
11806--- linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11807+++ linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11808@@ -11,11 +11,12 @@
11809 #include <linux/cpumask.h>
11810 #include <asm/segment.h>
11811 #include <asm/desc.h>
11812+#include <asm/e820.h>
11813
11814 #include "realmode/wakeup.h"
11815 #include "sleep.h"
11816
11817-unsigned long acpi_wakeup_address;
11818+unsigned long acpi_wakeup_address = 0x2000;
11819 unsigned long acpi_realmode_flags;
11820
11821 /* address in low memory of the wakeup routine. */
11822@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11823 #else /* CONFIG_64BIT */
11824 header->trampoline_segment = setup_trampoline() >> 4;
11825 #ifdef CONFIG_SMP
11826- stack_start.sp = temp_stack + sizeof(temp_stack);
11827+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11828+
11829+ pax_open_kernel();
11830 early_gdt_descr.address =
11831 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11832+ pax_close_kernel();
11833+
11834 initial_gs = per_cpu_offset(smp_processor_id());
11835 #endif
11836 initial_code = (unsigned long)wakeup_long64;
11837@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11838 return;
11839 }
11840
11841- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11842-
11843- if (!acpi_realmode) {
11844- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11845- return;
11846- }
11847-
11848- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11849+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11850+	acpi_realmode = (unsigned long)__va(acpi_wakeup_address);
11851 }
11852
11853
11854diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S
11855--- linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11856+++ linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11857@@ -30,13 +30,11 @@ wakeup_pmode_return:
11858 # and restore the stack ... but you need gdt for this to work
11859 movl saved_context_esp, %esp
11860
11861- movl %cs:saved_magic, %eax
11862- cmpl $0x12345678, %eax
11863+ cmpl $0x12345678, saved_magic
11864 jne bogus_magic
11865
11866 # jump to place where we left off
11867- movl saved_eip, %eax
11868- jmp *%eax
11869+ jmp *(saved_eip)
11870
11871 bogus_magic:
11872 jmp bogus_magic
11873diff -urNp linux-2.6.32.45/arch/x86/kernel/alternative.c linux-2.6.32.45/arch/x86/kernel/alternative.c
11874--- linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11875+++ linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11876@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11877
11878 BUG_ON(p->len > MAX_PATCH_LEN);
11879 /* prep the buffer with the original instructions */
11880- memcpy(insnbuf, p->instr, p->len);
11881+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11882 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11883 (unsigned long)p->instr, p->len);
11884
11885@@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11886 if (smp_alt_once)
11887 free_init_pages("SMP alternatives",
11888 (unsigned long)__smp_locks,
11889- (unsigned long)__smp_locks_end);
11890+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11891
11892 restart_nmi();
11893 }
11894@@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11895 * instructions. And on the local CPU you need to be protected again NMI or MCE
11896 * handlers seeing an inconsistent instruction while you patch.
11897 */
11898-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11899+static void *__kprobes text_poke_early(void *addr, const void *opcode,
11900 size_t len)
11901 {
11902 unsigned long flags;
11903 local_irq_save(flags);
11904- memcpy(addr, opcode, len);
11905+
11906+ pax_open_kernel();
11907+ memcpy(ktla_ktva(addr), opcode, len);
11908 sync_core();
11909+ pax_close_kernel();
11910+
11911 local_irq_restore(flags);
11912 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11913 that causes hangs on some VIA CPUs. */
11914@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11915 */
11916 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11917 {
11918- unsigned long flags;
11919- char *vaddr;
11920+ unsigned char *vaddr = ktla_ktva(addr);
11921 struct page *pages[2];
11922- int i;
11923+ size_t i;
11924
11925 if (!core_kernel_text((unsigned long)addr)) {
11926- pages[0] = vmalloc_to_page(addr);
11927- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11928+ pages[0] = vmalloc_to_page(vaddr);
11929+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11930 } else {
11931- pages[0] = virt_to_page(addr);
11932+ pages[0] = virt_to_page(vaddr);
11933 WARN_ON(!PageReserved(pages[0]));
11934- pages[1] = virt_to_page(addr + PAGE_SIZE);
11935+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11936 }
11937 BUG_ON(!pages[0]);
11938- local_irq_save(flags);
11939- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11940- if (pages[1])
11941- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11942- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11943- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11944- clear_fixmap(FIX_TEXT_POKE0);
11945- if (pages[1])
11946- clear_fixmap(FIX_TEXT_POKE1);
11947- local_flush_tlb();
11948- sync_core();
11949- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11950- that causes hangs on some VIA CPUs. */
11951+ text_poke_early(addr, opcode, len);
11952 for (i = 0; i < len; i++)
11953- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11954- local_irq_restore(flags);
11955+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11956 return addr;
11957 }
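Editor's note: text_poke() above is rewritten to patch through the kernel's writable alias (ktla_ktva() inside pax_open_kernel()/pax_close_kernel()) and then verify the result byte for byte. The fragment below models only that final verification step in userspace; poke_and_verify() is a stand-in for illustration, not the kernel routine.

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Copy the opcode bytes into place, then check, byte for byte, that the
   destination now holds exactly those bytes, mirroring the BUG_ON() loop. */
static void poke_and_verify(unsigned char *dst, const unsigned char *opcode, size_t len)
{
        memcpy(dst, opcode, len);            /* stands in for text_poke_early() */
        for (size_t i = 0; i < len; i++)
                assert(dst[i] == opcode[i]);
}

int main(void)
{
        unsigned char text[4] = { 0x90, 0x90, 0x90, 0x90 };   /* NOPs */
        const unsigned char jmp_short[2] = { 0xeb, 0x02 };

        poke_and_verify(text, jmp_short, sizeof(jmp_short));
        printf("patched: %02x %02x\n", text[0], text[1]);
        return 0;
}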
11958diff -urNp linux-2.6.32.45/arch/x86/kernel/amd_iommu.c linux-2.6.32.45/arch/x86/kernel/amd_iommu.c
11959--- linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11960+++ linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11961@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11962 }
11963 }
11964
11965-static struct dma_map_ops amd_iommu_dma_ops = {
11966+static const struct dma_map_ops amd_iommu_dma_ops = {
11967 .alloc_coherent = alloc_coherent,
11968 .free_coherent = free_coherent,
11969 .map_page = map_page,
11970diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/apic.c linux-2.6.32.45/arch/x86/kernel/apic/apic.c
11971--- linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11972+++ linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-08-17 20:00:16.000000000 -0400
11973@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
11974 /*
11975 * Debug level, exported for io_apic.c
11976 */
11977-unsigned int apic_verbosity;
11978+int apic_verbosity;
11979
11980 int pic_mode;
11981
11982@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11983 apic_write(APIC_ESR, 0);
11984 v1 = apic_read(APIC_ESR);
11985 ack_APIC_irq();
11986- atomic_inc(&irq_err_count);
11987+ atomic_inc_unchecked(&irq_err_count);
11988
11989 /*
11990 * Here is what the APIC error bits mean:
11991@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11992 u16 *bios_cpu_apicid;
11993 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11994
11995+ pax_track_stack();
11996+
11997 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11998 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11999
12000diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c
12001--- linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
12002+++ linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
12003@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
12004 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
12005 GFP_ATOMIC);
12006 if (!ioapic_entries)
12007- return 0;
12008+ return NULL;
12009
12010 for (apic = 0; apic < nr_ioapics; apic++) {
12011 ioapic_entries[apic] =
12012@@ -733,7 +733,7 @@ nomem:
12013 kfree(ioapic_entries[apic]);
12014 kfree(ioapic_entries);
12015
12016- return 0;
12017+ return NULL;
12018 }
12019
12020 /*
12021@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
12022 }
12023 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
12024
12025-void lock_vector_lock(void)
12026+void lock_vector_lock(void) __acquires(vector_lock)
12027 {
12028 /* Used to the online set of cpus does not change
12029 * during assign_irq_vector.
12030@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
12031 spin_lock(&vector_lock);
12032 }
12033
12034-void unlock_vector_lock(void)
12035+void unlock_vector_lock(void) __releases(vector_lock)
12036 {
12037 spin_unlock(&vector_lock);
12038 }
12039@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
12040 ack_APIC_irq();
12041 }
12042
12043-atomic_t irq_mis_count;
12044+atomic_unchecked_t irq_mis_count;
12045
12046 static void ack_apic_level(unsigned int irq)
12047 {
12048@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
12049
12050 /* Tail end of version 0x11 I/O APIC bug workaround */
12051 if (!(v & (1 << (i & 0x1f)))) {
12052- atomic_inc(&irq_mis_count);
12053+ atomic_inc_unchecked(&irq_mis_count);
12054 spin_lock(&ioapic_lock);
12055 __mask_and_edge_IO_APIC_irq(cfg);
12056 __unmask_and_level_IO_APIC_irq(cfg);
12057diff -urNp linux-2.6.32.45/arch/x86/kernel/apm_32.c linux-2.6.32.45/arch/x86/kernel/apm_32.c
12058--- linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
12059+++ linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
12060@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
12061 * This is for buggy BIOS's that refer to (real mode) segment 0x40
12062 * even though they are called in protected mode.
12063 */
12064-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
12065+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
12066 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
12067
12068 static const char driver_version[] = "1.16ac"; /* no spaces */
12069@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
12070 BUG_ON(cpu != 0);
12071 gdt = get_cpu_gdt_table(cpu);
12072 save_desc_40 = gdt[0x40 / 8];
12073+
12074+ pax_open_kernel();
12075 gdt[0x40 / 8] = bad_bios_desc;
12076+ pax_close_kernel();
12077
12078 apm_irq_save(flags);
12079 APM_DO_SAVE_SEGS;
12080@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
12081 &call->esi);
12082 APM_DO_RESTORE_SEGS;
12083 apm_irq_restore(flags);
12084+
12085+ pax_open_kernel();
12086 gdt[0x40 / 8] = save_desc_40;
12087+ pax_close_kernel();
12088+
12089 put_cpu();
12090
12091 return call->eax & 0xff;
12092@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
12093 BUG_ON(cpu != 0);
12094 gdt = get_cpu_gdt_table(cpu);
12095 save_desc_40 = gdt[0x40 / 8];
12096+
12097+ pax_open_kernel();
12098 gdt[0x40 / 8] = bad_bios_desc;
12099+ pax_close_kernel();
12100
12101 apm_irq_save(flags);
12102 APM_DO_SAVE_SEGS;
12103@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
12104 &call->eax);
12105 APM_DO_RESTORE_SEGS;
12106 apm_irq_restore(flags);
12107+
12108+ pax_open_kernel();
12109 gdt[0x40 / 8] = save_desc_40;
12110+ pax_close_kernel();
12111+
12112 put_cpu();
12113 return error;
12114 }
12115@@ -975,7 +989,7 @@ recalc:
12116
12117 static void apm_power_off(void)
12118 {
12119- unsigned char po_bios_call[] = {
12120+ const unsigned char po_bios_call[] = {
12121 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
12122 0x8e, 0xd0, /* movw ax,ss */
12123 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
12124@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
12125 * code to that CPU.
12126 */
12127 gdt = get_cpu_gdt_table(0);
12128+
12129+ pax_open_kernel();
12130 set_desc_base(&gdt[APM_CS >> 3],
12131 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
12132 set_desc_base(&gdt[APM_CS_16 >> 3],
12133 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
12134 set_desc_base(&gdt[APM_DS >> 3],
12135 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
12136+ pax_close_kernel();
12137
12138 proc_create("apm", 0, NULL, &apm_file_ops);
12139
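Editor's note: the apm_32.c hunks above wrap every GDT update in pax_open_kernel()/pax_close_kernel(), so the descriptor table is writable only for the duration of the update. The sketch below models that open/write/close pattern with mprotect() on an anonymous page; it is a userspace analogy under assumed names, not the kernel mechanism itself.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        strcpy(p, "initial descriptor");
        mprotect(p, pagesz, PROT_READ);              /* normal state: read-only */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* analogue of pax_open_kernel() */
        strcpy(p, "patched descriptor");
        mprotect(p, pagesz, PROT_READ);              /* analogue of pax_close_kernel() */

        puts(p);
        munmap(p, pagesz);
        return 0;
}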
12140diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c
12141--- linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
12142+++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
12143@@ -51,7 +51,6 @@ void foo(void)
12144 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
12145 BLANK();
12146
12147- OFFSET(TI_task, thread_info, task);
12148 OFFSET(TI_exec_domain, thread_info, exec_domain);
12149 OFFSET(TI_flags, thread_info, flags);
12150 OFFSET(TI_status, thread_info, status);
12151@@ -60,6 +59,8 @@ void foo(void)
12152 OFFSET(TI_restart_block, thread_info, restart_block);
12153 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
12154 OFFSET(TI_cpu, thread_info, cpu);
12155+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12156+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12157 BLANK();
12158
12159 OFFSET(GDS_size, desc_ptr, size);
12160@@ -99,6 +100,7 @@ void foo(void)
12161
12162 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12163 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12164+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12165 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
12166 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
12167 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
12168@@ -115,6 +117,11 @@ void foo(void)
12169 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
12170 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12171 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12172+
12173+#ifdef CONFIG_PAX_KERNEXEC
12174+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12175+#endif
12176+
12177 #endif
12178
12179 #ifdef CONFIG_XEN
12180diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c
12181--- linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
12182+++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-08-23 20:24:19.000000000 -0400
12183@@ -44,6 +44,8 @@ int main(void)
12184 ENTRY(addr_limit);
12185 ENTRY(preempt_count);
12186 ENTRY(status);
12187+ ENTRY(lowest_stack);
12188+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12189 #ifdef CONFIG_IA32_EMULATION
12190 ENTRY(sysenter_return);
12191 #endif
12192@@ -63,6 +65,18 @@ int main(void)
12193 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12194 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
12195 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12196+
12197+#ifdef CONFIG_PAX_KERNEXEC
12198+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12199+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12200+#endif
12201+
12202+#ifdef CONFIG_PAX_MEMORY_UDEREF
12203+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12204+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12205+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
12206+#endif
12207+
12208 #endif
12209
12210
12211@@ -115,6 +129,7 @@ int main(void)
12212 ENTRY(cr8);
12213 BLANK();
12214 #undef ENTRY
12215+ DEFINE(TSS_size, sizeof(struct tss_struct));
12216 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
12217 BLANK();
12218 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
12219@@ -130,6 +145,7 @@ int main(void)
12220
12221 BLANK();
12222 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12223+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12224 #ifdef CONFIG_XEN
12225 BLANK();
12226 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12227diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/amd.c
12228--- linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
12229+++ linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
12230@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
12231 unsigned int size)
12232 {
12233 /* AMD errata T13 (order #21922) */
12234- if ((c->x86 == 6)) {
12235+ if (c->x86 == 6) {
12236 /* Duron Rev A0 */
12237 if (c->x86_model == 3 && c->x86_mask == 0)
12238 size = 64;
12239diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/common.c linux-2.6.32.45/arch/x86/kernel/cpu/common.c
12240--- linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
12241+++ linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
12242@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
12243
12244 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12245
12246-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12247-#ifdef CONFIG_X86_64
12248- /*
12249- * We need valid kernel segments for data and code in long mode too
12250- * IRET will check the segment types kkeil 2000/10/28
12251- * Also sysret mandates a special GDT layout
12252- *
12253- * TLS descriptors are currently at a different place compared to i386.
12254- * Hopefully nobody expects them at a fixed place (Wine?)
12255- */
12256- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12257- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12258- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12259- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12260- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12261- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12262-#else
12263- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12264- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12265- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12266- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12267- /*
12268- * Segments used for calling PnP BIOS have byte granularity.
12269- * They code segments and data segments have fixed 64k limits,
12270- * the transfer segment sizes are set at run time.
12271- */
12272- /* 32-bit code */
12273- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12274- /* 16-bit code */
12275- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12276- /* 16-bit data */
12277- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12278- /* 16-bit data */
12279- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12280- /* 16-bit data */
12281- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12282- /*
12283- * The APM segments have byte granularity and their bases
12284- * are set at run time. All have 64k limits.
12285- */
12286- /* 32-bit code */
12287- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12288- /* 16-bit code */
12289- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12290- /* data */
12291- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12292-
12293- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12294- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12295- GDT_STACK_CANARY_INIT
12296-#endif
12297-} };
12298-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12299-
12300 static int __init x86_xsave_setup(char *s)
12301 {
12302 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12303@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
12304 {
12305 struct desc_ptr gdt_descr;
12306
12307- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12308+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12309 gdt_descr.size = GDT_SIZE - 1;
12310 load_gdt(&gdt_descr);
12311 /* Reload the per-cpu base */
12312@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
12313 /* Filter out anything that depends on CPUID levels we don't have */
12314 filter_cpuid_features(c, true);
12315
12316+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
12317+ setup_clear_cpu_cap(X86_FEATURE_SEP);
12318+#endif
12319+
12320 /* If the model name is still unset, do table lookup. */
12321 if (!c->x86_model_id[0]) {
12322 const char *p;
12323@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
12324 }
12325 __setup("clearcpuid=", setup_disablecpuid);
12326
12327+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12328+EXPORT_PER_CPU_SYMBOL(current_tinfo);
12329+
12330 #ifdef CONFIG_X86_64
12331 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12332
12333@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
12334 EXPORT_PER_CPU_SYMBOL(current_task);
12335
12336 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12337- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12338+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12339 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12340
12341 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12342@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
12343 {
12344 memset(regs, 0, sizeof(struct pt_regs));
12345 regs->fs = __KERNEL_PERCPU;
12346- regs->gs = __KERNEL_STACK_CANARY;
12347+ savesegment(gs, regs->gs);
12348
12349 return regs;
12350 }
12351@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
12352 int i;
12353
12354 cpu = stack_smp_processor_id();
12355- t = &per_cpu(init_tss, cpu);
12356+ t = init_tss + cpu;
12357 orig_ist = &per_cpu(orig_ist, cpu);
12358
12359 #ifdef CONFIG_NUMA
12360@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12361 switch_to_new_gdt(cpu);
12362 loadsegment(fs, 0);
12363
12364- load_idt((const struct desc_ptr *)&idt_descr);
12365+ load_idt(&idt_descr);
12366
12367 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12368 syscall_init();
12369@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12370 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12371 barrier();
12372
12373- check_efer();
12374 if (cpu != 0)
12375 enable_x2apic();
12376
12377@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12378 {
12379 int cpu = smp_processor_id();
12380 struct task_struct *curr = current;
12381- struct tss_struct *t = &per_cpu(init_tss, cpu);
12382+ struct tss_struct *t = init_tss + cpu;
12383 struct thread_struct *thread = &curr->thread;
12384
12385 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12386diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel.c linux-2.6.32.45/arch/x86/kernel/cpu/intel.c
12387--- linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12388+++ linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12389@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12390 * Update the IDT descriptor and reload the IDT so that
12391 * it uses the read-only mapped virtual address.
12392 */
12393- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12394+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12395 load_idt(&idt_descr);
12396 }
12397 #endif
12398diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c
12399--- linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12400+++ linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12401@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12402 return ret;
12403 }
12404
12405-static struct sysfs_ops sysfs_ops = {
12406+static const struct sysfs_ops sysfs_ops = {
12407 .show = show,
12408 .store = store,
12409 };
12410diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/Makefile linux-2.6.32.45/arch/x86/kernel/cpu/Makefile
12411--- linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12412+++ linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12413@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12414 CFLAGS_REMOVE_common.o = -pg
12415 endif
12416
12417-# Make sure load_percpu_segment has no stackprotector
12418-nostackp := $(call cc-option, -fno-stack-protector)
12419-CFLAGS_common.o := $(nostackp)
12420-
12421 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12422 obj-y += proc.o capflags.o powerflags.o common.o
12423 obj-y += vmware.o hypervisor.o sched.o
12424diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c
12425--- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12426+++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12427@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12428 return ret;
12429 }
12430
12431-static struct sysfs_ops threshold_ops = {
12432+static const struct sysfs_ops threshold_ops = {
12433 .show = show,
12434 .store = store,
12435 };
12436diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c
12437--- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12438+++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12439@@ -43,6 +43,7 @@
12440 #include <asm/ipi.h>
12441 #include <asm/mce.h>
12442 #include <asm/msr.h>
12443+#include <asm/local.h>
12444
12445 #include "mce-internal.h"
12446
12447@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12448 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12449 m->cs, m->ip);
12450
12451- if (m->cs == __KERNEL_CS)
12452+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12453 print_symbol("{%s}", m->ip);
12454 pr_cont("\n");
12455 }
12456@@ -221,10 +222,10 @@ static void print_mce_tail(void)
12457
12458 #define PANIC_TIMEOUT 5 /* 5 seconds */
12459
12460-static atomic_t mce_paniced;
12461+static atomic_unchecked_t mce_paniced;
12462
12463 static int fake_panic;
12464-static atomic_t mce_fake_paniced;
12465+static atomic_unchecked_t mce_fake_paniced;
12466
12467 /* Panic in progress. Enable interrupts and wait for final IPI */
12468 static void wait_for_panic(void)
12469@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12470 /*
12471 * Make sure only one CPU runs in machine check panic
12472 */
12473- if (atomic_inc_return(&mce_paniced) > 1)
12474+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12475 wait_for_panic();
12476 barrier();
12477
12478@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12479 console_verbose();
12480 } else {
12481 /* Don't log too much for fake panic */
12482- if (atomic_inc_return(&mce_fake_paniced) > 1)
12483+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12484 return;
12485 }
12486 print_mce_head();
12487@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12488 * might have been modified by someone else.
12489 */
12490 rmb();
12491- if (atomic_read(&mce_paniced))
12492+ if (atomic_read_unchecked(&mce_paniced))
12493 wait_for_panic();
12494 if (!monarch_timeout)
12495 goto out;
12496@@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12497 */
12498
12499 static DEFINE_SPINLOCK(mce_state_lock);
12500-static int open_count; /* #times opened */
12501+static local_t open_count; /* #times opened */
12502 static int open_exclu; /* already open exclusive? */
12503
12504 static int mce_open(struct inode *inode, struct file *file)
12505 {
12506 spin_lock(&mce_state_lock);
12507
12508- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12509+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12510 spin_unlock(&mce_state_lock);
12511
12512 return -EBUSY;
12513@@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12514
12515 if (file->f_flags & O_EXCL)
12516 open_exclu = 1;
12517- open_count++;
12518+ local_inc(&open_count);
12519
12520 spin_unlock(&mce_state_lock);
12521
12522@@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12523 {
12524 spin_lock(&mce_state_lock);
12525
12526- open_count--;
12527+ local_dec(&open_count);
12528 open_exclu = 0;
12529
12530 spin_unlock(&mce_state_lock);
12531@@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12532 static void mce_reset(void)
12533 {
12534 cpu_missing = 0;
12535- atomic_set(&mce_fake_paniced, 0);
12536+ atomic_set_unchecked(&mce_fake_paniced, 0);
12537 atomic_set(&mce_executing, 0);
12538 atomic_set(&mce_callin, 0);
12539 atomic_set(&global_nwo, 0);
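The mce.c hunk above swaps atomic_t for atomic_unchecked_t on mce_paniced/mce_fake_paniced and turns open_count into a local_t. The point of the "unchecked" variant is that PaX's overflow-checking atomics treat a wrapping atomic_t as a refcount bug, so counters that are purely informational must opt out. A rough, compilable user-space model of that distinction (the types, names and abort-on-overflow policy here are stand-ins for illustration, not the kernel's implementation):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* "checked" counter: refuses to wrap past INT_MAX (models the PaX refcount check) */
typedef struct { volatile int v; } atomic_checked_t;
/* "unchecked" counter: plain wrapping increment, fine for statistics */
typedef struct { volatile int v; } atomic_unchecked_model_t;

static int atomic_inc_return_checked(atomic_checked_t *a)
{
    int old = __atomic_load_n(&a->v, __ATOMIC_RELAXED);
    if (old == INT_MAX) {                 /* would overflow: treat as a bug */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    return __atomic_add_fetch(&a->v, 1, __ATOMIC_SEQ_CST);
}

static int atomic_inc_return_unchecked_model(atomic_unchecked_model_t *a)
{
    return __atomic_add_fetch(&a->v, 1, __ATOMIC_SEQ_CST);   /* may wrap silently */
}

int main(void)
{
    atomic_checked_t panics = { 0 };
    atomic_unchecked_model_t stats = { 0 };

    if (atomic_inc_return_checked(&panics) > 1)
        puts("another CPU is already panicking");
    printf("stat counter now %d\n", atomic_inc_return_unchecked_model(&stats));
    return 0;
}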
12540diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c
12541--- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-03-27 14:31:47.000000000 -0400
12542+++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:33:55.000000000 -0400
12543@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *fi
12544 static int inject_init(void)
12545 {
12546 printk(KERN_INFO "Machine check injector initialized\n");
12547- mce_chrdev_ops.write = mce_write;
12548+ pax_open_kernel();
12549+ *(void **)&mce_chrdev_ops.write = mce_write;
12550+ pax_close_kernel();
12551 register_die_notifier(&mce_raise_nb);
12552 return 0;
12553 }
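In the mce-inject.c hunk, the direct assignment to mce_chrdev_ops.write is bracketed with pax_open_kernel()/pax_close_kernel() because the fops structure now lives in read-only memory. A loose user-space analogue of that "open a short writable window, patch one pointer, close it again" pattern, using mprotect() on a page-aligned ops table (the real helpers flip kernel write protection rather than page permissions; everything below is an illustrative sketch):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct file_ops { ssize_t (*write)(const char *buf, size_t len); };

static ssize_t new_write(const char *buf, size_t len) { (void)buf; return (ssize_t)len; }

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    /* the ops table gets its own page so its protection can be toggled */
    struct file_ops *ops = mmap(NULL, page, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ops == MAP_FAILED)
        return 1;
    memset(ops, 0, sizeof(*ops));
    mprotect(ops, page, PROT_READ);              /* normally read-only */

    mprotect(ops, page, PROT_READ | PROT_WRITE); /* ~ pax_open_kernel()  */
    *(void **)&ops->write = (void *)new_write;   /* the one permitted store */
    mprotect(ops, page, PROT_READ);              /* ~ pax_close_kernel() */

    printf("write() now returns %zd\n", ops->write("x", 1));
    return 0;
}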
12554diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c
12555--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12556+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12557@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12558 return 0;
12559 }
12560
12561-static struct mtrr_ops amd_mtrr_ops = {
12562+static const struct mtrr_ops amd_mtrr_ops = {
12563 .vendor = X86_VENDOR_AMD,
12564 .set = amd_set_mtrr,
12565 .get = amd_get_mtrr,
12566diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c
12567--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12568+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12569@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12570 return 0;
12571 }
12572
12573-static struct mtrr_ops centaur_mtrr_ops = {
12574+static const struct mtrr_ops centaur_mtrr_ops = {
12575 .vendor = X86_VENDOR_CENTAUR,
12576 .set = centaur_set_mcr,
12577 .get = centaur_get_mcr,
12578diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c
12579--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12580+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12581@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12582 post_set();
12583 }
12584
12585-static struct mtrr_ops cyrix_mtrr_ops = {
12586+static const struct mtrr_ops cyrix_mtrr_ops = {
12587 .vendor = X86_VENDOR_CYRIX,
12588 .set_all = cyrix_set_all,
12589 .set = cyrix_set_arr,
12590diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c
12591--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12592+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12593@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12594 /*
12595 * Generic structure...
12596 */
12597-struct mtrr_ops generic_mtrr_ops = {
12598+const struct mtrr_ops generic_mtrr_ops = {
12599 .use_intel_if = 1,
12600 .set_all = generic_set_all,
12601 .get = generic_get_mtrr,
12602diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c
12603--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12604+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12605@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12606 u64 size_or_mask, size_and_mask;
12607 static bool mtrr_aps_delayed_init;
12608
12609-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12610+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12611
12612-struct mtrr_ops *mtrr_if;
12613+const struct mtrr_ops *mtrr_if;
12614
12615 static void set_mtrr(unsigned int reg, unsigned long base,
12616 unsigned long size, mtrr_type type);
12617
12618-void set_mtrr_ops(struct mtrr_ops *ops)
12619+void set_mtrr_ops(const struct mtrr_ops *ops)
12620 {
12621 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12622 mtrr_ops[ops->vendor] = ops;
12623diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h
12624--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12625+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12626@@ -12,19 +12,19 @@
12627 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12628
12629 struct mtrr_ops {
12630- u32 vendor;
12631- u32 use_intel_if;
12632- void (*set)(unsigned int reg, unsigned long base,
12633+ const u32 vendor;
12634+ const u32 use_intel_if;
12635+ void (* const set)(unsigned int reg, unsigned long base,
12636 unsigned long size, mtrr_type type);
12637- void (*set_all)(void);
12638+ void (* const set_all)(void);
12639
12640- void (*get)(unsigned int reg, unsigned long *base,
12641+ void (* const get)(unsigned int reg, unsigned long *base,
12642 unsigned long *size, mtrr_type *type);
12643- int (*get_free_region)(unsigned long base, unsigned long size,
12644+ int (* const get_free_region)(unsigned long base, unsigned long size,
12645 int replace_reg);
12646- int (*validate_add_page)(unsigned long base, unsigned long size,
12647+ int (* const validate_add_page)(unsigned long base, unsigned long size,
12648 unsigned int type);
12649- int (*have_wrcomb)(void);
12650+ int (* const have_wrcomb)(void);
12651 };
12652
12653 extern int generic_get_free_region(unsigned long base, unsigned long size,
12654@@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12655 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12656 unsigned int type);
12657
12658-extern struct mtrr_ops generic_mtrr_ops;
12659+extern const struct mtrr_ops generic_mtrr_ops;
12660
12661 extern int positive_have_wrcomb(void);
12662
12663@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12664 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12665 void get_mtrr_state(void);
12666
12667-extern void set_mtrr_ops(struct mtrr_ops *ops);
12668+extern void set_mtrr_ops(const struct mtrr_ops *ops);
12669
12670 extern u64 size_or_mask, size_and_mask;
12671-extern struct mtrr_ops *mtrr_if;
12672+extern const struct mtrr_ops *mtrr_if;
12673
12674 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12675 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
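The mtrr.h change constifies both the mtrr_ops instances and the function-pointer members inside the struct, so neither a table slot nor an individual callback can be overwritten after initialization. A minimal sketch of the same constification pattern (struct and function names are invented for the example):

#include <stdio.h>

struct mtrr_ops_model {
    const unsigned vendor;
    void (* const set)(unsigned reg, unsigned long base);  /* the pointer itself is const */
    int  (* const have_wrcomb)(void);
};

static void demo_set(unsigned reg, unsigned long base)
{
    printf("set reg %u base %#lx\n", reg, base);
}

static int demo_have_wrcomb(void) { return 1; }

/* whole object can be placed in .rodata; a stray write faults instead of redirecting a callback */
static const struct mtrr_ops_model demo_ops = {
    .vendor      = 2,
    .set         = demo_set,
    .have_wrcomb = demo_have_wrcomb,
};

int main(void)
{
    demo_ops.set(0, 0xc0000000UL);
    /* demo_ops.set = NULL;   <-- would now be a compile-time error */
    return demo_ops.have_wrcomb() ? 0 : 1;
}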
12676diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c
12677--- linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12678+++ linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12679@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12680
12681 /* Interface defining a CPU specific perfctr watchdog */
12682 struct wd_ops {
12683- int (*reserve)(void);
12684- void (*unreserve)(void);
12685- int (*setup)(unsigned nmi_hz);
12686- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12687- void (*stop)(void);
12688+ int (* const reserve)(void);
12689+ void (* const unreserve)(void);
12690+ int (* const setup)(unsigned nmi_hz);
12691+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12692+ void (* const stop)(void);
12693 unsigned perfctr;
12694 unsigned evntsel;
12695 u64 checkbit;
12696@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12697 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12698 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12699
12700+/* cannot be const */
12701 static struct wd_ops intel_arch_wd_ops;
12702
12703 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12704@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12705 return 1;
12706 }
12707
12708+/* cannot be const */
12709 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12710 .reserve = single_msr_reserve,
12711 .unreserve = single_msr_unreserve,
12712diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c
12713--- linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12714+++ linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12715@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12716 * count to the generic event atomically:
12717 */
12718 again:
12719- prev_raw_count = atomic64_read(&hwc->prev_count);
12720+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12721 rdmsrl(hwc->event_base + idx, new_raw_count);
12722
12723- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12724+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12725 new_raw_count) != prev_raw_count)
12726 goto again;
12727
12728@@ -741,7 +741,7 @@ again:
12729 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12730 delta >>= shift;
12731
12732- atomic64_add(delta, &event->count);
12733+ atomic64_add_unchecked(delta, &event->count);
12734 atomic64_sub(delta, &hwc->period_left);
12735
12736 return new_raw_count;
12737@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12738 * The hw event starts counting from this event offset,
12739 * mark it to be able to extra future deltas:
12740 */
12741- atomic64_set(&hwc->prev_count, (u64)-left);
12742+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12743
12744 err = checking_wrmsrl(hwc->event_base + idx,
12745 (u64)(-left) & x86_pmu.event_mask);
12746@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12747 break;
12748
12749 callchain_store(entry, frame.return_address);
12750- fp = frame.next_frame;
12751+ fp = (__force const void __user *)frame.next_frame;
12752 }
12753 }
12754
12755diff -urNp linux-2.6.32.45/arch/x86/kernel/crash.c linux-2.6.32.45/arch/x86/kernel/crash.c
12756--- linux-2.6.32.45/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12757+++ linux-2.6.32.45/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12758@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12759 regs = args->regs;
12760
12761 #ifdef CONFIG_X86_32
12762- if (!user_mode_vm(regs)) {
12763+ if (!user_mode(regs)) {
12764 crash_fixup_ss_esp(&fixed_regs, regs);
12765 regs = &fixed_regs;
12766 }
12767diff -urNp linux-2.6.32.45/arch/x86/kernel/doublefault_32.c linux-2.6.32.45/arch/x86/kernel/doublefault_32.c
12768--- linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12769+++ linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12770@@ -11,7 +11,7 @@
12771
12772 #define DOUBLEFAULT_STACKSIZE (1024)
12773 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12774-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12775+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12776
12777 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12778
12779@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12780 unsigned long gdt, tss;
12781
12782 store_gdt(&gdt_desc);
12783- gdt = gdt_desc.address;
12784+ gdt = (unsigned long)gdt_desc.address;
12785
12786 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12787
12788@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12789 /* 0x2 bit is always set */
12790 .flags = X86_EFLAGS_SF | 0x2,
12791 .sp = STACK_START,
12792- .es = __USER_DS,
12793+ .es = __KERNEL_DS,
12794 .cs = __KERNEL_CS,
12795 .ss = __KERNEL_DS,
12796- .ds = __USER_DS,
12797+ .ds = __KERNEL_DS,
12798 .fs = __KERNEL_PERCPU,
12799
12800 .__cr3 = __pa_nodebug(swapper_pg_dir),
12801diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c
12802--- linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12803+++ linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12804@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12805 #endif
12806
12807 for (;;) {
12808- struct thread_info *context;
12809+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12810+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12811
12812- context = (struct thread_info *)
12813- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12814- bp = print_context_stack(context, stack, bp, ops,
12815- data, NULL, &graph);
12816-
12817- stack = (unsigned long *)context->previous_esp;
12818- if (!stack)
12819+ if (stack_start == task_stack_page(task))
12820 break;
12821+ stack = *(unsigned long **)stack_start;
12822 if (ops->stack(data, "IRQ") < 0)
12823 break;
12824 touch_nmi_watchdog();
12825@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12826 * When in-kernel, we also print out the stack and code at the
12827 * time of the fault..
12828 */
12829- if (!user_mode_vm(regs)) {
12830+ if (!user_mode(regs)) {
12831 unsigned int code_prologue = code_bytes * 43 / 64;
12832 unsigned int code_len = code_bytes;
12833 unsigned char c;
12834 u8 *ip;
12835+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12836
12837 printk(KERN_EMERG "Stack:\n");
12838 show_stack_log_lvl(NULL, regs, &regs->sp,
12839@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12840
12841 printk(KERN_EMERG "Code: ");
12842
12843- ip = (u8 *)regs->ip - code_prologue;
12844+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12845 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12846 /* try starting at IP */
12847- ip = (u8 *)regs->ip;
12848+ ip = (u8 *)regs->ip + cs_base;
12849 code_len = code_len - code_prologue + 1;
12850 }
12851 for (i = 0; i < code_len; i++, ip++) {
12852@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12853 printk(" Bad EIP value.");
12854 break;
12855 }
12856- if (ip == (u8 *)regs->ip)
12857+ if (ip == (u8 *)regs->ip + cs_base)
12858 printk("<%02x> ", c);
12859 else
12860 printk("%02x ", c);
12861@@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12862 {
12863 unsigned short ud2;
12864
12865+ ip = ktla_ktva(ip);
12866 if (ip < PAGE_OFFSET)
12867 return 0;
12868 if (probe_kernel_address((unsigned short *)ip, ud2))
12869diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c
12870--- linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12871+++ linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12872@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12873 unsigned long *irq_stack_end =
12874 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12875 unsigned used = 0;
12876- struct thread_info *tinfo;
12877 int graph = 0;
12878+ void *stack_start;
12879
12880 if (!task)
12881 task = current;
12882@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12883 * current stack address. If the stacks consist of nested
12884 * exceptions
12885 */
12886- tinfo = task_thread_info(task);
12887 for (;;) {
12888 char *id;
12889 unsigned long *estack_end;
12890+
12891 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12892 &used, &id);
12893
12894@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12895 if (ops->stack(data, id) < 0)
12896 break;
12897
12898- bp = print_context_stack(tinfo, stack, bp, ops,
12899+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12900 data, estack_end, &graph);
12901 ops->stack(data, "<EOE>");
12902 /*
12903@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12904 if (stack >= irq_stack && stack < irq_stack_end) {
12905 if (ops->stack(data, "IRQ") < 0)
12906 break;
12907- bp = print_context_stack(tinfo, stack, bp,
12908+ bp = print_context_stack(task, irq_stack, stack, bp,
12909 ops, data, irq_stack_end, &graph);
12910 /*
12911 * We link to the next stack (which would be
12912@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12913 /*
12914 * This handles the process stack:
12915 */
12916- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12917+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12918+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12919 put_cpu();
12920 }
12921 EXPORT_SYMBOL(dump_trace);
12922diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.c linux-2.6.32.45/arch/x86/kernel/dumpstack.c
12923--- linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12924+++ linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12925@@ -2,6 +2,9 @@
12926 * Copyright (C) 1991, 1992 Linus Torvalds
12927 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12928 */
12929+#ifdef CONFIG_GRKERNSEC_HIDESYM
12930+#define __INCLUDED_BY_HIDESYM 1
12931+#endif
12932 #include <linux/kallsyms.h>
12933 #include <linux/kprobes.h>
12934 #include <linux/uaccess.h>
12935@@ -28,7 +31,7 @@ static int die_counter;
12936
12937 void printk_address(unsigned long address, int reliable)
12938 {
12939- printk(" [<%p>] %s%pS\n", (void *) address,
12940+ printk(" [<%p>] %s%pA\n", (void *) address,
12941 reliable ? "" : "? ", (void *) address);
12942 }
12943
12944@@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12945 static void
12946 print_ftrace_graph_addr(unsigned long addr, void *data,
12947 const struct stacktrace_ops *ops,
12948- struct thread_info *tinfo, int *graph)
12949+ struct task_struct *task, int *graph)
12950 {
12951- struct task_struct *task = tinfo->task;
12952 unsigned long ret_addr;
12953 int index = task->curr_ret_stack;
12954
12955@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12956 static inline void
12957 print_ftrace_graph_addr(unsigned long addr, void *data,
12958 const struct stacktrace_ops *ops,
12959- struct thread_info *tinfo, int *graph)
12960+ struct task_struct *task, int *graph)
12961 { }
12962 #endif
12963
12964@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12965 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12966 */
12967
12968-static inline int valid_stack_ptr(struct thread_info *tinfo,
12969- void *p, unsigned int size, void *end)
12970+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12971 {
12972- void *t = tinfo;
12973 if (end) {
12974 if (p < end && p >= (end-THREAD_SIZE))
12975 return 1;
12976@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12977 }
12978
12979 unsigned long
12980-print_context_stack(struct thread_info *tinfo,
12981+print_context_stack(struct task_struct *task, void *stack_start,
12982 unsigned long *stack, unsigned long bp,
12983 const struct stacktrace_ops *ops, void *data,
12984 unsigned long *end, int *graph)
12985 {
12986 struct stack_frame *frame = (struct stack_frame *)bp;
12987
12988- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12989+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12990 unsigned long addr;
12991
12992 addr = *stack;
12993@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12994 } else {
12995 ops->address(data, addr, 0);
12996 }
12997- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12998+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12999 }
13000 stack++;
13001 }
13002@@ -180,7 +180,7 @@ void dump_stack(void)
13003 #endif
13004
13005 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
13006- current->pid, current->comm, print_tainted(),
13007+ task_pid_nr(current), current->comm, print_tainted(),
13008 init_utsname()->release,
13009 (int)strcspn(init_utsname()->version, " "),
13010 init_utsname()->version);
13011@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
13012 return flags;
13013 }
13014
13015+extern void gr_handle_kernel_exploit(void);
13016+
13017 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13018 {
13019 if (regs && kexec_should_crash(current))
13020@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
13021 panic("Fatal exception in interrupt");
13022 if (panic_on_oops)
13023 panic("Fatal exception");
13024- do_exit(signr);
13025+
13026+ gr_handle_kernel_exploit();
13027+
13028+ do_group_exit(signr);
13029 }
13030
13031 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13032@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
13033 unsigned long flags = oops_begin();
13034 int sig = SIGSEGV;
13035
13036- if (!user_mode_vm(regs))
13037+ if (!user_mode(regs))
13038 report_bug(regs->ip, regs);
13039
13040 if (__die(str, regs, err))
13041diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.h linux-2.6.32.45/arch/x86/kernel/dumpstack.h
13042--- linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
13043+++ linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
13044@@ -15,7 +15,7 @@
13045 #endif
13046
13047 extern unsigned long
13048-print_context_stack(struct thread_info *tinfo,
13049+print_context_stack(struct task_struct *task, void *stack_start,
13050 unsigned long *stack, unsigned long bp,
13051 const struct stacktrace_ops *ops, void *data,
13052 unsigned long *end, int *graph);
13053diff -urNp linux-2.6.32.45/arch/x86/kernel/e820.c linux-2.6.32.45/arch/x86/kernel/e820.c
13054--- linux-2.6.32.45/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
13055+++ linux-2.6.32.45/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
13056@@ -733,7 +733,7 @@ struct early_res {
13057 };
13058 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
13059 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
13060- {}
13061+ { 0, 0, {0}, 0 }
13062 };
13063
13064 static int __init find_overlapped_early(u64 start, u64 end)
13065diff -urNp linux-2.6.32.45/arch/x86/kernel/early_printk.c linux-2.6.32.45/arch/x86/kernel/early_printk.c
13066--- linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
13067+++ linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
13068@@ -7,6 +7,7 @@
13069 #include <linux/pci_regs.h>
13070 #include <linux/pci_ids.h>
13071 #include <linux/errno.h>
13072+#include <linux/sched.h>
13073 #include <asm/io.h>
13074 #include <asm/processor.h>
13075 #include <asm/fcntl.h>
13076@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
13077 int n;
13078 va_list ap;
13079
13080+ pax_track_stack();
13081+
13082 va_start(ap, fmt);
13083 n = vscnprintf(buf, sizeof(buf), fmt, ap);
13084 early_console->write(early_console, buf, n);
13085diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_32.c linux-2.6.32.45/arch/x86/kernel/efi_32.c
13086--- linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
13087+++ linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
13088@@ -38,70 +38,38 @@
13089 */
13090
13091 static unsigned long efi_rt_eflags;
13092-static pgd_t efi_bak_pg_dir_pointer[2];
13093+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
13094
13095-void efi_call_phys_prelog(void)
13096+void __init efi_call_phys_prelog(void)
13097 {
13098- unsigned long cr4;
13099- unsigned long temp;
13100 struct desc_ptr gdt_descr;
13101
13102 local_irq_save(efi_rt_eflags);
13103
13104- /*
13105- * If I don't have PAE, I should just duplicate two entries in page
13106- * directory. If I have PAE, I just need to duplicate one entry in
13107- * page directory.
13108- */
13109- cr4 = read_cr4_safe();
13110
13111- if (cr4 & X86_CR4_PAE) {
13112- efi_bak_pg_dir_pointer[0].pgd =
13113- swapper_pg_dir[pgd_index(0)].pgd;
13114- swapper_pg_dir[0].pgd =
13115- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13116- } else {
13117- efi_bak_pg_dir_pointer[0].pgd =
13118- swapper_pg_dir[pgd_index(0)].pgd;
13119- efi_bak_pg_dir_pointer[1].pgd =
13120- swapper_pg_dir[pgd_index(0x400000)].pgd;
13121- swapper_pg_dir[pgd_index(0)].pgd =
13122- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13123- temp = PAGE_OFFSET + 0x400000;
13124- swapper_pg_dir[pgd_index(0x400000)].pgd =
13125- swapper_pg_dir[pgd_index(temp)].pgd;
13126- }
13127+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
13128+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13129+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
13130
13131 /*
13132 * After the lock is released, the original page table is restored.
13133 */
13134 __flush_tlb_all();
13135
13136- gdt_descr.address = __pa(get_cpu_gdt_table(0));
13137+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
13138 gdt_descr.size = GDT_SIZE - 1;
13139 load_gdt(&gdt_descr);
13140 }
13141
13142-void efi_call_phys_epilog(void)
13143+void __init efi_call_phys_epilog(void)
13144 {
13145- unsigned long cr4;
13146 struct desc_ptr gdt_descr;
13147
13148- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
13149+ gdt_descr.address = get_cpu_gdt_table(0);
13150 gdt_descr.size = GDT_SIZE - 1;
13151 load_gdt(&gdt_descr);
13152
13153- cr4 = read_cr4_safe();
13154-
13155- if (cr4 & X86_CR4_PAE) {
13156- swapper_pg_dir[pgd_index(0)].pgd =
13157- efi_bak_pg_dir_pointer[0].pgd;
13158- } else {
13159- swapper_pg_dir[pgd_index(0)].pgd =
13160- efi_bak_pg_dir_pointer[0].pgd;
13161- swapper_pg_dir[pgd_index(0x400000)].pgd =
13162- efi_bak_pg_dir_pointer[1].pgd;
13163- }
13164+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
13165
13166 /*
13167 * After the lock is released, the original page table is restored.
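The efi_32.c rewrite replaces the PAE/non-PAE special cases with two clone_pgd_range() calls: save the low PGD slots, mirror the kernel mappings into them for the flat-mode EFI call, and restore the saved copy in the epilog. A toy model of that save/mirror/restore sequence on a plain array standing in for the page directory (the sizes and the "boundary" value below are made up; clone_pgd_range here is just the typed memcpy it amounts to):

#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD_MODEL   8   /* tiny stand-in for the real page-directory size */
#define KERNEL_PGD_BOUNDARY  4   /* first slot mapping kernel space, for the model */
#define KERNEL_PGD_PTRS      (PTRS_PER_PGD_MODEL - KERNEL_PGD_BOUNDARY)

typedef unsigned long pgd_model_t;

static void clone_pgd_range(pgd_model_t *dst, const pgd_model_t *src, int count)
{
    memcpy(dst, src, count * sizeof(*dst));   /* copy a run of pgd entries */
}

int main(void)
{
    pgd_model_t swapper_pg_dir[PTRS_PER_PGD_MODEL] = { 0, 0, 0, 0, 0xA, 0xB, 0xC, 0xD };
    pgd_model_t backup[KERNEL_PGD_PTRS];

    /* prelog: save the low (identity) slots, then mirror the kernel mappings into them */
    clone_pgd_range(backup, swapper_pg_dir, KERNEL_PGD_PTRS);
    clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                    KERNEL_PGD_BOUNDARY < KERNEL_PGD_PTRS ? KERNEL_PGD_BOUNDARY
                                                          : KERNEL_PGD_PTRS);
    printf("low slot 0 now %#lx\n", swapper_pg_dir[0]);

    /* epilog: put the original low slots back */
    clone_pgd_range(swapper_pg_dir, backup, KERNEL_PGD_PTRS);
    return 0;
}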
13168diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S
13169--- linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
13170+++ linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
13171@@ -6,6 +6,7 @@
13172 */
13173
13174 #include <linux/linkage.h>
13175+#include <linux/init.h>
13176 #include <asm/page_types.h>
13177
13178 /*
13179@@ -20,7 +21,7 @@
13180 * service functions will comply with gcc calling convention, too.
13181 */
13182
13183-.text
13184+__INIT
13185 ENTRY(efi_call_phys)
13186 /*
13187 * 0. The function can only be called in Linux kernel. So CS has been
13188@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
13189 * The mapping of lower virtual memory has been created in prelog and
13190 * epilog.
13191 */
13192- movl $1f, %edx
13193- subl $__PAGE_OFFSET, %edx
13194- jmp *%edx
13195+ jmp 1f-__PAGE_OFFSET
13196 1:
13197
13198 /*
13199@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
13200 * parameter 2, ..., param n. To make things easy, we save the return
13201 * address of efi_call_phys in a global variable.
13202 */
13203- popl %edx
13204- movl %edx, saved_return_addr
13205- /* get the function pointer into ECX*/
13206- popl %ecx
13207- movl %ecx, efi_rt_function_ptr
13208- movl $2f, %edx
13209- subl $__PAGE_OFFSET, %edx
13210- pushl %edx
13211+ popl (saved_return_addr)
13212+ popl (efi_rt_function_ptr)
13213
13214 /*
13215 * 3. Clear PG bit in %CR0.
13216@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
13217 /*
13218 * 5. Call the physical function.
13219 */
13220- jmp *%ecx
13221+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
13222
13223-2:
13224 /*
13225 * 6. After EFI runtime service returns, control will return to
13226 * following instruction. We'd better readjust stack pointer first.
13227@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
13228 movl %cr0, %edx
13229 orl $0x80000000, %edx
13230 movl %edx, %cr0
13231- jmp 1f
13232-1:
13233+
13234 /*
13235 * 8. Now restore the virtual mode from flat mode by
13236 * adding EIP with PAGE_OFFSET.
13237 */
13238- movl $1f, %edx
13239- jmp *%edx
13240+ jmp 1f+__PAGE_OFFSET
13241 1:
13242
13243 /*
13244 * 9. Balance the stack. And because EAX contain the return value,
13245 * we'd better not clobber it.
13246 */
13247- leal efi_rt_function_ptr, %edx
13248- movl (%edx), %ecx
13249- pushl %ecx
13250+ pushl (efi_rt_function_ptr)
13251
13252 /*
13253- * 10. Push the saved return address onto the stack and return.
13254+ * 10. Return to the saved return address.
13255 */
13256- leal saved_return_addr, %edx
13257- movl (%edx), %ecx
13258- pushl %ecx
13259- ret
13260+ jmpl *(saved_return_addr)
13261 ENDPROC(efi_call_phys)
13262 .previous
13263
13264-.data
13265+__INITDATA
13266 saved_return_addr:
13267 .long 0
13268 efi_rt_function_ptr:
13269diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_32.S linux-2.6.32.45/arch/x86/kernel/entry_32.S
13270--- linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
13271+++ linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-08-23 20:24:19.000000000 -0400
13272@@ -185,13 +185,146 @@
13273 /*CFI_REL_OFFSET gs, PT_GS*/
13274 .endm
13275 .macro SET_KERNEL_GS reg
13276+
13277+#ifdef CONFIG_CC_STACKPROTECTOR
13278 movl $(__KERNEL_STACK_CANARY), \reg
13279+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13280+ movl $(__USER_DS), \reg
13281+#else
13282+ xorl \reg, \reg
13283+#endif
13284+
13285 movl \reg, %gs
13286 .endm
13287
13288 #endif /* CONFIG_X86_32_LAZY_GS */
13289
13290-.macro SAVE_ALL
13291+.macro pax_enter_kernel
13292+#ifdef CONFIG_PAX_KERNEXEC
13293+ call pax_enter_kernel
13294+#endif
13295+.endm
13296+
13297+.macro pax_exit_kernel
13298+#ifdef CONFIG_PAX_KERNEXEC
13299+ call pax_exit_kernel
13300+#endif
13301+.endm
13302+
13303+#ifdef CONFIG_PAX_KERNEXEC
13304+ENTRY(pax_enter_kernel)
13305+#ifdef CONFIG_PARAVIRT
13306+ pushl %eax
13307+ pushl %ecx
13308+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13309+ mov %eax, %esi
13310+#else
13311+ mov %cr0, %esi
13312+#endif
13313+ bts $16, %esi
13314+ jnc 1f
13315+ mov %cs, %esi
13316+ cmp $__KERNEL_CS, %esi
13317+ jz 3f
13318+ ljmp $__KERNEL_CS, $3f
13319+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13320+2:
13321+#ifdef CONFIG_PARAVIRT
13322+ mov %esi, %eax
13323+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13324+#else
13325+ mov %esi, %cr0
13326+#endif
13327+3:
13328+#ifdef CONFIG_PARAVIRT
13329+ popl %ecx
13330+ popl %eax
13331+#endif
13332+ ret
13333+ENDPROC(pax_enter_kernel)
13334+
13335+ENTRY(pax_exit_kernel)
13336+#ifdef CONFIG_PARAVIRT
13337+ pushl %eax
13338+ pushl %ecx
13339+#endif
13340+ mov %cs, %esi
13341+ cmp $__KERNEXEC_KERNEL_CS, %esi
13342+ jnz 2f
13343+#ifdef CONFIG_PARAVIRT
13344+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13345+ mov %eax, %esi
13346+#else
13347+ mov %cr0, %esi
13348+#endif
13349+ btr $16, %esi
13350+ ljmp $__KERNEL_CS, $1f
13351+1:
13352+#ifdef CONFIG_PARAVIRT
13353+ mov %esi, %eax
13354+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13355+#else
13356+ mov %esi, %cr0
13357+#endif
13358+2:
13359+#ifdef CONFIG_PARAVIRT
13360+ popl %ecx
13361+ popl %eax
13362+#endif
13363+ ret
13364+ENDPROC(pax_exit_kernel)
13365+#endif
13366+
13367+.macro pax_erase_kstack
13368+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13369+ call pax_erase_kstack
13370+#endif
13371+.endm
13372+
13373+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13374+/*
13375+ * ebp: thread_info
13376+ * ecx, edx: can be clobbered
13377+ */
13378+ENTRY(pax_erase_kstack)
13379+ pushl %edi
13380+ pushl %eax
13381+
13382+ mov TI_lowest_stack(%ebp), %edi
13383+ mov $-0xBEEF, %eax
13384+ std
13385+
13386+1: mov %edi, %ecx
13387+ and $THREAD_SIZE_asm - 1, %ecx
13388+ shr $2, %ecx
13389+ repne scasl
13390+ jecxz 2f
13391+
13392+ cmp $2*16, %ecx
13393+ jc 2f
13394+
13395+ mov $2*16, %ecx
13396+ repe scasl
13397+ jecxz 2f
13398+ jne 1b
13399+
13400+2: cld
13401+ mov %esp, %ecx
13402+ sub %edi, %ecx
13403+ shr $2, %ecx
13404+ rep stosl
13405+
13406+ mov TI_task_thread_sp0(%ebp), %edi
13407+ sub $128, %edi
13408+ mov %edi, TI_lowest_stack(%ebp)
13409+
13410+ popl %eax
13411+ popl %edi
13412+ ret
13413+ENDPROC(pax_erase_kstack)
13414+#endif
13415+
13416+.macro __SAVE_ALL _DS
13417 cld
13418 PUSH_GS
13419 pushl %fs
13420@@ -224,7 +357,7 @@
13421 pushl %ebx
13422 CFI_ADJUST_CFA_OFFSET 4
13423 CFI_REL_OFFSET ebx, 0
13424- movl $(__USER_DS), %edx
13425+ movl $\_DS, %edx
13426 movl %edx, %ds
13427 movl %edx, %es
13428 movl $(__KERNEL_PERCPU), %edx
13429@@ -232,6 +365,15 @@
13430 SET_KERNEL_GS %edx
13431 .endm
13432
13433+.macro SAVE_ALL
13434+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13435+ __SAVE_ALL __KERNEL_DS
13436+ pax_enter_kernel
13437+#else
13438+ __SAVE_ALL __USER_DS
13439+#endif
13440+.endm
13441+
13442 .macro RESTORE_INT_REGS
13443 popl %ebx
13444 CFI_ADJUST_CFA_OFFSET -4
13445@@ -352,7 +494,15 @@ check_userspace:
13446 movb PT_CS(%esp), %al
13447 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13448 cmpl $USER_RPL, %eax
13449+
13450+#ifdef CONFIG_PAX_KERNEXEC
13451+ jae resume_userspace
13452+
13453+ PAX_EXIT_KERNEL
13454+ jmp resume_kernel
13455+#else
13456 jb resume_kernel # not returning to v8086 or userspace
13457+#endif
13458
13459 ENTRY(resume_userspace)
13460 LOCKDEP_SYS_EXIT
13461@@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13462 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13463 # int/exception return?
13464 jne work_pending
13465- jmp restore_all
13466+ jmp restore_all_pax
13467 END(ret_from_exception)
13468
13469 #ifdef CONFIG_PREEMPT
13470@@ -414,25 +564,36 @@ sysenter_past_esp:
13471 /*CFI_REL_OFFSET cs, 0*/
13472 /*
13473 * Push current_thread_info()->sysenter_return to the stack.
13474- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13475- * pushed above; +8 corresponds to copy_thread's esp0 setting.
13476 */
13477- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13478+ pushl $0
13479 CFI_ADJUST_CFA_OFFSET 4
13480 CFI_REL_OFFSET eip, 0
13481
13482 pushl %eax
13483 CFI_ADJUST_CFA_OFFSET 4
13484 SAVE_ALL
13485+ GET_THREAD_INFO(%ebp)
13486+ movl TI_sysenter_return(%ebp),%ebp
13487+ movl %ebp,PT_EIP(%esp)
13488 ENABLE_INTERRUPTS(CLBR_NONE)
13489
13490 /*
13491 * Load the potential sixth argument from user stack.
13492 * Careful about security.
13493 */
13494+ movl PT_OLDESP(%esp),%ebp
13495+
13496+#ifdef CONFIG_PAX_MEMORY_UDEREF
13497+ mov PT_OLDSS(%esp),%ds
13498+1: movl %ds:(%ebp),%ebp
13499+ push %ss
13500+ pop %ds
13501+#else
13502 cmpl $__PAGE_OFFSET-3,%ebp
13503 jae syscall_fault
13504 1: movl (%ebp),%ebp
13505+#endif
13506+
13507 movl %ebp,PT_EBP(%esp)
13508 .section __ex_table,"a"
13509 .align 4
13510@@ -455,12 +616,23 @@ sysenter_do_call:
13511 testl $_TIF_ALLWORK_MASK, %ecx
13512 jne sysexit_audit
13513 sysenter_exit:
13514+
13515+#ifdef CONFIG_PAX_RANDKSTACK
13516+ pushl_cfi %eax
13517+ call pax_randomize_kstack
13518+ popl_cfi %eax
13519+#endif
13520+
13521+ pax_erase_kstack
13522+
13523 /* if something modifies registers it must also disable sysexit */
13524 movl PT_EIP(%esp), %edx
13525 movl PT_OLDESP(%esp), %ecx
13526 xorl %ebp,%ebp
13527 TRACE_IRQS_ON
13528 1: mov PT_FS(%esp), %fs
13529+2: mov PT_DS(%esp), %ds
13530+3: mov PT_ES(%esp), %es
13531 PTGS_TO_GS
13532 ENABLE_INTERRUPTS_SYSEXIT
13533
13534@@ -477,6 +649,9 @@ sysenter_audit:
13535 movl %eax,%edx /* 2nd arg: syscall number */
13536 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13537 call audit_syscall_entry
13538+
13539+ pax_erase_kstack
13540+
13541 pushl %ebx
13542 CFI_ADJUST_CFA_OFFSET 4
13543 movl PT_EAX(%esp),%eax /* reload syscall number */
13544@@ -504,11 +679,17 @@ sysexit_audit:
13545
13546 CFI_ENDPROC
13547 .pushsection .fixup,"ax"
13548-2: movl $0,PT_FS(%esp)
13549+4: movl $0,PT_FS(%esp)
13550+ jmp 1b
13551+5: movl $0,PT_DS(%esp)
13552+ jmp 1b
13553+6: movl $0,PT_ES(%esp)
13554 jmp 1b
13555 .section __ex_table,"a"
13556 .align 4
13557- .long 1b,2b
13558+ .long 1b,4b
13559+ .long 2b,5b
13560+ .long 3b,6b
13561 .popsection
13562 PTGS_TO_GS_EX
13563 ENDPROC(ia32_sysenter_target)
13564@@ -538,6 +719,14 @@ syscall_exit:
13565 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13566 jne syscall_exit_work
13567
13568+restore_all_pax:
13569+
13570+#ifdef CONFIG_PAX_RANDKSTACK
13571+ call pax_randomize_kstack
13572+#endif
13573+
13574+ pax_erase_kstack
13575+
13576 restore_all:
13577 TRACE_IRQS_IRET
13578 restore_all_notrace:
13579@@ -602,10 +791,29 @@ ldt_ss:
13580 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13581 mov %dx, %ax /* eax: new kernel esp */
13582 sub %eax, %edx /* offset (low word is 0) */
13583- PER_CPU(gdt_page, %ebx)
13584+#ifdef CONFIG_SMP
13585+ movl PER_CPU_VAR(cpu_number), %ebx
13586+ shll $PAGE_SHIFT_asm, %ebx
13587+ addl $cpu_gdt_table, %ebx
13588+#else
13589+ movl $cpu_gdt_table, %ebx
13590+#endif
13591 shr $16, %edx
13592+
13593+#ifdef CONFIG_PAX_KERNEXEC
13594+ mov %cr0, %esi
13595+ btr $16, %esi
13596+ mov %esi, %cr0
13597+#endif
13598+
13599 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13600 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13601+
13602+#ifdef CONFIG_PAX_KERNEXEC
13603+ bts $16, %esi
13604+ mov %esi, %cr0
13605+#endif
13606+
13607 pushl $__ESPFIX_SS
13608 CFI_ADJUST_CFA_OFFSET 4
13609 push %eax /* new kernel esp */
13610@@ -636,31 +844,25 @@ work_resched:
13611 movl TI_flags(%ebp), %ecx
13612 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13613 # than syscall tracing?
13614- jz restore_all
13615+ jz restore_all_pax
13616 testb $_TIF_NEED_RESCHED, %cl
13617 jnz work_resched
13618
13619 work_notifysig: # deal with pending signals and
13620 # notify-resume requests
13621+ movl %esp, %eax
13622 #ifdef CONFIG_VM86
13623 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13624- movl %esp, %eax
13625- jne work_notifysig_v86 # returning to kernel-space or
13626+ jz 1f # returning to kernel-space or
13627 # vm86-space
13628- xorl %edx, %edx
13629- call do_notify_resume
13630- jmp resume_userspace_sig
13631
13632- ALIGN
13633-work_notifysig_v86:
13634 pushl %ecx # save ti_flags for do_notify_resume
13635 CFI_ADJUST_CFA_OFFSET 4
13636 call save_v86_state # %eax contains pt_regs pointer
13637 popl %ecx
13638 CFI_ADJUST_CFA_OFFSET -4
13639 movl %eax, %esp
13640-#else
13641- movl %esp, %eax
13642+1:
13643 #endif
13644 xorl %edx, %edx
13645 call do_notify_resume
13646@@ -673,6 +875,9 @@ syscall_trace_entry:
13647 movl $-ENOSYS,PT_EAX(%esp)
13648 movl %esp, %eax
13649 call syscall_trace_enter
13650+
13651+ pax_erase_kstack
13652+
13653 /* What it returned is what we'll actually use. */
13654 cmpl $(nr_syscalls), %eax
13655 jnae syscall_call
13656@@ -695,6 +900,10 @@ END(syscall_exit_work)
13657
13658 RING0_INT_FRAME # can't unwind into user space anyway
13659 syscall_fault:
13660+#ifdef CONFIG_PAX_MEMORY_UDEREF
13661+ push %ss
13662+ pop %ds
13663+#endif
13664 GET_THREAD_INFO(%ebp)
13665 movl $-EFAULT,PT_EAX(%esp)
13666 jmp resume_userspace
13667@@ -726,6 +935,33 @@ PTREGSCALL(rt_sigreturn)
13668 PTREGSCALL(vm86)
13669 PTREGSCALL(vm86old)
13670
13671+ ALIGN;
13672+ENTRY(kernel_execve)
13673+ push %ebp
13674+ sub $PT_OLDSS+4,%esp
13675+ push %edi
13676+ push %ecx
13677+ push %eax
13678+ lea 3*4(%esp),%edi
13679+ mov $PT_OLDSS/4+1,%ecx
13680+ xorl %eax,%eax
13681+ rep stosl
13682+ pop %eax
13683+ pop %ecx
13684+ pop %edi
13685+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13686+ mov %eax,PT_EBX(%esp)
13687+ mov %edx,PT_ECX(%esp)
13688+ mov %ecx,PT_EDX(%esp)
13689+ mov %esp,%eax
13690+ call sys_execve
13691+ GET_THREAD_INFO(%ebp)
13692+ test %eax,%eax
13693+ jz syscall_exit
13694+ add $PT_OLDSS+4,%esp
13695+ pop %ebp
13696+ ret
13697+
13698 .macro FIXUP_ESPFIX_STACK
13699 /*
13700 * Switch back for ESPFIX stack to the normal zerobased stack
13701@@ -735,7 +971,13 @@ PTREGSCALL(vm86old)
13702 * normal stack and adjusts ESP with the matching offset.
13703 */
13704 /* fixup the stack */
13705- PER_CPU(gdt_page, %ebx)
13706+#ifdef CONFIG_SMP
13707+ movl PER_CPU_VAR(cpu_number), %ebx
13708+ shll $PAGE_SHIFT_asm, %ebx
13709+ addl $cpu_gdt_table, %ebx
13710+#else
13711+ movl $cpu_gdt_table, %ebx
13712+#endif
13713 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13714 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13715 shl $16, %eax
13716@@ -1198,7 +1440,6 @@ return_to_handler:
13717 ret
13718 #endif
13719
13720-.section .rodata,"a"
13721 #include "syscall_table_32.S"
13722
13723 syscall_table_size=(.-sys_call_table)
13724@@ -1255,9 +1496,12 @@ error_code:
13725 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13726 REG_TO_PTGS %ecx
13727 SET_KERNEL_GS %ecx
13728- movl $(__USER_DS), %ecx
13729+ movl $(__KERNEL_DS), %ecx
13730 movl %ecx, %ds
13731 movl %ecx, %es
13732+
13733+ pax_enter_kernel
13734+
13735 TRACE_IRQS_OFF
13736 movl %esp,%eax # pt_regs pointer
13737 call *%edi
13738@@ -1351,6 +1595,9 @@ nmi_stack_correct:
13739 xorl %edx,%edx # zero error code
13740 movl %esp,%eax # pt_regs pointer
13741 call do_nmi
13742+
13743+ pax_exit_kernel
13744+
13745 jmp restore_all_notrace
13746 CFI_ENDPROC
13747
13748@@ -1391,6 +1638,9 @@ nmi_espfix_stack:
13749 FIXUP_ESPFIX_STACK # %eax == %esp
13750 xorl %edx,%edx # zero error code
13751 call do_nmi
13752+
13753+ pax_exit_kernel
13754+
13755 RESTORE_REGS
13756 lss 12+4(%esp), %esp # back to espfix stack
13757 CFI_ADJUST_CFA_OFFSET -24
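Several additions in the entry_32.S hunk (the ldt_ss/ESPFIX path and pax_enter_kernel/pax_exit_kernel) clear CR0 bit 16 before writing structures that KERNEXEC keeps read-only, such as the GDT, and set it again right afterwards. The bracket looks roughly like the C sketch below; cr0, read_cr0() and write_cr0() are plain variables and helper names invented for the model, since the real control register can only be touched in kernel mode:

#include <stdint.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)   /* write-protect bit, as in the real CR0 */

static uint64_t cr0 = 0x80050033UL;   /* made-up value with WP set */

/* stand-ins for mov %cr0,%reg / mov %reg,%cr0 */
static uint64_t read_cr0(void)        { return cr0; }
static void     write_cr0(uint64_t v) { cr0 = v;    }

static void patch_gdt_entry(uint16_t *slot, uint16_t value)
{
    uint64_t saved = read_cr0();
    write_cr0(saved & ~X86_CR0_WP);   /* open: writes to protected pages allowed */
    *slot = value;                    /* the one permitted store */
    write_cr0(saved | X86_CR0_WP);    /* close the window again */
}

int main(void)
{
    uint16_t gdt_slot = 0;
    patch_gdt_entry(&gdt_slot, 0x9b00);
    printf("slot=%#x cr0.WP=%lu\n", gdt_slot, (unsigned long)((cr0 >> 16) & 1));
    return 0;
}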
13758diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_64.S linux-2.6.32.45/arch/x86/kernel/entry_64.S
13759--- linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13760+++ linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-08-25 17:42:18.000000000 -0400
13761@@ -53,6 +53,7 @@
13762 #include <asm/paravirt.h>
13763 #include <asm/ftrace.h>
13764 #include <asm/percpu.h>
13765+#include <asm/pgtable.h>
13766
13767 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13768 #include <linux/elf-em.h>
13769@@ -174,6 +175,262 @@ ENTRY(native_usergs_sysret64)
13770 ENDPROC(native_usergs_sysret64)
13771 #endif /* CONFIG_PARAVIRT */
13772
13773+ .macro ljmpq sel, off
13774+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13775+ .byte 0x48; ljmp *1234f(%rip)
13776+ .pushsection .rodata
13777+ .align 16
13778+ 1234: .quad \off; .word \sel
13779+ .popsection
13780+#else
13781+ pushq $\sel
13782+ pushq $\off
13783+ lretq
13784+#endif
13785+ .endm
13786+
13787+ .macro pax_enter_kernel
13788+#ifdef CONFIG_PAX_KERNEXEC
13789+ call pax_enter_kernel
13790+#endif
13791+ .endm
13792+
13793+ .macro pax_exit_kernel
13794+#ifdef CONFIG_PAX_KERNEXEC
13795+ call pax_exit_kernel
13796+#endif
13797+ .endm
13798+
13799+#ifdef CONFIG_PAX_KERNEXEC
13800+ENTRY(pax_enter_kernel)
13801+ pushq %rdi
13802+
13803+#ifdef CONFIG_PARAVIRT
13804+ PV_SAVE_REGS(CLBR_RDI)
13805+#endif
13806+
13807+ GET_CR0_INTO_RDI
13808+ bts $16,%rdi
13809+ jnc 1f
13810+ mov %cs,%edi
13811+ cmp $__KERNEL_CS,%edi
13812+ jz 3f
13813+ ljmpq __KERNEL_CS,3f
13814+1: ljmpq __KERNEXEC_KERNEL_CS,2f
13815+2: SET_RDI_INTO_CR0
13816+3:
13817+
13818+#ifdef CONFIG_PARAVIRT
13819+ PV_RESTORE_REGS(CLBR_RDI)
13820+#endif
13821+
13822+ popq %rdi
13823+ retq
13824+ENDPROC(pax_enter_kernel)
13825+
13826+ENTRY(pax_exit_kernel)
13827+ pushq %rdi
13828+
13829+#ifdef CONFIG_PARAVIRT
13830+ PV_SAVE_REGS(CLBR_RDI)
13831+#endif
13832+
13833+ mov %cs,%rdi
13834+ cmp $__KERNEXEC_KERNEL_CS,%edi
13835+ jnz 2f
13836+ GET_CR0_INTO_RDI
13837+ btr $16,%rdi
13838+ ljmpq __KERNEL_CS,1f
13839+1: SET_RDI_INTO_CR0
13840+2:
13841+
13842+#ifdef CONFIG_PARAVIRT
13843+ PV_RESTORE_REGS(CLBR_RDI);
13844+#endif
13845+
13846+ popq %rdi
13847+ retq
13848+ENDPROC(pax_exit_kernel)
13849+#endif
13850+
13851+ .macro pax_enter_kernel_user
13852+#ifdef CONFIG_PAX_MEMORY_UDEREF
13853+ call pax_enter_kernel_user
13854+#endif
13855+ .endm
13856+
13857+ .macro pax_exit_kernel_user
13858+#ifdef CONFIG_PAX_MEMORY_UDEREF
13859+ call pax_exit_kernel_user
13860+#endif
13861+#ifdef CONFIG_PAX_RANDKSTACK
13862+ push %rax
13863+ call pax_randomize_kstack
13864+ pop %rax
13865+#endif
13866+ .endm
13867+
13868+#ifdef CONFIG_PAX_MEMORY_UDEREF
13869+ENTRY(pax_enter_kernel_user)
13870+ pushq %rdi
13871+ pushq %rbx
13872+
13873+#ifdef CONFIG_PARAVIRT
13874+ PV_SAVE_REGS(CLBR_RDI)
13875+#endif
13876+
13877+ GET_CR3_INTO_RDI
13878+ mov %rdi,%rbx
13879+ add $__START_KERNEL_map,%rbx
13880+ sub phys_base(%rip),%rbx
13881+
13882+#ifdef CONFIG_PARAVIRT
13883+ pushq %rdi
13884+ cmpl $0, pv_info+PARAVIRT_enabled
13885+ jz 1f
13886+ i = 0
13887+ .rept USER_PGD_PTRS
13888+ mov i*8(%rbx),%rsi
13889+ mov $0,%sil
13890+ lea i*8(%rbx),%rdi
13891+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13892+ i = i + 1
13893+ .endr
13894+ jmp 2f
13895+1:
13896+#endif
13897+
13898+ i = 0
13899+ .rept USER_PGD_PTRS
13900+ movb $0,i*8(%rbx)
13901+ i = i + 1
13902+ .endr
13903+
13904+#ifdef CONFIG_PARAVIRT
13905+2: popq %rdi
13906+#endif
13907+ SET_RDI_INTO_CR3
13908+
13909+#ifdef CONFIG_PAX_KERNEXEC
13910+ GET_CR0_INTO_RDI
13911+ bts $16,%rdi
13912+ SET_RDI_INTO_CR0
13913+#endif
13914+
13915+#ifdef CONFIG_PARAVIRT
13916+ PV_RESTORE_REGS(CLBR_RDI)
13917+#endif
13918+
13919+ popq %rbx
13920+ popq %rdi
13921+ retq
13922+ENDPROC(pax_enter_kernel_user)
13923+
13924+ENTRY(pax_exit_kernel_user)
13925+ push %rdi
13926+
13927+#ifdef CONFIG_PARAVIRT
13928+ pushq %rbx
13929+ PV_SAVE_REGS(CLBR_RDI)
13930+#endif
13931+
13932+#ifdef CONFIG_PAX_KERNEXEC
13933+ GET_CR0_INTO_RDI
13934+ btr $16,%rdi
13935+ SET_RDI_INTO_CR0
13936+#endif
13937+
13938+ GET_CR3_INTO_RDI
13939+ add $__START_KERNEL_map,%rdi
13940+ sub phys_base(%rip),%rdi
13941+
13942+#ifdef CONFIG_PARAVIRT
13943+ cmpl $0, pv_info+PARAVIRT_enabled
13944+ jz 1f
13945+ mov %rdi,%rbx
13946+ i = 0
13947+ .rept USER_PGD_PTRS
13948+ mov i*8(%rbx),%rsi
13949+ mov $0x67,%sil
13950+ lea i*8(%rbx),%rdi
13951+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13952+ i = i + 1
13953+ .endr
13954+ jmp 2f
13955+1:
13956+#endif
13957+
13958+ i = 0
13959+ .rept USER_PGD_PTRS
13960+ movb $0x67,i*8(%rdi)
13961+ i = i + 1
13962+ .endr
13963+
13964+#ifdef CONFIG_PARAVIRT
13965+2: PV_RESTORE_REGS(CLBR_RDI)
13966+ popq %rbx
13967+#endif
13968+
13969+ popq %rdi
13970+ retq
13971+ENDPROC(pax_exit_kernel_user)
13972+#endif
13973+
13974+.macro pax_erase_kstack
13975+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13976+ call pax_erase_kstack
13977+#endif
13978+.endm
13979+
13980+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13981+/*
13982+ * r10: thread_info
13983+ * rcx, rdx: can be clobbered
13984+ */
13985+ENTRY(pax_erase_kstack)
13986+ pushq %rdi
13987+ pushq %rax
13988+
13989+ GET_THREAD_INFO(%r10)
13990+ mov TI_lowest_stack(%r10), %rdi
13991+ mov $-0xBEEF, %rax
13992+ std
13993+
13994+1: mov %edi, %ecx
13995+ and $THREAD_SIZE_asm - 1, %ecx
13996+ shr $3, %ecx
13997+ repne scasq
13998+ jecxz 2f
13999+
14000+ cmp $2*8, %ecx
14001+ jc 2f
14002+
14003+ mov $2*8, %ecx
14004+ repe scasq
14005+ jecxz 2f
14006+ jne 1b
14007+
14008+2: cld
14009+ mov %esp, %ecx
14010+ sub %edi, %ecx
14011+
14012+ cmp $THREAD_SIZE_asm, %rcx
14013+ jb 3f
14014+ ud2
14015+3:
14016+
14017+ shr $3, %ecx
14018+ rep stosq
14019+
14020+ mov TI_task_thread_sp0(%r10), %rdi
14021+ sub $256, %rdi
14022+ mov %rdi, TI_lowest_stack(%r10)
14023+
14024+ popq %rax
14025+ popq %rdi
14026+ ret
14027+ENDPROC(pax_erase_kstack)
14028+#endif
14029
14030 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
14031 #ifdef CONFIG_TRACE_IRQFLAGS
14032@@ -317,7 +574,7 @@ ENTRY(save_args)
14033 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
14034 movq_cfi rbp, 8 /* push %rbp */
14035 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
14036- testl $3, CS(%rdi)
14037+ testb $3, CS(%rdi)
14038 je 1f
14039 SWAPGS
14040 /*
14041@@ -409,7 +666,7 @@ ENTRY(ret_from_fork)
14042
14043 RESTORE_REST
14044
14045- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14046+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14047 je int_ret_from_sys_call
14048
14049 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
14050@@ -455,7 +712,7 @@ END(ret_from_fork)
14051 ENTRY(system_call)
14052 CFI_STARTPROC simple
14053 CFI_SIGNAL_FRAME
14054- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14055+ CFI_DEF_CFA rsp,0
14056 CFI_REGISTER rip,rcx
14057 /*CFI_REGISTER rflags,r11*/
14058 SWAPGS_UNSAFE_STACK
14059@@ -468,12 +725,13 @@ ENTRY(system_call_after_swapgs)
14060
14061 movq %rsp,PER_CPU_VAR(old_rsp)
14062 movq PER_CPU_VAR(kernel_stack),%rsp
14063+ pax_enter_kernel_user
14064 /*
14065 * No need to follow this irqs off/on section - it's straight
14066 * and short:
14067 */
14068 ENABLE_INTERRUPTS(CLBR_NONE)
14069- SAVE_ARGS 8,1
14070+ SAVE_ARGS 8*6,1
14071 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14072 movq %rcx,RIP-ARGOFFSET(%rsp)
14073 CFI_REL_OFFSET rip,RIP-ARGOFFSET
14074@@ -502,6 +760,8 @@ sysret_check:
14075 andl %edi,%edx
14076 jnz sysret_careful
14077 CFI_REMEMBER_STATE
14078+ pax_exit_kernel_user
14079+ pax_erase_kstack
14080 /*
14081 * sysretq will re-enable interrupts:
14082 */
14083@@ -562,6 +822,9 @@ auditsys:
14084 movq %rax,%rsi /* 2nd arg: syscall number */
14085 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
14086 call audit_syscall_entry
14087+
14088+ pax_erase_kstack
14089+
14090 LOAD_ARGS 0 /* reload call-clobbered registers */
14091 jmp system_call_fastpath
14092
14093@@ -592,6 +855,9 @@ tracesys:
14094 FIXUP_TOP_OF_STACK %rdi
14095 movq %rsp,%rdi
14096 call syscall_trace_enter
14097+
14098+ pax_erase_kstack
14099+
14100 /*
14101 * Reload arg registers from stack in case ptrace changed them.
14102 * We don't reload %rax because syscall_trace_enter() returned
14103@@ -613,7 +879,7 @@ tracesys:
14104 GLOBAL(int_ret_from_sys_call)
14105 DISABLE_INTERRUPTS(CLBR_NONE)
14106 TRACE_IRQS_OFF
14107- testl $3,CS-ARGOFFSET(%rsp)
14108+ testb $3,CS-ARGOFFSET(%rsp)
14109 je retint_restore_args
14110 movl $_TIF_ALLWORK_MASK,%edi
14111 /* edi: mask to check */
14112@@ -800,6 +1066,16 @@ END(interrupt)
14113 CFI_ADJUST_CFA_OFFSET 10*8
14114 call save_args
14115 PARTIAL_FRAME 0
14116+#ifdef CONFIG_PAX_MEMORY_UDEREF
14117+ testb $3, CS(%rdi)
14118+ jnz 1f
14119+ pax_enter_kernel
14120+ jmp 2f
14121+1: pax_enter_kernel_user
14122+2:
14123+#else
14124+ pax_enter_kernel
14125+#endif
14126 call \func
14127 .endm
14128
14129@@ -822,7 +1098,7 @@ ret_from_intr:
14130 CFI_ADJUST_CFA_OFFSET -8
14131 exit_intr:
14132 GET_THREAD_INFO(%rcx)
14133- testl $3,CS-ARGOFFSET(%rsp)
14134+ testb $3,CS-ARGOFFSET(%rsp)
14135 je retint_kernel
14136
14137 /* Interrupt came from user space */
14138@@ -844,12 +1120,15 @@ retint_swapgs: /* return to user-space
14139 * The iretq could re-enable interrupts:
14140 */
14141 DISABLE_INTERRUPTS(CLBR_ANY)
14142+ pax_exit_kernel_user
14143+ pax_erase_kstack
14144 TRACE_IRQS_IRETQ
14145 SWAPGS
14146 jmp restore_args
14147
14148 retint_restore_args: /* return to kernel space */
14149 DISABLE_INTERRUPTS(CLBR_ANY)
14150+ pax_exit_kernel
14151 /*
14152 * The iretq could re-enable interrupts:
14153 */
14154@@ -1032,6 +1311,16 @@ ENTRY(\sym)
14155 CFI_ADJUST_CFA_OFFSET 15*8
14156 call error_entry
14157 DEFAULT_FRAME 0
14158+#ifdef CONFIG_PAX_MEMORY_UDEREF
14159+ testb $3, CS(%rsp)
14160+ jnz 1f
14161+ pax_enter_kernel
14162+ jmp 2f
14163+1: pax_enter_kernel_user
14164+2:
14165+#else
14166+ pax_enter_kernel
14167+#endif
14168 movq %rsp,%rdi /* pt_regs pointer */
14169 xorl %esi,%esi /* no error code */
14170 call \do_sym
14171@@ -1049,6 +1338,16 @@ ENTRY(\sym)
14172 subq $15*8, %rsp
14173 call save_paranoid
14174 TRACE_IRQS_OFF
14175+#ifdef CONFIG_PAX_MEMORY_UDEREF
14176+ testb $3, CS(%rsp)
14177+ jnz 1f
14178+ pax_enter_kernel
14179+ jmp 2f
14180+1: pax_enter_kernel_user
14181+2:
14182+#else
14183+ pax_enter_kernel
14184+#endif
14185 movq %rsp,%rdi /* pt_regs pointer */
14186 xorl %esi,%esi /* no error code */
14187 call \do_sym
14188@@ -1066,9 +1365,24 @@ ENTRY(\sym)
14189 subq $15*8, %rsp
14190 call save_paranoid
14191 TRACE_IRQS_OFF
14192+#ifdef CONFIG_PAX_MEMORY_UDEREF
14193+ testb $3, CS(%rsp)
14194+ jnz 1f
14195+ pax_enter_kernel
14196+ jmp 2f
14197+1: pax_enter_kernel_user
14198+2:
14199+#else
14200+ pax_enter_kernel
14201+#endif
14202 movq %rsp,%rdi /* pt_regs pointer */
14203 xorl %esi,%esi /* no error code */
14204- PER_CPU(init_tss, %rbp)
14205+#ifdef CONFIG_SMP
14206+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
14207+ lea init_tss(%rbp), %rbp
14208+#else
14209+ lea init_tss(%rip), %rbp
14210+#endif
14211 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14212 call \do_sym
14213 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14214@@ -1085,6 +1399,16 @@ ENTRY(\sym)
14215 CFI_ADJUST_CFA_OFFSET 15*8
14216 call error_entry
14217 DEFAULT_FRAME 0
14218+#ifdef CONFIG_PAX_MEMORY_UDEREF
14219+ testb $3, CS(%rsp)
14220+ jnz 1f
14221+ pax_enter_kernel
14222+ jmp 2f
14223+1: pax_enter_kernel_user
14224+2:
14225+#else
14226+ pax_enter_kernel
14227+#endif
14228 movq %rsp,%rdi /* pt_regs pointer */
14229 movq ORIG_RAX(%rsp),%rsi /* get error code */
14230 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14231@@ -1104,6 +1428,16 @@ ENTRY(\sym)
14232 call save_paranoid
14233 DEFAULT_FRAME 0
14234 TRACE_IRQS_OFF
14235+#ifdef CONFIG_PAX_MEMORY_UDEREF
14236+ testb $3, CS(%rsp)
14237+ jnz 1f
14238+ pax_enter_kernel
14239+ jmp 2f
14240+1: pax_enter_kernel_user
14241+2:
14242+#else
14243+ pax_enter_kernel
14244+#endif
14245 movq %rsp,%rdi /* pt_regs pointer */
14246 movq ORIG_RAX(%rsp),%rsi /* get error code */
14247 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14248@@ -1405,14 +1739,27 @@ ENTRY(paranoid_exit)
14249 TRACE_IRQS_OFF
14250 testl %ebx,%ebx /* swapgs needed? */
14251 jnz paranoid_restore
14252- testl $3,CS(%rsp)
14253+ testb $3,CS(%rsp)
14254 jnz paranoid_userspace
14255+#ifdef CONFIG_PAX_MEMORY_UDEREF
14256+ pax_exit_kernel
14257+ TRACE_IRQS_IRETQ 0
14258+ SWAPGS_UNSAFE_STACK
14259+ RESTORE_ALL 8
14260+ jmp irq_return
14261+#endif
14262 paranoid_swapgs:
14263+#ifdef CONFIG_PAX_MEMORY_UDEREF
14264+ pax_exit_kernel_user
14265+#else
14266+ pax_exit_kernel
14267+#endif
14268 TRACE_IRQS_IRETQ 0
14269 SWAPGS_UNSAFE_STACK
14270 RESTORE_ALL 8
14271 jmp irq_return
14272 paranoid_restore:
14273+ pax_exit_kernel
14274 TRACE_IRQS_IRETQ 0
14275 RESTORE_ALL 8
14276 jmp irq_return
14277@@ -1470,7 +1817,7 @@ ENTRY(error_entry)
14278 movq_cfi r14, R14+8
14279 movq_cfi r15, R15+8
14280 xorl %ebx,%ebx
14281- testl $3,CS+8(%rsp)
14282+ testb $3,CS+8(%rsp)
14283 je error_kernelspace
14284 error_swapgs:
14285 SWAPGS
14286@@ -1529,6 +1876,16 @@ ENTRY(nmi)
14287 CFI_ADJUST_CFA_OFFSET 15*8
14288 call save_paranoid
14289 DEFAULT_FRAME 0
14290+#ifdef CONFIG_PAX_MEMORY_UDEREF
14291+ testb $3, CS(%rsp)
14292+ jnz 1f
14293+ pax_enter_kernel
14294+ jmp 2f
14295+1: pax_enter_kernel_user
14296+2:
14297+#else
14298+ pax_enter_kernel
14299+#endif
14300 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14301 movq %rsp,%rdi
14302 movq $-1,%rsi
14303@@ -1539,11 +1896,25 @@ ENTRY(nmi)
14304 DISABLE_INTERRUPTS(CLBR_NONE)
14305 testl %ebx,%ebx /* swapgs needed? */
14306 jnz nmi_restore
14307- testl $3,CS(%rsp)
14308+ testb $3,CS(%rsp)
14309 jnz nmi_userspace
14310+#ifdef CONFIG_PAX_MEMORY_UDEREF
14311+ pax_exit_kernel
14312+ SWAPGS_UNSAFE_STACK
14313+ RESTORE_ALL 8
14314+ jmp irq_return
14315+#endif
14316 nmi_swapgs:
14317+#ifdef CONFIG_PAX_MEMORY_UDEREF
14318+ pax_exit_kernel_user
14319+#else
14320+ pax_exit_kernel
14321+#endif
14322 SWAPGS_UNSAFE_STACK
14323+ RESTORE_ALL 8
14324+ jmp irq_return
14325 nmi_restore:
14326+ pax_exit_kernel
14327 RESTORE_ALL 8
14328 jmp irq_return
14329 nmi_userspace:
14330diff -urNp linux-2.6.32.45/arch/x86/kernel/ftrace.c linux-2.6.32.45/arch/x86/kernel/ftrace.c
14331--- linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
14332+++ linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
14333@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
14334 static void *mod_code_newcode; /* holds the text to write to the IP */
14335
14336 static unsigned nmi_wait_count;
14337-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14338+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14339
14340 int ftrace_arch_read_dyn_info(char *buf, int size)
14341 {
14342@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
14343
14344 r = snprintf(buf, size, "%u %u",
14345 nmi_wait_count,
14346- atomic_read(&nmi_update_count));
14347+ atomic_read_unchecked(&nmi_update_count));
14348 return r;
14349 }
14350
14351@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
14352 {
14353 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14354 smp_rmb();
14355+ pax_open_kernel();
14356 ftrace_mod_code();
14357- atomic_inc(&nmi_update_count);
14358+ pax_close_kernel();
14359+ atomic_inc_unchecked(&nmi_update_count);
14360 }
14361 /* Must have previous changes seen before executions */
14362 smp_mb();
14363@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
14364
14365
14366
14367-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
14368+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
14369
14370 static unsigned char *ftrace_nop_replace(void)
14371 {
14372@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
14373 {
14374 unsigned char replaced[MCOUNT_INSN_SIZE];
14375
14376+ ip = ktla_ktva(ip);
14377+
14378 /*
14379 * Note: Due to modules and __init, code can
14380 * disappear and change, we need to protect against faulting
14381@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
14382 unsigned char old[MCOUNT_INSN_SIZE], *new;
14383 int ret;
14384
14385- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14386+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14387 new = ftrace_call_replace(ip, (unsigned long)func);
14388 ret = ftrace_modify_code(ip, old, new);
14389
14390@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
14391 switch (faulted) {
14392 case 0:
14393 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
14394- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
14395+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14396 break;
14397 case 1:
14398 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14399- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14400+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14401 break;
14402 case 2:
14403 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14404- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14405+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14406 break;
14407 }
14408
14409@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14410 {
14411 unsigned char code[MCOUNT_INSN_SIZE];
14412
14413+ ip = ktla_ktva(ip);
14414+
14415 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14416 return -EFAULT;
14417
14418diff -urNp linux-2.6.32.45/arch/x86/kernel/head32.c linux-2.6.32.45/arch/x86/kernel/head32.c
14419--- linux-2.6.32.45/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14420+++ linux-2.6.32.45/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14421@@ -16,6 +16,7 @@
14422 #include <asm/apic.h>
14423 #include <asm/io_apic.h>
14424 #include <asm/bios_ebda.h>
14425+#include <asm/boot.h>
14426
14427 static void __init i386_default_early_setup(void)
14428 {
14429@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14430 {
14431 reserve_trampoline_memory();
14432
14433- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14434+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14435
14436 #ifdef CONFIG_BLK_DEV_INITRD
14437 /* Reserve INITRD */
14438diff -urNp linux-2.6.32.45/arch/x86/kernel/head_32.S linux-2.6.32.45/arch/x86/kernel/head_32.S
14439--- linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14440+++ linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14441@@ -19,10 +19,17 @@
14442 #include <asm/setup.h>
14443 #include <asm/processor-flags.h>
14444 #include <asm/percpu.h>
14445+#include <asm/msr-index.h>
14446
14447 /* Physical address */
14448 #define pa(X) ((X) - __PAGE_OFFSET)
14449
14450+#ifdef CONFIG_PAX_KERNEXEC
14451+#define ta(X) (X)
14452+#else
14453+#define ta(X) ((X) - __PAGE_OFFSET)
14454+#endif
14455+
14456 /*
14457 * References to members of the new_cpu_data structure.
14458 */
14459@@ -52,11 +59,7 @@
14460 * and small than max_low_pfn, otherwise will waste some page table entries
14461 */
14462
14463-#if PTRS_PER_PMD > 1
14464-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14465-#else
14466-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14467-#endif
14468+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14469
14470 /* Enough space to fit pagetables for the low memory linear map */
14471 MAPPING_BEYOND_END = \
14472@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14473 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14474
14475 /*
14476+ * Real beginning of normal "text" segment
14477+ */
14478+ENTRY(stext)
14479+ENTRY(_stext)
14480+
14481+/*
14482 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14483 * %esi points to the real-mode code as a 32-bit pointer.
14484 * CS and DS must be 4 GB flat segments, but we don't depend on
14485@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14486 * can.
14487 */
14488 __HEAD
14489+
14490+#ifdef CONFIG_PAX_KERNEXEC
14491+ jmp startup_32
14492+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14493+.fill PAGE_SIZE-5,1,0xcc
14494+#endif
14495+
14496 ENTRY(startup_32)
14497+ movl pa(stack_start),%ecx
14498+
14499 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14500 us to not reload segments */
14501 testb $(1<<6), BP_loadflags(%esi)
14502@@ -95,7 +113,60 @@ ENTRY(startup_32)
14503 movl %eax,%es
14504 movl %eax,%fs
14505 movl %eax,%gs
14506+ movl %eax,%ss
14507 2:
14508+ leal -__PAGE_OFFSET(%ecx),%esp
14509+
14510+#ifdef CONFIG_SMP
14511+ movl $pa(cpu_gdt_table),%edi
14512+ movl $__per_cpu_load,%eax
14513+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14514+ rorl $16,%eax
14515+ movb %al,__KERNEL_PERCPU + 4(%edi)
14516+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14517+ movl $__per_cpu_end - 1,%eax
14518+ subl $__per_cpu_start,%eax
14519+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14520+#endif
14521+
14522+#ifdef CONFIG_PAX_MEMORY_UDEREF
14523+ movl $NR_CPUS,%ecx
14524+ movl $pa(cpu_gdt_table),%edi
14525+1:
14526+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14527+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14528+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14529+ addl $PAGE_SIZE_asm,%edi
14530+ loop 1b
14531+#endif
14532+
14533+#ifdef CONFIG_PAX_KERNEXEC
14534+ movl $pa(boot_gdt),%edi
14535+ movl $__LOAD_PHYSICAL_ADDR,%eax
14536+ movw %ax,__BOOT_CS + 2(%edi)
14537+ rorl $16,%eax
14538+ movb %al,__BOOT_CS + 4(%edi)
14539+ movb %ah,__BOOT_CS + 7(%edi)
14540+ rorl $16,%eax
14541+
14542+ ljmp $(__BOOT_CS),$1f
14543+1:
14544+
14545+ movl $NR_CPUS,%ecx
14546+ movl $pa(cpu_gdt_table),%edi
14547+ addl $__PAGE_OFFSET,%eax
14548+1:
14549+ movw %ax,__KERNEL_CS + 2(%edi)
14550+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14551+ rorl $16,%eax
14552+ movb %al,__KERNEL_CS + 4(%edi)
14553+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14554+ movb %ah,__KERNEL_CS + 7(%edi)
14555+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14556+ rorl $16,%eax
14557+ addl $PAGE_SIZE_asm,%edi
14558+ loop 1b
14559+#endif
14560
14561 /*
14562 * Clear BSS first so that there are no surprises...
14563@@ -140,9 +211,7 @@ ENTRY(startup_32)
14564 cmpl $num_subarch_entries, %eax
14565 jae bad_subarch
14566
14567- movl pa(subarch_entries)(,%eax,4), %eax
14568- subl $__PAGE_OFFSET, %eax
14569- jmp *%eax
14570+ jmp *pa(subarch_entries)(,%eax,4)
14571
14572 bad_subarch:
14573 WEAK(lguest_entry)
14574@@ -154,10 +223,10 @@ WEAK(xen_entry)
14575 __INITDATA
14576
14577 subarch_entries:
14578- .long default_entry /* normal x86/PC */
14579- .long lguest_entry /* lguest hypervisor */
14580- .long xen_entry /* Xen hypervisor */
14581- .long default_entry /* Moorestown MID */
14582+ .long ta(default_entry) /* normal x86/PC */
14583+ .long ta(lguest_entry) /* lguest hypervisor */
14584+ .long ta(xen_entry) /* Xen hypervisor */
14585+ .long ta(default_entry) /* Moorestown MID */
14586 num_subarch_entries = (. - subarch_entries) / 4
14587 .previous
14588 #endif /* CONFIG_PARAVIRT */
14589@@ -218,8 +287,11 @@ default_entry:
14590 movl %eax, pa(max_pfn_mapped)
14591
14592 /* Do early initialization of the fixmap area */
14593- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14594- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14595+#ifdef CONFIG_COMPAT_VDSO
14596+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14597+#else
14598+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14599+#endif
14600 #else /* Not PAE */
14601
14602 page_pde_offset = (__PAGE_OFFSET >> 20);
14603@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14604 movl %eax, pa(max_pfn_mapped)
14605
14606 /* Do early initialization of the fixmap area */
14607- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14608- movl %eax,pa(swapper_pg_dir+0xffc)
14609+#ifdef CONFIG_COMPAT_VDSO
14610+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14611+#else
14612+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14613+#endif
14614 #endif
14615 jmp 3f
14616 /*
14617@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14618 movl %eax,%es
14619 movl %eax,%fs
14620 movl %eax,%gs
14621+ movl pa(stack_start),%ecx
14622+ movl %eax,%ss
14623+ leal -__PAGE_OFFSET(%ecx),%esp
14624 #endif /* CONFIG_SMP */
14625 3:
14626
14627@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14628 orl %edx,%eax
14629 movl %eax,%cr4
14630
14631+#ifdef CONFIG_X86_PAE
14632 btl $5, %eax # check if PAE is enabled
14633 jnc 6f
14634
14635@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14636 cpuid
14637 cmpl $0x80000000, %eax
14638 jbe 6f
14639+
14640+ /* Clear bogus XD_DISABLE bits */
14641+ call verify_cpu
14642+
14643 mov $0x80000001, %eax
14644 cpuid
14645 /* Execute Disable bit supported? */
14646@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14647 jnc 6f
14648
14649 /* Setup EFER (Extended Feature Enable Register) */
14650- movl $0xc0000080, %ecx
14651+ movl $MSR_EFER, %ecx
14652 rdmsr
14653
14654 btsl $11, %eax
14655 /* Make changes effective */
14656 wrmsr
14657
14658+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14659+ movl $1,pa(nx_enabled)
14660+#endif
14661+
14662 6:
14663
14664 /*
14665@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14666 movl %eax,%cr0 /* ..and set paging (PG) bit */
14667 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14668 1:
14669- /* Set up the stack pointer */
14670- lss stack_start,%esp
14671+ /* Shift the stack pointer to a virtual address */
14672+ addl $__PAGE_OFFSET, %esp
14673
14674 /*
14675 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14676@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14677
14678 #ifdef CONFIG_SMP
14679 cmpb $0, ready
14680- jz 1f /* Initial CPU cleans BSS */
14681- jmp checkCPUtype
14682-1:
14683+ jnz checkCPUtype
14684 #endif /* CONFIG_SMP */
14685
14686 /*
14687@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14688 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14689 movl %eax,%ss # after changing gdt.
14690
14691- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14692+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14693 movl %eax,%ds
14694 movl %eax,%es
14695
14696@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14697 */
14698 cmpb $0,ready
14699 jne 1f
14700- movl $per_cpu__gdt_page,%eax
14701+ movl $cpu_gdt_table,%eax
14702 movl $per_cpu__stack_canary,%ecx
14703+#ifdef CONFIG_SMP
14704+ addl $__per_cpu_load,%ecx
14705+#endif
14706 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14707 shrl $16, %ecx
14708 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14709 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14710 1:
14711-#endif
14712 movl $(__KERNEL_STACK_CANARY),%eax
14713+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14714+ movl $(__USER_DS),%eax
14715+#else
14716+ xorl %eax,%eax
14717+#endif
14718 movl %eax,%gs
14719
14720 xorl %eax,%eax # Clear LDT
14721@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14722
14723 cld # gcc2 wants the direction flag cleared at all times
14724 pushl $0 # fake return address for unwinder
14725-#ifdef CONFIG_SMP
14726- movb ready, %cl
14727 movb $1, ready
14728- cmpb $0,%cl # the first CPU calls start_kernel
14729- je 1f
14730- movl (stack_start), %esp
14731-1:
14732-#endif /* CONFIG_SMP */
14733 jmp *(initial_code)
14734
14735 /*
14736@@ -546,22 +631,22 @@ early_page_fault:
14737 jmp early_fault
14738
14739 early_fault:
14740- cld
14741 #ifdef CONFIG_PRINTK
14742+ cmpl $1,%ss:early_recursion_flag
14743+ je hlt_loop
14744+ incl %ss:early_recursion_flag
14745+ cld
14746 pusha
14747 movl $(__KERNEL_DS),%eax
14748 movl %eax,%ds
14749 movl %eax,%es
14750- cmpl $2,early_recursion_flag
14751- je hlt_loop
14752- incl early_recursion_flag
14753 movl %cr2,%eax
14754 pushl %eax
14755 pushl %edx /* trapno */
14756 pushl $fault_msg
14757 call printk
14758+; call dump_stack
14759 #endif
14760- call dump_stack
14761 hlt_loop:
14762 hlt
14763 jmp hlt_loop
14764@@ -569,8 +654,11 @@ hlt_loop:
14765 /* This is the default interrupt "handler" :-) */
14766 ALIGN
14767 ignore_int:
14768- cld
14769 #ifdef CONFIG_PRINTK
14770+ cmpl $2,%ss:early_recursion_flag
14771+ je hlt_loop
14772+ incl %ss:early_recursion_flag
14773+ cld
14774 pushl %eax
14775 pushl %ecx
14776 pushl %edx
14777@@ -579,9 +667,6 @@ ignore_int:
14778 movl $(__KERNEL_DS),%eax
14779 movl %eax,%ds
14780 movl %eax,%es
14781- cmpl $2,early_recursion_flag
14782- je hlt_loop
14783- incl early_recursion_flag
14784 pushl 16(%esp)
14785 pushl 24(%esp)
14786 pushl 32(%esp)
14787@@ -600,6 +685,8 @@ ignore_int:
14788 #endif
14789 iret
14790
14791+#include "verify_cpu.S"
14792+
14793 __REFDATA
14794 .align 4
14795 ENTRY(initial_code)
14796@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14797 /*
14798 * BSS section
14799 */
14800-__PAGE_ALIGNED_BSS
14801- .align PAGE_SIZE_asm
14802 #ifdef CONFIG_X86_PAE
14803+.section .swapper_pg_pmd,"a",@progbits
14804 swapper_pg_pmd:
14805 .fill 1024*KPMDS,4,0
14806 #else
14807+.section .swapper_pg_dir,"a",@progbits
14808 ENTRY(swapper_pg_dir)
14809 .fill 1024,4,0
14810 #endif
14811+.section .swapper_pg_fixmap,"a",@progbits
14812 swapper_pg_fixmap:
14813 .fill 1024,4,0
14814 #ifdef CONFIG_X86_TRAMPOLINE
14815+.section .trampoline_pg_dir,"a",@progbits
14816 ENTRY(trampoline_pg_dir)
14817+#ifdef CONFIG_X86_PAE
14818+ .fill 4,8,0
14819+#else
14820 .fill 1024,4,0
14821 #endif
14822+#endif
14823+
14824+.section .empty_zero_page,"a",@progbits
14825 ENTRY(empty_zero_page)
14826 .fill 4096,1,0
14827
14828 /*
14829+ * The IDT has to be page-aligned to simplify the Pentium
14830+ * F0 0F bug workaround.. We have a special link segment
14831+ * for this.
14832+ */
14833+.section .idt,"a",@progbits
14834+ENTRY(idt_table)
14835+ .fill 256,8,0
14836+
14837+/*
14838 * This starts the data section.
14839 */
14840 #ifdef CONFIG_X86_PAE
14841-__PAGE_ALIGNED_DATA
14842- /* Page-aligned for the benefit of paravirt? */
14843- .align PAGE_SIZE_asm
14844+.section .swapper_pg_dir,"a",@progbits
14845+
14846 ENTRY(swapper_pg_dir)
14847 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14848 # if KPMDS == 3
14849@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14850 # error "Kernel PMDs should be 1, 2 or 3"
14851 # endif
14852 .align PAGE_SIZE_asm /* needs to be page-sized too */
14853+
14854+#ifdef CONFIG_PAX_PER_CPU_PGD
14855+ENTRY(cpu_pgd)
14856+ .rept NR_CPUS
14857+ .fill 4,8,0
14858+ .endr
14859+#endif
14860+
14861 #endif
14862
14863 .data
14864+.balign 4
14865 ENTRY(stack_start)
14866- .long init_thread_union+THREAD_SIZE
14867- .long __BOOT_DS
14868+ .long init_thread_union+THREAD_SIZE-8
14869
14870 ready: .byte 0
14871
14872+.section .rodata,"a",@progbits
14873 early_recursion_flag:
14874 .long 0
14875
14876@@ -697,7 +809,7 @@ fault_msg:
14877 .word 0 # 32 bit align gdt_desc.address
14878 boot_gdt_descr:
14879 .word __BOOT_DS+7
14880- .long boot_gdt - __PAGE_OFFSET
14881+ .long pa(boot_gdt)
14882
14883 .word 0 # 32-bit align idt_desc.address
14884 idt_descr:
14885@@ -708,7 +820,7 @@ idt_descr:
14886 .word 0 # 32 bit align gdt_desc.address
14887 ENTRY(early_gdt_descr)
14888 .word GDT_ENTRIES*8-1
14889- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14890+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14891
14892 /*
14893 * The boot_gdt must mirror the equivalent in setup.S and is
14894@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14895 .align L1_CACHE_BYTES
14896 ENTRY(boot_gdt)
14897 .fill GDT_ENTRY_BOOT_CS,8,0
14898- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14899- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14900+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14901+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14902+
14903+ .align PAGE_SIZE_asm
14904+ENTRY(cpu_gdt_table)
14905+ .rept NR_CPUS
14906+ .quad 0x0000000000000000 /* NULL descriptor */
14907+ .quad 0x0000000000000000 /* 0x0b reserved */
14908+ .quad 0x0000000000000000 /* 0x13 reserved */
14909+ .quad 0x0000000000000000 /* 0x1b reserved */
14910+
14911+#ifdef CONFIG_PAX_KERNEXEC
14912+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14913+#else
14914+ .quad 0x0000000000000000 /* 0x20 unused */
14915+#endif
14916+
14917+ .quad 0x0000000000000000 /* 0x28 unused */
14918+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14919+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14920+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14921+ .quad 0x0000000000000000 /* 0x4b reserved */
14922+ .quad 0x0000000000000000 /* 0x53 reserved */
14923+ .quad 0x0000000000000000 /* 0x5b reserved */
14924+
14925+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14926+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14927+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14928+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14929+
14930+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14931+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14932+
14933+ /*
14934+ * Segments used for calling PnP BIOS have byte granularity.
14935+ * The code segments and data segments have fixed 64k limits,
14936+ * the transfer segment sizes are set at run time.
14937+ */
14938+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14939+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14940+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14941+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14942+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14943+
14944+ /*
14945+ * The APM segments have byte granularity and their bases
14946+ * are set at run time. All have 64k limits.
14947+ */
14948+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14949+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14950+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14951+
14952+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14953+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14954+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14955+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14956+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14957+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14958+
14959+ /* Be sure this is zeroed to avoid false validations in Xen */
14960+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14961+ .endr
14962diff -urNp linux-2.6.32.45/arch/x86/kernel/head_64.S linux-2.6.32.45/arch/x86/kernel/head_64.S
14963--- linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14964+++ linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14965@@ -19,6 +19,7 @@
14966 #include <asm/cache.h>
14967 #include <asm/processor-flags.h>
14968 #include <asm/percpu.h>
14969+#include <asm/cpufeature.h>
14970
14971 #ifdef CONFIG_PARAVIRT
14972 #include <asm/asm-offsets.h>
14973@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14974 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14975 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14976 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14977+L4_VMALLOC_START = pgd_index(VMALLOC_START)
14978+L3_VMALLOC_START = pud_index(VMALLOC_START)
14979+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14980+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14981
14982 .text
14983 __HEAD
14984@@ -85,35 +90,22 @@ startup_64:
14985 */
14986 addq %rbp, init_level4_pgt + 0(%rip)
14987 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14988+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14989+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14990 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14991
14992 addq %rbp, level3_ident_pgt + 0(%rip)
14993+#ifndef CONFIG_XEN
14994+ addq %rbp, level3_ident_pgt + 8(%rip)
14995+#endif
14996
14997- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14998- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14999+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15000
15001- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15002+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15003+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15004
15005- /* Add an Identity mapping if I am above 1G */
15006- leaq _text(%rip), %rdi
15007- andq $PMD_PAGE_MASK, %rdi
15008-
15009- movq %rdi, %rax
15010- shrq $PUD_SHIFT, %rax
15011- andq $(PTRS_PER_PUD - 1), %rax
15012- jz ident_complete
15013-
15014- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15015- leaq level3_ident_pgt(%rip), %rbx
15016- movq %rdx, 0(%rbx, %rax, 8)
15017-
15018- movq %rdi, %rax
15019- shrq $PMD_SHIFT, %rax
15020- andq $(PTRS_PER_PMD - 1), %rax
15021- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15022- leaq level2_spare_pgt(%rip), %rbx
15023- movq %rdx, 0(%rbx, %rax, 8)
15024-ident_complete:
15025+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15026+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15027
15028 /*
15029 * Fixup the kernel text+data virtual addresses. Note that
15030@@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
15031 * after the boot processor executes this code.
15032 */
15033
15034- /* Enable PAE mode and PGE */
15035- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15036+ /* Enable PAE mode and PSE/PGE */
15037+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15038 movq %rax, %cr4
15039
15040 /* Setup early boot stage 4 level pagetables. */
15041@@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
15042 movl $MSR_EFER, %ecx
15043 rdmsr
15044 btsl $_EFER_SCE, %eax /* Enable System Call */
15045- btl $20,%edi /* No Execute supported? */
15046+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15047 jnc 1f
15048 btsl $_EFER_NX, %eax
15049+ leaq init_level4_pgt(%rip), %rdi
15050+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15051+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15052+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15053 1: wrmsr /* Make changes effective */
15054
15055 /* Setup cr0 */
15056@@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
15057 .quad x86_64_start_kernel
15058 ENTRY(initial_gs)
15059 .quad INIT_PER_CPU_VAR(irq_stack_union)
15060- __FINITDATA
15061
15062 ENTRY(stack_start)
15063 .quad init_thread_union+THREAD_SIZE-8
15064 .word 0
15065+ __FINITDATA
15066
15067 bad_address:
15068 jmp bad_address
15069
15070- .section ".init.text","ax"
15071+ __INIT
15072 #ifdef CONFIG_EARLY_PRINTK
15073 .globl early_idt_handlers
15074 early_idt_handlers:
15075@@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
15076 #endif /* EARLY_PRINTK */
15077 1: hlt
15078 jmp 1b
15079+ .previous
15080
15081 #ifdef CONFIG_EARLY_PRINTK
15082+ __INITDATA
15083 early_recursion_flag:
15084 .long 0
15085+ .previous
15086
15087+ .section .rodata,"a",@progbits
15088 early_idt_msg:
15089 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15090 early_idt_ripmsg:
15091 .asciz "RIP %s\n"
15092-#endif /* CONFIG_EARLY_PRINTK */
15093 .previous
15094+#endif /* CONFIG_EARLY_PRINTK */
15095
15096+ .section .rodata,"a",@progbits
15097 #define NEXT_PAGE(name) \
15098 .balign PAGE_SIZE; \
15099 ENTRY(name)
15100@@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
15101 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15102 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15103 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15104+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
15105+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
15106+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15107+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15108 .org init_level4_pgt + L4_START_KERNEL*8, 0
15109 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15110 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15111
15112+#ifdef CONFIG_PAX_PER_CPU_PGD
15113+NEXT_PAGE(cpu_pgd)
15114+ .rept NR_CPUS
15115+ .fill 512,8,0
15116+ .endr
15117+#endif
15118+
15119 NEXT_PAGE(level3_ident_pgt)
15120 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15121+#ifdef CONFIG_XEN
15122 .fill 511,8,0
15123+#else
15124+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15125+ .fill 510,8,0
15126+#endif
15127+
15128+NEXT_PAGE(level3_vmalloc_pgt)
15129+ .fill 512,8,0
15130+
15131+NEXT_PAGE(level3_vmemmap_pgt)
15132+ .fill L3_VMEMMAP_START,8,0
15133+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15134
15135 NEXT_PAGE(level3_kernel_pgt)
15136 .fill L3_START_KERNEL,8,0
15137@@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
15138 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15139 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15140
15141+NEXT_PAGE(level2_vmemmap_pgt)
15142+ .fill 512,8,0
15143+
15144 NEXT_PAGE(level2_fixmap_pgt)
15145- .fill 506,8,0
15146- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15147- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15148- .fill 5,8,0
15149+ .fill 507,8,0
15150+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15151+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15152+ .fill 4,8,0
15153
15154-NEXT_PAGE(level1_fixmap_pgt)
15155+NEXT_PAGE(level1_vsyscall_pgt)
15156 .fill 512,8,0
15157
15158-NEXT_PAGE(level2_ident_pgt)
15159- /* Since I easily can, map the first 1G.
15160+ /* Since I easily can, map the first 2G.
15161 * Don't set NX because code runs from these pages.
15162 */
15163- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15164+NEXT_PAGE(level2_ident_pgt)
15165+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15166
15167 NEXT_PAGE(level2_kernel_pgt)
15168 /*
15169@@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
15170 * If you want to increase this then increase MODULES_VADDR
15171 * too.)
15172 */
15173- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15174- KERNEL_IMAGE_SIZE/PMD_SIZE)
15175-
15176-NEXT_PAGE(level2_spare_pgt)
15177- .fill 512, 8, 0
15178+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15179
15180 #undef PMDS
15181 #undef NEXT_PAGE
15182
15183- .data
15184+ .align PAGE_SIZE
15185+ENTRY(cpu_gdt_table)
15186+ .rept NR_CPUS
15187+ .quad 0x0000000000000000 /* NULL descriptor */
15188+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15189+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15190+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15191+ .quad 0x00cffb000000ffff /* __USER32_CS */
15192+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15193+ .quad 0x00affb000000ffff /* __USER_CS */
15194+
15195+#ifdef CONFIG_PAX_KERNEXEC
15196+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15197+#else
15198+ .quad 0x0 /* unused */
15199+#endif
15200+
15201+ .quad 0,0 /* TSS */
15202+ .quad 0,0 /* LDT */
15203+ .quad 0,0,0 /* three TLS descriptors */
15204+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15205+ /* asm/segment.h:GDT_ENTRIES must match this */
15206+
15207+ /* zero the remaining page */
15208+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15209+ .endr
15210+
15211 .align 16
15212 .globl early_gdt_descr
15213 early_gdt_descr:
15214 .word GDT_ENTRIES*8-1
15215 early_gdt_descr_base:
15216- .quad INIT_PER_CPU_VAR(gdt_page)
15217+ .quad cpu_gdt_table
15218
15219 ENTRY(phys_base)
15220 /* This must match the first entry in level2_kernel_pgt */
15221 .quad 0x0000000000000000
15222
15223 #include "../../x86/xen/xen-head.S"
15224-
15225- .section .bss, "aw", @nobits
15226+
15227+ .section .rodata,"a",@progbits
15228 .align L1_CACHE_BYTES
15229 ENTRY(idt_table)
15230- .skip IDT_ENTRIES * 16
15231+ .fill 512,8,0
15232
15233 __PAGE_ALIGNED_BSS
15234 .align PAGE_SIZE
15235diff -urNp linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c
15236--- linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
15237+++ linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
15238@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15239 EXPORT_SYMBOL(cmpxchg8b_emu);
15240 #endif
15241
15242+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15243+
15244 /* Networking helper routines. */
15245 EXPORT_SYMBOL(csum_partial_copy_generic);
15246+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15247+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15248
15249 EXPORT_SYMBOL(__get_user_1);
15250 EXPORT_SYMBOL(__get_user_2);
15251@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15252
15253 EXPORT_SYMBOL(csum_partial);
15254 EXPORT_SYMBOL(empty_zero_page);
15255+
15256+#ifdef CONFIG_PAX_KERNEXEC
15257+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15258+#endif
15259diff -urNp linux-2.6.32.45/arch/x86/kernel/i8259.c linux-2.6.32.45/arch/x86/kernel/i8259.c
15260--- linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
15261+++ linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
15262@@ -208,7 +208,7 @@ spurious_8259A_irq:
15263 "spurious 8259A interrupt: IRQ%d.\n", irq);
15264 spurious_irq_mask |= irqmask;
15265 }
15266- atomic_inc(&irq_err_count);
15267+ atomic_inc_unchecked(&irq_err_count);
15268 /*
15269 * Theoretically we do not have to handle this IRQ,
15270 * but in Linux this does not cause problems and is
15271diff -urNp linux-2.6.32.45/arch/x86/kernel/init_task.c linux-2.6.32.45/arch/x86/kernel/init_task.c
15272--- linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
15273+++ linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
15274@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
15275 * way process stacks are handled. This is done by having a special
15276 * "init_task" linker map entry..
15277 */
15278-union thread_union init_thread_union __init_task_data =
15279- { INIT_THREAD_INFO(init_task) };
15280+union thread_union init_thread_union __init_task_data;
15281
15282 /*
15283 * Initial task structure.
15284@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15285 * section. Since TSS's are completely CPU-local, we want them
15286 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15287 */
15288-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15289-
15290+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15291+EXPORT_SYMBOL(init_tss);
15292diff -urNp linux-2.6.32.45/arch/x86/kernel/ioport.c linux-2.6.32.45/arch/x86/kernel/ioport.c
15293--- linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
15294+++ linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
15295@@ -6,6 +6,7 @@
15296 #include <linux/sched.h>
15297 #include <linux/kernel.h>
15298 #include <linux/capability.h>
15299+#include <linux/security.h>
15300 #include <linux/errno.h>
15301 #include <linux/types.h>
15302 #include <linux/ioport.h>
15303@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
15304
15305 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15306 return -EINVAL;
15307+#ifdef CONFIG_GRKERNSEC_IO
15308+ if (turn_on && grsec_disable_privio) {
15309+ gr_handle_ioperm();
15310+ return -EPERM;
15311+ }
15312+#endif
15313 if (turn_on && !capable(CAP_SYS_RAWIO))
15314 return -EPERM;
15315
15316@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
15317 * because the ->io_bitmap_max value must match the bitmap
15318 * contents:
15319 */
15320- tss = &per_cpu(init_tss, get_cpu());
15321+ tss = init_tss + get_cpu();
15322
15323 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
15324
15325@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
15326 return -EINVAL;
15327 /* Trying to gain more privileges? */
15328 if (level > old) {
15329+#ifdef CONFIG_GRKERNSEC_IO
15330+ if (grsec_disable_privio) {
15331+ gr_handle_iopl();
15332+ return -EPERM;
15333+ }
15334+#endif
15335 if (!capable(CAP_SYS_RAWIO))
15336 return -EPERM;
15337 }
15338diff -urNp linux-2.6.32.45/arch/x86/kernel/irq_32.c linux-2.6.32.45/arch/x86/kernel/irq_32.c
15339--- linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
15340+++ linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
15341@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
15342 __asm__ __volatile__("andl %%esp,%0" :
15343 "=r" (sp) : "0" (THREAD_SIZE - 1));
15344
15345- return sp < (sizeof(struct thread_info) + STACK_WARN);
15346+ return sp < STACK_WARN;
15347 }
15348
15349 static void print_stack_overflow(void)
15350@@ -54,9 +54,9 @@ static inline void print_stack_overflow(
15351 * per-CPU IRQ handling contexts (thread information and stack)
15352 */
15353 union irq_ctx {
15354- struct thread_info tinfo;
15355- u32 stack[THREAD_SIZE/sizeof(u32)];
15356-} __attribute__((aligned(PAGE_SIZE)));
15357+ unsigned long previous_esp;
15358+ u32 stack[THREAD_SIZE/sizeof(u32)];
15359+} __attribute__((aligned(THREAD_SIZE)));
15360
15361 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15362 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
15363@@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
15364 static inline int
15365 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15366 {
15367- union irq_ctx *curctx, *irqctx;
15368+ union irq_ctx *irqctx;
15369 u32 *isp, arg1, arg2;
15370
15371- curctx = (union irq_ctx *) current_thread_info();
15372 irqctx = __get_cpu_var(hardirq_ctx);
15373
15374 /*
15375@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
15376 * handler) we can't do that and just have to keep using the
15377 * current stack (which is the irq stack already after all)
15378 */
15379- if (unlikely(curctx == irqctx))
15380+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15381 return 0;
15382
15383 /* build the stack frame on the IRQ stack */
15384- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15385- irqctx->tinfo.task = curctx->tinfo.task;
15386- irqctx->tinfo.previous_esp = current_stack_pointer;
15387+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15388+ irqctx->previous_esp = current_stack_pointer;
15389
15390- /*
15391- * Copy the softirq bits in preempt_count so that the
15392- * softirq checks work in the hardirq context.
15393- */
15394- irqctx->tinfo.preempt_count =
15395- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15396- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15397+#ifdef CONFIG_PAX_MEMORY_UDEREF
15398+ __set_fs(MAKE_MM_SEG(0));
15399+#endif
15400
15401 if (unlikely(overflow))
15402 call_on_stack(print_stack_overflow, isp);
15403@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15404 : "0" (irq), "1" (desc), "2" (isp),
15405 "D" (desc->handle_irq)
15406 : "memory", "cc", "ecx");
15407+
15408+#ifdef CONFIG_PAX_MEMORY_UDEREF
15409+ __set_fs(current_thread_info()->addr_limit);
15410+#endif
15411+
15412 return 1;
15413 }
15414
15415@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15416 */
15417 void __cpuinit irq_ctx_init(int cpu)
15418 {
15419- union irq_ctx *irqctx;
15420-
15421 if (per_cpu(hardirq_ctx, cpu))
15422 return;
15423
15424- irqctx = &per_cpu(hardirq_stack, cpu);
15425- irqctx->tinfo.task = NULL;
15426- irqctx->tinfo.exec_domain = NULL;
15427- irqctx->tinfo.cpu = cpu;
15428- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15429- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15430-
15431- per_cpu(hardirq_ctx, cpu) = irqctx;
15432-
15433- irqctx = &per_cpu(softirq_stack, cpu);
15434- irqctx->tinfo.task = NULL;
15435- irqctx->tinfo.exec_domain = NULL;
15436- irqctx->tinfo.cpu = cpu;
15437- irqctx->tinfo.preempt_count = 0;
15438- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15439-
15440- per_cpu(softirq_ctx, cpu) = irqctx;
15441+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15442+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15443
15444 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15445 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15446@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15447 asmlinkage void do_softirq(void)
15448 {
15449 unsigned long flags;
15450- struct thread_info *curctx;
15451 union irq_ctx *irqctx;
15452 u32 *isp;
15453
15454@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15455 local_irq_save(flags);
15456
15457 if (local_softirq_pending()) {
15458- curctx = current_thread_info();
15459 irqctx = __get_cpu_var(softirq_ctx);
15460- irqctx->tinfo.task = curctx->task;
15461- irqctx->tinfo.previous_esp = current_stack_pointer;
15462+ irqctx->previous_esp = current_stack_pointer;
15463
15464 /* build the stack frame on the softirq stack */
15465- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15466+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15467+
15468+#ifdef CONFIG_PAX_MEMORY_UDEREF
15469+ __set_fs(MAKE_MM_SEG(0));
15470+#endif
15471
15472 call_on_stack(__do_softirq, isp);
15473+
15474+#ifdef CONFIG_PAX_MEMORY_UDEREF
15475+ __set_fs(current_thread_info()->addr_limit);
15476+#endif
15477+
15478 /*
15479 * Shouldnt happen, we returned above if in_interrupt():
15480 */
15481diff -urNp linux-2.6.32.45/arch/x86/kernel/irq.c linux-2.6.32.45/arch/x86/kernel/irq.c
15482--- linux-2.6.32.45/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15483+++ linux-2.6.32.45/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15484@@ -15,7 +15,7 @@
15485 #include <asm/mce.h>
15486 #include <asm/hw_irq.h>
15487
15488-atomic_t irq_err_count;
15489+atomic_unchecked_t irq_err_count;
15490
15491 /* Function pointer for generic interrupt vector handling */
15492 void (*generic_interrupt_extension)(void) = NULL;
15493@@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15494 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15495 seq_printf(p, " Machine check polls\n");
15496 #endif
15497- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15498+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15499 #if defined(CONFIG_X86_IO_APIC)
15500- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15501+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15502 #endif
15503 return 0;
15504 }
15505@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15506
15507 u64 arch_irq_stat(void)
15508 {
15509- u64 sum = atomic_read(&irq_err_count);
15510+ u64 sum = atomic_read_unchecked(&irq_err_count);
15511
15512 #ifdef CONFIG_X86_IO_APIC
15513- sum += atomic_read(&irq_mis_count);
15514+ sum += atomic_read_unchecked(&irq_mis_count);
15515 #endif
15516 return sum;
15517 }
15518diff -urNp linux-2.6.32.45/arch/x86/kernel/kgdb.c linux-2.6.32.45/arch/x86/kernel/kgdb.c
15519--- linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15520+++ linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15521@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15522
15523 /* clear the trace bit */
15524 linux_regs->flags &= ~X86_EFLAGS_TF;
15525- atomic_set(&kgdb_cpu_doing_single_step, -1);
15526+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15527
15528 /* set the trace bit if we're stepping */
15529 if (remcomInBuffer[0] == 's') {
15530 linux_regs->flags |= X86_EFLAGS_TF;
15531 kgdb_single_step = 1;
15532- atomic_set(&kgdb_cpu_doing_single_step,
15533+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15534 raw_smp_processor_id());
15535 }
15536
15537@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15538 break;
15539
15540 case DIE_DEBUG:
15541- if (atomic_read(&kgdb_cpu_doing_single_step) ==
15542+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15543 raw_smp_processor_id()) {
15544 if (user_mode(regs))
15545 return single_step_cont(regs, args);
15546@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15547 return instruction_pointer(regs);
15548 }
15549
15550-struct kgdb_arch arch_kgdb_ops = {
15551+const struct kgdb_arch arch_kgdb_ops = {
15552 /* Breakpoint instruction: */
15553 .gdb_bpt_instr = { 0xcc },
15554 .flags = KGDB_HW_BREAKPOINT,
15555diff -urNp linux-2.6.32.45/arch/x86/kernel/kprobes.c linux-2.6.32.45/arch/x86/kernel/kprobes.c
15556--- linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15557+++ linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15558@@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15559 char op;
15560 s32 raddr;
15561 } __attribute__((packed)) * jop;
15562- jop = (struct __arch_jmp_op *)from;
15563+
15564+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15565+
15566+ pax_open_kernel();
15567 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15568 jop->op = RELATIVEJUMP_INSTRUCTION;
15569+ pax_close_kernel();
15570 }
15571
15572 /*
15573@@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15574 kprobe_opcode_t opcode;
15575 kprobe_opcode_t *orig_opcodes = opcodes;
15576
15577- if (search_exception_tables((unsigned long)opcodes))
15578+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15579 return 0; /* Page fault may occur on this address. */
15580
15581 retry:
15582@@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15583 disp = (u8 *) p->addr + *((s32 *) insn) -
15584 (u8 *) p->ainsn.insn;
15585 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15586+ pax_open_kernel();
15587 *(s32 *)insn = (s32) disp;
15588+ pax_close_kernel();
15589 }
15590 }
15591 #endif
15592@@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15593
15594 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15595 {
15596- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15597+ pax_open_kernel();
15598+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15599+ pax_close_kernel();
15600
15601 fix_riprel(p);
15602
15603- if (can_boost(p->addr))
15604+ if (can_boost(ktla_ktva(p->addr)))
15605 p->ainsn.boostable = 0;
15606 else
15607 p->ainsn.boostable = -1;
15608
15609- p->opcode = *p->addr;
15610+ p->opcode = *(ktla_ktva(p->addr));
15611 }
15612
15613 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15614@@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15615 if (p->opcode == BREAKPOINT_INSTRUCTION)
15616 regs->ip = (unsigned long)p->addr;
15617 else
15618- regs->ip = (unsigned long)p->ainsn.insn;
15619+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15620 }
15621
15622 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15623@@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15624 if (p->ainsn.boostable == 1 && !p->post_handler) {
15625 /* Boost up -- we can execute copied instructions directly */
15626 reset_current_kprobe();
15627- regs->ip = (unsigned long)p->ainsn.insn;
15628+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15629 preempt_enable_no_resched();
15630 return;
15631 }
15632@@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15633 struct kprobe_ctlblk *kcb;
15634
15635 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15636- if (*addr != BREAKPOINT_INSTRUCTION) {
15637+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15638 /*
15639 * The breakpoint instruction was removed right
15640 * after we hit it. Another cpu has removed
15641@@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15642 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15643 {
15644 unsigned long *tos = stack_addr(regs);
15645- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15646+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15647 unsigned long orig_ip = (unsigned long)p->addr;
15648 kprobe_opcode_t *insn = p->ainsn.insn;
15649
15650@@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15651 struct die_args *args = data;
15652 int ret = NOTIFY_DONE;
15653
15654- if (args->regs && user_mode_vm(args->regs))
15655+ if (args->regs && user_mode(args->regs))
15656 return ret;
15657
15658 switch (val) {
15659diff -urNp linux-2.6.32.45/arch/x86/kernel/kvm.c linux-2.6.32.45/arch/x86/kernel/kvm.c
15660--- linux-2.6.32.45/arch/x86/kernel/kvm.c 2011-03-27 14:31:47.000000000 -0400
15661+++ linux-2.6.32.45/arch/x86/kernel/kvm.c 2011-08-24 18:35:52.000000000 -0400
15662@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(vo
15663 pv_mmu_ops.set_pud = kvm_set_pud;
15664 #if PAGETABLE_LEVELS == 4
15665 pv_mmu_ops.set_pgd = kvm_set_pgd;
15666+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15667 #endif
15668 #endif
15669 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15670diff -urNp linux-2.6.32.45/arch/x86/kernel/ldt.c linux-2.6.32.45/arch/x86/kernel/ldt.c
15671--- linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15672+++ linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15673@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15674 if (reload) {
15675 #ifdef CONFIG_SMP
15676 preempt_disable();
15677- load_LDT(pc);
15678+ load_LDT_nolock(pc);
15679 if (!cpumask_equal(mm_cpumask(current->mm),
15680 cpumask_of(smp_processor_id())))
15681 smp_call_function(flush_ldt, current->mm, 1);
15682 preempt_enable();
15683 #else
15684- load_LDT(pc);
15685+ load_LDT_nolock(pc);
15686 #endif
15687 }
15688 if (oldsize) {
15689@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15690 return err;
15691
15692 for (i = 0; i < old->size; i++)
15693- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15694+ write_ldt_entry(new->ldt, i, old->ldt + i);
15695 return 0;
15696 }
15697
15698@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15699 retval = copy_ldt(&mm->context, &old_mm->context);
15700 mutex_unlock(&old_mm->context.lock);
15701 }
15702+
15703+ if (tsk == current) {
15704+ mm->context.vdso = 0;
15705+
15706+#ifdef CONFIG_X86_32
15707+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15708+ mm->context.user_cs_base = 0UL;
15709+ mm->context.user_cs_limit = ~0UL;
15710+
15711+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15712+ cpus_clear(mm->context.cpu_user_cs_mask);
15713+#endif
15714+
15715+#endif
15716+#endif
15717+
15718+ }
15719+
15720 return retval;
15721 }
15722
15723@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15724 }
15725 }
15726
15727+#ifdef CONFIG_PAX_SEGMEXEC
15728+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15729+ error = -EINVAL;
15730+ goto out_unlock;
15731+ }
15732+#endif
15733+
15734 fill_ldt(&ldt, &ldt_info);
15735 if (oldmode)
15736 ldt.avl = 0;
15737diff -urNp linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c
15738--- linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15739+++ linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15740@@ -26,7 +26,7 @@
15741 #include <asm/system.h>
15742 #include <asm/cacheflush.h>
15743
15744-static void set_idt(void *newidt, __u16 limit)
15745+static void set_idt(struct desc_struct *newidt, __u16 limit)
15746 {
15747 struct desc_ptr curidt;
15748
15749@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15750 }
15751
15752
15753-static void set_gdt(void *newgdt, __u16 limit)
15754+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15755 {
15756 struct desc_ptr curgdt;
15757
15758@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15759 }
15760
15761 control_page = page_address(image->control_code_page);
15762- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15763+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15764
15765 relocate_kernel_ptr = control_page;
15766 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15767diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_amd.c linux-2.6.32.45/arch/x86/kernel/microcode_amd.c
15768--- linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15769+++ linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15770@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15771 uci->mc = NULL;
15772 }
15773
15774-static struct microcode_ops microcode_amd_ops = {
15775+static const struct microcode_ops microcode_amd_ops = {
15776 .request_microcode_user = request_microcode_user,
15777 .request_microcode_fw = request_microcode_fw,
15778 .collect_cpu_info = collect_cpu_info_amd,
15779@@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15780 .microcode_fini_cpu = microcode_fini_cpu_amd,
15781 };
15782
15783-struct microcode_ops * __init init_amd_microcode(void)
15784+const struct microcode_ops * __init init_amd_microcode(void)
15785 {
15786 return &microcode_amd_ops;
15787 }
15788diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_core.c linux-2.6.32.45/arch/x86/kernel/microcode_core.c
15789--- linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15790+++ linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15791@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15792
15793 #define MICROCODE_VERSION "2.00"
15794
15795-static struct microcode_ops *microcode_ops;
15796+static const struct microcode_ops *microcode_ops;
15797
15798 /*
15799 * Synchronization.
15800diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_intel.c linux-2.6.32.45/arch/x86/kernel/microcode_intel.c
15801--- linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15802+++ linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15803@@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15804
15805 static int get_ucode_user(void *to, const void *from, size_t n)
15806 {
15807- return copy_from_user(to, from, n);
15808+ return copy_from_user(to, (__force const void __user *)from, n);
15809 }
15810
15811 static enum ucode_state
15812 request_microcode_user(int cpu, const void __user *buf, size_t size)
15813 {
15814- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15815+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15816 }
15817
15818 static void microcode_fini_cpu(int cpu)
15819@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15820 uci->mc = NULL;
15821 }
15822
15823-static struct microcode_ops microcode_intel_ops = {
15824+static const struct microcode_ops microcode_intel_ops = {
15825 .request_microcode_user = request_microcode_user,
15826 .request_microcode_fw = request_microcode_fw,
15827 .collect_cpu_info = collect_cpu_info,
15828@@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15829 .microcode_fini_cpu = microcode_fini_cpu,
15830 };
15831
15832-struct microcode_ops * __init init_intel_microcode(void)
15833+const struct microcode_ops * __init init_intel_microcode(void)
15834 {
15835 return &microcode_intel_ops;
15836 }
15837diff -urNp linux-2.6.32.45/arch/x86/kernel/module.c linux-2.6.32.45/arch/x86/kernel/module.c
15838--- linux-2.6.32.45/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15839+++ linux-2.6.32.45/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15840@@ -34,7 +34,7 @@
15841 #define DEBUGP(fmt...)
15842 #endif
15843
15844-void *module_alloc(unsigned long size)
15845+static void *__module_alloc(unsigned long size, pgprot_t prot)
15846 {
15847 struct vm_struct *area;
15848
15849@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15850 if (!area)
15851 return NULL;
15852
15853- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15854- PAGE_KERNEL_EXEC);
15855+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15856+}
15857+
15858+void *module_alloc(unsigned long size)
15859+{
15860+
15861+#ifdef CONFIG_PAX_KERNEXEC
15862+ return __module_alloc(size, PAGE_KERNEL);
15863+#else
15864+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15865+#endif
15866+
15867 }
15868
15869 /* Free memory returned from module_alloc */
15870@@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15871 vfree(module_region);
15872 }
15873
15874+#ifdef CONFIG_PAX_KERNEXEC
15875+#ifdef CONFIG_X86_32
15876+void *module_alloc_exec(unsigned long size)
15877+{
15878+ struct vm_struct *area;
15879+
15880+ if (size == 0)
15881+ return NULL;
15882+
15883+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15884+ return area ? area->addr : NULL;
15885+}
15886+EXPORT_SYMBOL(module_alloc_exec);
15887+
15888+void module_free_exec(struct module *mod, void *module_region)
15889+{
15890+ vunmap(module_region);
15891+}
15892+EXPORT_SYMBOL(module_free_exec);
15893+#else
15894+void module_free_exec(struct module *mod, void *module_region)
15895+{
15896+ module_free(mod, module_region);
15897+}
15898+EXPORT_SYMBOL(module_free_exec);
15899+
15900+void *module_alloc_exec(unsigned long size)
15901+{
15902+ return __module_alloc(size, PAGE_KERNEL_RX);
15903+}
15904+EXPORT_SYMBOL(module_alloc_exec);
15905+#endif
15906+#endif
15907+
15908 /* We don't need anything special. */
15909 int module_frob_arch_sections(Elf_Ehdr *hdr,
15910 Elf_Shdr *sechdrs,
15911@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15912 unsigned int i;
15913 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15914 Elf32_Sym *sym;
15915- uint32_t *location;
15916+ uint32_t *plocation, location;
15917
15918 DEBUGP("Applying relocate section %u to %u\n", relsec,
15919 sechdrs[relsec].sh_info);
15920 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15921 /* This is where to make the change */
15922- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15923- + rel[i].r_offset;
15924+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15925+ location = (uint32_t)plocation;
15926+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15927+ plocation = ktla_ktva((void *)plocation);
15928 /* This is the symbol it is referring to. Note that all
15929 undefined symbols have been resolved. */
15930 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15931@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15932 switch (ELF32_R_TYPE(rel[i].r_info)) {
15933 case R_386_32:
15934 /* We add the value into the location given */
15935- *location += sym->st_value;
15936+ pax_open_kernel();
15937+ *plocation += sym->st_value;
15938+ pax_close_kernel();
15939 break;
15940 case R_386_PC32:
15941 /* Add the value, subtract its postition */
15942- *location += sym->st_value - (uint32_t)location;
15943+ pax_open_kernel();
15944+ *plocation += sym->st_value - location;
15945+ pax_close_kernel();
15946 break;
15947 default:
15948 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15949@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15950 case R_X86_64_NONE:
15951 break;
15952 case R_X86_64_64:
15953+ pax_open_kernel();
15954 *(u64 *)loc = val;
15955+ pax_close_kernel();
15956 break;
15957 case R_X86_64_32:
15958+ pax_open_kernel();
15959 *(u32 *)loc = val;
15960+ pax_close_kernel();
15961 if (val != *(u32 *)loc)
15962 goto overflow;
15963 break;
15964 case R_X86_64_32S:
15965+ pax_open_kernel();
15966 *(s32 *)loc = val;
15967+ pax_close_kernel();
15968 if ((s64)val != *(s32 *)loc)
15969 goto overflow;
15970 break;
15971 case R_X86_64_PC32:
15972 val -= (u64)loc;
15973+ pax_open_kernel();
15974 *(u32 *)loc = val;
15975+ pax_close_kernel();
15976+
15977 #if 0
15978 if ((s64)val != *(s32 *)loc)
15979 goto overflow;
15980diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt.c linux-2.6.32.45/arch/x86/kernel/paravirt.c
15981--- linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15982+++ linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-08-23 20:24:19.000000000 -0400
15983@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15984 {
15985 return x;
15986 }
15987+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15988+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15989+#endif
15990
15991 void __init default_banner(void)
15992 {
15993@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15994 * corresponding structure. */
15995 static void *get_call_destination(u8 type)
15996 {
15997- struct paravirt_patch_template tmpl = {
15998+ const struct paravirt_patch_template tmpl = {
15999 .pv_init_ops = pv_init_ops,
16000 .pv_time_ops = pv_time_ops,
16001 .pv_cpu_ops = pv_cpu_ops,
16002@@ -133,6 +136,8 @@ static void *get_call_destination(u8 typ
16003 .pv_lock_ops = pv_lock_ops,
16004 #endif
16005 };
16006+
16007+ pax_track_stack();
16008 return *((void **)&tmpl + type);
16009 }
16010
16011@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
16012 if (opfunc == NULL)
16013 /* If there's no function, patch it with a ud2a (BUG) */
16014 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16015- else if (opfunc == _paravirt_nop)
16016+ else if (opfunc == (void *)_paravirt_nop)
16017 /* If the operation is a nop, then nop the callsite */
16018 ret = paravirt_patch_nop();
16019
16020 /* identity functions just return their single argument */
16021- else if (opfunc == _paravirt_ident_32)
16022+ else if (opfunc == (void *)_paravirt_ident_32)
16023 ret = paravirt_patch_ident_32(insnbuf, len);
16024- else if (opfunc == _paravirt_ident_64)
16025+ else if (opfunc == (void *)_paravirt_ident_64)
16026+ ret = paravirt_patch_ident_64(insnbuf, len);
16027+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16028+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16029 ret = paravirt_patch_ident_64(insnbuf, len);
16030+#endif
16031
16032 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16033 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16034@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
16035 if (insn_len > len || start == NULL)
16036 insn_len = len;
16037 else
16038- memcpy(insnbuf, start, insn_len);
16039+ memcpy(insnbuf, ktla_ktva(start), insn_len);
16040
16041 return insn_len;
16042 }
16043@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
16044 preempt_enable();
16045 }
16046
16047-struct pv_info pv_info = {
16048+struct pv_info pv_info __read_only = {
16049 .name = "bare hardware",
16050 .paravirt_enabled = 0,
16051 .kernel_rpl = 0,
16052 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
16053 };
16054
16055-struct pv_init_ops pv_init_ops = {
16056+struct pv_init_ops pv_init_ops __read_only = {
16057 .patch = native_patch,
16058 };
16059
16060-struct pv_time_ops pv_time_ops = {
16061+struct pv_time_ops pv_time_ops __read_only = {
16062 .sched_clock = native_sched_clock,
16063 };
16064
16065-struct pv_irq_ops pv_irq_ops = {
16066+struct pv_irq_ops pv_irq_ops __read_only = {
16067 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16068 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16069 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16070@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
16071 #endif
16072 };
16073
16074-struct pv_cpu_ops pv_cpu_ops = {
16075+struct pv_cpu_ops pv_cpu_ops __read_only = {
16076 .cpuid = native_cpuid,
16077 .get_debugreg = native_get_debugreg,
16078 .set_debugreg = native_set_debugreg,
16079@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16080 .end_context_switch = paravirt_nop,
16081 };
16082
16083-struct pv_apic_ops pv_apic_ops = {
16084+struct pv_apic_ops pv_apic_ops __read_only = {
16085 #ifdef CONFIG_X86_LOCAL_APIC
16086 .startup_ipi_hook = paravirt_nop,
16087 #endif
16088 };
16089
16090-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16091+#ifdef CONFIG_X86_32
16092+#ifdef CONFIG_X86_PAE
16093+/* 64-bit pagetable entries */
16094+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16095+#else
16096 /* 32-bit pagetable entries */
16097 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16098+#endif
16099 #else
16100 /* 64-bit pagetable entries */
16101 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16102 #endif
16103
16104-struct pv_mmu_ops pv_mmu_ops = {
16105+struct pv_mmu_ops pv_mmu_ops __read_only = {
16106
16107 .read_cr2 = native_read_cr2,
16108 .write_cr2 = native_write_cr2,
16109@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16110 .make_pud = PTE_IDENT,
16111
16112 .set_pgd = native_set_pgd,
16113+ .set_pgd_batched = native_set_pgd_batched,
16114 #endif
16115 #endif /* PAGETABLE_LEVELS >= 3 */
16116
16117@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16118 },
16119
16120 .set_fixmap = native_set_fixmap,
16121+
16122+#ifdef CONFIG_PAX_KERNEXEC
16123+ .pax_open_kernel = native_pax_open_kernel,
16124+ .pax_close_kernel = native_pax_close_kernel,
16125+#endif
16126+
16127 };
16128
16129 EXPORT_SYMBOL_GPL(pv_time_ops);
16130diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c
16131--- linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
16132+++ linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
16133@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
16134 __raw_spin_lock(lock);
16135 }
16136
16137-struct pv_lock_ops pv_lock_ops = {
16138+struct pv_lock_ops pv_lock_ops __read_only = {
16139 #ifdef CONFIG_SMP
16140 .spin_is_locked = __ticket_spin_is_locked,
16141 .spin_is_contended = __ticket_spin_is_contended,
16142diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c
16143--- linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
16144+++ linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
16145@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
16146 free_pages((unsigned long)vaddr, get_order(size));
16147 }
16148
16149-static struct dma_map_ops calgary_dma_ops = {
16150+static const struct dma_map_ops calgary_dma_ops = {
16151 .alloc_coherent = calgary_alloc_coherent,
16152 .free_coherent = calgary_free_coherent,
16153 .map_sg = calgary_map_sg,
16154diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-dma.c linux-2.6.32.45/arch/x86/kernel/pci-dma.c
16155--- linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
16156+++ linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
16157@@ -14,7 +14,7 @@
16158
16159 static int forbid_dac __read_mostly;
16160
16161-struct dma_map_ops *dma_ops;
16162+const struct dma_map_ops *dma_ops;
16163 EXPORT_SYMBOL(dma_ops);
16164
16165 static int iommu_sac_force __read_mostly;
16166@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
16167
16168 int dma_supported(struct device *dev, u64 mask)
16169 {
16170- struct dma_map_ops *ops = get_dma_ops(dev);
16171+ const struct dma_map_ops *ops = get_dma_ops(dev);
16172
16173 #ifdef CONFIG_PCI
16174 if (mask > 0xffffffff && forbid_dac > 0) {
16175diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c
16176--- linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
16177+++ linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
16178@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
16179 return -1;
16180 }
16181
16182-static struct dma_map_ops gart_dma_ops = {
16183+static const struct dma_map_ops gart_dma_ops = {
16184 .map_sg = gart_map_sg,
16185 .unmap_sg = gart_unmap_sg,
16186 .map_page = gart_map_page,
16187diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-nommu.c linux-2.6.32.45/arch/x86/kernel/pci-nommu.c
16188--- linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
16189+++ linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
16190@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
16191 flush_write_buffers();
16192 }
16193
16194-struct dma_map_ops nommu_dma_ops = {
16195+const struct dma_map_ops nommu_dma_ops = {
16196 .alloc_coherent = dma_generic_alloc_coherent,
16197 .free_coherent = nommu_free_coherent,
16198 .map_sg = nommu_map_sg,
16199diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c
16200--- linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
16201+++ linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
16202@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
16203 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
16204 }
16205
16206-static struct dma_map_ops swiotlb_dma_ops = {
16207+static const struct dma_map_ops swiotlb_dma_ops = {
16208 .mapping_error = swiotlb_dma_mapping_error,
16209 .alloc_coherent = x86_swiotlb_alloc_coherent,
16210 .free_coherent = swiotlb_free_coherent,
16211diff -urNp linux-2.6.32.45/arch/x86/kernel/process_32.c linux-2.6.32.45/arch/x86/kernel/process_32.c
16212--- linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
16213+++ linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
16214@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
16215 unsigned long thread_saved_pc(struct task_struct *tsk)
16216 {
16217 return ((unsigned long *)tsk->thread.sp)[3];
16218+//XXX return tsk->thread.eip;
16219 }
16220
16221 #ifndef CONFIG_SMP
16222@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
16223 unsigned short ss, gs;
16224 const char *board;
16225
16226- if (user_mode_vm(regs)) {
16227+ if (user_mode(regs)) {
16228 sp = regs->sp;
16229 ss = regs->ss & 0xffff;
16230- gs = get_user_gs(regs);
16231 } else {
16232 sp = (unsigned long) (&regs->sp);
16233 savesegment(ss, ss);
16234- savesegment(gs, gs);
16235 }
16236+ gs = get_user_gs(regs);
16237
16238 printk("\n");
16239
16240@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
16241 regs.bx = (unsigned long) fn;
16242 regs.dx = (unsigned long) arg;
16243
16244- regs.ds = __USER_DS;
16245- regs.es = __USER_DS;
16246+ regs.ds = __KERNEL_DS;
16247+ regs.es = __KERNEL_DS;
16248 regs.fs = __KERNEL_PERCPU;
16249- regs.gs = __KERNEL_STACK_CANARY;
16250+ savesegment(gs, regs.gs);
16251 regs.orig_ax = -1;
16252 regs.ip = (unsigned long) kernel_thread_helper;
16253 regs.cs = __KERNEL_CS | get_kernel_rpl();
16254@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
16255 struct task_struct *tsk;
16256 int err;
16257
16258- childregs = task_pt_regs(p);
16259+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16260 *childregs = *regs;
16261 childregs->ax = 0;
16262 childregs->sp = sp;
16263
16264 p->thread.sp = (unsigned long) childregs;
16265 p->thread.sp0 = (unsigned long) (childregs+1);
16266+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16267
16268 p->thread.ip = (unsigned long) ret_from_fork;
16269
16270@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
16271 struct thread_struct *prev = &prev_p->thread,
16272 *next = &next_p->thread;
16273 int cpu = smp_processor_id();
16274- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16275+ struct tss_struct *tss = init_tss + cpu;
16276 bool preload_fpu;
16277
16278 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16279@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
16280 */
16281 lazy_save_gs(prev->gs);
16282
16283+#ifdef CONFIG_PAX_MEMORY_UDEREF
16284+ __set_fs(task_thread_info(next_p)->addr_limit);
16285+#endif
16286+
16287 /*
16288 * Load the per-thread Thread-Local Storage descriptor.
16289 */
16290@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
16291 */
16292 arch_end_context_switch(next_p);
16293
16294+ percpu_write(current_task, next_p);
16295+ percpu_write(current_tinfo, &next_p->tinfo);
16296+
16297 if (preload_fpu)
16298 __math_state_restore();
16299
16300@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
16301 if (prev->gs | next->gs)
16302 lazy_load_gs(next->gs);
16303
16304- percpu_write(current_task, next_p);
16305-
16306 return prev_p;
16307 }
16308
16309@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
16310 } while (count++ < 16);
16311 return 0;
16312 }
16313-
16314diff -urNp linux-2.6.32.45/arch/x86/kernel/process_64.c linux-2.6.32.45/arch/x86/kernel/process_64.c
16315--- linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
16316+++ linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
16317@@ -91,7 +91,7 @@ static void __exit_idle(void)
16318 void exit_idle(void)
16319 {
16320 /* idle loop has pid 0 */
16321- if (current->pid)
16322+ if (task_pid_nr(current))
16323 return;
16324 __exit_idle();
16325 }
16326@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
16327 if (!board)
16328 board = "";
16329 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
16330- current->pid, current->comm, print_tainted(),
16331+ task_pid_nr(current), current->comm, print_tainted(),
16332 init_utsname()->release,
16333 (int)strcspn(init_utsname()->version, " "),
16334 init_utsname()->version, board);
16335@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
16336 struct pt_regs *childregs;
16337 struct task_struct *me = current;
16338
16339- childregs = ((struct pt_regs *)
16340- (THREAD_SIZE + task_stack_page(p))) - 1;
16341+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16342 *childregs = *regs;
16343
16344 childregs->ax = 0;
16345@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
16346 p->thread.sp = (unsigned long) childregs;
16347 p->thread.sp0 = (unsigned long) (childregs+1);
16348 p->thread.usersp = me->thread.usersp;
16349+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16350
16351 set_tsk_thread_flag(p, TIF_FORK);
16352
16353@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
16354 struct thread_struct *prev = &prev_p->thread;
16355 struct thread_struct *next = &next_p->thread;
16356 int cpu = smp_processor_id();
16357- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16358+ struct tss_struct *tss = init_tss + cpu;
16359 unsigned fsindex, gsindex;
16360 bool preload_fpu;
16361
16362@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
16363 prev->usersp = percpu_read(old_rsp);
16364 percpu_write(old_rsp, next->usersp);
16365 percpu_write(current_task, next_p);
16366+ percpu_write(current_tinfo, &next_p->tinfo);
16367
16368- percpu_write(kernel_stack,
16369- (unsigned long)task_stack_page(next_p) +
16370- THREAD_SIZE - KERNEL_STACK_OFFSET);
16371+ percpu_write(kernel_stack, next->sp0);
16372
16373 /*
16374 * Now maybe reload the debug registers and handle I/O bitmaps
16375@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
16376 if (!p || p == current || p->state == TASK_RUNNING)
16377 return 0;
16378 stack = (unsigned long)task_stack_page(p);
16379- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16380+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16381 return 0;
16382 fp = *(u64 *)(p->thread.sp);
16383 do {
16384- if (fp < (unsigned long)stack ||
16385- fp >= (unsigned long)stack+THREAD_SIZE)
16386+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16387 return 0;
16388 ip = *(u64 *)(fp+8);
16389 if (!in_sched_functions(ip))
16390diff -urNp linux-2.6.32.45/arch/x86/kernel/process.c linux-2.6.32.45/arch/x86/kernel/process.c
16391--- linux-2.6.32.45/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
16392+++ linux-2.6.32.45/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
16393@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
16394
16395 void free_thread_info(struct thread_info *ti)
16396 {
16397- free_thread_xstate(ti->task);
16398 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16399 }
16400
16401+static struct kmem_cache *task_struct_cachep;
16402+
16403 void arch_task_cache_init(void)
16404 {
16405- task_xstate_cachep =
16406- kmem_cache_create("task_xstate", xstate_size,
16407+ /* create a slab on which task_structs can be allocated */
16408+ task_struct_cachep =
16409+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16410+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16411+
16412+ task_xstate_cachep =
16413+ kmem_cache_create("task_xstate", xstate_size,
16414 __alignof__(union thread_xstate),
16415- SLAB_PANIC | SLAB_NOTRACK, NULL);
16416+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16417+}
16418+
16419+struct task_struct *alloc_task_struct(void)
16420+{
16421+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16422+}
16423+
16424+void free_task_struct(struct task_struct *task)
16425+{
16426+ free_thread_xstate(task);
16427+ kmem_cache_free(task_struct_cachep, task);
16428 }
16429
16430 /*
16431@@ -73,7 +90,7 @@ void exit_thread(void)
16432 unsigned long *bp = t->io_bitmap_ptr;
16433
16434 if (bp) {
16435- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16436+ struct tss_struct *tss = init_tss + get_cpu();
16437
16438 t->io_bitmap_ptr = NULL;
16439 clear_thread_flag(TIF_IO_BITMAP);
16440@@ -93,6 +110,9 @@ void flush_thread(void)
16441
16442 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16443
16444+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16445+ loadsegment(gs, 0);
16446+#endif
16447 tsk->thread.debugreg0 = 0;
16448 tsk->thread.debugreg1 = 0;
16449 tsk->thread.debugreg2 = 0;
16450@@ -307,7 +327,7 @@ void default_idle(void)
16451 EXPORT_SYMBOL(default_idle);
16452 #endif
16453
16454-void stop_this_cpu(void *dummy)
16455+__noreturn void stop_this_cpu(void *dummy)
16456 {
16457 local_irq_disable();
16458 /*
16459@@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16460 }
16461 early_param("idle", idle_setup);
16462
16463-unsigned long arch_align_stack(unsigned long sp)
16464+#ifdef CONFIG_PAX_RANDKSTACK
16465+asmlinkage void pax_randomize_kstack(void)
16466 {
16467- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16468- sp -= get_random_int() % 8192;
16469- return sp & ~0xf;
16470-}
16471+ struct thread_struct *thread = &current->thread;
16472+ unsigned long time;
16473
16474-unsigned long arch_randomize_brk(struct mm_struct *mm)
16475-{
16476- unsigned long range_end = mm->brk + 0x02000000;
16477- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16478+ if (!randomize_va_space)
16479+ return;
16480+
16481+ rdtscl(time);
16482+
16483+ /* P4 seems to return a 0 LSB, ignore it */
16484+#ifdef CONFIG_MPENTIUM4
16485+ time &= 0x3EUL;
16486+ time <<= 2;
16487+#elif defined(CONFIG_X86_64)
16488+ time &= 0xFUL;
16489+ time <<= 4;
16490+#else
16491+ time &= 0x1FUL;
16492+ time <<= 3;
16493+#endif
16494+
16495+ thread->sp0 ^= time;
16496+ load_sp0(init_tss + smp_processor_id(), thread);
16497+
16498+#ifdef CONFIG_X86_64
16499+ percpu_write(kernel_stack, thread->sp0);
16500+#endif
16501 }
16502+#endif
16503
16504diff -urNp linux-2.6.32.45/arch/x86/kernel/ptrace.c linux-2.6.32.45/arch/x86/kernel/ptrace.c
16505--- linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16506+++ linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16507@@ -925,7 +925,7 @@ static const struct user_regset_view use
16508 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16509 {
16510 int ret;
16511- unsigned long __user *datap = (unsigned long __user *)data;
16512+ unsigned long __user *datap = (__force unsigned long __user *)data;
16513
16514 switch (request) {
16515 /* read the word at location addr in the USER area. */
16516@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16517 if (addr < 0)
16518 return -EIO;
16519 ret = do_get_thread_area(child, addr,
16520- (struct user_desc __user *) data);
16521+ (__force struct user_desc __user *) data);
16522 break;
16523
16524 case PTRACE_SET_THREAD_AREA:
16525 if (addr < 0)
16526 return -EIO;
16527 ret = do_set_thread_area(child, addr,
16528- (struct user_desc __user *) data, 0);
16529+ (__force struct user_desc __user *) data, 0);
16530 break;
16531 #endif
16532
16533@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16534 #ifdef CONFIG_X86_PTRACE_BTS
16535 case PTRACE_BTS_CONFIG:
16536 ret = ptrace_bts_config
16537- (child, data, (struct ptrace_bts_config __user *)addr);
16538+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16539 break;
16540
16541 case PTRACE_BTS_STATUS:
16542 ret = ptrace_bts_status
16543- (child, data, (struct ptrace_bts_config __user *)addr);
16544+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16545 break;
16546
16547 case PTRACE_BTS_SIZE:
16548@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16549
16550 case PTRACE_BTS_GET:
16551 ret = ptrace_bts_read_record
16552- (child, data, (struct bts_struct __user *) addr);
16553+ (child, data, (__force struct bts_struct __user *) addr);
16554 break;
16555
16556 case PTRACE_BTS_CLEAR:
16557@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16558
16559 case PTRACE_BTS_DRAIN:
16560 ret = ptrace_bts_drain
16561- (child, data, (struct bts_struct __user *) addr);
16562+ (child, data, (__force struct bts_struct __user *) addr);
16563 break;
16564 #endif /* CONFIG_X86_PTRACE_BTS */
16565
16566@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16567 info.si_code = si_code;
16568
16569 /* User-mode ip? */
16570- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16571+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16572
16573 /* Send us the fake SIGTRAP */
16574 force_sig_info(SIGTRAP, &info, tsk);
16575@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16576 * We must return the syscall number to actually look up in the table.
16577 * This can be -1L to skip running any syscall at all.
16578 */
16579-asmregparm long syscall_trace_enter(struct pt_regs *regs)
16580+long syscall_trace_enter(struct pt_regs *regs)
16581 {
16582 long ret = 0;
16583
16584@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16585 return ret ?: regs->orig_ax;
16586 }
16587
16588-asmregparm void syscall_trace_leave(struct pt_regs *regs)
16589+void syscall_trace_leave(struct pt_regs *regs)
16590 {
16591 if (unlikely(current->audit_context))
16592 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16593diff -urNp linux-2.6.32.45/arch/x86/kernel/reboot.c linux-2.6.32.45/arch/x86/kernel/reboot.c
16594--- linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:35:28.000000000 -0400
16595+++ linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:33:59.000000000 -0400
16596@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16597 EXPORT_SYMBOL(pm_power_off);
16598
16599 static const struct desc_ptr no_idt = {};
16600-static int reboot_mode;
16601+static unsigned short reboot_mode;
16602 enum reboot_type reboot_type = BOOT_KBD;
16603 int reboot_force;
16604
16605@@ -292,12 +292,12 @@ core_initcall(reboot_init);
16606 controller to pulse the CPU reset line, which is more thorough, but
16607 doesn't work with at least one type of 486 motherboard. It is easy
16608 to stop this code working; hence the copious comments. */
16609-static const unsigned long long
16610-real_mode_gdt_entries [3] =
16611+static struct desc_struct
16612+real_mode_gdt_entries [3] __read_only =
16613 {
16614- 0x0000000000000000ULL, /* Null descriptor */
16615- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16616- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16617+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16618+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16619+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16620 };
16621
16622 static const struct desc_ptr
16623@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16624 * specified by the code and length parameters.
16625 * We assume that length will aways be less that 100!
16626 */
16627-void machine_real_restart(const unsigned char *code, int length)
16628+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16629 {
16630 local_irq_disable();
16631
16632@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16633 /* Remap the kernel at virtual address zero, as well as offset zero
16634 from the kernel segment. This assumes the kernel segment starts at
16635 virtual address PAGE_OFFSET. */
16636- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16637- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16638+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16639+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16640
16641 /*
16642 * Use `swapper_pg_dir' as our page directory.
16643@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16644 boot)". This seems like a fairly standard thing that gets set by
16645 REBOOT.COM programs, and the previous reset routine did this
16646 too. */
16647- *((unsigned short *)0x472) = reboot_mode;
16648+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16649
16650 /* For the switch to real mode, copy some code to low memory. It has
16651 to be in the first 64k because it is running in 16-bit mode, and it
16652 has to have the same physical and virtual address, because it turns
16653 off paging. Copy it near the end of the first page, out of the way
16654 of BIOS variables. */
16655- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16656- real_mode_switch, sizeof (real_mode_switch));
16657- memcpy((void *)(0x1000 - 100), code, length);
16658+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16659+ memcpy(__va(0x1000 - 100), code, length);
16660
16661 /* Set up the IDT for real mode. */
16662 load_idt(&real_mode_idt);
16663@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16664 __asm__ __volatile__ ("ljmp $0x0008,%0"
16665 :
16666 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16667+ do { } while (1);
16668 }
16669 #ifdef CONFIG_APM_MODULE
16670 EXPORT_SYMBOL(machine_real_restart);
16671@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_f
16672 {
16673 }
16674
16675-static void native_machine_emergency_restart(void)
16676+static __noreturn void native_machine_emergency_restart(void)
16677 {
16678 int i;
16679
16680@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
16681 #endif
16682 }
16683
16684-static void __machine_emergency_restart(int emergency)
16685+static __noreturn void __machine_emergency_restart(int emergency)
16686 {
16687 reboot_emergency = emergency;
16688 machine_ops.emergency_restart();
16689 }
16690
16691-static void native_machine_restart(char *__unused)
16692+static __noreturn void native_machine_restart(char *__unused)
16693 {
16694 printk("machine restart\n");
16695
16696@@ -674,7 +674,7 @@ static void native_machine_restart(char
16697 __machine_emergency_restart(0);
16698 }
16699
16700-static void native_machine_halt(void)
16701+static __noreturn void native_machine_halt(void)
16702 {
16703 /* stop other cpus and apics */
16704 machine_shutdown();
16705@@ -685,7 +685,7 @@ static void native_machine_halt(void)
16706 stop_this_cpu(NULL);
16707 }
16708
16709-static void native_machine_power_off(void)
16710+static __noreturn void native_machine_power_off(void)
16711 {
16712 if (pm_power_off) {
16713 if (!reboot_force)
16714@@ -694,6 +694,7 @@ static void native_machine_power_off(voi
16715 }
16716 /* a fallback in case there is no PM info available */
16717 tboot_shutdown(TB_SHUTDOWN_HALT);
16718+ do { } while (1);
16719 }
16720
16721 struct machine_ops machine_ops = {
16722diff -urNp linux-2.6.32.45/arch/x86/kernel/setup.c linux-2.6.32.45/arch/x86/kernel/setup.c
16723--- linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16724+++ linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16725@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16726
16727 if (!boot_params.hdr.root_flags)
16728 root_mountflags &= ~MS_RDONLY;
16729- init_mm.start_code = (unsigned long) _text;
16730- init_mm.end_code = (unsigned long) _etext;
16731+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16732+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16733 init_mm.end_data = (unsigned long) _edata;
16734 init_mm.brk = _brk_end;
16735
16736- code_resource.start = virt_to_phys(_text);
16737- code_resource.end = virt_to_phys(_etext)-1;
16738- data_resource.start = virt_to_phys(_etext);
16739+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16740+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16741+ data_resource.start = virt_to_phys(_sdata);
16742 data_resource.end = virt_to_phys(_edata)-1;
16743 bss_resource.start = virt_to_phys(&__bss_start);
16744 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16745diff -urNp linux-2.6.32.45/arch/x86/kernel/setup_percpu.c linux-2.6.32.45/arch/x86/kernel/setup_percpu.c
16746--- linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16747+++ linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16748@@ -25,19 +25,17 @@
16749 # define DBG(x...)
16750 #endif
16751
16752-DEFINE_PER_CPU(int, cpu_number);
16753+#ifdef CONFIG_SMP
16754+DEFINE_PER_CPU(unsigned int, cpu_number);
16755 EXPORT_PER_CPU_SYMBOL(cpu_number);
16756+#endif
16757
16758-#ifdef CONFIG_X86_64
16759 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16760-#else
16761-#define BOOT_PERCPU_OFFSET 0
16762-#endif
16763
16764 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16765 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16766
16767-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16768+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16769 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16770 };
16771 EXPORT_SYMBOL(__per_cpu_offset);
16772@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16773 {
16774 #ifdef CONFIG_X86_32
16775 struct desc_struct gdt;
16776+ unsigned long base = per_cpu_offset(cpu);
16777
16778- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16779- 0x2 | DESCTYPE_S, 0x8);
16780- gdt.s = 1;
16781+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16782+ 0x83 | DESCTYPE_S, 0xC);
16783 write_gdt_entry(get_cpu_gdt_table(cpu),
16784 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16785 #endif
16786@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16787 /* alrighty, percpu areas up and running */
16788 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16789 for_each_possible_cpu(cpu) {
16790+#ifdef CONFIG_CC_STACKPROTECTOR
16791+#ifdef CONFIG_X86_32
16792+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16793+#endif
16794+#endif
16795 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16796 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16797 per_cpu(cpu_number, cpu) = cpu;
16798@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16799 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16800 #endif
16801 #endif
16802+#ifdef CONFIG_CC_STACKPROTECTOR
16803+#ifdef CONFIG_X86_32
16804+ if (!cpu)
16805+ per_cpu(stack_canary.canary, cpu) = canary;
16806+#endif
16807+#endif
16808 /*
16809 * Up to this point, the boot CPU has been using .data.init
16810 * area. Reload any changed state for the boot CPU.
16811diff -urNp linux-2.6.32.45/arch/x86/kernel/signal.c linux-2.6.32.45/arch/x86/kernel/signal.c
16812--- linux-2.6.32.45/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16813+++ linux-2.6.32.45/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16814@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16815 * Align the stack pointer according to the i386 ABI,
16816 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16817 */
16818- sp = ((sp + 4) & -16ul) - 4;
16819+ sp = ((sp - 12) & -16ul) - 4;
16820 #else /* !CONFIG_X86_32 */
16821 sp = round_down(sp, 16) - 8;
16822 #endif
16823@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16824 * Return an always-bogus address instead so we will die with SIGSEGV.
16825 */
16826 if (onsigstack && !likely(on_sig_stack(sp)))
16827- return (void __user *)-1L;
16828+ return (__force void __user *)-1L;
16829
16830 /* save i387 state */
16831 if (used_math() && save_i387_xstate(*fpstate) < 0)
16832- return (void __user *)-1L;
16833+ return (__force void __user *)-1L;
16834
16835 return (void __user *)sp;
16836 }
16837@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16838 }
16839
16840 if (current->mm->context.vdso)
16841- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16842+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16843 else
16844- restorer = &frame->retcode;
16845+ restorer = (void __user *)&frame->retcode;
16846 if (ka->sa.sa_flags & SA_RESTORER)
16847 restorer = ka->sa.sa_restorer;
16848
16849@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16850 * reasons and because gdb uses it as a signature to notice
16851 * signal handler stack frames.
16852 */
16853- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16854+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16855
16856 if (err)
16857 return -EFAULT;
16858@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16859 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16860
16861 /* Set up to return from userspace. */
16862- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16863+ if (current->mm->context.vdso)
16864+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16865+ else
16866+ restorer = (void __user *)&frame->retcode;
16867 if (ka->sa.sa_flags & SA_RESTORER)
16868 restorer = ka->sa.sa_restorer;
16869 put_user_ex(restorer, &frame->pretcode);
16870@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16871 * reasons and because gdb uses it as a signature to notice
16872 * signal handler stack frames.
16873 */
16874- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16875+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16876 } put_user_catch(err);
16877
16878 if (err)
16879@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16880 int signr;
16881 sigset_t *oldset;
16882
16883+ pax_track_stack();
16884+
16885 /*
16886 * We want the common case to go fast, which is why we may in certain
16887 * cases get here from kernel mode. Just return without doing anything
16888@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16889 * X86_32: vm86 regs switched out by assembly code before reaching
16890 * here, so testing against kernel CS suffices.
16891 */
16892- if (!user_mode(regs))
16893+ if (!user_mode_novm(regs))
16894 return;
16895
16896 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16897diff -urNp linux-2.6.32.45/arch/x86/kernel/smpboot.c linux-2.6.32.45/arch/x86/kernel/smpboot.c
16898--- linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16899+++ linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16900@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16901 */
16902 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16903
16904-void cpu_hotplug_driver_lock()
16905+void cpu_hotplug_driver_lock(void)
16906 {
16907- mutex_lock(&x86_cpu_hotplug_driver_mutex);
16908+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
16909 }
16910
16911-void cpu_hotplug_driver_unlock()
16912+void cpu_hotplug_driver_unlock(void)
16913 {
16914- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16915+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16916 }
16917
16918 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16919@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16920 * target processor state.
16921 */
16922 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16923- (unsigned long)stack_start.sp);
16924+ stack_start);
16925
16926 /*
16927 * Run STARTUP IPI loop.
16928@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16929 set_idle_for_cpu(cpu, c_idle.idle);
16930 do_rest:
16931 per_cpu(current_task, cpu) = c_idle.idle;
16932+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16933 #ifdef CONFIG_X86_32
16934 /* Stack for startup_32 can be just as for start_secondary onwards */
16935 irq_ctx_init(cpu);
16936@@ -750,13 +751,15 @@ do_rest:
16937 #else
16938 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16939 initial_gs = per_cpu_offset(cpu);
16940- per_cpu(kernel_stack, cpu) =
16941- (unsigned long)task_stack_page(c_idle.idle) -
16942- KERNEL_STACK_OFFSET + THREAD_SIZE;
16943+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16944 #endif
16945+
16946+ pax_open_kernel();
16947 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16948+ pax_close_kernel();
16949+
16950 initial_code = (unsigned long)start_secondary;
16951- stack_start.sp = (void *) c_idle.idle->thread.sp;
16952+ stack_start = c_idle.idle->thread.sp;
16953
16954 /* start_ip had better be page-aligned! */
16955 start_ip = setup_trampoline();
16956@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16957
16958 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16959
16960+#ifdef CONFIG_PAX_PER_CPU_PGD
16961+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16962+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16963+ KERNEL_PGD_PTRS);
16964+#endif
16965+
16966 err = do_boot_cpu(apicid, cpu);
16967
16968 if (err) {
16969diff -urNp linux-2.6.32.45/arch/x86/kernel/step.c linux-2.6.32.45/arch/x86/kernel/step.c
16970--- linux-2.6.32.45/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16971+++ linux-2.6.32.45/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16972@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16973 struct desc_struct *desc;
16974 unsigned long base;
16975
16976- seg &= ~7UL;
16977+ seg >>= 3;
16978
16979 mutex_lock(&child->mm->context.lock);
16980- if (unlikely((seg >> 3) >= child->mm->context.size))
16981+ if (unlikely(seg >= child->mm->context.size))
16982 addr = -1L; /* bogus selector, access would fault */
16983 else {
16984 desc = child->mm->context.ldt + seg;
16985@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16986 addr += base;
16987 }
16988 mutex_unlock(&child->mm->context.lock);
16989- }
16990+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16991+ addr = ktla_ktva(addr);
16992
16993 return addr;
16994 }
16995@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16996 unsigned char opcode[15];
16997 unsigned long addr = convert_ip_to_linear(child, regs);
16998
16999+ if (addr == -EINVAL)
17000+ return 0;
17001+
17002 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17003 for (i = 0; i < copied; i++) {
17004 switch (opcode[i]) {
17005@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
17006
17007 #ifdef CONFIG_X86_64
17008 case 0x40 ... 0x4f:
17009- if (regs->cs != __USER_CS)
17010+ if ((regs->cs & 0xffff) != __USER_CS)
17011 /* 32-bit mode: register increment */
17012 return 0;
17013 /* 64-bit mode: REX prefix */
17014diff -urNp linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S
17015--- linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
17016+++ linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
17017@@ -1,3 +1,4 @@
17018+.section .rodata,"a",@progbits
17019 ENTRY(sys_call_table)
17020 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17021 .long sys_exit
17022diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c
17023--- linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
17024+++ linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
17025@@ -24,6 +24,21 @@
17026
17027 #include <asm/syscalls.h>
17028
17029+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17030+{
17031+ unsigned long pax_task_size = TASK_SIZE;
17032+
17033+#ifdef CONFIG_PAX_SEGMEXEC
17034+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17035+ pax_task_size = SEGMEXEC_TASK_SIZE;
17036+#endif
17037+
17038+ if (len > pax_task_size || addr > pax_task_size - len)
17039+ return -EINVAL;
17040+
17041+ return 0;
17042+}
17043+
17044 /*
17045 * Perform the select(nd, in, out, ex, tv) and mmap() system
17046 * calls. Linux/i386 didn't use to be able to handle more than
17047@@ -58,6 +73,212 @@ out:
17048 return err;
17049 }
17050
17051+unsigned long
17052+arch_get_unmapped_area(struct file *filp, unsigned long addr,
17053+ unsigned long len, unsigned long pgoff, unsigned long flags)
17054+{
17055+ struct mm_struct *mm = current->mm;
17056+ struct vm_area_struct *vma;
17057+ unsigned long start_addr, pax_task_size = TASK_SIZE;
17058+
17059+#ifdef CONFIG_PAX_SEGMEXEC
17060+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17061+ pax_task_size = SEGMEXEC_TASK_SIZE;
17062+#endif
17063+
17064+ pax_task_size -= PAGE_SIZE;
17065+
17066+ if (len > pax_task_size)
17067+ return -ENOMEM;
17068+
17069+ if (flags & MAP_FIXED)
17070+ return addr;
17071+
17072+#ifdef CONFIG_PAX_RANDMMAP
17073+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17074+#endif
17075+
17076+ if (addr) {
17077+ addr = PAGE_ALIGN(addr);
17078+ if (pax_task_size - len >= addr) {
17079+ vma = find_vma(mm, addr);
17080+ if (check_heap_stack_gap(vma, addr, len))
17081+ return addr;
17082+ }
17083+ }
17084+ if (len > mm->cached_hole_size) {
17085+ start_addr = addr = mm->free_area_cache;
17086+ } else {
17087+ start_addr = addr = mm->mmap_base;
17088+ mm->cached_hole_size = 0;
17089+ }
17090+
17091+#ifdef CONFIG_PAX_PAGEEXEC
17092+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17093+ start_addr = 0x00110000UL;
17094+
17095+#ifdef CONFIG_PAX_RANDMMAP
17096+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17097+ start_addr += mm->delta_mmap & 0x03FFF000UL;
17098+#endif
17099+
17100+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17101+ start_addr = addr = mm->mmap_base;
17102+ else
17103+ addr = start_addr;
17104+ }
17105+#endif
17106+
17107+full_search:
17108+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17109+ /* At this point: (!vma || addr < vma->vm_end). */
17110+ if (pax_task_size - len < addr) {
17111+ /*
17112+ * Start a new search - just in case we missed
17113+ * some holes.
17114+ */
17115+ if (start_addr != mm->mmap_base) {
17116+ start_addr = addr = mm->mmap_base;
17117+ mm->cached_hole_size = 0;
17118+ goto full_search;
17119+ }
17120+ return -ENOMEM;
17121+ }
17122+ if (check_heap_stack_gap(vma, addr, len))
17123+ break;
17124+ if (addr + mm->cached_hole_size < vma->vm_start)
17125+ mm->cached_hole_size = vma->vm_start - addr;
17126+ addr = vma->vm_end;
17127+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
17128+ start_addr = addr = mm->mmap_base;
17129+ mm->cached_hole_size = 0;
17130+ goto full_search;
17131+ }
17132+ }
17133+
17134+ /*
17135+ * Remember the place where we stopped the search:
17136+ */
17137+ mm->free_area_cache = addr + len;
17138+ return addr;
17139+}
17140+
17141+unsigned long
17142+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17143+ const unsigned long len, const unsigned long pgoff,
17144+ const unsigned long flags)
17145+{
17146+ struct vm_area_struct *vma;
17147+ struct mm_struct *mm = current->mm;
17148+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17149+
17150+#ifdef CONFIG_PAX_SEGMEXEC
17151+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17152+ pax_task_size = SEGMEXEC_TASK_SIZE;
17153+#endif
17154+
17155+ pax_task_size -= PAGE_SIZE;
17156+
17157+ /* requested length too big for entire address space */
17158+ if (len > pax_task_size)
17159+ return -ENOMEM;
17160+
17161+ if (flags & MAP_FIXED)
17162+ return addr;
17163+
17164+#ifdef CONFIG_PAX_PAGEEXEC
17165+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17166+ goto bottomup;
17167+#endif
17168+
17169+#ifdef CONFIG_PAX_RANDMMAP
17170+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17171+#endif
17172+
17173+ /* requesting a specific address */
17174+ if (addr) {
17175+ addr = PAGE_ALIGN(addr);
17176+ if (pax_task_size - len >= addr) {
17177+ vma = find_vma(mm, addr);
17178+ if (check_heap_stack_gap(vma, addr, len))
17179+ return addr;
17180+ }
17181+ }
17182+
17183+ /* check if free_area_cache is useful for us */
17184+ if (len <= mm->cached_hole_size) {
17185+ mm->cached_hole_size = 0;
17186+ mm->free_area_cache = mm->mmap_base;
17187+ }
17188+
17189+ /* either no address requested or can't fit in requested address hole */
17190+ addr = mm->free_area_cache;
17191+
17192+ /* make sure it can fit in the remaining address space */
17193+ if (addr > len) {
17194+ vma = find_vma(mm, addr-len);
17195+ if (check_heap_stack_gap(vma, addr - len, len))
17196+ /* remember the address as a hint for next time */
17197+ return (mm->free_area_cache = addr-len);
17198+ }
17199+
17200+ if (mm->mmap_base < len)
17201+ goto bottomup;
17202+
17203+ addr = mm->mmap_base-len;
17204+
17205+ do {
17206+ /*
17207+ * Lookup failure means no vma is above this address,
17208+ * else if new region fits below vma->vm_start,
17209+ * return with success:
17210+ */
17211+ vma = find_vma(mm, addr);
17212+ if (check_heap_stack_gap(vma, addr, len))
17213+ /* remember the address as a hint for next time */
17214+ return (mm->free_area_cache = addr);
17215+
17216+ /* remember the largest hole we saw so far */
17217+ if (addr + mm->cached_hole_size < vma->vm_start)
17218+ mm->cached_hole_size = vma->vm_start - addr;
17219+
17220+ /* try just below the current vma->vm_start */
17221+ addr = skip_heap_stack_gap(vma, len);
17222+ } while (!IS_ERR_VALUE(addr));
17223+
17224+bottomup:
17225+ /*
17226+ * A failed mmap() very likely causes application failure,
17227+ * so fall back to the bottom-up function here. This scenario
17228+ * can happen with large stack limits and large mmap()
17229+ * allocations.
17230+ */
17231+
17232+#ifdef CONFIG_PAX_SEGMEXEC
17233+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17234+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17235+ else
17236+#endif
17237+
17238+ mm->mmap_base = TASK_UNMAPPED_BASE;
17239+
17240+#ifdef CONFIG_PAX_RANDMMAP
17241+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17242+ mm->mmap_base += mm->delta_mmap;
17243+#endif
17244+
17245+ mm->free_area_cache = mm->mmap_base;
17246+ mm->cached_hole_size = ~0UL;
17247+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17248+ /*
17249+ * Restore the topdown base:
17250+ */
17251+ mm->mmap_base = base;
17252+ mm->free_area_cache = base;
17253+ mm->cached_hole_size = ~0UL;
17254+
17255+ return addr;
17256+}
17257
17258 struct sel_arg_struct {
17259 unsigned long n;
17260@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
17261 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
17262 case SEMTIMEDOP:
17263 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
17264- (const struct timespec __user *)fifth);
17265+ (__force const struct timespec __user *)fifth);
17266
17267 case SEMGET:
17268 return sys_semget(first, second, third);
17269@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
17270 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
17271 if (ret)
17272 return ret;
17273- return put_user(raddr, (ulong __user *) third);
17274+ return put_user(raddr, (__force ulong __user *) third);
17275 }
17276 case 1: /* iBCS2 emulator entry point */
17277 if (!segment_eq(get_fs(), get_ds()))
17278@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
17279
17280 return error;
17281 }
17282-
17283-
17284-/*
17285- * Do a system call from kernel instead of calling sys_execve so we
17286- * end up with proper pt_regs.
17287- */
17288-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
17289-{
17290- long __res;
17291- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
17292- : "=a" (__res)
17293- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
17294- return __res;
17295-}
17296diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c
17297--- linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
17298+++ linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
17299@@ -32,8 +32,8 @@ out:
17300 return error;
17301 }
17302
17303-static void find_start_end(unsigned long flags, unsigned long *begin,
17304- unsigned long *end)
17305+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17306+ unsigned long *begin, unsigned long *end)
17307 {
17308 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17309 unsigned long new_begin;
17310@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
17311 *begin = new_begin;
17312 }
17313 } else {
17314- *begin = TASK_UNMAPPED_BASE;
17315+ *begin = mm->mmap_base;
17316 *end = TASK_SIZE;
17317 }
17318 }
17319@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
17320 if (flags & MAP_FIXED)
17321 return addr;
17322
17323- find_start_end(flags, &begin, &end);
17324+ find_start_end(mm, flags, &begin, &end);
17325
17326 if (len > end)
17327 return -ENOMEM;
17328
17329+#ifdef CONFIG_PAX_RANDMMAP
17330+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17331+#endif
17332+
17333 if (addr) {
17334 addr = PAGE_ALIGN(addr);
17335 vma = find_vma(mm, addr);
17336- if (end - len >= addr &&
17337- (!vma || addr + len <= vma->vm_start))
17338+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17339 return addr;
17340 }
17341 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17342@@ -106,7 +109,7 @@ full_search:
17343 }
17344 return -ENOMEM;
17345 }
17346- if (!vma || addr + len <= vma->vm_start) {
17347+ if (check_heap_stack_gap(vma, addr, len)) {
17348 /*
17349 * Remember the place where we stopped the search:
17350 */
17351@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
17352 {
17353 struct vm_area_struct *vma;
17354 struct mm_struct *mm = current->mm;
17355- unsigned long addr = addr0;
17356+ unsigned long base = mm->mmap_base, addr = addr0;
17357
17358 /* requested length too big for entire address space */
17359 if (len > TASK_SIZE)
17360@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
17361 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17362 goto bottomup;
17363
17364+#ifdef CONFIG_PAX_RANDMMAP
17365+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17366+#endif
17367+
17368 /* requesting a specific address */
17369 if (addr) {
17370 addr = PAGE_ALIGN(addr);
17371- vma = find_vma(mm, addr);
17372- if (TASK_SIZE - len >= addr &&
17373- (!vma || addr + len <= vma->vm_start))
17374- return addr;
17375+ if (TASK_SIZE - len >= addr) {
17376+ vma = find_vma(mm, addr);
17377+ if (check_heap_stack_gap(vma, addr, len))
17378+ return addr;
17379+ }
17380 }
17381
17382 /* check if free_area_cache is useful for us */
17383@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
17384 /* make sure it can fit in the remaining address space */
17385 if (addr > len) {
17386 vma = find_vma(mm, addr-len);
17387- if (!vma || addr <= vma->vm_start)
17388+ if (check_heap_stack_gap(vma, addr - len, len))
17389 /* remember the address as a hint for next time */
17390 return mm->free_area_cache = addr-len;
17391 }
17392@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
17393 * return with success:
17394 */
17395 vma = find_vma(mm, addr);
17396- if (!vma || addr+len <= vma->vm_start)
17397+ if (check_heap_stack_gap(vma, addr, len))
17398 /* remember the address as a hint for next time */
17399 return mm->free_area_cache = addr;
17400
17401@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
17402 mm->cached_hole_size = vma->vm_start - addr;
17403
17404 /* try just below the current vma->vm_start */
17405- addr = vma->vm_start-len;
17406- } while (len < vma->vm_start);
17407+ addr = skip_heap_stack_gap(vma, len);
17408+ } while (!IS_ERR_VALUE(addr));
17409
17410 bottomup:
17411 /*
17412@@ -198,13 +206,21 @@ bottomup:
17413 * can happen with large stack limits and large mmap()
17414 * allocations.
17415 */
17416+ mm->mmap_base = TASK_UNMAPPED_BASE;
17417+
17418+#ifdef CONFIG_PAX_RANDMMAP
17419+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17420+ mm->mmap_base += mm->delta_mmap;
17421+#endif
17422+
17423+ mm->free_area_cache = mm->mmap_base;
17424 mm->cached_hole_size = ~0UL;
17425- mm->free_area_cache = TASK_UNMAPPED_BASE;
17426 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17427 /*
17428 * Restore the topdown base:
17429 */
17430- mm->free_area_cache = mm->mmap_base;
17431+ mm->mmap_base = base;
17432+ mm->free_area_cache = base;
17433 mm->cached_hole_size = ~0UL;
17434
17435 return addr;
17436diff -urNp linux-2.6.32.45/arch/x86/kernel/tboot.c linux-2.6.32.45/arch/x86/kernel/tboot.c
17437--- linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
17438+++ linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
17439@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17440
17441 void tboot_shutdown(u32 shutdown_type)
17442 {
17443- void (*shutdown)(void);
17444+ void (* __noreturn shutdown)(void);
17445
17446 if (!tboot_enabled())
17447 return;
17448@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17449
17450 switch_to_tboot_pt();
17451
17452- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17453+ shutdown = (void *)tboot->shutdown_entry;
17454 shutdown();
17455
17456 /* should not reach here */
17457@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17458 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17459 }
17460
17461-static atomic_t ap_wfs_count;
17462+static atomic_unchecked_t ap_wfs_count;
17463
17464 static int tboot_wait_for_aps(int num_aps)
17465 {
17466@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17467 {
17468 switch (action) {
17469 case CPU_DYING:
17470- atomic_inc(&ap_wfs_count);
17471+ atomic_inc_unchecked(&ap_wfs_count);
17472 if (num_online_cpus() == 1)
17473- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17474+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17475 return NOTIFY_BAD;
17476 break;
17477 }
17478@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17479
17480 tboot_create_trampoline();
17481
17482- atomic_set(&ap_wfs_count, 0);
17483+ atomic_set_unchecked(&ap_wfs_count, 0);
17484 register_hotcpu_notifier(&tboot_cpu_notifier);
17485 return 0;
17486 }
17487diff -urNp linux-2.6.32.45/arch/x86/kernel/time.c linux-2.6.32.45/arch/x86/kernel/time.c
17488--- linux-2.6.32.45/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17489+++ linux-2.6.32.45/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17490@@ -26,17 +26,13 @@
17491 int timer_ack;
17492 #endif
17493
17494-#ifdef CONFIG_X86_64
17495-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17496-#endif
17497-
17498 unsigned long profile_pc(struct pt_regs *regs)
17499 {
17500 unsigned long pc = instruction_pointer(regs);
17501
17502- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17503+ if (!user_mode(regs) && in_lock_functions(pc)) {
17504 #ifdef CONFIG_FRAME_POINTER
17505- return *(unsigned long *)(regs->bp + sizeof(long));
17506+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17507 #else
17508 unsigned long *sp =
17509 (unsigned long *)kernel_stack_pointer(regs);
17510@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17511 * or above a saved flags. Eflags has bits 22-31 zero,
17512 * kernel addresses don't.
17513 */
17514+
17515+#ifdef CONFIG_PAX_KERNEXEC
17516+ return ktla_ktva(sp[0]);
17517+#else
17518 if (sp[0] >> 22)
17519 return sp[0];
17520 if (sp[1] >> 22)
17521 return sp[1];
17522 #endif
17523+
17524+#endif
17525 }
17526 return pc;
17527 }
17528diff -urNp linux-2.6.32.45/arch/x86/kernel/tls.c linux-2.6.32.45/arch/x86/kernel/tls.c
17529--- linux-2.6.32.45/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17530+++ linux-2.6.32.45/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17531@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17532 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17533 return -EINVAL;
17534
17535+#ifdef CONFIG_PAX_SEGMEXEC
17536+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17537+ return -EINVAL;
17538+#endif
17539+
17540 set_tls_desc(p, idx, &info, 1);
17541
17542 return 0;
17543diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_32.S linux-2.6.32.45/arch/x86/kernel/trampoline_32.S
17544--- linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17545+++ linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17546@@ -32,6 +32,12 @@
17547 #include <asm/segment.h>
17548 #include <asm/page_types.h>
17549
17550+#ifdef CONFIG_PAX_KERNEXEC
17551+#define ta(X) (X)
17552+#else
17553+#define ta(X) ((X) - __PAGE_OFFSET)
17554+#endif
17555+
17556 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17557 __CPUINITRODATA
17558 .code16
17559@@ -60,7 +66,7 @@ r_base = .
17560 inc %ax # protected mode (PE) bit
17561 lmsw %ax # into protected mode
17562 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17563- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17564+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17565
17566 # These need to be in the same 64K segment as the above;
17567 # hence we don't use the boot_gdt_descr defined in head.S
17568diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_64.S linux-2.6.32.45/arch/x86/kernel/trampoline_64.S
17569--- linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17570+++ linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17571@@ -91,7 +91,7 @@ startup_32:
17572 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17573 movl %eax, %ds
17574
17575- movl $X86_CR4_PAE, %eax
17576+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17577 movl %eax, %cr4 # Enable PAE mode
17578
17579 # Setup trampoline 4 level pagetables
17580@@ -127,7 +127,7 @@ startup_64:
17581 no_longmode:
17582 hlt
17583 jmp no_longmode
17584-#include "verify_cpu_64.S"
17585+#include "verify_cpu.S"
17586
17587 # Careful these need to be in the same 64K segment as the above;
17588 tidt:
17589@@ -138,7 +138,7 @@ tidt:
17590 # so the kernel can live anywhere
17591 .balign 4
17592 tgdt:
17593- .short tgdt_end - tgdt # gdt limit
17594+ .short tgdt_end - tgdt - 1 # gdt limit
17595 .long tgdt - r_base
17596 .short 0
17597 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17598diff -urNp linux-2.6.32.45/arch/x86/kernel/traps.c linux-2.6.32.45/arch/x86/kernel/traps.c
17599--- linux-2.6.32.45/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17600+++ linux-2.6.32.45/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17601@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17602
17603 /* Do we ignore FPU interrupts ? */
17604 char ignore_fpu_irq;
17605-
17606-/*
17607- * The IDT has to be page-aligned to simplify the Pentium
17608- * F0 0F bug workaround.
17609- */
17610-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17611 #endif
17612
17613 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17614@@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17615 static inline void
17616 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17617 {
17618- if (!user_mode_vm(regs))
17619+ if (!user_mode(regs))
17620 die(str, regs, err);
17621 }
17622 #endif
17623
17624 static void __kprobes
17625-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17626+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17627 long error_code, siginfo_t *info)
17628 {
17629 struct task_struct *tsk = current;
17630
17631 #ifdef CONFIG_X86_32
17632- if (regs->flags & X86_VM_MASK) {
17633+ if (v8086_mode(regs)) {
17634 /*
17635 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17636 * On nmi (interrupt 2), do_trap should not be called.
17637@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17638 }
17639 #endif
17640
17641- if (!user_mode(regs))
17642+ if (!user_mode_novm(regs))
17643 goto kernel_trap;
17644
17645 #ifdef CONFIG_X86_32
17646@@ -158,7 +152,7 @@ trap_signal:
17647 printk_ratelimit()) {
17648 printk(KERN_INFO
17649 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17650- tsk->comm, tsk->pid, str,
17651+ tsk->comm, task_pid_nr(tsk), str,
17652 regs->ip, regs->sp, error_code);
17653 print_vma_addr(" in ", regs->ip);
17654 printk("\n");
17655@@ -175,8 +169,20 @@ kernel_trap:
17656 if (!fixup_exception(regs)) {
17657 tsk->thread.error_code = error_code;
17658 tsk->thread.trap_no = trapnr;
17659+
17660+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17661+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17662+ str = "PAX: suspicious stack segment fault";
17663+#endif
17664+
17665 die(str, regs, error_code);
17666 }
17667+
17668+#ifdef CONFIG_PAX_REFCOUNT
17669+ if (trapnr == 4)
17670+ pax_report_refcount_overflow(regs);
17671+#endif
17672+
17673 return;
17674
17675 #ifdef CONFIG_X86_32
17676@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17677 conditional_sti(regs);
17678
17679 #ifdef CONFIG_X86_32
17680- if (regs->flags & X86_VM_MASK)
17681+ if (v8086_mode(regs))
17682 goto gp_in_vm86;
17683 #endif
17684
17685 tsk = current;
17686- if (!user_mode(regs))
17687+ if (!user_mode_novm(regs))
17688 goto gp_in_kernel;
17689
17690+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17691+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17692+ struct mm_struct *mm = tsk->mm;
17693+ unsigned long limit;
17694+
17695+ down_write(&mm->mmap_sem);
17696+ limit = mm->context.user_cs_limit;
17697+ if (limit < TASK_SIZE) {
17698+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17699+ up_write(&mm->mmap_sem);
17700+ return;
17701+ }
17702+ up_write(&mm->mmap_sem);
17703+ }
17704+#endif
17705+
17706 tsk->thread.error_code = error_code;
17707 tsk->thread.trap_no = 13;
17708
17709@@ -305,6 +327,13 @@ gp_in_kernel:
17710 if (notify_die(DIE_GPF, "general protection fault", regs,
17711 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17712 return;
17713+
17714+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17715+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17716+ die("PAX: suspicious general protection fault", regs, error_code);
17717+ else
17718+#endif
17719+
17720 die("general protection fault", regs, error_code);
17721 }
17722
17723@@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17724 dotraplinkage notrace __kprobes void
17725 do_nmi(struct pt_regs *regs, long error_code)
17726 {
17727+
17728+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17729+ if (!user_mode(regs)) {
17730+ unsigned long cs = regs->cs & 0xFFFF;
17731+ unsigned long ip = ktva_ktla(regs->ip);
17732+
17733+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17734+ regs->ip = ip;
17735+ }
17736+#endif
17737+
17738 nmi_enter();
17739
17740 inc_irq_stat(__nmi_count);
17741@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17742 }
17743
17744 #ifdef CONFIG_X86_32
17745- if (regs->flags & X86_VM_MASK)
17746+ if (v8086_mode(regs))
17747 goto debug_vm86;
17748 #endif
17749
17750@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17751 * kernel space (but re-enable TF when returning to user mode).
17752 */
17753 if (condition & DR_STEP) {
17754- if (!user_mode(regs))
17755+ if (!user_mode_novm(regs))
17756 goto clear_TF_reenable;
17757 }
17758
17759@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17760 * Handle strange cache flush from user space exception
17761 * in all other cases. This is undocumented behaviour.
17762 */
17763- if (regs->flags & X86_VM_MASK) {
17764+ if (v8086_mode(regs)) {
17765 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17766 return;
17767 }
17768@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17769 void __math_state_restore(void)
17770 {
17771 struct thread_info *thread = current_thread_info();
17772- struct task_struct *tsk = thread->task;
17773+ struct task_struct *tsk = current;
17774
17775 /*
17776 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17777@@ -825,8 +865,7 @@ void __math_state_restore(void)
17778 */
17779 asmlinkage void math_state_restore(void)
17780 {
17781- struct thread_info *thread = current_thread_info();
17782- struct task_struct *tsk = thread->task;
17783+ struct task_struct *tsk = current;
17784
17785 if (!tsk_used_math(tsk)) {
17786 local_irq_enable();
17787diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S
17788--- linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17789+++ linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17790@@ -1,105 +0,0 @@
17791-/*
17792- *
17793- * verify_cpu.S - Code for cpu long mode and SSE verification. This
17794- * code has been borrowed from boot/setup.S and was introduced by
17795- * Andi Kleen.
17796- *
17797- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17798- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17799- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17800- *
17801- * This source code is licensed under the GNU General Public License,
17802- * Version 2. See the file COPYING for more details.
17803- *
17804- * This is a common code for verification whether CPU supports
17805- * long mode and SSE or not. It is not called directly instead this
17806- * file is included at various places and compiled in that context.
17807- * Following are the current usage.
17808- *
17809- * This file is included by both 16bit and 32bit code.
17810- *
17811- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17812- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17813- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17814- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17815- *
17816- * verify_cpu, returns the status of cpu check in register %eax.
17817- * 0: Success 1: Failure
17818- *
17819- * The caller needs to check for the error code and take the action
17820- * appropriately. Either display a message or halt.
17821- */
17822-
17823-#include <asm/cpufeature.h>
17824-
17825-verify_cpu:
17826- pushfl # Save caller passed flags
17827- pushl $0 # Kill any dangerous flags
17828- popfl
17829-
17830- pushfl # standard way to check for cpuid
17831- popl %eax
17832- movl %eax,%ebx
17833- xorl $0x200000,%eax
17834- pushl %eax
17835- popfl
17836- pushfl
17837- popl %eax
17838- cmpl %eax,%ebx
17839- jz verify_cpu_no_longmode # cpu has no cpuid
17840-
17841- movl $0x0,%eax # See if cpuid 1 is implemented
17842- cpuid
17843- cmpl $0x1,%eax
17844- jb verify_cpu_no_longmode # no cpuid 1
17845-
17846- xor %di,%di
17847- cmpl $0x68747541,%ebx # AuthenticAMD
17848- jnz verify_cpu_noamd
17849- cmpl $0x69746e65,%edx
17850- jnz verify_cpu_noamd
17851- cmpl $0x444d4163,%ecx
17852- jnz verify_cpu_noamd
17853- mov $1,%di # cpu is from AMD
17854-
17855-verify_cpu_noamd:
17856- movl $0x1,%eax # Does the cpu have what it takes
17857- cpuid
17858- andl $REQUIRED_MASK0,%edx
17859- xorl $REQUIRED_MASK0,%edx
17860- jnz verify_cpu_no_longmode
17861-
17862- movl $0x80000000,%eax # See if extended cpuid is implemented
17863- cpuid
17864- cmpl $0x80000001,%eax
17865- jb verify_cpu_no_longmode # no extended cpuid
17866-
17867- movl $0x80000001,%eax # Does the cpu have what it takes
17868- cpuid
17869- andl $REQUIRED_MASK1,%edx
17870- xorl $REQUIRED_MASK1,%edx
17871- jnz verify_cpu_no_longmode
17872-
17873-verify_cpu_sse_test:
17874- movl $1,%eax
17875- cpuid
17876- andl $SSE_MASK,%edx
17877- cmpl $SSE_MASK,%edx
17878- je verify_cpu_sse_ok
17879- test %di,%di
17880- jz verify_cpu_no_longmode # only try to force SSE on AMD
17881- movl $0xc0010015,%ecx # HWCR
17882- rdmsr
17883- btr $15,%eax # enable SSE
17884- wrmsr
17885- xor %di,%di # don't loop
17886- jmp verify_cpu_sse_test # try again
17887-
17888-verify_cpu_no_longmode:
17889- popfl # Restore caller passed flags
17890- movl $1,%eax
17891- ret
17892-verify_cpu_sse_ok:
17893- popfl # Restore caller passed flags
17894- xorl %eax, %eax
17895- ret
17896diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu.S linux-2.6.32.45/arch/x86/kernel/verify_cpu.S
17897--- linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17898+++ linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17899@@ -0,0 +1,140 @@
17900+/*
17901+ *
17902+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
17903+ * code has been borrowed from boot/setup.S and was introduced by
17904+ * Andi Kleen.
17905+ *
17906+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17907+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17908+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17909+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17910+ *
17911+ * This source code is licensed under the GNU General Public License,
17912+ * Version 2. See the file COPYING for more details.
17913+ *
17914+ * This is a common code for verification whether CPU supports
17915+ * long mode and SSE or not. It is not called directly instead this
17916+ * file is included at various places and compiled in that context.
17917+ * This file is expected to run in 32bit code. Currently:
17918+ *
17919+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17920+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
17921+ * arch/x86/kernel/head_32.S: processor startup
17922+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17923+ *
17924+ * verify_cpu, returns the status of longmode and SSE in register %eax.
17925+ * 0: Success 1: Failure
17926+ *
17927+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17928+ *
17929+ * The caller needs to check for the error code and take the action
17930+ * appropriately. Either display a message or halt.
17931+ */
17932+
17933+#include <asm/cpufeature.h>
17934+#include <asm/msr-index.h>
17935+
17936+verify_cpu:
17937+ pushfl # Save caller passed flags
17938+ pushl $0 # Kill any dangerous flags
17939+ popfl
17940+
17941+ pushfl # standard way to check for cpuid
17942+ popl %eax
17943+ movl %eax,%ebx
17944+ xorl $0x200000,%eax
17945+ pushl %eax
17946+ popfl
17947+ pushfl
17948+ popl %eax
17949+ cmpl %eax,%ebx
17950+ jz verify_cpu_no_longmode # cpu has no cpuid
17951+
17952+ movl $0x0,%eax # See if cpuid 1 is implemented
17953+ cpuid
17954+ cmpl $0x1,%eax
17955+ jb verify_cpu_no_longmode # no cpuid 1
17956+
17957+ xor %di,%di
17958+ cmpl $0x68747541,%ebx # AuthenticAMD
17959+ jnz verify_cpu_noamd
17960+ cmpl $0x69746e65,%edx
17961+ jnz verify_cpu_noamd
17962+ cmpl $0x444d4163,%ecx
17963+ jnz verify_cpu_noamd
17964+ mov $1,%di # cpu is from AMD
17965+ jmp verify_cpu_check
17966+
17967+verify_cpu_noamd:
17968+ cmpl $0x756e6547,%ebx # GenuineIntel?
17969+ jnz verify_cpu_check
17970+ cmpl $0x49656e69,%edx
17971+ jnz verify_cpu_check
17972+ cmpl $0x6c65746e,%ecx
17973+ jnz verify_cpu_check
17974+
17975+ # only call IA32_MISC_ENABLE when:
17976+ # family > 6 || (family == 6 && model >= 0xd)
17977+ movl $0x1, %eax # check CPU family and model
17978+ cpuid
17979+ movl %eax, %ecx
17980+
17981+ andl $0x0ff00f00, %eax # mask family and extended family
17982+ shrl $8, %eax
17983+ cmpl $6, %eax
17984+ ja verify_cpu_clear_xd # family > 6, ok
17985+ jb verify_cpu_check # family < 6, skip
17986+
17987+ andl $0x000f00f0, %ecx # mask model and extended model
17988+ shrl $4, %ecx
17989+ cmpl $0xd, %ecx
17990+ jb verify_cpu_check # family == 6, model < 0xd, skip
17991+
17992+verify_cpu_clear_xd:
17993+ movl $MSR_IA32_MISC_ENABLE, %ecx
17994+ rdmsr
17995+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17996+ jnc verify_cpu_check # only write MSR if bit was changed
17997+ wrmsr
17998+
17999+verify_cpu_check:
18000+ movl $0x1,%eax # Does the cpu have what it takes
18001+ cpuid
18002+ andl $REQUIRED_MASK0,%edx
18003+ xorl $REQUIRED_MASK0,%edx
18004+ jnz verify_cpu_no_longmode
18005+
18006+ movl $0x80000000,%eax # See if extended cpuid is implemented
18007+ cpuid
18008+ cmpl $0x80000001,%eax
18009+ jb verify_cpu_no_longmode # no extended cpuid
18010+
18011+ movl $0x80000001,%eax # Does the cpu have what it takes
18012+ cpuid
18013+ andl $REQUIRED_MASK1,%edx
18014+ xorl $REQUIRED_MASK1,%edx
18015+ jnz verify_cpu_no_longmode
18016+
18017+verify_cpu_sse_test:
18018+ movl $1,%eax
18019+ cpuid
18020+ andl $SSE_MASK,%edx
18021+ cmpl $SSE_MASK,%edx
18022+ je verify_cpu_sse_ok
18023+ test %di,%di
18024+ jz verify_cpu_no_longmode # only try to force SSE on AMD
18025+ movl $MSR_K7_HWCR,%ecx
18026+ rdmsr
18027+ btr $15,%eax # enable SSE
18028+ wrmsr
18029+ xor %di,%di # don't loop
18030+ jmp verify_cpu_sse_test # try again
18031+
18032+verify_cpu_no_longmode:
18033+ popfl # Restore caller passed flags
18034+ movl $1,%eax
18035+ ret
18036+verify_cpu_sse_ok:
18037+ popfl # Restore caller passed flags
18038+ xorl %eax, %eax
18039+ ret
18040diff -urNp linux-2.6.32.45/arch/x86/kernel/vm86_32.c linux-2.6.32.45/arch/x86/kernel/vm86_32.c
18041--- linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
18042+++ linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
18043@@ -41,6 +41,7 @@
18044 #include <linux/ptrace.h>
18045 #include <linux/audit.h>
18046 #include <linux/stddef.h>
18047+#include <linux/grsecurity.h>
18048
18049 #include <asm/uaccess.h>
18050 #include <asm/io.h>
18051@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
18052 do_exit(SIGSEGV);
18053 }
18054
18055- tss = &per_cpu(init_tss, get_cpu());
18056+ tss = init_tss + get_cpu();
18057 current->thread.sp0 = current->thread.saved_sp0;
18058 current->thread.sysenter_cs = __KERNEL_CS;
18059 load_sp0(tss, &current->thread);
18060@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
18061 struct task_struct *tsk;
18062 int tmp, ret = -EPERM;
18063
18064+#ifdef CONFIG_GRKERNSEC_VM86
18065+ if (!capable(CAP_SYS_RAWIO)) {
18066+ gr_handle_vm86();
18067+ goto out;
18068+ }
18069+#endif
18070+
18071 tsk = current;
18072 if (tsk->thread.saved_sp0)
18073 goto out;
18074@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
18075 int tmp, ret;
18076 struct vm86plus_struct __user *v86;
18077
18078+#ifdef CONFIG_GRKERNSEC_VM86
18079+ if (!capable(CAP_SYS_RAWIO)) {
18080+ gr_handle_vm86();
18081+ ret = -EPERM;
18082+ goto out;
18083+ }
18084+#endif
18085+
18086 tsk = current;
18087 switch (regs->bx) {
18088 case VM86_REQUEST_IRQ:
18089@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
18090 tsk->thread.saved_fs = info->regs32->fs;
18091 tsk->thread.saved_gs = get_user_gs(info->regs32);
18092
18093- tss = &per_cpu(init_tss, get_cpu());
18094+ tss = init_tss + get_cpu();
18095 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
18096 if (cpu_has_sep)
18097 tsk->thread.sysenter_cs = 0;
18098@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
18099 goto cannot_handle;
18100 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
18101 goto cannot_handle;
18102- intr_ptr = (unsigned long __user *) (i << 2);
18103+ intr_ptr = (__force unsigned long __user *) (i << 2);
18104 if (get_user(segoffs, intr_ptr))
18105 goto cannot_handle;
18106 if ((segoffs >> 16) == BIOSSEG)
18107diff -urNp linux-2.6.32.45/arch/x86/kernel/vmi_32.c linux-2.6.32.45/arch/x86/kernel/vmi_32.c
18108--- linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
18109+++ linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-08-05 20:33:55.000000000 -0400
18110@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
18111 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
18112
18113 #define call_vrom_func(rom,func) \
18114- (((VROMFUNC *)(rom->func))())
18115+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
18116
18117 #define call_vrom_long_func(rom,func,arg) \
18118- (((VROMLONGFUNC *)(rom->func)) (arg))
18119+({\
18120+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
18121+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
18122+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
18123+ __reloc;\
18124+})
18125
18126-static struct vrom_header *vmi_rom;
18127+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
18128 static int disable_pge;
18129 static int disable_pse;
18130 static int disable_sep;
18131@@ -76,10 +81,10 @@ static struct {
18132 void (*set_initial_ap_state)(int, int);
18133 void (*halt)(void);
18134 void (*set_lazy_mode)(int mode);
18135-} vmi_ops;
18136+} __no_const vmi_ops __read_only;
18137
18138 /* Cached VMI operations */
18139-struct vmi_timer_ops vmi_timer_ops;
18140+struct vmi_timer_ops vmi_timer_ops __read_only;
18141
18142 /*
18143 * VMI patching routines.
18144@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
18145 static inline void patch_offset(void *insnbuf,
18146 unsigned long ip, unsigned long dest)
18147 {
18148- *(unsigned long *)(insnbuf+1) = dest-ip-5;
18149+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
18150 }
18151
18152 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
18153@@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
18154 {
18155 u64 reloc;
18156 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
18157+
18158 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
18159 switch(rel->type) {
18160 case VMI_RELOCATION_CALL_REL:
18161@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
18162
18163 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
18164 {
18165- const pte_t pte = { .pte = 0 };
18166+ const pte_t pte = __pte(0ULL);
18167 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
18168 }
18169
18170 static void vmi_pmd_clear(pmd_t *pmd)
18171 {
18172- const pte_t pte = { .pte = 0 };
18173+ const pte_t pte = __pte(0ULL);
18174 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
18175 }
18176 #endif
18177@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
18178 ap.ss = __KERNEL_DS;
18179 ap.esp = (unsigned long) start_esp;
18180
18181- ap.ds = __USER_DS;
18182- ap.es = __USER_DS;
18183+ ap.ds = __KERNEL_DS;
18184+ ap.es = __KERNEL_DS;
18185 ap.fs = __KERNEL_PERCPU;
18186- ap.gs = __KERNEL_STACK_CANARY;
18187+ savesegment(gs, ap.gs);
18188
18189 ap.eflags = 0;
18190
18191@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
18192 paravirt_leave_lazy_mmu();
18193 }
18194
18195+#ifdef CONFIG_PAX_KERNEXEC
18196+static unsigned long vmi_pax_open_kernel(void)
18197+{
18198+ return 0;
18199+}
18200+
18201+static unsigned long vmi_pax_close_kernel(void)
18202+{
18203+ return 0;
18204+}
18205+#endif
18206+
18207 static inline int __init check_vmi_rom(struct vrom_header *rom)
18208 {
18209 struct pci_header *pci;
18210@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
18211 return 0;
18212 if (rom->vrom_signature != VMI_SIGNATURE)
18213 return 0;
18214+ if (rom->rom_length * 512 > sizeof(*rom)) {
18215+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
18216+ return 0;
18217+ }
18218 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
18219 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
18220 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
18221@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
18222 struct vrom_header *romstart;
18223 romstart = (struct vrom_header *)isa_bus_to_virt(base);
18224 if (check_vmi_rom(romstart)) {
18225- vmi_rom = romstart;
18226+ vmi_rom = *romstart;
18227 return 1;
18228 }
18229 }
18230@@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
18231
18232 para_fill(pv_irq_ops.safe_halt, Halt);
18233
18234+#ifdef CONFIG_PAX_KERNEXEC
18235+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
18236+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
18237+#endif
18238+
18239 /*
18240 * Alternative instruction rewriting doesn't happen soon enough
18241 * to convert VMI_IRET to a call instead of a jump; so we have
18242@@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
18243
18244 void __init vmi_init(void)
18245 {
18246- if (!vmi_rom)
18247+ if (!vmi_rom.rom_signature)
18248 probe_vmi_rom();
18249 else
18250- check_vmi_rom(vmi_rom);
18251+ check_vmi_rom(&vmi_rom);
18252
18253 /* In case probing for or validating the ROM failed, basil */
18254- if (!vmi_rom)
18255+ if (!vmi_rom.rom_signature)
18256 return;
18257
18258- reserve_top_address(-vmi_rom->virtual_top);
18259+ reserve_top_address(-vmi_rom.virtual_top);
18260
18261 #ifdef CONFIG_X86_IO_APIC
18262 /* This is virtual hardware; timer routing is wired correctly */
18263@@ -874,7 +901,7 @@ void __init vmi_activate(void)
18264 {
18265 unsigned long flags;
18266
18267- if (!vmi_rom)
18268+ if (!vmi_rom.rom_signature)
18269 return;
18270
18271 local_irq_save(flags);
18272diff -urNp linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S
18273--- linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
18274+++ linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
18275@@ -26,6 +26,13 @@
18276 #include <asm/page_types.h>
18277 #include <asm/cache.h>
18278 #include <asm/boot.h>
18279+#include <asm/segment.h>
18280+
18281+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18282+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18283+#else
18284+#define __KERNEL_TEXT_OFFSET 0
18285+#endif
18286
18287 #undef i386 /* in case the preprocessor is a 32bit one */
18288
18289@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
18290 #ifdef CONFIG_X86_32
18291 OUTPUT_ARCH(i386)
18292 ENTRY(phys_startup_32)
18293-jiffies = jiffies_64;
18294 #else
18295 OUTPUT_ARCH(i386:x86-64)
18296 ENTRY(phys_startup_64)
18297-jiffies_64 = jiffies;
18298 #endif
18299
18300 PHDRS {
18301 text PT_LOAD FLAGS(5); /* R_E */
18302- data PT_LOAD FLAGS(7); /* RWE */
18303+#ifdef CONFIG_X86_32
18304+ module PT_LOAD FLAGS(5); /* R_E */
18305+#endif
18306+#ifdef CONFIG_XEN
18307+ rodata PT_LOAD FLAGS(5); /* R_E */
18308+#else
18309+ rodata PT_LOAD FLAGS(4); /* R__ */
18310+#endif
18311+ data PT_LOAD FLAGS(6); /* RW_ */
18312 #ifdef CONFIG_X86_64
18313 user PT_LOAD FLAGS(5); /* R_E */
18314+#endif
18315+ init.begin PT_LOAD FLAGS(6); /* RW_ */
18316 #ifdef CONFIG_SMP
18317 percpu PT_LOAD FLAGS(6); /* RW_ */
18318 #endif
18319+ text.init PT_LOAD FLAGS(5); /* R_E */
18320+ text.exit PT_LOAD FLAGS(5); /* R_E */
18321 init PT_LOAD FLAGS(7); /* RWE */
18322-#endif
18323 note PT_NOTE FLAGS(0); /* ___ */
18324 }
18325
18326 SECTIONS
18327 {
18328 #ifdef CONFIG_X86_32
18329- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18330- phys_startup_32 = startup_32 - LOAD_OFFSET;
18331+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18332 #else
18333- . = __START_KERNEL;
18334- phys_startup_64 = startup_64 - LOAD_OFFSET;
18335+ . = __START_KERNEL;
18336 #endif
18337
18338 /* Text and read-only data */
18339- .text : AT(ADDR(.text) - LOAD_OFFSET) {
18340- _text = .;
18341+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18342 /* bootstrapping code */
18343+#ifdef CONFIG_X86_32
18344+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18345+#else
18346+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18347+#endif
18348+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18349+ _text = .;
18350 HEAD_TEXT
18351 #ifdef CONFIG_X86_32
18352 . = ALIGN(PAGE_SIZE);
18353@@ -82,28 +102,71 @@ SECTIONS
18354 IRQENTRY_TEXT
18355 *(.fixup)
18356 *(.gnu.warning)
18357- /* End of text section */
18358- _etext = .;
18359 } :text = 0x9090
18360
18361- NOTES :text :note
18362+ . += __KERNEL_TEXT_OFFSET;
18363+
18364+#ifdef CONFIG_X86_32
18365+ . = ALIGN(PAGE_SIZE);
18366+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
18367+ *(.vmi.rom)
18368+ } :module
18369+
18370+ . = ALIGN(PAGE_SIZE);
18371+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18372+
18373+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18374+ MODULES_EXEC_VADDR = .;
18375+ BYTE(0)
18376+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18377+ . = ALIGN(HPAGE_SIZE);
18378+ MODULES_EXEC_END = . - 1;
18379+#endif
18380+
18381+ } :module
18382+#endif
18383
18384- EXCEPTION_TABLE(16) :text = 0x9090
18385+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18386+ /* End of text section */
18387+ _etext = . - __KERNEL_TEXT_OFFSET;
18388+ }
18389+
18390+#ifdef CONFIG_X86_32
18391+ . = ALIGN(PAGE_SIZE);
18392+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18393+ *(.idt)
18394+ . = ALIGN(PAGE_SIZE);
18395+ *(.empty_zero_page)
18396+ *(.swapper_pg_fixmap)
18397+ *(.swapper_pg_pmd)
18398+ *(.swapper_pg_dir)
18399+ *(.trampoline_pg_dir)
18400+ } :rodata
18401+#endif
18402+
18403+ . = ALIGN(PAGE_SIZE);
18404+ NOTES :rodata :note
18405+
18406+ EXCEPTION_TABLE(16) :rodata
18407
18408 RO_DATA(PAGE_SIZE)
18409
18410 /* Data */
18411 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18412+
18413+#ifdef CONFIG_PAX_KERNEXEC
18414+ . = ALIGN(HPAGE_SIZE);
18415+#else
18416+ . = ALIGN(PAGE_SIZE);
18417+#endif
18418+
18419 /* Start of data section */
18420 _sdata = .;
18421
18422 /* init_task */
18423 INIT_TASK_DATA(THREAD_SIZE)
18424
18425-#ifdef CONFIG_X86_32
18426- /* 32 bit has nosave before _edata */
18427 NOSAVE_DATA
18428-#endif
18429
18430 PAGE_ALIGNED_DATA(PAGE_SIZE)
18431
18432@@ -112,6 +175,8 @@ SECTIONS
18433 DATA_DATA
18434 CONSTRUCTORS
18435
18436+ jiffies = jiffies_64;
18437+
18438 /* rarely changed data like cpu maps */
18439 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18440
18441@@ -166,12 +231,6 @@ SECTIONS
18442 }
18443 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18444
18445- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18446- .jiffies : AT(VLOAD(.jiffies)) {
18447- *(.jiffies)
18448- }
18449- jiffies = VVIRT(.jiffies);
18450-
18451 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18452 *(.vsyscall_3)
18453 }
18454@@ -187,12 +246,19 @@ SECTIONS
18455 #endif /* CONFIG_X86_64 */
18456
18457 /* Init code and data - will be freed after init */
18458- . = ALIGN(PAGE_SIZE);
18459 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18460+ BYTE(0)
18461+
18462+#ifdef CONFIG_PAX_KERNEXEC
18463+ . = ALIGN(HPAGE_SIZE);
18464+#else
18465+ . = ALIGN(PAGE_SIZE);
18466+#endif
18467+
18468 __init_begin = .; /* paired with __init_end */
18469- }
18470+ } :init.begin
18471
18472-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18473+#ifdef CONFIG_SMP
18474 /*
18475 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18476 * output PHDR, so the next output section - .init.text - should
18477@@ -201,12 +267,27 @@ SECTIONS
18478 PERCPU_VADDR(0, :percpu)
18479 #endif
18480
18481- INIT_TEXT_SECTION(PAGE_SIZE)
18482-#ifdef CONFIG_X86_64
18483- :init
18484-#endif
18485+ . = ALIGN(PAGE_SIZE);
18486+ init_begin = .;
18487+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18488+ VMLINUX_SYMBOL(_sinittext) = .;
18489+ INIT_TEXT
18490+ VMLINUX_SYMBOL(_einittext) = .;
18491+ . = ALIGN(PAGE_SIZE);
18492+ } :text.init
18493
18494- INIT_DATA_SECTION(16)
18495+ /*
18496+ * .exit.text is discard at runtime, not link time, to deal with
18497+ * references from .altinstructions and .eh_frame
18498+ */
18499+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18500+ EXIT_TEXT
18501+ . = ALIGN(16);
18502+ } :text.exit
18503+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18504+
18505+ . = ALIGN(PAGE_SIZE);
18506+ INIT_DATA_SECTION(16) :init
18507
18508 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18509 __x86_cpu_dev_start = .;
18510@@ -232,19 +313,11 @@ SECTIONS
18511 *(.altinstr_replacement)
18512 }
18513
18514- /*
18515- * .exit.text is discard at runtime, not link time, to deal with
18516- * references from .altinstructions and .eh_frame
18517- */
18518- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18519- EXIT_TEXT
18520- }
18521-
18522 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18523 EXIT_DATA
18524 }
18525
18526-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18527+#ifndef CONFIG_SMP
18528 PERCPU(PAGE_SIZE)
18529 #endif
18530
18531@@ -267,12 +340,6 @@ SECTIONS
18532 . = ALIGN(PAGE_SIZE);
18533 }
18534
18535-#ifdef CONFIG_X86_64
18536- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18537- NOSAVE_DATA
18538- }
18539-#endif
18540-
18541 /* BSS */
18542 . = ALIGN(PAGE_SIZE);
18543 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18544@@ -288,6 +355,7 @@ SECTIONS
18545 __brk_base = .;
18546 . += 64 * 1024; /* 64k alignment slop space */
18547 *(.brk_reservation) /* areas brk users have reserved */
18548+ . = ALIGN(HPAGE_SIZE);
18549 __brk_limit = .;
18550 }
18551
18552@@ -316,13 +384,12 @@ SECTIONS
18553 * for the boot processor.
18554 */
18555 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18556-INIT_PER_CPU(gdt_page);
18557 INIT_PER_CPU(irq_stack_union);
18558
18559 /*
18560 * Build-time check on the image size:
18561 */
18562-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18563+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18564 "kernel image bigger than KERNEL_IMAGE_SIZE");
18565
18566 #ifdef CONFIG_SMP
18567diff -urNp linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c
18568--- linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18569+++ linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18570@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18571
18572 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18573 /* copy vsyscall data */
18574+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18575 vsyscall_gtod_data.clock.vread = clock->vread;
18576 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18577 vsyscall_gtod_data.clock.mask = clock->mask;
18578@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18579 We do this here because otherwise user space would do it on
18580 its own in a likely inferior way (no access to jiffies).
18581 If you don't like it pass NULL. */
18582- if (tcache && tcache->blob[0] == (j = __jiffies)) {
18583+ if (tcache && tcache->blob[0] == (j = jiffies)) {
18584 p = tcache->blob[1];
18585 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18586 /* Load per CPU data from RDTSCP */
18587diff -urNp linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c
18588--- linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18589+++ linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18590@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18591
18592 EXPORT_SYMBOL(copy_user_generic);
18593 EXPORT_SYMBOL(__copy_user_nocache);
18594-EXPORT_SYMBOL(copy_from_user);
18595-EXPORT_SYMBOL(copy_to_user);
18596 EXPORT_SYMBOL(__copy_from_user_inatomic);
18597
18598 EXPORT_SYMBOL(copy_page);
18599diff -urNp linux-2.6.32.45/arch/x86/kernel/xsave.c linux-2.6.32.45/arch/x86/kernel/xsave.c
18600--- linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18601+++ linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18602@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18603 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18604 return -1;
18605
18606- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18607+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18608 fx_sw_user->extended_size -
18609 FP_XSTATE_MAGIC2_SIZE));
18610 /*
18611@@ -196,7 +196,7 @@ fx_only:
18612 * the other extended state.
18613 */
18614 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18615- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18616+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18617 }
18618
18619 /*
18620@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18621 if (task_thread_info(tsk)->status & TS_XSAVE)
18622 err = restore_user_xstate(buf);
18623 else
18624- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18625+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
18626 buf);
18627 if (unlikely(err)) {
18628 /*
18629diff -urNp linux-2.6.32.45/arch/x86/kvm/emulate.c linux-2.6.32.45/arch/x86/kvm/emulate.c
18630--- linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18631+++ linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18632@@ -81,8 +81,8 @@
18633 #define Src2CL (1<<29)
18634 #define Src2ImmByte (2<<29)
18635 #define Src2One (3<<29)
18636-#define Src2Imm16 (4<<29)
18637-#define Src2Mask (7<<29)
18638+#define Src2Imm16 (4U<<29)
18639+#define Src2Mask (7U<<29)
18640
18641 enum {
18642 Group1_80, Group1_81, Group1_82, Group1_83,
18643@@ -411,6 +411,7 @@ static u32 group2_table[] = {
18644
18645 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18646 do { \
18647+ unsigned long _tmp; \
18648 __asm__ __volatile__ ( \
18649 _PRE_EFLAGS("0", "4", "2") \
18650 _op _suffix " %"_x"3,%1; " \
18651@@ -424,8 +425,6 @@ static u32 group2_table[] = {
18652 /* Raw emulation: instruction has two explicit operands. */
18653 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18654 do { \
18655- unsigned long _tmp; \
18656- \
18657 switch ((_dst).bytes) { \
18658 case 2: \
18659 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18660@@ -441,7 +440,6 @@ static u32 group2_table[] = {
18661
18662 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18663 do { \
18664- unsigned long _tmp; \
18665 switch ((_dst).bytes) { \
18666 case 1: \
18667 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
18668diff -urNp linux-2.6.32.45/arch/x86/kvm/lapic.c linux-2.6.32.45/arch/x86/kvm/lapic.c
18669--- linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18670+++ linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18671@@ -52,7 +52,7 @@
18672 #define APIC_BUS_CYCLE_NS 1
18673
18674 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18675-#define apic_debug(fmt, arg...)
18676+#define apic_debug(fmt, arg...) do {} while (0)
18677
18678 #define APIC_LVT_NUM 6
18679 /* 14 is the version for Xeon and Pentium 8.4.8*/
18680diff -urNp linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h
18681--- linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18682+++ linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18683@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18684 int level = PT_PAGE_TABLE_LEVEL;
18685 unsigned long mmu_seq;
18686
18687+ pax_track_stack();
18688+
18689 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18690 kvm_mmu_audit(vcpu, "pre page fault");
18691
18692diff -urNp linux-2.6.32.45/arch/x86/kvm/svm.c linux-2.6.32.45/arch/x86/kvm/svm.c
18693--- linux-2.6.32.45/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18694+++ linux-2.6.32.45/arch/x86/kvm/svm.c 2011-08-05 20:33:55.000000000 -0400
18695@@ -2485,7 +2485,11 @@ static void reload_tss(struct kvm_vcpu *
18696 int cpu = raw_smp_processor_id();
18697
18698 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18699+
18700+ pax_open_kernel();
18701 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18702+ pax_close_kernel();
18703+
18704 load_TR_desc();
18705 }
18706
18707@@ -2946,7 +2950,7 @@ static bool svm_gb_page_enable(void)
18708 return true;
18709 }
18710
18711-static struct kvm_x86_ops svm_x86_ops = {
18712+static const struct kvm_x86_ops svm_x86_ops = {
18713 .cpu_has_kvm_support = has_svm,
18714 .disabled_by_bios = is_disabled,
18715 .hardware_setup = svm_hardware_setup,
18716diff -urNp linux-2.6.32.45/arch/x86/kvm/vmx.c linux-2.6.32.45/arch/x86/kvm/vmx.c
18717--- linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18718+++ linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18719@@ -570,7 +570,11 @@ static void reload_tss(void)
18720
18721 kvm_get_gdt(&gdt);
18722 descs = (void *)gdt.base;
18723+
18724+ pax_open_kernel();
18725 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18726+ pax_close_kernel();
18727+
18728 load_TR_desc();
18729 }
18730
18731@@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18732 if (!cpu_has_vmx_flexpriority())
18733 flexpriority_enabled = 0;
18734
18735- if (!cpu_has_vmx_tpr_shadow())
18736- kvm_x86_ops->update_cr8_intercept = NULL;
18737+ if (!cpu_has_vmx_tpr_shadow()) {
18738+ pax_open_kernel();
18739+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18740+ pax_close_kernel();
18741+ }
18742
18743 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18744 kvm_disable_largepages();
18745@@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18746 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18747
18748 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18749- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18750+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18751 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18752 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18753 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18754@@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18755 "jmp .Lkvm_vmx_return \n\t"
18756 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18757 ".Lkvm_vmx_return: "
18758+
18759+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18760+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18761+ ".Lkvm_vmx_return2: "
18762+#endif
18763+
18764 /* Save guest registers, load host registers, keep flags */
18765 "xchg %0, (%%"R"sp) \n\t"
18766 "mov %%"R"ax, %c[rax](%0) \n\t"
18767@@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18768 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18769 #endif
18770 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18771+
18772+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18773+ ,[cs]"i"(__KERNEL_CS)
18774+#endif
18775+
18776 : "cc", "memory"
18777- , R"bx", R"di", R"si"
18778+ , R"ax", R"bx", R"di", R"si"
18779 #ifdef CONFIG_X86_64
18780 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18781 #endif
18782@@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18783 if (vmx->rmode.irq.pending)
18784 fixup_rmode_irq(vmx);
18785
18786- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18787+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18788+
18789+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18790+ loadsegment(fs, __KERNEL_PERCPU);
18791+#endif
18792+
18793+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18794+ __set_fs(current_thread_info()->addr_limit);
18795+#endif
18796+
18797 vmx->launched = 1;
18798
18799 vmx_complete_interrupts(vmx);
18800@@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18801 return false;
18802 }
18803
18804-static struct kvm_x86_ops vmx_x86_ops = {
18805+static const struct kvm_x86_ops vmx_x86_ops = {
18806 .cpu_has_kvm_support = cpu_has_kvm_support,
18807 .disabled_by_bios = vmx_disabled_by_bios,
18808 .hardware_setup = hardware_setup,
18809diff -urNp linux-2.6.32.45/arch/x86/kvm/x86.c linux-2.6.32.45/arch/x86/kvm/x86.c
18810--- linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18811+++ linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18812@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18813 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18814 struct kvm_cpuid_entry2 __user *entries);
18815
18816-struct kvm_x86_ops *kvm_x86_ops;
18817+const struct kvm_x86_ops *kvm_x86_ops;
18818 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18819
18820 int ignore_msrs = 0;
18821@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18822 struct kvm_cpuid2 *cpuid,
18823 struct kvm_cpuid_entry2 __user *entries)
18824 {
18825- int r;
18826+ int r, i;
18827
18828 r = -E2BIG;
18829 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18830 goto out;
18831 r = -EFAULT;
18832- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18833- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18834+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18835 goto out;
18836+ for (i = 0; i < cpuid->nent; ++i) {
18837+ struct kvm_cpuid_entry2 cpuid_entry;
18838+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18839+ goto out;
18840+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18841+ }
18842 vcpu->arch.cpuid_nent = cpuid->nent;
18843 kvm_apic_set_version(vcpu);
18844 return 0;
18845@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18846 struct kvm_cpuid2 *cpuid,
18847 struct kvm_cpuid_entry2 __user *entries)
18848 {
18849- int r;
18850+ int r, i;
18851
18852 vcpu_load(vcpu);
18853 r = -E2BIG;
18854 if (cpuid->nent < vcpu->arch.cpuid_nent)
18855 goto out;
18856 r = -EFAULT;
18857- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18858- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18859+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18860 goto out;
18861+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18862+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18863+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18864+ goto out;
18865+ }
18866 return 0;
18867
18868 out:
18869@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18870 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18871 struct kvm_interrupt *irq)
18872 {
18873- if (irq->irq < 0 || irq->irq >= 256)
18874+ if (irq->irq >= 256)
18875 return -EINVAL;
18876 if (irqchip_in_kernel(vcpu->kvm))
18877 return -ENXIO;
18878@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18879 .notifier_call = kvmclock_cpufreq_notifier
18880 };
18881
18882-int kvm_arch_init(void *opaque)
18883+int kvm_arch_init(const void *opaque)
18884 {
18885 int r, cpu;
18886- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18887+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18888
18889 if (kvm_x86_ops) {
18890 printk(KERN_ERR "kvm: already loaded the other module\n");
18891diff -urNp linux-2.6.32.45/arch/x86/lguest/boot.c linux-2.6.32.45/arch/x86/lguest/boot.c
18892--- linux-2.6.32.45/arch/x86/lguest/boot.c 2011-03-27 14:31:47.000000000 -0400
18893+++ linux-2.6.32.45/arch/x86/lguest/boot.c 2011-08-05 20:33:55.000000000 -0400
18894@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vt
18895 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18896 * Launcher to reboot us.
18897 */
18898-static void lguest_restart(char *reason)
18899+static __noreturn void lguest_restart(char *reason)
18900 {
18901 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
18902+ BUG();
18903 }
18904
18905 /*G:050
18906diff -urNp linux-2.6.32.45/arch/x86/lib/atomic64_32.c linux-2.6.32.45/arch/x86/lib/atomic64_32.c
18907--- linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18908+++ linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18909@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18910 }
18911 EXPORT_SYMBOL(atomic64_cmpxchg);
18912
18913+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18914+{
18915+ return cmpxchg8b(&ptr->counter, old_val, new_val);
18916+}
18917+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18918+
18919 /**
18920 * atomic64_xchg - xchg atomic64 variable
18921 * @ptr: pointer to type atomic64_t
18922@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18923 EXPORT_SYMBOL(atomic64_xchg);
18924
18925 /**
18926+ * atomic64_xchg_unchecked - xchg atomic64 variable
18927+ * @ptr: pointer to type atomic64_unchecked_t
18928+ * @new_val: value to assign
18929+ *
18930+ * Atomically xchgs the value of @ptr to @new_val and returns
18931+ * the old value.
18932+ */
18933+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18934+{
18935+ /*
18936+ * Try first with a (possibly incorrect) assumption about
18937+ * what we have there. We'll do two loops most likely,
18938+ * but we'll get an ownership MESI transaction straight away
18939+ * instead of a read transaction followed by a
18940+ * flush-for-ownership transaction:
18941+ */
18942+ u64 old_val, real_val = 0;
18943+
18944+ do {
18945+ old_val = real_val;
18946+
18947+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18948+
18949+ } while (real_val != old_val);
18950+
18951+ return old_val;
18952+}
18953+EXPORT_SYMBOL(atomic64_xchg_unchecked);
18954+
18955+/**
18956 * atomic64_set - set atomic64 variable
18957 * @ptr: pointer to type atomic64_t
18958 * @new_val: value to assign
18959@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18960 EXPORT_SYMBOL(atomic64_set);
18961
18962 /**
18963-EXPORT_SYMBOL(atomic64_read);
18964+ * atomic64_unchecked_set - set atomic64 variable
18965+ * @ptr: pointer to type atomic64_unchecked_t
18966+ * @new_val: value to assign
18967+ *
18968+ * Atomically sets the value of @ptr to @new_val.
18969+ */
18970+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18971+{
18972+ atomic64_xchg_unchecked(ptr, new_val);
18973+}
18974+EXPORT_SYMBOL(atomic64_set_unchecked);
18975+
18976+/**
18977 * atomic64_add_return - add and return
18978 * @delta: integer value to add
18979 * @ptr: pointer to type atomic64_t
18980@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18981 }
18982 EXPORT_SYMBOL(atomic64_add_return);
18983
18984+/**
18985+ * atomic64_add_return_unchecked - add and return
18986+ * @delta: integer value to add
18987+ * @ptr: pointer to type atomic64_unchecked_t
18988+ *
18989+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
18990+ */
18991+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18992+{
18993+ /*
18994+ * Try first with a (possibly incorrect) assumption about
18995+ * what we have there. We'll do two loops most likely,
18996+ * but we'll get an ownership MESI transaction straight away
18997+ * instead of a read transaction followed by a
18998+ * flush-for-ownership transaction:
18999+ */
19000+ u64 old_val, new_val, real_val = 0;
19001+
19002+ do {
19003+ old_val = real_val;
19004+ new_val = old_val + delta;
19005+
19006+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
19007+
19008+ } while (real_val != old_val);
19009+
19010+ return new_val;
19011+}
19012+EXPORT_SYMBOL(atomic64_add_return_unchecked);
19013+
19014 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
19015 {
19016 return atomic64_add_return(-delta, ptr);
19017 }
19018 EXPORT_SYMBOL(atomic64_sub_return);
19019
19020+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19021+{
19022+ return atomic64_add_return_unchecked(-delta, ptr);
19023+}
19024+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
19025+
19026 u64 atomic64_inc_return(atomic64_t *ptr)
19027 {
19028 return atomic64_add_return(1, ptr);
19029 }
19030 EXPORT_SYMBOL(atomic64_inc_return);
19031
19032+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
19033+{
19034+ return atomic64_add_return_unchecked(1, ptr);
19035+}
19036+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
19037+
19038 u64 atomic64_dec_return(atomic64_t *ptr)
19039 {
19040 return atomic64_sub_return(1, ptr);
19041 }
19042 EXPORT_SYMBOL(atomic64_dec_return);
19043
19044+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
19045+{
19046+ return atomic64_sub_return_unchecked(1, ptr);
19047+}
19048+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
19049+
19050 /**
19051 * atomic64_add - add integer to atomic64 variable
19052 * @delta: integer value to add
19053@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
19054 EXPORT_SYMBOL(atomic64_add);
19055
19056 /**
19057+ * atomic64_add_unchecked - add integer to atomic64 variable
19058+ * @delta: integer value to add
19059+ * @ptr: pointer to type atomic64_unchecked_t
19060+ *
19061+ * Atomically adds @delta to @ptr.
19062+ */
19063+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19064+{
19065+ atomic64_add_return_unchecked(delta, ptr);
19066+}
19067+EXPORT_SYMBOL(atomic64_add_unchecked);
19068+
19069+/**
19070 * atomic64_sub - subtract the atomic64 variable
19071 * @delta: integer value to subtract
19072 * @ptr: pointer to type atomic64_t
19073@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
19074 EXPORT_SYMBOL(atomic64_sub);
19075
19076 /**
19077+ * atomic64_sub_unchecked - subtract the atomic64 variable
19078+ * @delta: integer value to subtract
19079+ * @ptr: pointer to type atomic64_unchecked_t
19080+ *
19081+ * Atomically subtracts @delta from @ptr.
19082+ */
19083+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19084+{
19085+ atomic64_add_unchecked(-delta, ptr);
19086+}
19087+EXPORT_SYMBOL(atomic64_sub_unchecked);
19088+
19089+/**
19090 * atomic64_sub_and_test - subtract value from variable and test result
19091 * @delta: integer value to subtract
19092 * @ptr: pointer to type atomic64_t
19093@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
19094 EXPORT_SYMBOL(atomic64_inc);
19095
19096 /**
19097+ * atomic64_inc_unchecked - increment atomic64 variable
19098+ * @ptr: pointer to type atomic64_unchecked_t
19099+ *
19100+ * Atomically increments @ptr by 1.
19101+ */
19102+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
19103+{
19104+ atomic64_add_unchecked(1, ptr);
19105+}
19106+EXPORT_SYMBOL(atomic64_inc_unchecked);
19107+
19108+/**
19109 * atomic64_dec - decrement atomic64 variable
19110 * @ptr: pointer to type atomic64_t
19111 *
19112@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
19113 EXPORT_SYMBOL(atomic64_dec);
19114
19115 /**
19116+ * atomic64_dec_unchecked - decrement atomic64 variable
19117+ * @ptr: pointer to type atomic64_unchecked_t
19118+ *
19119+ * Atomically decrements @ptr by 1.
19120+ */
19121+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
19122+{
19123+ atomic64_sub_unchecked(1, ptr);
19124+}
19125+EXPORT_SYMBOL(atomic64_dec_unchecked);
19126+
19127+/**
19128 * atomic64_dec_and_test - decrement and test
19129 * @ptr: pointer to type atomic64_t
19130 *
19131diff -urNp linux-2.6.32.45/arch/x86/lib/checksum_32.S linux-2.6.32.45/arch/x86/lib/checksum_32.S
19132--- linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
19133+++ linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
19134@@ -28,7 +28,8 @@
19135 #include <linux/linkage.h>
19136 #include <asm/dwarf2.h>
19137 #include <asm/errno.h>
19138-
19139+#include <asm/segment.h>
19140+
19141 /*
19142 * computes a partial checksum, e.g. for TCP/UDP fragments
19143 */
19144@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
19145
19146 #define ARGBASE 16
19147 #define FP 12
19148-
19149-ENTRY(csum_partial_copy_generic)
19150+
19151+ENTRY(csum_partial_copy_generic_to_user)
19152 CFI_STARTPROC
19153+
19154+#ifdef CONFIG_PAX_MEMORY_UDEREF
19155+ pushl %gs
19156+ CFI_ADJUST_CFA_OFFSET 4
19157+ popl %es
19158+ CFI_ADJUST_CFA_OFFSET -4
19159+ jmp csum_partial_copy_generic
19160+#endif
19161+
19162+ENTRY(csum_partial_copy_generic_from_user)
19163+
19164+#ifdef CONFIG_PAX_MEMORY_UDEREF
19165+ pushl %gs
19166+ CFI_ADJUST_CFA_OFFSET 4
19167+ popl %ds
19168+ CFI_ADJUST_CFA_OFFSET -4
19169+#endif
19170+
19171+ENTRY(csum_partial_copy_generic)
19172 subl $4,%esp
19173 CFI_ADJUST_CFA_OFFSET 4
19174 pushl %edi
19175@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
19176 jmp 4f
19177 SRC(1: movw (%esi), %bx )
19178 addl $2, %esi
19179-DST( movw %bx, (%edi) )
19180+DST( movw %bx, %es:(%edi) )
19181 addl $2, %edi
19182 addw %bx, %ax
19183 adcl $0, %eax
19184@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
19185 SRC(1: movl (%esi), %ebx )
19186 SRC( movl 4(%esi), %edx )
19187 adcl %ebx, %eax
19188-DST( movl %ebx, (%edi) )
19189+DST( movl %ebx, %es:(%edi) )
19190 adcl %edx, %eax
19191-DST( movl %edx, 4(%edi) )
19192+DST( movl %edx, %es:4(%edi) )
19193
19194 SRC( movl 8(%esi), %ebx )
19195 SRC( movl 12(%esi), %edx )
19196 adcl %ebx, %eax
19197-DST( movl %ebx, 8(%edi) )
19198+DST( movl %ebx, %es:8(%edi) )
19199 adcl %edx, %eax
19200-DST( movl %edx, 12(%edi) )
19201+DST( movl %edx, %es:12(%edi) )
19202
19203 SRC( movl 16(%esi), %ebx )
19204 SRC( movl 20(%esi), %edx )
19205 adcl %ebx, %eax
19206-DST( movl %ebx, 16(%edi) )
19207+DST( movl %ebx, %es:16(%edi) )
19208 adcl %edx, %eax
19209-DST( movl %edx, 20(%edi) )
19210+DST( movl %edx, %es:20(%edi) )
19211
19212 SRC( movl 24(%esi), %ebx )
19213 SRC( movl 28(%esi), %edx )
19214 adcl %ebx, %eax
19215-DST( movl %ebx, 24(%edi) )
19216+DST( movl %ebx, %es:24(%edi) )
19217 adcl %edx, %eax
19218-DST( movl %edx, 28(%edi) )
19219+DST( movl %edx, %es:28(%edi) )
19220
19221 lea 32(%esi), %esi
19222 lea 32(%edi), %edi
19223@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
19224 shrl $2, %edx # This clears CF
19225 SRC(3: movl (%esi), %ebx )
19226 adcl %ebx, %eax
19227-DST( movl %ebx, (%edi) )
19228+DST( movl %ebx, %es:(%edi) )
19229 lea 4(%esi), %esi
19230 lea 4(%edi), %edi
19231 dec %edx
19232@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
19233 jb 5f
19234 SRC( movw (%esi), %cx )
19235 leal 2(%esi), %esi
19236-DST( movw %cx, (%edi) )
19237+DST( movw %cx, %es:(%edi) )
19238 leal 2(%edi), %edi
19239 je 6f
19240 shll $16,%ecx
19241 SRC(5: movb (%esi), %cl )
19242-DST( movb %cl, (%edi) )
19243+DST( movb %cl, %es:(%edi) )
19244 6: addl %ecx, %eax
19245 adcl $0, %eax
19246 7:
19247@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
19248
19249 6001:
19250 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19251- movl $-EFAULT, (%ebx)
19252+ movl $-EFAULT, %ss:(%ebx)
19253
19254 # zero the complete destination - computing the rest
19255 # is too much work
19256@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
19257
19258 6002:
19259 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19260- movl $-EFAULT,(%ebx)
19261+ movl $-EFAULT,%ss:(%ebx)
19262 jmp 5000b
19263
19264 .previous
19265
19266+ pushl %ss
19267+ CFI_ADJUST_CFA_OFFSET 4
19268+ popl %ds
19269+ CFI_ADJUST_CFA_OFFSET -4
19270+ pushl %ss
19271+ CFI_ADJUST_CFA_OFFSET 4
19272+ popl %es
19273+ CFI_ADJUST_CFA_OFFSET -4
19274 popl %ebx
19275 CFI_ADJUST_CFA_OFFSET -4
19276 CFI_RESTORE ebx
19277@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
19278 CFI_ADJUST_CFA_OFFSET -4
19279 ret
19280 CFI_ENDPROC
19281-ENDPROC(csum_partial_copy_generic)
19282+ENDPROC(csum_partial_copy_generic_to_user)
19283
19284 #else
19285
19286 /* Version for PentiumII/PPro */
19287
19288 #define ROUND1(x) \
19289+ nop; nop; nop; \
19290 SRC(movl x(%esi), %ebx ) ; \
19291 addl %ebx, %eax ; \
19292- DST(movl %ebx, x(%edi) ) ;
19293+ DST(movl %ebx, %es:x(%edi)) ;
19294
19295 #define ROUND(x) \
19296+ nop; nop; nop; \
19297 SRC(movl x(%esi), %ebx ) ; \
19298 adcl %ebx, %eax ; \
19299- DST(movl %ebx, x(%edi) ) ;
19300+ DST(movl %ebx, %es:x(%edi)) ;
19301
19302 #define ARGBASE 12
19303-
19304-ENTRY(csum_partial_copy_generic)
19305+
19306+ENTRY(csum_partial_copy_generic_to_user)
19307 CFI_STARTPROC
19308+
19309+#ifdef CONFIG_PAX_MEMORY_UDEREF
19310+ pushl %gs
19311+ CFI_ADJUST_CFA_OFFSET 4
19312+ popl %es
19313+ CFI_ADJUST_CFA_OFFSET -4
19314+ jmp csum_partial_copy_generic
19315+#endif
19316+
19317+ENTRY(csum_partial_copy_generic_from_user)
19318+
19319+#ifdef CONFIG_PAX_MEMORY_UDEREF
19320+ pushl %gs
19321+ CFI_ADJUST_CFA_OFFSET 4
19322+ popl %ds
19323+ CFI_ADJUST_CFA_OFFSET -4
19324+#endif
19325+
19326+ENTRY(csum_partial_copy_generic)
19327 pushl %ebx
19328 CFI_ADJUST_CFA_OFFSET 4
19329 CFI_REL_OFFSET ebx, 0
19330@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
19331 subl %ebx, %edi
19332 lea -1(%esi),%edx
19333 andl $-32,%edx
19334- lea 3f(%ebx,%ebx), %ebx
19335+ lea 3f(%ebx,%ebx,2), %ebx
19336 testl %esi, %esi
19337 jmp *%ebx
19338 1: addl $64,%esi
19339@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
19340 jb 5f
19341 SRC( movw (%esi), %dx )
19342 leal 2(%esi), %esi
19343-DST( movw %dx, (%edi) )
19344+DST( movw %dx, %es:(%edi) )
19345 leal 2(%edi), %edi
19346 je 6f
19347 shll $16,%edx
19348 5:
19349 SRC( movb (%esi), %dl )
19350-DST( movb %dl, (%edi) )
19351+DST( movb %dl, %es:(%edi) )
19352 6: addl %edx, %eax
19353 adcl $0, %eax
19354 7:
19355 .section .fixup, "ax"
19356 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19357- movl $-EFAULT, (%ebx)
19358+ movl $-EFAULT, %ss:(%ebx)
19359 # zero the complete destination (computing the rest is too much work)
19360 movl ARGBASE+8(%esp),%edi # dst
19361 movl ARGBASE+12(%esp),%ecx # len
19362@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
19363 rep; stosb
19364 jmp 7b
19365 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19366- movl $-EFAULT, (%ebx)
19367+ movl $-EFAULT, %ss:(%ebx)
19368 jmp 7b
19369 .previous
19370
19371+#ifdef CONFIG_PAX_MEMORY_UDEREF
19372+ pushl %ss
19373+ CFI_ADJUST_CFA_OFFSET 4
19374+ popl %ds
19375+ CFI_ADJUST_CFA_OFFSET -4
19376+ pushl %ss
19377+ CFI_ADJUST_CFA_OFFSET 4
19378+ popl %es
19379+ CFI_ADJUST_CFA_OFFSET -4
19380+#endif
19381+
19382 popl %esi
19383 CFI_ADJUST_CFA_OFFSET -4
19384 CFI_RESTORE esi
19385@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
19386 CFI_RESTORE ebx
19387 ret
19388 CFI_ENDPROC
19389-ENDPROC(csum_partial_copy_generic)
19390+ENDPROC(csum_partial_copy_generic_to_user)
19391
19392 #undef ROUND
19393 #undef ROUND1
19394diff -urNp linux-2.6.32.45/arch/x86/lib/clear_page_64.S linux-2.6.32.45/arch/x86/lib/clear_page_64.S
19395--- linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
19396+++ linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
19397@@ -43,7 +43,7 @@ ENDPROC(clear_page)
19398
19399 #include <asm/cpufeature.h>
19400
19401- .section .altinstr_replacement,"ax"
19402+ .section .altinstr_replacement,"a"
19403 1: .byte 0xeb /* jmp <disp8> */
19404 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19405 2:
19406diff -urNp linux-2.6.32.45/arch/x86/lib/copy_page_64.S linux-2.6.32.45/arch/x86/lib/copy_page_64.S
19407--- linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
19408+++ linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
19409@@ -104,7 +104,7 @@ ENDPROC(copy_page)
19410
19411 #include <asm/cpufeature.h>
19412
19413- .section .altinstr_replacement,"ax"
19414+ .section .altinstr_replacement,"a"
19415 1: .byte 0xeb /* jmp <disp8> */
19416 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19417 2:
19418diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_64.S linux-2.6.32.45/arch/x86/lib/copy_user_64.S
19419--- linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
19420+++ linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
19421@@ -15,13 +15,14 @@
19422 #include <asm/asm-offsets.h>
19423 #include <asm/thread_info.h>
19424 #include <asm/cpufeature.h>
19425+#include <asm/pgtable.h>
19426
19427 .macro ALTERNATIVE_JUMP feature,orig,alt
19428 0:
19429 .byte 0xe9 /* 32bit jump */
19430 .long \orig-1f /* by default jump to orig */
19431 1:
19432- .section .altinstr_replacement,"ax"
19433+ .section .altinstr_replacement,"a"
19434 2: .byte 0xe9 /* near jump with 32bit immediate */
19435 .long \alt-1b /* offset */ /* or alternatively to alt */
19436 .previous
19437@@ -64,49 +65,19 @@
19438 #endif
19439 .endm
19440
19441-/* Standard copy_to_user with segment limit checking */
19442-ENTRY(copy_to_user)
19443- CFI_STARTPROC
19444- GET_THREAD_INFO(%rax)
19445- movq %rdi,%rcx
19446- addq %rdx,%rcx
19447- jc bad_to_user
19448- cmpq TI_addr_limit(%rax),%rcx
19449- ja bad_to_user
19450- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19451- CFI_ENDPROC
19452-ENDPROC(copy_to_user)
19453-
19454-/* Standard copy_from_user with segment limit checking */
19455-ENTRY(copy_from_user)
19456- CFI_STARTPROC
19457- GET_THREAD_INFO(%rax)
19458- movq %rsi,%rcx
19459- addq %rdx,%rcx
19460- jc bad_from_user
19461- cmpq TI_addr_limit(%rax),%rcx
19462- ja bad_from_user
19463- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19464- CFI_ENDPROC
19465-ENDPROC(copy_from_user)
19466-
19467 ENTRY(copy_user_generic)
19468 CFI_STARTPROC
19469 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19470 CFI_ENDPROC
19471 ENDPROC(copy_user_generic)
19472
19473-ENTRY(__copy_from_user_inatomic)
19474- CFI_STARTPROC
19475- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19476- CFI_ENDPROC
19477-ENDPROC(__copy_from_user_inatomic)
19478-
19479 .section .fixup,"ax"
19480 /* must zero dest */
19481 ENTRY(bad_from_user)
19482 bad_from_user:
19483 CFI_STARTPROC
19484+ testl %edx,%edx
19485+ js bad_to_user
19486 movl %edx,%ecx
19487 xorl %eax,%eax
19488 rep
19489diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S
19490--- linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19491+++ linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19492@@ -14,6 +14,7 @@
19493 #include <asm/current.h>
19494 #include <asm/asm-offsets.h>
19495 #include <asm/thread_info.h>
19496+#include <asm/pgtable.h>
19497
19498 .macro ALIGN_DESTINATION
19499 #ifdef FIX_ALIGNMENT
19500@@ -50,6 +51,15 @@
19501 */
19502 ENTRY(__copy_user_nocache)
19503 CFI_STARTPROC
19504+
19505+#ifdef CONFIG_PAX_MEMORY_UDEREF
19506+ mov $PAX_USER_SHADOW_BASE,%rcx
19507+ cmp %rcx,%rsi
19508+ jae 1f
19509+ add %rcx,%rsi
19510+1:
19511+#endif
19512+
19513 cmpl $8,%edx
19514 jb 20f /* less then 8 bytes, go to byte copy loop */
19515 ALIGN_DESTINATION
19516diff -urNp linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c
19517--- linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19518+++ linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19519@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19520 len -= 2;
19521 }
19522 }
19523+
19524+#ifdef CONFIG_PAX_MEMORY_UDEREF
19525+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19526+ src += PAX_USER_SHADOW_BASE;
19527+#endif
19528+
19529 isum = csum_partial_copy_generic((__force const void *)src,
19530 dst, len, isum, errp, NULL);
19531 if (unlikely(*errp))
19532@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19533 }
19534
19535 *errp = 0;
19536+
19537+#ifdef CONFIG_PAX_MEMORY_UDEREF
19538+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19539+ dst += PAX_USER_SHADOW_BASE;
19540+#endif
19541+
19542 return csum_partial_copy_generic(src, (void __force *)dst,
19543 len, isum, NULL, errp);
19544 }
19545diff -urNp linux-2.6.32.45/arch/x86/lib/getuser.S linux-2.6.32.45/arch/x86/lib/getuser.S
19546--- linux-2.6.32.45/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19547+++ linux-2.6.32.45/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19548@@ -33,14 +33,35 @@
19549 #include <asm/asm-offsets.h>
19550 #include <asm/thread_info.h>
19551 #include <asm/asm.h>
19552+#include <asm/segment.h>
19553+#include <asm/pgtable.h>
19554+
19555+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19556+#define __copyuser_seg gs;
19557+#else
19558+#define __copyuser_seg
19559+#endif
19560
19561 .text
19562 ENTRY(__get_user_1)
19563 CFI_STARTPROC
19564+
19565+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19566 GET_THREAD_INFO(%_ASM_DX)
19567 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19568 jae bad_get_user
19569-1: movzb (%_ASM_AX),%edx
19570+
19571+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19572+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19573+ cmp %_ASM_DX,%_ASM_AX
19574+ jae 1234f
19575+ add %_ASM_DX,%_ASM_AX
19576+1234:
19577+#endif
19578+
19579+#endif
19580+
19581+1: __copyuser_seg movzb (%_ASM_AX),%edx
19582 xor %eax,%eax
19583 ret
19584 CFI_ENDPROC
19585@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19586 ENTRY(__get_user_2)
19587 CFI_STARTPROC
19588 add $1,%_ASM_AX
19589+
19590+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19591 jc bad_get_user
19592 GET_THREAD_INFO(%_ASM_DX)
19593 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19594 jae bad_get_user
19595-2: movzwl -1(%_ASM_AX),%edx
19596+
19597+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19598+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19599+ cmp %_ASM_DX,%_ASM_AX
19600+ jae 1234f
19601+ add %_ASM_DX,%_ASM_AX
19602+1234:
19603+#endif
19604+
19605+#endif
19606+
19607+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19608 xor %eax,%eax
19609 ret
19610 CFI_ENDPROC
19611@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19612 ENTRY(__get_user_4)
19613 CFI_STARTPROC
19614 add $3,%_ASM_AX
19615+
19616+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19617 jc bad_get_user
19618 GET_THREAD_INFO(%_ASM_DX)
19619 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19620 jae bad_get_user
19621-3: mov -3(%_ASM_AX),%edx
19622+
19623+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19624+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19625+ cmp %_ASM_DX,%_ASM_AX
19626+ jae 1234f
19627+ add %_ASM_DX,%_ASM_AX
19628+1234:
19629+#endif
19630+
19631+#endif
19632+
19633+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19634 xor %eax,%eax
19635 ret
19636 CFI_ENDPROC
19637@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19638 GET_THREAD_INFO(%_ASM_DX)
19639 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19640 jae bad_get_user
19641+
19642+#ifdef CONFIG_PAX_MEMORY_UDEREF
19643+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19644+ cmp %_ASM_DX,%_ASM_AX
19645+ jae 1234f
19646+ add %_ASM_DX,%_ASM_AX
19647+1234:
19648+#endif
19649+
19650 4: movq -7(%_ASM_AX),%_ASM_DX
19651 xor %eax,%eax
19652 ret
19653diff -urNp linux-2.6.32.45/arch/x86/lib/memcpy_64.S linux-2.6.32.45/arch/x86/lib/memcpy_64.S
19654--- linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19655+++ linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19656@@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19657 * It is also a lot simpler. Use this when possible:
19658 */
19659
19660- .section .altinstr_replacement, "ax"
19661+ .section .altinstr_replacement, "a"
19662 1: .byte 0xeb /* jmp <disp8> */
19663 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19664 2:
19665diff -urNp linux-2.6.32.45/arch/x86/lib/memset_64.S linux-2.6.32.45/arch/x86/lib/memset_64.S
19666--- linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19667+++ linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19668@@ -118,7 +118,7 @@ ENDPROC(__memset)
19669
19670 #include <asm/cpufeature.h>
19671
19672- .section .altinstr_replacement,"ax"
19673+ .section .altinstr_replacement,"a"
19674 1: .byte 0xeb /* jmp <disp8> */
19675 .byte (memset_c - memset) - (2f - 1b) /* offset */
19676 2:
19677diff -urNp linux-2.6.32.45/arch/x86/lib/mmx_32.c linux-2.6.32.45/arch/x86/lib/mmx_32.c
19678--- linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19679+++ linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19680@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19681 {
19682 void *p;
19683 int i;
19684+ unsigned long cr0;
19685
19686 if (unlikely(in_interrupt()))
19687 return __memcpy(to, from, len);
19688@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19689 kernel_fpu_begin();
19690
19691 __asm__ __volatile__ (
19692- "1: prefetch (%0)\n" /* This set is 28 bytes */
19693- " prefetch 64(%0)\n"
19694- " prefetch 128(%0)\n"
19695- " prefetch 192(%0)\n"
19696- " prefetch 256(%0)\n"
19697+ "1: prefetch (%1)\n" /* This set is 28 bytes */
19698+ " prefetch 64(%1)\n"
19699+ " prefetch 128(%1)\n"
19700+ " prefetch 192(%1)\n"
19701+ " prefetch 256(%1)\n"
19702 "2: \n"
19703 ".section .fixup, \"ax\"\n"
19704- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19705+ "3: \n"
19706+
19707+#ifdef CONFIG_PAX_KERNEXEC
19708+ " movl %%cr0, %0\n"
19709+ " movl %0, %%eax\n"
19710+ " andl $0xFFFEFFFF, %%eax\n"
19711+ " movl %%eax, %%cr0\n"
19712+#endif
19713+
19714+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19715+
19716+#ifdef CONFIG_PAX_KERNEXEC
19717+ " movl %0, %%cr0\n"
19718+#endif
19719+
19720 " jmp 2b\n"
19721 ".previous\n"
19722 _ASM_EXTABLE(1b, 3b)
19723- : : "r" (from));
19724+ : "=&r" (cr0) : "r" (from) : "ax");
19725
19726 for ( ; i > 5; i--) {
19727 __asm__ __volatile__ (
19728- "1: prefetch 320(%0)\n"
19729- "2: movq (%0), %%mm0\n"
19730- " movq 8(%0), %%mm1\n"
19731- " movq 16(%0), %%mm2\n"
19732- " movq 24(%0), %%mm3\n"
19733- " movq %%mm0, (%1)\n"
19734- " movq %%mm1, 8(%1)\n"
19735- " movq %%mm2, 16(%1)\n"
19736- " movq %%mm3, 24(%1)\n"
19737- " movq 32(%0), %%mm0\n"
19738- " movq 40(%0), %%mm1\n"
19739- " movq 48(%0), %%mm2\n"
19740- " movq 56(%0), %%mm3\n"
19741- " movq %%mm0, 32(%1)\n"
19742- " movq %%mm1, 40(%1)\n"
19743- " movq %%mm2, 48(%1)\n"
19744- " movq %%mm3, 56(%1)\n"
19745+ "1: prefetch 320(%1)\n"
19746+ "2: movq (%1), %%mm0\n"
19747+ " movq 8(%1), %%mm1\n"
19748+ " movq 16(%1), %%mm2\n"
19749+ " movq 24(%1), %%mm3\n"
19750+ " movq %%mm0, (%2)\n"
19751+ " movq %%mm1, 8(%2)\n"
19752+ " movq %%mm2, 16(%2)\n"
19753+ " movq %%mm3, 24(%2)\n"
19754+ " movq 32(%1), %%mm0\n"
19755+ " movq 40(%1), %%mm1\n"
19756+ " movq 48(%1), %%mm2\n"
19757+ " movq 56(%1), %%mm3\n"
19758+ " movq %%mm0, 32(%2)\n"
19759+ " movq %%mm1, 40(%2)\n"
19760+ " movq %%mm2, 48(%2)\n"
19761+ " movq %%mm3, 56(%2)\n"
19762 ".section .fixup, \"ax\"\n"
19763- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19764+ "3:\n"
19765+
19766+#ifdef CONFIG_PAX_KERNEXEC
19767+ " movl %%cr0, %0\n"
19768+ " movl %0, %%eax\n"
19769+ " andl $0xFFFEFFFF, %%eax\n"
19770+ " movl %%eax, %%cr0\n"
19771+#endif
19772+
19773+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19774+
19775+#ifdef CONFIG_PAX_KERNEXEC
19776+ " movl %0, %%cr0\n"
19777+#endif
19778+
19779 " jmp 2b\n"
19780 ".previous\n"
19781 _ASM_EXTABLE(1b, 3b)
19782- : : "r" (from), "r" (to) : "memory");
19783+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19784
19785 from += 64;
19786 to += 64;
19787@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19788 static void fast_copy_page(void *to, void *from)
19789 {
19790 int i;
19791+ unsigned long cr0;
19792
19793 kernel_fpu_begin();
19794
19795@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19796 * but that is for later. -AV
19797 */
19798 __asm__ __volatile__(
19799- "1: prefetch (%0)\n"
19800- " prefetch 64(%0)\n"
19801- " prefetch 128(%0)\n"
19802- " prefetch 192(%0)\n"
19803- " prefetch 256(%0)\n"
19804+ "1: prefetch (%1)\n"
19805+ " prefetch 64(%1)\n"
19806+ " prefetch 128(%1)\n"
19807+ " prefetch 192(%1)\n"
19808+ " prefetch 256(%1)\n"
19809 "2: \n"
19810 ".section .fixup, \"ax\"\n"
19811- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19812+ "3: \n"
19813+
19814+#ifdef CONFIG_PAX_KERNEXEC
19815+ " movl %%cr0, %0\n"
19816+ " movl %0, %%eax\n"
19817+ " andl $0xFFFEFFFF, %%eax\n"
19818+ " movl %%eax, %%cr0\n"
19819+#endif
19820+
19821+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19822+
19823+#ifdef CONFIG_PAX_KERNEXEC
19824+ " movl %0, %%cr0\n"
19825+#endif
19826+
19827 " jmp 2b\n"
19828 ".previous\n"
19829- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19830+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19831
19832 for (i = 0; i < (4096-320)/64; i++) {
19833 __asm__ __volatile__ (
19834- "1: prefetch 320(%0)\n"
19835- "2: movq (%0), %%mm0\n"
19836- " movntq %%mm0, (%1)\n"
19837- " movq 8(%0), %%mm1\n"
19838- " movntq %%mm1, 8(%1)\n"
19839- " movq 16(%0), %%mm2\n"
19840- " movntq %%mm2, 16(%1)\n"
19841- " movq 24(%0), %%mm3\n"
19842- " movntq %%mm3, 24(%1)\n"
19843- " movq 32(%0), %%mm4\n"
19844- " movntq %%mm4, 32(%1)\n"
19845- " movq 40(%0), %%mm5\n"
19846- " movntq %%mm5, 40(%1)\n"
19847- " movq 48(%0), %%mm6\n"
19848- " movntq %%mm6, 48(%1)\n"
19849- " movq 56(%0), %%mm7\n"
19850- " movntq %%mm7, 56(%1)\n"
19851+ "1: prefetch 320(%1)\n"
19852+ "2: movq (%1), %%mm0\n"
19853+ " movntq %%mm0, (%2)\n"
19854+ " movq 8(%1), %%mm1\n"
19855+ " movntq %%mm1, 8(%2)\n"
19856+ " movq 16(%1), %%mm2\n"
19857+ " movntq %%mm2, 16(%2)\n"
19858+ " movq 24(%1), %%mm3\n"
19859+ " movntq %%mm3, 24(%2)\n"
19860+ " movq 32(%1), %%mm4\n"
19861+ " movntq %%mm4, 32(%2)\n"
19862+ " movq 40(%1), %%mm5\n"
19863+ " movntq %%mm5, 40(%2)\n"
19864+ " movq 48(%1), %%mm6\n"
19865+ " movntq %%mm6, 48(%2)\n"
19866+ " movq 56(%1), %%mm7\n"
19867+ " movntq %%mm7, 56(%2)\n"
19868 ".section .fixup, \"ax\"\n"
19869- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19870+ "3:\n"
19871+
19872+#ifdef CONFIG_PAX_KERNEXEC
19873+ " movl %%cr0, %0\n"
19874+ " movl %0, %%eax\n"
19875+ " andl $0xFFFEFFFF, %%eax\n"
19876+ " movl %%eax, %%cr0\n"
19877+#endif
19878+
19879+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19880+
19881+#ifdef CONFIG_PAX_KERNEXEC
19882+ " movl %0, %%cr0\n"
19883+#endif
19884+
19885 " jmp 2b\n"
19886 ".previous\n"
19887- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19888+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19889
19890 from += 64;
19891 to += 64;
19892@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19893 static void fast_copy_page(void *to, void *from)
19894 {
19895 int i;
19896+ unsigned long cr0;
19897
19898 kernel_fpu_begin();
19899
19900 __asm__ __volatile__ (
19901- "1: prefetch (%0)\n"
19902- " prefetch 64(%0)\n"
19903- " prefetch 128(%0)\n"
19904- " prefetch 192(%0)\n"
19905- " prefetch 256(%0)\n"
19906+ "1: prefetch (%1)\n"
19907+ " prefetch 64(%1)\n"
19908+ " prefetch 128(%1)\n"
19909+ " prefetch 192(%1)\n"
19910+ " prefetch 256(%1)\n"
19911 "2: \n"
19912 ".section .fixup, \"ax\"\n"
19913- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19914+ "3: \n"
19915+
19916+#ifdef CONFIG_PAX_KERNEXEC
19917+ " movl %%cr0, %0\n"
19918+ " movl %0, %%eax\n"
19919+ " andl $0xFFFEFFFF, %%eax\n"
19920+ " movl %%eax, %%cr0\n"
19921+#endif
19922+
19923+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19924+
19925+#ifdef CONFIG_PAX_KERNEXEC
19926+ " movl %0, %%cr0\n"
19927+#endif
19928+
19929 " jmp 2b\n"
19930 ".previous\n"
19931- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19932+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19933
19934 for (i = 0; i < 4096/64; i++) {
19935 __asm__ __volatile__ (
19936- "1: prefetch 320(%0)\n"
19937- "2: movq (%0), %%mm0\n"
19938- " movq 8(%0), %%mm1\n"
19939- " movq 16(%0), %%mm2\n"
19940- " movq 24(%0), %%mm3\n"
19941- " movq %%mm0, (%1)\n"
19942- " movq %%mm1, 8(%1)\n"
19943- " movq %%mm2, 16(%1)\n"
19944- " movq %%mm3, 24(%1)\n"
19945- " movq 32(%0), %%mm0\n"
19946- " movq 40(%0), %%mm1\n"
19947- " movq 48(%0), %%mm2\n"
19948- " movq 56(%0), %%mm3\n"
19949- " movq %%mm0, 32(%1)\n"
19950- " movq %%mm1, 40(%1)\n"
19951- " movq %%mm2, 48(%1)\n"
19952- " movq %%mm3, 56(%1)\n"
19953+ "1: prefetch 320(%1)\n"
19954+ "2: movq (%1), %%mm0\n"
19955+ " movq 8(%1), %%mm1\n"
19956+ " movq 16(%1), %%mm2\n"
19957+ " movq 24(%1), %%mm3\n"
19958+ " movq %%mm0, (%2)\n"
19959+ " movq %%mm1, 8(%2)\n"
19960+ " movq %%mm2, 16(%2)\n"
19961+ " movq %%mm3, 24(%2)\n"
19962+ " movq 32(%1), %%mm0\n"
19963+ " movq 40(%1), %%mm1\n"
19964+ " movq 48(%1), %%mm2\n"
19965+ " movq 56(%1), %%mm3\n"
19966+ " movq %%mm0, 32(%2)\n"
19967+ " movq %%mm1, 40(%2)\n"
19968+ " movq %%mm2, 48(%2)\n"
19969+ " movq %%mm3, 56(%2)\n"
19970 ".section .fixup, \"ax\"\n"
19971- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19972+ "3:\n"
19973+
19974+#ifdef CONFIG_PAX_KERNEXEC
19975+ " movl %%cr0, %0\n"
19976+ " movl %0, %%eax\n"
19977+ " andl $0xFFFEFFFF, %%eax\n"
19978+ " movl %%eax, %%cr0\n"
19979+#endif
19980+
19981+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19982+
19983+#ifdef CONFIG_PAX_KERNEXEC
19984+ " movl %0, %%cr0\n"
19985+#endif
19986+
19987 " jmp 2b\n"
19988 ".previous\n"
19989 _ASM_EXTABLE(1b, 3b)
19990- : : "r" (from), "r" (to) : "memory");
19991+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19992
19993 from += 64;
19994 to += 64;
19995diff -urNp linux-2.6.32.45/arch/x86/lib/putuser.S linux-2.6.32.45/arch/x86/lib/putuser.S
19996--- linux-2.6.32.45/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19997+++ linux-2.6.32.45/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19998@@ -15,7 +15,8 @@
19999 #include <asm/thread_info.h>
20000 #include <asm/errno.h>
20001 #include <asm/asm.h>
20002-
20003+#include <asm/segment.h>
20004+#include <asm/pgtable.h>
20005
20006 /*
20007 * __put_user_X
20008@@ -29,52 +30,119 @@
20009 * as they get called from within inline assembly.
20010 */
20011
20012-#define ENTER CFI_STARTPROC ; \
20013- GET_THREAD_INFO(%_ASM_BX)
20014+#define ENTER CFI_STARTPROC
20015 #define EXIT ret ; \
20016 CFI_ENDPROC
20017
20018+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20019+#define _DEST %_ASM_CX,%_ASM_BX
20020+#else
20021+#define _DEST %_ASM_CX
20022+#endif
20023+
20024+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20025+#define __copyuser_seg gs;
20026+#else
20027+#define __copyuser_seg
20028+#endif
20029+
20030 .text
20031 ENTRY(__put_user_1)
20032 ENTER
20033+
20034+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20035+ GET_THREAD_INFO(%_ASM_BX)
20036 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20037 jae bad_put_user
20038-1: movb %al,(%_ASM_CX)
20039+
20040+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20041+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20042+ cmp %_ASM_BX,%_ASM_CX
20043+ jb 1234f
20044+ xor %ebx,%ebx
20045+1234:
20046+#endif
20047+
20048+#endif
20049+
20050+1: __copyuser_seg movb %al,(_DEST)
20051 xor %eax,%eax
20052 EXIT
20053 ENDPROC(__put_user_1)
20054
20055 ENTRY(__put_user_2)
20056 ENTER
20057+
20058+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20059+ GET_THREAD_INFO(%_ASM_BX)
20060 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20061 sub $1,%_ASM_BX
20062 cmp %_ASM_BX,%_ASM_CX
20063 jae bad_put_user
20064-2: movw %ax,(%_ASM_CX)
20065+
20066+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20067+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20068+ cmp %_ASM_BX,%_ASM_CX
20069+ jb 1234f
20070+ xor %ebx,%ebx
20071+1234:
20072+#endif
20073+
20074+#endif
20075+
20076+2: __copyuser_seg movw %ax,(_DEST)
20077 xor %eax,%eax
20078 EXIT
20079 ENDPROC(__put_user_2)
20080
20081 ENTRY(__put_user_4)
20082 ENTER
20083+
20084+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20085+ GET_THREAD_INFO(%_ASM_BX)
20086 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20087 sub $3,%_ASM_BX
20088 cmp %_ASM_BX,%_ASM_CX
20089 jae bad_put_user
20090-3: movl %eax,(%_ASM_CX)
20091+
20092+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20093+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20094+ cmp %_ASM_BX,%_ASM_CX
20095+ jb 1234f
20096+ xor %ebx,%ebx
20097+1234:
20098+#endif
20099+
20100+#endif
20101+
20102+3: __copyuser_seg movl %eax,(_DEST)
20103 xor %eax,%eax
20104 EXIT
20105 ENDPROC(__put_user_4)
20106
20107 ENTRY(__put_user_8)
20108 ENTER
20109+
20110+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20111+ GET_THREAD_INFO(%_ASM_BX)
20112 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20113 sub $7,%_ASM_BX
20114 cmp %_ASM_BX,%_ASM_CX
20115 jae bad_put_user
20116-4: mov %_ASM_AX,(%_ASM_CX)
20117+
20118+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20119+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20120+ cmp %_ASM_BX,%_ASM_CX
20121+ jb 1234f
20122+ xor %ebx,%ebx
20123+1234:
20124+#endif
20125+
20126+#endif
20127+
20128+4: __copyuser_seg mov %_ASM_AX,(_DEST)
20129 #ifdef CONFIG_X86_32
20130-5: movl %edx,4(%_ASM_CX)
20131+5: __copyuser_seg movl %edx,4(_DEST)
20132 #endif
20133 xor %eax,%eax
20134 EXIT
20135diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_32.c linux-2.6.32.45/arch/x86/lib/usercopy_32.c
20136--- linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
20137+++ linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
20138@@ -43,7 +43,7 @@ do { \
20139 __asm__ __volatile__( \
20140 " testl %1,%1\n" \
20141 " jz 2f\n" \
20142- "0: lodsb\n" \
20143+ "0: "__copyuser_seg"lodsb\n" \
20144 " stosb\n" \
20145 " testb %%al,%%al\n" \
20146 " jz 1f\n" \
20147@@ -128,10 +128,12 @@ do { \
20148 int __d0; \
20149 might_fault(); \
20150 __asm__ __volatile__( \
20151+ __COPYUSER_SET_ES \
20152 "0: rep; stosl\n" \
20153 " movl %2,%0\n" \
20154 "1: rep; stosb\n" \
20155 "2:\n" \
20156+ __COPYUSER_RESTORE_ES \
20157 ".section .fixup,\"ax\"\n" \
20158 "3: lea 0(%2,%0,4),%0\n" \
20159 " jmp 2b\n" \
20160@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
20161 might_fault();
20162
20163 __asm__ __volatile__(
20164+ __COPYUSER_SET_ES
20165 " testl %0, %0\n"
20166 " jz 3f\n"
20167 " andl %0,%%ecx\n"
20168@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
20169 " subl %%ecx,%0\n"
20170 " addl %0,%%eax\n"
20171 "1:\n"
20172+ __COPYUSER_RESTORE_ES
20173 ".section .fixup,\"ax\"\n"
20174 "2: xorl %%eax,%%eax\n"
20175 " jmp 1b\n"
20176@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20177
20178 #ifdef CONFIG_X86_INTEL_USERCOPY
20179 static unsigned long
20180-__copy_user_intel(void __user *to, const void *from, unsigned long size)
20181+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20182 {
20183 int d0, d1;
20184 __asm__ __volatile__(
20185@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
20186 " .align 2,0x90\n"
20187 "3: movl 0(%4), %%eax\n"
20188 "4: movl 4(%4), %%edx\n"
20189- "5: movl %%eax, 0(%3)\n"
20190- "6: movl %%edx, 4(%3)\n"
20191+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20192+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20193 "7: movl 8(%4), %%eax\n"
20194 "8: movl 12(%4),%%edx\n"
20195- "9: movl %%eax, 8(%3)\n"
20196- "10: movl %%edx, 12(%3)\n"
20197+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20198+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20199 "11: movl 16(%4), %%eax\n"
20200 "12: movl 20(%4), %%edx\n"
20201- "13: movl %%eax, 16(%3)\n"
20202- "14: movl %%edx, 20(%3)\n"
20203+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20204+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20205 "15: movl 24(%4), %%eax\n"
20206 "16: movl 28(%4), %%edx\n"
20207- "17: movl %%eax, 24(%3)\n"
20208- "18: movl %%edx, 28(%3)\n"
20209+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20210+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20211 "19: movl 32(%4), %%eax\n"
20212 "20: movl 36(%4), %%edx\n"
20213- "21: movl %%eax, 32(%3)\n"
20214- "22: movl %%edx, 36(%3)\n"
20215+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20216+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20217 "23: movl 40(%4), %%eax\n"
20218 "24: movl 44(%4), %%edx\n"
20219- "25: movl %%eax, 40(%3)\n"
20220- "26: movl %%edx, 44(%3)\n"
20221+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20222+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20223 "27: movl 48(%4), %%eax\n"
20224 "28: movl 52(%4), %%edx\n"
20225- "29: movl %%eax, 48(%3)\n"
20226- "30: movl %%edx, 52(%3)\n"
20227+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20228+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20229 "31: movl 56(%4), %%eax\n"
20230 "32: movl 60(%4), %%edx\n"
20231- "33: movl %%eax, 56(%3)\n"
20232- "34: movl %%edx, 60(%3)\n"
20233+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20234+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20235 " addl $-64, %0\n"
20236 " addl $64, %4\n"
20237 " addl $64, %3\n"
20238@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
20239 " shrl $2, %0\n"
20240 " andl $3, %%eax\n"
20241 " cld\n"
20242+ __COPYUSER_SET_ES
20243 "99: rep; movsl\n"
20244 "36: movl %%eax, %0\n"
20245 "37: rep; movsb\n"
20246 "100:\n"
20247+ __COPYUSER_RESTORE_ES
20248+ ".section .fixup,\"ax\"\n"
20249+ "101: lea 0(%%eax,%0,4),%0\n"
20250+ " jmp 100b\n"
20251+ ".previous\n"
20252+ ".section __ex_table,\"a\"\n"
20253+ " .align 4\n"
20254+ " .long 1b,100b\n"
20255+ " .long 2b,100b\n"
20256+ " .long 3b,100b\n"
20257+ " .long 4b,100b\n"
20258+ " .long 5b,100b\n"
20259+ " .long 6b,100b\n"
20260+ " .long 7b,100b\n"
20261+ " .long 8b,100b\n"
20262+ " .long 9b,100b\n"
20263+ " .long 10b,100b\n"
20264+ " .long 11b,100b\n"
20265+ " .long 12b,100b\n"
20266+ " .long 13b,100b\n"
20267+ " .long 14b,100b\n"
20268+ " .long 15b,100b\n"
20269+ " .long 16b,100b\n"
20270+ " .long 17b,100b\n"
20271+ " .long 18b,100b\n"
20272+ " .long 19b,100b\n"
20273+ " .long 20b,100b\n"
20274+ " .long 21b,100b\n"
20275+ " .long 22b,100b\n"
20276+ " .long 23b,100b\n"
20277+ " .long 24b,100b\n"
20278+ " .long 25b,100b\n"
20279+ " .long 26b,100b\n"
20280+ " .long 27b,100b\n"
20281+ " .long 28b,100b\n"
20282+ " .long 29b,100b\n"
20283+ " .long 30b,100b\n"
20284+ " .long 31b,100b\n"
20285+ " .long 32b,100b\n"
20286+ " .long 33b,100b\n"
20287+ " .long 34b,100b\n"
20288+ " .long 35b,100b\n"
20289+ " .long 36b,100b\n"
20290+ " .long 37b,100b\n"
20291+ " .long 99b,101b\n"
20292+ ".previous"
20293+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20294+ : "1"(to), "2"(from), "0"(size)
20295+ : "eax", "edx", "memory");
20296+ return size;
20297+}
20298+
20299+static unsigned long
20300+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20301+{
20302+ int d0, d1;
20303+ __asm__ __volatile__(
20304+ " .align 2,0x90\n"
20305+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20306+ " cmpl $67, %0\n"
20307+ " jbe 3f\n"
20308+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20309+ " .align 2,0x90\n"
20310+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20311+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20312+ "5: movl %%eax, 0(%3)\n"
20313+ "6: movl %%edx, 4(%3)\n"
20314+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20315+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20316+ "9: movl %%eax, 8(%3)\n"
20317+ "10: movl %%edx, 12(%3)\n"
20318+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20319+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20320+ "13: movl %%eax, 16(%3)\n"
20321+ "14: movl %%edx, 20(%3)\n"
20322+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20323+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20324+ "17: movl %%eax, 24(%3)\n"
20325+ "18: movl %%edx, 28(%3)\n"
20326+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20327+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20328+ "21: movl %%eax, 32(%3)\n"
20329+ "22: movl %%edx, 36(%3)\n"
20330+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20331+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20332+ "25: movl %%eax, 40(%3)\n"
20333+ "26: movl %%edx, 44(%3)\n"
20334+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20335+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20336+ "29: movl %%eax, 48(%3)\n"
20337+ "30: movl %%edx, 52(%3)\n"
20338+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20339+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20340+ "33: movl %%eax, 56(%3)\n"
20341+ "34: movl %%edx, 60(%3)\n"
20342+ " addl $-64, %0\n"
20343+ " addl $64, %4\n"
20344+ " addl $64, %3\n"
20345+ " cmpl $63, %0\n"
20346+ " ja 1b\n"
20347+ "35: movl %0, %%eax\n"
20348+ " shrl $2, %0\n"
20349+ " andl $3, %%eax\n"
20350+ " cld\n"
20351+ "99: rep; "__copyuser_seg" movsl\n"
20352+ "36: movl %%eax, %0\n"
20353+ "37: rep; "__copyuser_seg" movsb\n"
20354+ "100:\n"
20355 ".section .fixup,\"ax\"\n"
20356 "101: lea 0(%%eax,%0,4),%0\n"
20357 " jmp 100b\n"
20358@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
20359 int d0, d1;
20360 __asm__ __volatile__(
20361 " .align 2,0x90\n"
20362- "0: movl 32(%4), %%eax\n"
20363+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20364 " cmpl $67, %0\n"
20365 " jbe 2f\n"
20366- "1: movl 64(%4), %%eax\n"
20367+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20368 " .align 2,0x90\n"
20369- "2: movl 0(%4), %%eax\n"
20370- "21: movl 4(%4), %%edx\n"
20371+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20372+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20373 " movl %%eax, 0(%3)\n"
20374 " movl %%edx, 4(%3)\n"
20375- "3: movl 8(%4), %%eax\n"
20376- "31: movl 12(%4),%%edx\n"
20377+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20378+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20379 " movl %%eax, 8(%3)\n"
20380 " movl %%edx, 12(%3)\n"
20381- "4: movl 16(%4), %%eax\n"
20382- "41: movl 20(%4), %%edx\n"
20383+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20384+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20385 " movl %%eax, 16(%3)\n"
20386 " movl %%edx, 20(%3)\n"
20387- "10: movl 24(%4), %%eax\n"
20388- "51: movl 28(%4), %%edx\n"
20389+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20390+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20391 " movl %%eax, 24(%3)\n"
20392 " movl %%edx, 28(%3)\n"
20393- "11: movl 32(%4), %%eax\n"
20394- "61: movl 36(%4), %%edx\n"
20395+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20396+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20397 " movl %%eax, 32(%3)\n"
20398 " movl %%edx, 36(%3)\n"
20399- "12: movl 40(%4), %%eax\n"
20400- "71: movl 44(%4), %%edx\n"
20401+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20402+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20403 " movl %%eax, 40(%3)\n"
20404 " movl %%edx, 44(%3)\n"
20405- "13: movl 48(%4), %%eax\n"
20406- "81: movl 52(%4), %%edx\n"
20407+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20408+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20409 " movl %%eax, 48(%3)\n"
20410 " movl %%edx, 52(%3)\n"
20411- "14: movl 56(%4), %%eax\n"
20412- "91: movl 60(%4), %%edx\n"
20413+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20414+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20415 " movl %%eax, 56(%3)\n"
20416 " movl %%edx, 60(%3)\n"
20417 " addl $-64, %0\n"
20418@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
20419 " shrl $2, %0\n"
20420 " andl $3, %%eax\n"
20421 " cld\n"
20422- "6: rep; movsl\n"
20423+ "6: rep; "__copyuser_seg" movsl\n"
20424 " movl %%eax,%0\n"
20425- "7: rep; movsb\n"
20426+ "7: rep; "__copyuser_seg" movsb\n"
20427 "8:\n"
20428 ".section .fixup,\"ax\"\n"
20429 "9: lea 0(%%eax,%0,4),%0\n"
20430@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20431
20432 __asm__ __volatile__(
20433 " .align 2,0x90\n"
20434- "0: movl 32(%4), %%eax\n"
20435+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20436 " cmpl $67, %0\n"
20437 " jbe 2f\n"
20438- "1: movl 64(%4), %%eax\n"
20439+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20440 " .align 2,0x90\n"
20441- "2: movl 0(%4), %%eax\n"
20442- "21: movl 4(%4), %%edx\n"
20443+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20444+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20445 " movnti %%eax, 0(%3)\n"
20446 " movnti %%edx, 4(%3)\n"
20447- "3: movl 8(%4), %%eax\n"
20448- "31: movl 12(%4),%%edx\n"
20449+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20450+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20451 " movnti %%eax, 8(%3)\n"
20452 " movnti %%edx, 12(%3)\n"
20453- "4: movl 16(%4), %%eax\n"
20454- "41: movl 20(%4), %%edx\n"
20455+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20456+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20457 " movnti %%eax, 16(%3)\n"
20458 " movnti %%edx, 20(%3)\n"
20459- "10: movl 24(%4), %%eax\n"
20460- "51: movl 28(%4), %%edx\n"
20461+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20462+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20463 " movnti %%eax, 24(%3)\n"
20464 " movnti %%edx, 28(%3)\n"
20465- "11: movl 32(%4), %%eax\n"
20466- "61: movl 36(%4), %%edx\n"
20467+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20468+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20469 " movnti %%eax, 32(%3)\n"
20470 " movnti %%edx, 36(%3)\n"
20471- "12: movl 40(%4), %%eax\n"
20472- "71: movl 44(%4), %%edx\n"
20473+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20474+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20475 " movnti %%eax, 40(%3)\n"
20476 " movnti %%edx, 44(%3)\n"
20477- "13: movl 48(%4), %%eax\n"
20478- "81: movl 52(%4), %%edx\n"
20479+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20480+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20481 " movnti %%eax, 48(%3)\n"
20482 " movnti %%edx, 52(%3)\n"
20483- "14: movl 56(%4), %%eax\n"
20484- "91: movl 60(%4), %%edx\n"
20485+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20486+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20487 " movnti %%eax, 56(%3)\n"
20488 " movnti %%edx, 60(%3)\n"
20489 " addl $-64, %0\n"
20490@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20491 " shrl $2, %0\n"
20492 " andl $3, %%eax\n"
20493 " cld\n"
20494- "6: rep; movsl\n"
20495+ "6: rep; "__copyuser_seg" movsl\n"
20496 " movl %%eax,%0\n"
20497- "7: rep; movsb\n"
20498+ "7: rep; "__copyuser_seg" movsb\n"
20499 "8:\n"
20500 ".section .fixup,\"ax\"\n"
20501 "9: lea 0(%%eax,%0,4),%0\n"
20502@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20503
20504 __asm__ __volatile__(
20505 " .align 2,0x90\n"
20506- "0: movl 32(%4), %%eax\n"
20507+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20508 " cmpl $67, %0\n"
20509 " jbe 2f\n"
20510- "1: movl 64(%4), %%eax\n"
20511+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20512 " .align 2,0x90\n"
20513- "2: movl 0(%4), %%eax\n"
20514- "21: movl 4(%4), %%edx\n"
20515+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20516+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20517 " movnti %%eax, 0(%3)\n"
20518 " movnti %%edx, 4(%3)\n"
20519- "3: movl 8(%4), %%eax\n"
20520- "31: movl 12(%4),%%edx\n"
20521+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20522+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20523 " movnti %%eax, 8(%3)\n"
20524 " movnti %%edx, 12(%3)\n"
20525- "4: movl 16(%4), %%eax\n"
20526- "41: movl 20(%4), %%edx\n"
20527+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20528+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20529 " movnti %%eax, 16(%3)\n"
20530 " movnti %%edx, 20(%3)\n"
20531- "10: movl 24(%4), %%eax\n"
20532- "51: movl 28(%4), %%edx\n"
20533+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20534+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20535 " movnti %%eax, 24(%3)\n"
20536 " movnti %%edx, 28(%3)\n"
20537- "11: movl 32(%4), %%eax\n"
20538- "61: movl 36(%4), %%edx\n"
20539+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20540+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20541 " movnti %%eax, 32(%3)\n"
20542 " movnti %%edx, 36(%3)\n"
20543- "12: movl 40(%4), %%eax\n"
20544- "71: movl 44(%4), %%edx\n"
20545+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20546+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20547 " movnti %%eax, 40(%3)\n"
20548 " movnti %%edx, 44(%3)\n"
20549- "13: movl 48(%4), %%eax\n"
20550- "81: movl 52(%4), %%edx\n"
20551+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20552+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20553 " movnti %%eax, 48(%3)\n"
20554 " movnti %%edx, 52(%3)\n"
20555- "14: movl 56(%4), %%eax\n"
20556- "91: movl 60(%4), %%edx\n"
20557+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20558+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20559 " movnti %%eax, 56(%3)\n"
20560 " movnti %%edx, 60(%3)\n"
20561 " addl $-64, %0\n"
20562@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20563 " shrl $2, %0\n"
20564 " andl $3, %%eax\n"
20565 " cld\n"
20566- "6: rep; movsl\n"
20567+ "6: rep; "__copyuser_seg" movsl\n"
20568 " movl %%eax,%0\n"
20569- "7: rep; movsb\n"
20570+ "7: rep; "__copyuser_seg" movsb\n"
20571 "8:\n"
20572 ".section .fixup,\"ax\"\n"
20573 "9: lea 0(%%eax,%0,4),%0\n"
20574@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20575 */
20576 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20577 unsigned long size);
20578-unsigned long __copy_user_intel(void __user *to, const void *from,
20579+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20580+ unsigned long size);
20581+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20582 unsigned long size);
20583 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20584 const void __user *from, unsigned long size);
20585 #endif /* CONFIG_X86_INTEL_USERCOPY */
20586
20587 /* Generic arbitrary sized copy. */
20588-#define __copy_user(to, from, size) \
20589+#define __copy_user(to, from, size, prefix, set, restore) \
20590 do { \
20591 int __d0, __d1, __d2; \
20592 __asm__ __volatile__( \
20593+ set \
20594 " cmp $7,%0\n" \
20595 " jbe 1f\n" \
20596 " movl %1,%0\n" \
20597 " negl %0\n" \
20598 " andl $7,%0\n" \
20599 " subl %0,%3\n" \
20600- "4: rep; movsb\n" \
20601+ "4: rep; "prefix"movsb\n" \
20602 " movl %3,%0\n" \
20603 " shrl $2,%0\n" \
20604 " andl $3,%3\n" \
20605 " .align 2,0x90\n" \
20606- "0: rep; movsl\n" \
20607+ "0: rep; "prefix"movsl\n" \
20608 " movl %3,%0\n" \
20609- "1: rep; movsb\n" \
20610+ "1: rep; "prefix"movsb\n" \
20611 "2:\n" \
20612+ restore \
20613 ".section .fixup,\"ax\"\n" \
20614 "5: addl %3,%0\n" \
20615 " jmp 2b\n" \
20616@@ -682,14 +799,14 @@ do { \
20617 " negl %0\n" \
20618 " andl $7,%0\n" \
20619 " subl %0,%3\n" \
20620- "4: rep; movsb\n" \
20621+ "4: rep; "__copyuser_seg"movsb\n" \
20622 " movl %3,%0\n" \
20623 " shrl $2,%0\n" \
20624 " andl $3,%3\n" \
20625 " .align 2,0x90\n" \
20626- "0: rep; movsl\n" \
20627+ "0: rep; "__copyuser_seg"movsl\n" \
20628 " movl %3,%0\n" \
20629- "1: rep; movsb\n" \
20630+ "1: rep; "__copyuser_seg"movsb\n" \
20631 "2:\n" \
20632 ".section .fixup,\"ax\"\n" \
20633 "5: addl %3,%0\n" \
20634@@ -775,9 +892,9 @@ survive:
20635 }
20636 #endif
20637 if (movsl_is_ok(to, from, n))
20638- __copy_user(to, from, n);
20639+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20640 else
20641- n = __copy_user_intel(to, from, n);
20642+ n = __generic_copy_to_user_intel(to, from, n);
20643 return n;
20644 }
20645 EXPORT_SYMBOL(__copy_to_user_ll);
20646@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20647 unsigned long n)
20648 {
20649 if (movsl_is_ok(to, from, n))
20650- __copy_user(to, from, n);
20651+ __copy_user(to, from, n, __copyuser_seg, "", "");
20652 else
20653- n = __copy_user_intel((void __user *)to,
20654- (const void *)from, n);
20655+ n = __generic_copy_from_user_intel(to, from, n);
20656 return n;
20657 }
20658 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20659@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20660 if (n > 64 && cpu_has_xmm2)
20661 n = __copy_user_intel_nocache(to, from, n);
20662 else
20663- __copy_user(to, from, n);
20664+ __copy_user(to, from, n, __copyuser_seg, "", "");
20665 #else
20666- __copy_user(to, from, n);
20667+ __copy_user(to, from, n, __copyuser_seg, "", "");
20668 #endif
20669 return n;
20670 }
20671 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20672
20673-/**
20674- * copy_to_user: - Copy a block of data into user space.
20675- * @to: Destination address, in user space.
20676- * @from: Source address, in kernel space.
20677- * @n: Number of bytes to copy.
20678- *
20679- * Context: User context only. This function may sleep.
20680- *
20681- * Copy data from kernel space to user space.
20682- *
20683- * Returns number of bytes that could not be copied.
20684- * On success, this will be zero.
20685- */
20686-unsigned long
20687-copy_to_user(void __user *to, const void *from, unsigned long n)
20688+#ifdef CONFIG_PAX_MEMORY_UDEREF
20689+void __set_fs(mm_segment_t x)
20690 {
20691- if (access_ok(VERIFY_WRITE, to, n))
20692- n = __copy_to_user(to, from, n);
20693- return n;
20694+ switch (x.seg) {
20695+ case 0:
20696+ loadsegment(gs, 0);
20697+ break;
20698+ case TASK_SIZE_MAX:
20699+ loadsegment(gs, __USER_DS);
20700+ break;
20701+ case -1UL:
20702+ loadsegment(gs, __KERNEL_DS);
20703+ break;
20704+ default:
20705+ BUG();
20706+ }
20707+ return;
20708 }
20709-EXPORT_SYMBOL(copy_to_user);
20710+EXPORT_SYMBOL(__set_fs);
20711
20712-/**
20713- * copy_from_user: - Copy a block of data from user space.
20714- * @to: Destination address, in kernel space.
20715- * @from: Source address, in user space.
20716- * @n: Number of bytes to copy.
20717- *
20718- * Context: User context only. This function may sleep.
20719- *
20720- * Copy data from user space to kernel space.
20721- *
20722- * Returns number of bytes that could not be copied.
20723- * On success, this will be zero.
20724- *
20725- * If some data could not be copied, this function will pad the copied
20726- * data to the requested size using zero bytes.
20727- */
20728-unsigned long
20729-copy_from_user(void *to, const void __user *from, unsigned long n)
20730+void set_fs(mm_segment_t x)
20731 {
20732- if (access_ok(VERIFY_READ, from, n))
20733- n = __copy_from_user(to, from, n);
20734- else
20735- memset(to, 0, n);
20736- return n;
20737+ current_thread_info()->addr_limit = x;
20738+ __set_fs(x);
20739 }
20740-EXPORT_SYMBOL(copy_from_user);
20741+EXPORT_SYMBOL(set_fs);
20742+#endif
20743diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_64.c linux-2.6.32.45/arch/x86/lib/usercopy_64.c
20744--- linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20745+++ linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20746@@ -42,6 +42,12 @@ long
20747 __strncpy_from_user(char *dst, const char __user *src, long count)
20748 {
20749 long res;
20750+
20751+#ifdef CONFIG_PAX_MEMORY_UDEREF
20752+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20753+ src += PAX_USER_SHADOW_BASE;
20754+#endif
20755+
20756 __do_strncpy_from_user(dst, src, count, res);
20757 return res;
20758 }
20759@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20760 {
20761 long __d0;
20762 might_fault();
20763+
20764+#ifdef CONFIG_PAX_MEMORY_UDEREF
20765+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20766+ addr += PAX_USER_SHADOW_BASE;
20767+#endif
20768+
20769 /* no memory constraint because it doesn't change any memory gcc knows
20770 about */
20771 asm volatile(
20772@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20773
20774 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20775 {
20776- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20777+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20778+
20779+#ifdef CONFIG_PAX_MEMORY_UDEREF
20780+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20781+ to += PAX_USER_SHADOW_BASE;
20782+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20783+ from += PAX_USER_SHADOW_BASE;
20784+#endif
20785+
20786 return copy_user_generic((__force void *)to, (__force void *)from, len);
20787- }
20788- return len;
20789+ }
20790+ return len;
20791 }
20792 EXPORT_SYMBOL(copy_in_user);
20793
20794diff -urNp linux-2.6.32.45/arch/x86/Makefile linux-2.6.32.45/arch/x86/Makefile
20795--- linux-2.6.32.45/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20796+++ linux-2.6.32.45/arch/x86/Makefile 2011-07-19 18:16:02.000000000 -0400
20797@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
20798 else
20799 BITS := 64
20800 UTS_MACHINE := x86_64
20801+ biarch := $(call cc-option,-m64)
20802 CHECKFLAGS += -D__x86_64__ -m64
20803
20804 KBUILD_AFLAGS += -m64
20805@@ -189,3 +190,12 @@ define archhelp
20806 echo ' FDARGS="..." arguments for the booted kernel'
20807 echo ' FDINITRD=file initrd for the booted kernel'
20808 endef
20809+
20810+define OLD_LD
20811+
20812+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20813+*** Please upgrade your binutils to 2.18 or newer
20814+endef
20815+
20816+archprepare:
20817+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20818diff -urNp linux-2.6.32.45/arch/x86/mm/extable.c linux-2.6.32.45/arch/x86/mm/extable.c
20819--- linux-2.6.32.45/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20820+++ linux-2.6.32.45/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20821@@ -1,14 +1,71 @@
20822 #include <linux/module.h>
20823 #include <linux/spinlock.h>
20824+#include <linux/sort.h>
20825 #include <asm/uaccess.h>
20826+#include <asm/pgtable.h>
20827
20828+/*
20829+ * The exception table needs to be sorted so that the binary
20830+ * search that we use to find entries in it works properly.
20831+ * This is used both for the kernel exception table and for
20832+ * the exception tables of modules that get loaded.
20833+ */
20834+static int cmp_ex(const void *a, const void *b)
20835+{
20836+ const struct exception_table_entry *x = a, *y = b;
20837+
20838+ /* avoid overflow */
20839+ if (x->insn > y->insn)
20840+ return 1;
20841+ if (x->insn < y->insn)
20842+ return -1;
20843+ return 0;
20844+}
20845+
20846+static void swap_ex(void *a, void *b, int size)
20847+{
20848+ struct exception_table_entry t, *x = a, *y = b;
20849+
20850+ t = *x;
20851+
20852+ pax_open_kernel();
20853+ *x = *y;
20854+ *y = t;
20855+ pax_close_kernel();
20856+}
20857+
20858+void sort_extable(struct exception_table_entry *start,
20859+ struct exception_table_entry *finish)
20860+{
20861+ sort(start, finish - start, sizeof(struct exception_table_entry),
20862+ cmp_ex, swap_ex);
20863+}
20864+
20865+#ifdef CONFIG_MODULES
20866+/*
20867+ * If the exception table is sorted, any referring to the module init
20868+ * will be at the beginning or the end.
20869+ */
20870+void trim_init_extable(struct module *m)
20871+{
20872+ /*trim the beginning*/
20873+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20874+ m->extable++;
20875+ m->num_exentries--;
20876+ }
20877+ /*trim the end*/
20878+ while (m->num_exentries &&
20879+ within_module_init(m->extable[m->num_exentries-1].insn, m))
20880+ m->num_exentries--;
20881+}
20882+#endif /* CONFIG_MODULES */
20883
20884 int fixup_exception(struct pt_regs *regs)
20885 {
20886 const struct exception_table_entry *fixup;
20887
20888 #ifdef CONFIG_PNPBIOS
20889- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20890+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20891 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20892 extern u32 pnp_bios_is_utter_crap;
20893 pnp_bios_is_utter_crap = 1;
20894diff -urNp linux-2.6.32.45/arch/x86/mm/fault.c linux-2.6.32.45/arch/x86/mm/fault.c
20895--- linux-2.6.32.45/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20896+++ linux-2.6.32.45/arch/x86/mm/fault.c 2011-08-17 20:06:44.000000000 -0400
20897@@ -11,10 +11,19 @@
20898 #include <linux/kprobes.h> /* __kprobes, ... */
20899 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20900 #include <linux/perf_event.h> /* perf_sw_event */
20901+#include <linux/unistd.h>
20902+#include <linux/compiler.h>
20903
20904 #include <asm/traps.h> /* dotraplinkage, ... */
20905 #include <asm/pgalloc.h> /* pgd_*(), ... */
20906 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20907+#include <asm/vsyscall.h>
20908+#include <asm/tlbflush.h>
20909+
20910+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20911+#include <asm/stacktrace.h>
20912+#include "../kernel/dumpstack.h"
20913+#endif
20914
20915 /*
20916 * Page fault error code bits:
20917@@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20918 int ret = 0;
20919
20920 /* kprobe_running() needs smp_processor_id() */
20921- if (kprobes_built_in() && !user_mode_vm(regs)) {
20922+ if (kprobes_built_in() && !user_mode(regs)) {
20923 preempt_disable();
20924 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20925 ret = 1;
20926@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20927 return !instr_lo || (instr_lo>>1) == 1;
20928 case 0x00:
20929 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20930- if (probe_kernel_address(instr, opcode))
20931+ if (user_mode(regs)) {
20932+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20933+ return 0;
20934+ } else if (probe_kernel_address(instr, opcode))
20935 return 0;
20936
20937 *prefetch = (instr_lo == 0xF) &&
20938@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20939 while (instr < max_instr) {
20940 unsigned char opcode;
20941
20942- if (probe_kernel_address(instr, opcode))
20943+ if (user_mode(regs)) {
20944+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20945+ break;
20946+ } else if (probe_kernel_address(instr, opcode))
20947 break;
20948
20949 instr++;
20950@@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20951 force_sig_info(si_signo, &info, tsk);
20952 }
20953
20954+#ifdef CONFIG_PAX_EMUTRAMP
20955+static int pax_handle_fetch_fault(struct pt_regs *regs);
20956+#endif
20957+
20958+#ifdef CONFIG_PAX_PAGEEXEC
20959+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20960+{
20961+ pgd_t *pgd;
20962+ pud_t *pud;
20963+ pmd_t *pmd;
20964+
20965+ pgd = pgd_offset(mm, address);
20966+ if (!pgd_present(*pgd))
20967+ return NULL;
20968+ pud = pud_offset(pgd, address);
20969+ if (!pud_present(*pud))
20970+ return NULL;
20971+ pmd = pmd_offset(pud, address);
20972+ if (!pmd_present(*pmd))
20973+ return NULL;
20974+ return pmd;
20975+}
20976+#endif
20977+
20978 DEFINE_SPINLOCK(pgd_lock);
20979 LIST_HEAD(pgd_list);
20980
20981@@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20982 address += PMD_SIZE) {
20983
20984 unsigned long flags;
20985+
20986+#ifdef CONFIG_PAX_PER_CPU_PGD
20987+ unsigned long cpu;
20988+#else
20989 struct page *page;
20990+#endif
20991
20992 spin_lock_irqsave(&pgd_lock, flags);
20993+
20994+#ifdef CONFIG_PAX_PER_CPU_PGD
20995+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20996+ pgd_t *pgd = get_cpu_pgd(cpu);
20997+#else
20998 list_for_each_entry(page, &pgd_list, lru) {
20999- if (!vmalloc_sync_one(page_address(page), address))
21000+ pgd_t *pgd = page_address(page);
21001+#endif
21002+
21003+ if (!vmalloc_sync_one(pgd, address))
21004 break;
21005 }
21006 spin_unlock_irqrestore(&pgd_lock, flags);
21007@@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
21008 * an interrupt in the middle of a task switch..
21009 */
21010 pgd_paddr = read_cr3();
21011+
21012+#ifdef CONFIG_PAX_PER_CPU_PGD
21013+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21014+#endif
21015+
21016 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21017 if (!pmd_k)
21018 return -1;
21019@@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
21020
21021 const pgd_t *pgd_ref = pgd_offset_k(address);
21022 unsigned long flags;
21023+
21024+#ifdef CONFIG_PAX_PER_CPU_PGD
21025+ unsigned long cpu;
21026+#else
21027 struct page *page;
21028+#endif
21029
21030 if (pgd_none(*pgd_ref))
21031 continue;
21032
21033 spin_lock_irqsave(&pgd_lock, flags);
21034+
21035+#ifdef CONFIG_PAX_PER_CPU_PGD
21036+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21037+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
21038+#else
21039 list_for_each_entry(page, &pgd_list, lru) {
21040 pgd_t *pgd;
21041 pgd = (pgd_t *)page_address(page) + pgd_index(address);
21042+#endif
21043+
21044 if (pgd_none(*pgd))
21045 set_pgd(pgd, *pgd_ref);
21046 else
21047@@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
21048 * happen within a race in page table update. In the later
21049 * case just flush:
21050 */
21051+
21052+#ifdef CONFIG_PAX_PER_CPU_PGD
21053+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21054+ pgd = pgd_offset_cpu(smp_processor_id(), address);
21055+#else
21056 pgd = pgd_offset(current->active_mm, address);
21057+#endif
21058+
21059 pgd_ref = pgd_offset_k(address);
21060 if (pgd_none(*pgd_ref))
21061 return -1;
21062@@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
21063 static int is_errata100(struct pt_regs *regs, unsigned long address)
21064 {
21065 #ifdef CONFIG_X86_64
21066- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21067+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21068 return 1;
21069 #endif
21070 return 0;
21071@@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
21072 }
21073
21074 static const char nx_warning[] = KERN_CRIT
21075-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21076+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21077
21078 static void
21079 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21080@@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
21081 if (!oops_may_print())
21082 return;
21083
21084- if (error_code & PF_INSTR) {
21085+ if (nx_enabled && (error_code & PF_INSTR)) {
21086 unsigned int level;
21087
21088 pte_t *pte = lookup_address(address, &level);
21089
21090 if (pte && pte_present(*pte) && !pte_exec(*pte))
21091- printk(nx_warning, current_uid());
21092+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21093 }
21094
21095+#ifdef CONFIG_PAX_KERNEXEC
21096+ if (init_mm.start_code <= address && address < init_mm.end_code) {
21097+ if (current->signal->curr_ip)
21098+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21099+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21100+ else
21101+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21102+ current->comm, task_pid_nr(current), current_uid(), current_euid());
21103+ }
21104+#endif
21105+
21106 printk(KERN_ALERT "BUG: unable to handle kernel ");
21107 if (address < PAGE_SIZE)
21108 printk(KERN_CONT "NULL pointer dereference");
21109@@ -704,6 +791,70 @@ __bad_area_nosemaphore(struct pt_regs *r
21110 unsigned long address, int si_code)
21111 {
21112 struct task_struct *tsk = current;
21113+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21114+ struct mm_struct *mm = tsk->mm;
21115+#endif
21116+
21117+#ifdef CONFIG_X86_64
21118+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
21119+ if (regs->ip == (unsigned long)vgettimeofday) {
21120+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
21121+ return;
21122+ } else if (regs->ip == (unsigned long)vtime) {
21123+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
21124+ return;
21125+ } else if (regs->ip == (unsigned long)vgetcpu) {
21126+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
21127+ return;
21128+ }
21129+ }
21130+#endif
21131+
21132+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21133+ if (mm && (error_code & PF_USER)) {
21134+ unsigned long ip = regs->ip;
21135+
21136+ if (v8086_mode(regs))
21137+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21138+
21139+ /*
21140+ * It's possible to have interrupts off here:
21141+ */
21142+ local_irq_enable();
21143+
21144+#ifdef CONFIG_PAX_PAGEEXEC
21145+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
21146+ ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
21147+
21148+#ifdef CONFIG_PAX_EMUTRAMP
21149+ switch (pax_handle_fetch_fault(regs)) {
21150+ case 2:
21151+ return;
21152+ }
21153+#endif
21154+
21155+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21156+ do_group_exit(SIGKILL);
21157+ }
21158+#endif
21159+
21160+#ifdef CONFIG_PAX_SEGMEXEC
21161+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
21162+
21163+#ifdef CONFIG_PAX_EMUTRAMP
21164+ switch (pax_handle_fetch_fault(regs)) {
21165+ case 2:
21166+ return;
21167+ }
21168+#endif
21169+
21170+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21171+ do_group_exit(SIGKILL);
21172+ }
21173+#endif
21174+
21175+ }
21176+#endif
21177
21178 /* User mode accesses just cause a SIGSEGV */
21179 if (error_code & PF_USER) {
21180@@ -857,6 +1008,99 @@ static int spurious_fault_check(unsigned
21181 return 1;
21182 }
21183
21184+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21185+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21186+{
21187+ pte_t *pte;
21188+ pmd_t *pmd;
21189+ spinlock_t *ptl;
21190+ unsigned char pte_mask;
21191+
21192+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21193+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
21194+ return 0;
21195+
21196+ /* PaX: it's our fault, let's handle it if we can */
21197+
21198+ /* PaX: take a look at read faults before acquiring any locks */
21199+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21200+ /* instruction fetch attempt from a protected page in user mode */
21201+ up_read(&mm->mmap_sem);
21202+
21203+#ifdef CONFIG_PAX_EMUTRAMP
21204+ switch (pax_handle_fetch_fault(regs)) {
21205+ case 2:
21206+ return 1;
21207+ }
21208+#endif
21209+
21210+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21211+ do_group_exit(SIGKILL);
21212+ }
21213+
21214+ pmd = pax_get_pmd(mm, address);
21215+ if (unlikely(!pmd))
21216+ return 0;
21217+
21218+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21219+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21220+ pte_unmap_unlock(pte, ptl);
21221+ return 0;
21222+ }
21223+
21224+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21225+ /* write attempt to a protected page in user mode */
21226+ pte_unmap_unlock(pte, ptl);
21227+ return 0;
21228+ }
21229+
21230+#ifdef CONFIG_SMP
21231+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21232+#else
21233+ if (likely(address > get_limit(regs->cs)))
21234+#endif
21235+ {
21236+ set_pte(pte, pte_mkread(*pte));
21237+ __flush_tlb_one(address);
21238+ pte_unmap_unlock(pte, ptl);
21239+ up_read(&mm->mmap_sem);
21240+ return 1;
21241+ }
21242+
21243+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21244+
21245+ /*
21246+ * PaX: fill DTLB with user rights and retry
21247+ */
21248+ __asm__ __volatile__ (
21249+ "orb %2,(%1)\n"
21250+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21251+/*
21252+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21253+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21254+ * page fault when examined during a TLB load attempt. this is true not only
21255+ * for PTEs holding a non-present entry but also present entries that will
21256+ * raise a page fault (such as those set up by PaX, or the copy-on-write
21257+ * mechanism). in effect it means that we do *not* need to flush the TLBs
21258+ * for our target pages since their PTEs are simply not in the TLBs at all.
21259+
21260+ * the best thing in omitting it is that we gain around 15-20% speed in the
21261+ * fast path of the page fault handler and can get rid of tracing since we
21262+ * can no longer flush unintended entries.
21263+ */
21264+ "invlpg (%0)\n"
21265+#endif
21266+ __copyuser_seg"testb $0,(%0)\n"
21267+ "xorb %3,(%1)\n"
21268+ :
21269+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21270+ : "memory", "cc");
21271+ pte_unmap_unlock(pte, ptl);
21272+ up_read(&mm->mmap_sem);
21273+ return 1;
21274+}
21275+#endif
21276+
21277 /*
21278 * Handle a spurious fault caused by a stale TLB entry.
21279 *
21280@@ -923,6 +1167,9 @@ int show_unhandled_signals = 1;
21281 static inline int
21282 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
21283 {
21284+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21285+ return 1;
21286+
21287 if (write) {
21288 /* write, present and write, not present: */
21289 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21290@@ -956,17 +1203,31 @@ do_page_fault(struct pt_regs *regs, unsi
21291 {
21292 struct vm_area_struct *vma;
21293 struct task_struct *tsk;
21294- unsigned long address;
21295 struct mm_struct *mm;
21296 int write;
21297 int fault;
21298
21299+ /* Get the faulting address: */
21300+ unsigned long address = read_cr2();
21301+
21302+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21303+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21304+ if (!search_exception_tables(regs->ip)) {
21305+ bad_area_nosemaphore(regs, error_code, address);
21306+ return;
21307+ }
21308+ if (address < PAX_USER_SHADOW_BASE) {
21309+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21310+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
21311+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21312+ } else
21313+ address -= PAX_USER_SHADOW_BASE;
21314+ }
21315+#endif
21316+
21317 tsk = current;
21318 mm = tsk->mm;
21319
21320- /* Get the faulting address: */
21321- address = read_cr2();
21322-
21323 /*
21324 * Detect and handle instructions that would cause a page fault for
21325 * both a tracked kernel page and a userspace page.
21326@@ -1026,7 +1287,7 @@ do_page_fault(struct pt_regs *regs, unsi
21327 * User-mode registers count as a user access even for any
21328 * potential system fault or CPU buglet:
21329 */
21330- if (user_mode_vm(regs)) {
21331+ if (user_mode(regs)) {
21332 local_irq_enable();
21333 error_code |= PF_USER;
21334 } else {
21335@@ -1080,6 +1341,11 @@ do_page_fault(struct pt_regs *regs, unsi
21336 might_sleep();
21337 }
21338
21339+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21340+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21341+ return;
21342+#endif
21343+
21344 vma = find_vma(mm, address);
21345 if (unlikely(!vma)) {
21346 bad_area(regs, error_code, address);
21347@@ -1091,18 +1357,24 @@ do_page_fault(struct pt_regs *regs, unsi
21348 bad_area(regs, error_code, address);
21349 return;
21350 }
21351- if (error_code & PF_USER) {
21352- /*
21353- * Accessing the stack below %sp is always a bug.
21354- * The large cushion allows instructions like enter
21355- * and pusha to work. ("enter $65535, $31" pushes
21356- * 32 pointers and then decrements %sp by 65535.)
21357- */
21358- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21359- bad_area(regs, error_code, address);
21360- return;
21361- }
21362+ /*
21363+ * Accessing the stack below %sp is always a bug.
21364+ * The large cushion allows instructions like enter
21365+ * and pusha to work. ("enter $65535, $31" pushes
21366+ * 32 pointers and then decrements %sp by 65535.)
21367+ */
21368+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21369+ bad_area(regs, error_code, address);
21370+ return;
21371 }
21372+
21373+#ifdef CONFIG_PAX_SEGMEXEC
21374+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21375+ bad_area(regs, error_code, address);
21376+ return;
21377+ }
21378+#endif
21379+
21380 if (unlikely(expand_stack(vma, address))) {
21381 bad_area(regs, error_code, address);
21382 return;
21383@@ -1146,3 +1418,199 @@ good_area:
21384
21385 up_read(&mm->mmap_sem);
21386 }
21387+
21388+#ifdef CONFIG_PAX_EMUTRAMP
21389+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21390+{
21391+ int err;
21392+
21393+ do { /* PaX: gcc trampoline emulation #1 */
21394+ unsigned char mov1, mov2;
21395+ unsigned short jmp;
21396+ unsigned int addr1, addr2;
21397+
21398+#ifdef CONFIG_X86_64
21399+ if ((regs->ip + 11) >> 32)
21400+ break;
21401+#endif
21402+
21403+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21404+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21405+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21406+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21407+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21408+
21409+ if (err)
21410+ break;
21411+
21412+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21413+ regs->cx = addr1;
21414+ regs->ax = addr2;
21415+ regs->ip = addr2;
21416+ return 2;
21417+ }
21418+ } while (0);
21419+
21420+ do { /* PaX: gcc trampoline emulation #2 */
21421+ unsigned char mov, jmp;
21422+ unsigned int addr1, addr2;
21423+
21424+#ifdef CONFIG_X86_64
21425+ if ((regs->ip + 9) >> 32)
21426+ break;
21427+#endif
21428+
21429+ err = get_user(mov, (unsigned char __user *)regs->ip);
21430+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21431+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21432+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21433+
21434+ if (err)
21435+ break;
21436+
21437+ if (mov == 0xB9 && jmp == 0xE9) {
21438+ regs->cx = addr1;
21439+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21440+ return 2;
21441+ }
21442+ } while (0);
21443+
21444+ return 1; /* PaX in action */
21445+}
21446+
21447+#ifdef CONFIG_X86_64
21448+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21449+{
21450+ int err;
21451+
21452+ do { /* PaX: gcc trampoline emulation #1 */
21453+ unsigned short mov1, mov2, jmp1;
21454+ unsigned char jmp2;
21455+ unsigned int addr1;
21456+ unsigned long addr2;
21457+
21458+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21459+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21460+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21461+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21462+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21463+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21464+
21465+ if (err)
21466+ break;
21467+
21468+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21469+ regs->r11 = addr1;
21470+ regs->r10 = addr2;
21471+ regs->ip = addr1;
21472+ return 2;
21473+ }
21474+ } while (0);
21475+
21476+ do { /* PaX: gcc trampoline emulation #2 */
21477+ unsigned short mov1, mov2, jmp1;
21478+ unsigned char jmp2;
21479+ unsigned long addr1, addr2;
21480+
21481+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21482+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21483+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21484+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21485+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21486+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21487+
21488+ if (err)
21489+ break;
21490+
21491+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21492+ regs->r11 = addr1;
21493+ regs->r10 = addr2;
21494+ regs->ip = addr1;
21495+ return 2;
21496+ }
21497+ } while (0);
21498+
21499+ return 1; /* PaX in action */
21500+}
21501+#endif
21502+
21503+/*
21504+ * PaX: decide what to do with offenders (regs->ip = fault address)
21505+ *
21506+ * returns 1 when task should be killed
21507+ * 2 when gcc trampoline was detected
21508+ */
21509+static int pax_handle_fetch_fault(struct pt_regs *regs)
21510+{
21511+ if (v8086_mode(regs))
21512+ return 1;
21513+
21514+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21515+ return 1;
21516+
21517+#ifdef CONFIG_X86_32
21518+ return pax_handle_fetch_fault_32(regs);
21519+#else
21520+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21521+ return pax_handle_fetch_fault_32(regs);
21522+ else
21523+ return pax_handle_fetch_fault_64(regs);
21524+#endif
21525+}
21526+#endif
21527+
21528+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21529+void pax_report_insns(void *pc, void *sp)
21530+{
21531+ long i;
21532+
21533+ printk(KERN_ERR "PAX: bytes at PC: ");
21534+ for (i = 0; i < 20; i++) {
21535+ unsigned char c;
21536+ if (get_user(c, (__force unsigned char __user *)pc+i))
21537+ printk(KERN_CONT "?? ");
21538+ else
21539+ printk(KERN_CONT "%02x ", c);
21540+ }
21541+ printk("\n");
21542+
21543+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21544+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
21545+ unsigned long c;
21546+ if (get_user(c, (__force unsigned long __user *)sp+i))
21547+#ifdef CONFIG_X86_32
21548+ printk(KERN_CONT "???????? ");
21549+#else
21550+ printk(KERN_CONT "???????????????? ");
21551+#endif
21552+ else
21553+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21554+ }
21555+ printk("\n");
21556+}
21557+#endif
21558+
21559+/**
21560+ * probe_kernel_write(): safely attempt to write to a location
21561+ * @dst: address to write to
21562+ * @src: pointer to the data that shall be written
21563+ * @size: size of the data chunk
21564+ *
21565+ * Safely write to address @dst from the buffer at @src. If a kernel fault
21566+ * happens, handle that and return -EFAULT.
21567+ */
21568+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21569+{
21570+ long ret;
21571+ mm_segment_t old_fs = get_fs();
21572+
21573+ set_fs(KERNEL_DS);
21574+ pagefault_disable();
21575+ pax_open_kernel();
21576+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21577+ pax_close_kernel();
21578+ pagefault_enable();
21579+ set_fs(old_fs);
21580+
21581+ return ret ? -EFAULT : 0;
21582+}
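
The EMUTRAMP handlers added above recognise the instruction sequences gcc emits for nested-function trampolines and emulate them instead of requiring an executable stack. For the 32-bit pattern #2, the bytes being matched are (illustrative layout only):

        b9 xx xx xx xx          mov  $static_chain, %ecx
        e9 xx xx xx xx          jmp  <nested function>   (rel32, read at ip+6)

pax_handle_fetch_fault_32() loads %ecx and redirects %eip itself, so the trampoline page never needs execute permission.

The probe_kernel_write() added at the end of the file behaves as its kernel-doc states: a faulting store returns -EFAULT rather than oopsing, and it is additionally bracketed by pax_open_kernel()/pax_close_kernel() so read-only kernel mappings can be updated. A minimal usage sketch (helper name and message are illustrative only):

static int sketch_poke_byte(void *addr, unsigned char val)
{
        long err = probe_kernel_write(addr, &val, sizeof(val));

        if (err)
                printk(KERN_WARNING "poke: %p not mapped writable (%ld)\n",
                       addr, err);
        return err;
}
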
21583diff -urNp linux-2.6.32.45/arch/x86/mm/gup.c linux-2.6.32.45/arch/x86/mm/gup.c
21584--- linux-2.6.32.45/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21585+++ linux-2.6.32.45/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21586@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21587 addr = start;
21588 len = (unsigned long) nr_pages << PAGE_SHIFT;
21589 end = start + len;
21590- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21591+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21592 (void __user *)start, len)))
21593 return 0;
21594
21595diff -urNp linux-2.6.32.45/arch/x86/mm/highmem_32.c linux-2.6.32.45/arch/x86/mm/highmem_32.c
21596--- linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21597+++ linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21598@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21599 idx = type + KM_TYPE_NR*smp_processor_id();
21600 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21601 BUG_ON(!pte_none(*(kmap_pte-idx)));
21602+
21603+ pax_open_kernel();
21604 set_pte(kmap_pte-idx, mk_pte(page, prot));
21605+ pax_close_kernel();
21606
21607 return (void *)vaddr;
21608 }
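
kmap_atomic_prot() above is one of several call sites in this patch that wrap a set_pte()-style update in pax_open_kernel()/pax_close_kernel(). The pair is defined elsewhere in the patch; as a rough model on native x86 it clears and restores CR0.WP so a store to an otherwise read-only mapping can go through. A hedged sketch only -- the real helpers take no argument, integrate with paravirt, and differ in detail:

static inline unsigned long sketch_open_kernel(void)
{
        unsigned long cr0;

        preempt_disable();
        cr0 = read_cr0();
        write_cr0(cr0 & ~X86_CR0_WP);   /* permit stores to read-only pages */
        return cr0;
}

static inline void sketch_close_kernel(unsigned long cr0)
{
        write_cr0(cr0);                 /* restore write protection */
        preempt_enable();
}
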
21609diff -urNp linux-2.6.32.45/arch/x86/mm/hugetlbpage.c linux-2.6.32.45/arch/x86/mm/hugetlbpage.c
21610--- linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21611+++ linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21612@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21613 struct hstate *h = hstate_file(file);
21614 struct mm_struct *mm = current->mm;
21615 struct vm_area_struct *vma;
21616- unsigned long start_addr;
21617+ unsigned long start_addr, pax_task_size = TASK_SIZE;
21618+
21619+#ifdef CONFIG_PAX_SEGMEXEC
21620+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21621+ pax_task_size = SEGMEXEC_TASK_SIZE;
21622+#endif
21623+
21624+ pax_task_size -= PAGE_SIZE;
21625
21626 if (len > mm->cached_hole_size) {
21627- start_addr = mm->free_area_cache;
21628+ start_addr = mm->free_area_cache;
21629 } else {
21630- start_addr = TASK_UNMAPPED_BASE;
21631- mm->cached_hole_size = 0;
21632+ start_addr = mm->mmap_base;
21633+ mm->cached_hole_size = 0;
21634 }
21635
21636 full_search:
21637@@ -281,26 +288,27 @@ full_search:
21638
21639 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21640 /* At this point: (!vma || addr < vma->vm_end). */
21641- if (TASK_SIZE - len < addr) {
21642+ if (pax_task_size - len < addr) {
21643 /*
21644 * Start a new search - just in case we missed
21645 * some holes.
21646 */
21647- if (start_addr != TASK_UNMAPPED_BASE) {
21648- start_addr = TASK_UNMAPPED_BASE;
21649+ if (start_addr != mm->mmap_base) {
21650+ start_addr = mm->mmap_base;
21651 mm->cached_hole_size = 0;
21652 goto full_search;
21653 }
21654 return -ENOMEM;
21655 }
21656- if (!vma || addr + len <= vma->vm_start) {
21657- mm->free_area_cache = addr + len;
21658- return addr;
21659- }
21660+ if (check_heap_stack_gap(vma, addr, len))
21661+ break;
21662 if (addr + mm->cached_hole_size < vma->vm_start)
21663 mm->cached_hole_size = vma->vm_start - addr;
21664 addr = ALIGN(vma->vm_end, huge_page_size(h));
21665 }
21666+
21667+ mm->free_area_cache = addr + len;
21668+ return addr;
21669 }
21670
21671 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21672@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21673 {
21674 struct hstate *h = hstate_file(file);
21675 struct mm_struct *mm = current->mm;
21676- struct vm_area_struct *vma, *prev_vma;
21677- unsigned long base = mm->mmap_base, addr = addr0;
21678+ struct vm_area_struct *vma;
21679+ unsigned long base = mm->mmap_base, addr;
21680 unsigned long largest_hole = mm->cached_hole_size;
21681- int first_time = 1;
21682
21683 /* don't allow allocations above current base */
21684 if (mm->free_area_cache > base)
21685@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21686 largest_hole = 0;
21687 mm->free_area_cache = base;
21688 }
21689-try_again:
21690+
21691 /* make sure it can fit in the remaining address space */
21692 if (mm->free_area_cache < len)
21693 goto fail;
21694
21695 /* either no address requested or cant fit in requested address hole */
21696- addr = (mm->free_area_cache - len) & huge_page_mask(h);
21697+ addr = (mm->free_area_cache - len);
21698 do {
21699+ addr &= huge_page_mask(h);
21700+ vma = find_vma(mm, addr);
21701 /*
21702 * Lookup failure means no vma is above this address,
21703 * i.e. return with success:
21704- */
21705- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21706- return addr;
21707-
21708- /*
21709 * new region fits between prev_vma->vm_end and
21710 * vma->vm_start, use it:
21711 */
21712- if (addr + len <= vma->vm_start &&
21713- (!prev_vma || (addr >= prev_vma->vm_end))) {
21714+ if (check_heap_stack_gap(vma, addr, len)) {
21715 /* remember the address as a hint for next time */
21716- mm->cached_hole_size = largest_hole;
21717- return (mm->free_area_cache = addr);
21718- } else {
21719- /* pull free_area_cache down to the first hole */
21720- if (mm->free_area_cache == vma->vm_end) {
21721- mm->free_area_cache = vma->vm_start;
21722- mm->cached_hole_size = largest_hole;
21723- }
21724+ mm->cached_hole_size = largest_hole;
21725+ return (mm->free_area_cache = addr);
21726+ }
21727+ /* pull free_area_cache down to the first hole */
21728+ if (mm->free_area_cache == vma->vm_end) {
21729+ mm->free_area_cache = vma->vm_start;
21730+ mm->cached_hole_size = largest_hole;
21731 }
21732
21733 /* remember the largest hole we saw so far */
21734 if (addr + largest_hole < vma->vm_start)
21735- largest_hole = vma->vm_start - addr;
21736+ largest_hole = vma->vm_start - addr;
21737
21738 /* try just below the current vma->vm_start */
21739- addr = (vma->vm_start - len) & huge_page_mask(h);
21740- } while (len <= vma->vm_start);
21741+ addr = skip_heap_stack_gap(vma, len);
21742+ } while (!IS_ERR_VALUE(addr));
21743
21744 fail:
21745 /*
21746- * if hint left us with no space for the requested
21747- * mapping then try again:
21748- */
21749- if (first_time) {
21750- mm->free_area_cache = base;
21751- largest_hole = 0;
21752- first_time = 0;
21753- goto try_again;
21754- }
21755- /*
21756 * A failed mmap() very likely causes application failure,
21757 * so fall back to the bottom-up function here. This scenario
21758 * can happen with large stack limits and large mmap()
21759 * allocations.
21760 */
21761- mm->free_area_cache = TASK_UNMAPPED_BASE;
21762+
21763+#ifdef CONFIG_PAX_SEGMEXEC
21764+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21765+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21766+ else
21767+#endif
21768+
21769+ mm->mmap_base = TASK_UNMAPPED_BASE;
21770+
21771+#ifdef CONFIG_PAX_RANDMMAP
21772+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21773+ mm->mmap_base += mm->delta_mmap;
21774+#endif
21775+
21776+ mm->free_area_cache = mm->mmap_base;
21777 mm->cached_hole_size = ~0UL;
21778 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21779 len, pgoff, flags);
21780@@ -387,6 +393,7 @@ fail:
21781 /*
21782 * Restore the topdown base:
21783 */
21784+ mm->mmap_base = base;
21785 mm->free_area_cache = base;
21786 mm->cached_hole_size = ~0UL;
21787
21788@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21789 struct hstate *h = hstate_file(file);
21790 struct mm_struct *mm = current->mm;
21791 struct vm_area_struct *vma;
21792+ unsigned long pax_task_size = TASK_SIZE;
21793
21794 if (len & ~huge_page_mask(h))
21795 return -EINVAL;
21796- if (len > TASK_SIZE)
21797+
21798+#ifdef CONFIG_PAX_SEGMEXEC
21799+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21800+ pax_task_size = SEGMEXEC_TASK_SIZE;
21801+#endif
21802+
21803+ pax_task_size -= PAGE_SIZE;
21804+
21805+ if (len > pax_task_size)
21806 return -ENOMEM;
21807
21808 if (flags & MAP_FIXED) {
21809@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21810 if (addr) {
21811 addr = ALIGN(addr, huge_page_size(h));
21812 vma = find_vma(mm, addr);
21813- if (TASK_SIZE - len >= addr &&
21814- (!vma || addr + len <= vma->vm_start))
21815+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21816 return addr;
21817 }
21818 if (mm->get_unmapped_area == arch_get_unmapped_area)
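
Throughout these search loops the open-coded `!vma || addr + len <= vma->vm_start` test is replaced by check_heap_stack_gap(), which additionally refuses candidates that would butt up against a downward-growing stack vma. The helper (and the skip_heap_stack_gap() used in the topdown walk) is defined elsewhere in the patch; the sketch below only shows the assumed shape, with an assumed guard-gap size:

static inline bool sketch_check_heap_stack_gap(const struct vm_area_struct *vma,
                                               unsigned long addr, unsigned long len)
{
        const unsigned long gap = 64UL << 10;   /* assumed guard size */

        if (!vma)
                return true;                    /* nothing above the range */
        if (addr + len > vma->vm_start)
                return false;                   /* overlaps the next vma */
        if (vma->vm_flags & VM_GROWSDOWN)       /* keep a hole below a stack */
                return addr + len + gap <= vma->vm_start;
        return true;
}
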
21819diff -urNp linux-2.6.32.45/arch/x86/mm/init_32.c linux-2.6.32.45/arch/x86/mm/init_32.c
21820--- linux-2.6.32.45/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21821+++ linux-2.6.32.45/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21822@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21823 }
21824
21825 /*
21826- * Creates a middle page table and puts a pointer to it in the
21827- * given global directory entry. This only returns the gd entry
21828- * in non-PAE compilation mode, since the middle layer is folded.
21829- */
21830-static pmd_t * __init one_md_table_init(pgd_t *pgd)
21831-{
21832- pud_t *pud;
21833- pmd_t *pmd_table;
21834-
21835-#ifdef CONFIG_X86_PAE
21836- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21837- if (after_bootmem)
21838- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21839- else
21840- pmd_table = (pmd_t *)alloc_low_page();
21841- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21842- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21843- pud = pud_offset(pgd, 0);
21844- BUG_ON(pmd_table != pmd_offset(pud, 0));
21845-
21846- return pmd_table;
21847- }
21848-#endif
21849- pud = pud_offset(pgd, 0);
21850- pmd_table = pmd_offset(pud, 0);
21851-
21852- return pmd_table;
21853-}
21854-
21855-/*
21856 * Create a page table and place a pointer to it in a middle page
21857 * directory entry:
21858 */
21859@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21860 page_table = (pte_t *)alloc_low_page();
21861
21862 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21863+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21864+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21865+#else
21866 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21867+#endif
21868 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21869 }
21870
21871 return pte_offset_kernel(pmd, 0);
21872 }
21873
21874+static pmd_t * __init one_md_table_init(pgd_t *pgd)
21875+{
21876+ pud_t *pud;
21877+ pmd_t *pmd_table;
21878+
21879+ pud = pud_offset(pgd, 0);
21880+ pmd_table = pmd_offset(pud, 0);
21881+
21882+ return pmd_table;
21883+}
21884+
21885 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21886 {
21887 int pgd_idx = pgd_index(vaddr);
21888@@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21889 int pgd_idx, pmd_idx;
21890 unsigned long vaddr;
21891 pgd_t *pgd;
21892+ pud_t *pud;
21893 pmd_t *pmd;
21894 pte_t *pte = NULL;
21895
21896@@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21897 pgd = pgd_base + pgd_idx;
21898
21899 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21900- pmd = one_md_table_init(pgd);
21901- pmd = pmd + pmd_index(vaddr);
21902+ pud = pud_offset(pgd, vaddr);
21903+ pmd = pmd_offset(pud, vaddr);
21904+
21905+#ifdef CONFIG_X86_PAE
21906+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21907+#endif
21908+
21909 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21910 pmd++, pmd_idx++) {
21911 pte = page_table_kmap_check(one_page_table_init(pmd),
21912@@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21913 }
21914 }
21915
21916-static inline int is_kernel_text(unsigned long addr)
21917+static inline int is_kernel_text(unsigned long start, unsigned long end)
21918 {
21919- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21920- return 1;
21921- return 0;
21922+ if ((start > ktla_ktva((unsigned long)_etext) ||
21923+ end <= ktla_ktva((unsigned long)_stext)) &&
21924+ (start > ktla_ktva((unsigned long)_einittext) ||
21925+ end <= ktla_ktva((unsigned long)_sinittext)) &&
21926+
21927+#ifdef CONFIG_ACPI_SLEEP
21928+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21929+#endif
21930+
21931+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21932+ return 0;
21933+ return 1;
21934 }
21935
21936 /*
21937@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21938 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21939 unsigned long start_pfn, end_pfn;
21940 pgd_t *pgd_base = swapper_pg_dir;
21941- int pgd_idx, pmd_idx, pte_ofs;
21942+ unsigned int pgd_idx, pmd_idx, pte_ofs;
21943 unsigned long pfn;
21944 pgd_t *pgd;
21945+ pud_t *pud;
21946 pmd_t *pmd;
21947 pte_t *pte;
21948 unsigned pages_2m, pages_4k;
21949@@ -278,8 +279,13 @@ repeat:
21950 pfn = start_pfn;
21951 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21952 pgd = pgd_base + pgd_idx;
21953- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21954- pmd = one_md_table_init(pgd);
21955+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21956+ pud = pud_offset(pgd, 0);
21957+ pmd = pmd_offset(pud, 0);
21958+
21959+#ifdef CONFIG_X86_PAE
21960+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21961+#endif
21962
21963 if (pfn >= end_pfn)
21964 continue;
21965@@ -291,14 +297,13 @@ repeat:
21966 #endif
21967 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21968 pmd++, pmd_idx++) {
21969- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21970+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21971
21972 /*
21973 * Map with big pages if possible, otherwise
21974 * create normal page tables:
21975 */
21976 if (use_pse) {
21977- unsigned int addr2;
21978 pgprot_t prot = PAGE_KERNEL_LARGE;
21979 /*
21980 * first pass will use the same initial
21981@@ -308,11 +313,7 @@ repeat:
21982 __pgprot(PTE_IDENT_ATTR |
21983 _PAGE_PSE);
21984
21985- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21986- PAGE_OFFSET + PAGE_SIZE-1;
21987-
21988- if (is_kernel_text(addr) ||
21989- is_kernel_text(addr2))
21990+ if (is_kernel_text(address, address + PMD_SIZE))
21991 prot = PAGE_KERNEL_LARGE_EXEC;
21992
21993 pages_2m++;
21994@@ -329,7 +330,7 @@ repeat:
21995 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21996 pte += pte_ofs;
21997 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21998- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21999+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22000 pgprot_t prot = PAGE_KERNEL;
22001 /*
22002 * first pass will use the same initial
22003@@ -337,7 +338,7 @@ repeat:
22004 */
22005 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22006
22007- if (is_kernel_text(addr))
22008+ if (is_kernel_text(address, address + PAGE_SIZE))
22009 prot = PAGE_KERNEL_EXEC;
22010
22011 pages_4k++;
22012@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
22013
22014 pud = pud_offset(pgd, va);
22015 pmd = pmd_offset(pud, va);
22016- if (!pmd_present(*pmd))
22017+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
22018 break;
22019
22020 pte = pte_offset_kernel(pmd, va);
22021@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
22022
22023 static void __init pagetable_init(void)
22024 {
22025- pgd_t *pgd_base = swapper_pg_dir;
22026-
22027- permanent_kmaps_init(pgd_base);
22028+ permanent_kmaps_init(swapper_pg_dir);
22029 }
22030
22031 #ifdef CONFIG_ACPI_SLEEP
22032@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
22033 * ACPI suspend needs this for resume, because things like the intel-agp
22034 * driver might have split up a kernel 4MB mapping.
22035 */
22036-char swsusp_pg_dir[PAGE_SIZE]
22037+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
22038 __attribute__ ((aligned(PAGE_SIZE)));
22039
22040 static inline void save_pg_dir(void)
22041 {
22042- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
22043+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
22044 }
22045 #else /* !CONFIG_ACPI_SLEEP */
22046 static inline void save_pg_dir(void)
22047@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
22048 flush_tlb_all();
22049 }
22050
22051-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22052+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22053 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22054
22055 /* user-defined highmem size */
22056@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
22057 * Initialize the boot-time allocator (with low memory only):
22058 */
22059 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
22060- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
22061+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
22062 PAGE_SIZE);
22063 if (bootmap == -1L)
22064 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
22065@@ -864,6 +863,12 @@ void __init mem_init(void)
22066
22067 pci_iommu_alloc();
22068
22069+#ifdef CONFIG_PAX_PER_CPU_PGD
22070+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22071+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22072+ KERNEL_PGD_PTRS);
22073+#endif
22074+
22075 #ifdef CONFIG_FLATMEM
22076 BUG_ON(!mem_map);
22077 #endif
22078@@ -881,7 +886,7 @@ void __init mem_init(void)
22079 set_highmem_pages_init();
22080
22081 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22082- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22083+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22084 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22085
22086 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22087@@ -923,10 +928,10 @@ void __init mem_init(void)
22088 ((unsigned long)&__init_end -
22089 (unsigned long)&__init_begin) >> 10,
22090
22091- (unsigned long)&_etext, (unsigned long)&_edata,
22092- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22093+ (unsigned long)&_sdata, (unsigned long)&_edata,
22094+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22095
22096- (unsigned long)&_text, (unsigned long)&_etext,
22097+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22098 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22099
22100 /*
22101@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
22102 if (!kernel_set_to_readonly)
22103 return;
22104
22105+ start = ktla_ktva(start);
22106 pr_debug("Set kernel text: %lx - %lx for read write\n",
22107 start, start+size);
22108
22109@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
22110 if (!kernel_set_to_readonly)
22111 return;
22112
22113+ start = ktla_ktva(start);
22114 pr_debug("Set kernel text: %lx - %lx for read only\n",
22115 start, start+size);
22116
22117@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
22118 unsigned long start = PFN_ALIGN(_text);
22119 unsigned long size = PFN_ALIGN(_etext) - start;
22120
22121+ start = ktla_ktva(start);
22122 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22123 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22124 size >> 10);
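
The ktla_ktva()/ktva_ktla() conversions used above translate between the kernel text's linear-mapping address and the address it is actually executed at; under KERNEXEC the two differ by a constant, otherwise they are identical. The macros are defined elsewhere in the patch -- the sketch below is only the assumed shape, and both the offset symbol and its sign are assumptions:

#ifdef CONFIG_PAX_KERNEXEC
#define sketch_ktla_ktva(addr)  ((addr) + __KERNEL_TEXT_OFFSET)
#define sketch_ktva_ktla(addr)  ((addr) - __KERNEL_TEXT_OFFSET)
#else
#define sketch_ktla_ktva(addr)  (addr)
#define sketch_ktva_ktla(addr)  (addr)
#endif
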
22125diff -urNp linux-2.6.32.45/arch/x86/mm/init_64.c linux-2.6.32.45/arch/x86/mm/init_64.c
22126--- linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
22127+++ linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
22128@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
22129 pmd = fill_pmd(pud, vaddr);
22130 pte = fill_pte(pmd, vaddr);
22131
22132+ pax_open_kernel();
22133 set_pte(pte, new_pte);
22134+ pax_close_kernel();
22135
22136 /*
22137 * It's enough to flush this one mapping.
22138@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
22139 pgd = pgd_offset_k((unsigned long)__va(phys));
22140 if (pgd_none(*pgd)) {
22141 pud = (pud_t *) spp_getpage();
22142- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22143- _PAGE_USER));
22144+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22145 }
22146 pud = pud_offset(pgd, (unsigned long)__va(phys));
22147 if (pud_none(*pud)) {
22148 pmd = (pmd_t *) spp_getpage();
22149- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22150- _PAGE_USER));
22151+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22152 }
22153 pmd = pmd_offset(pud, phys);
22154 BUG_ON(!pmd_none(*pmd));
22155@@ -675,6 +675,12 @@ void __init mem_init(void)
22156
22157 pci_iommu_alloc();
22158
22159+#ifdef CONFIG_PAX_PER_CPU_PGD
22160+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22161+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22162+ KERNEL_PGD_PTRS);
22163+#endif
22164+
22165 /* clear_bss() already clear the empty_zero_page */
22166
22167 reservedpages = 0;
22168@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
22169 static struct vm_area_struct gate_vma = {
22170 .vm_start = VSYSCALL_START,
22171 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22172- .vm_page_prot = PAGE_READONLY_EXEC,
22173- .vm_flags = VM_READ | VM_EXEC
22174+ .vm_page_prot = PAGE_READONLY,
22175+ .vm_flags = VM_READ
22176 };
22177
22178 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
22179@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
22180
22181 const char *arch_vma_name(struct vm_area_struct *vma)
22182 {
22183- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22184+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22185 return "[vdso]";
22186 if (vma == &gate_vma)
22187 return "[vsyscall]";
22188diff -urNp linux-2.6.32.45/arch/x86/mm/init.c linux-2.6.32.45/arch/x86/mm/init.c
22189--- linux-2.6.32.45/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
22190+++ linux-2.6.32.45/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
22191@@ -69,11 +69,7 @@ static void __init find_early_table_spac
22192 * cause a hotspot and fill up ZONE_DMA. The page tables
22193 * need roughly 0.5KB per GB.
22194 */
22195-#ifdef CONFIG_X86_32
22196- start = 0x7000;
22197-#else
22198- start = 0x8000;
22199-#endif
22200+ start = 0x100000;
22201 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
22202 tables, PAGE_SIZE);
22203 if (e820_table_start == -1UL)
22204@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
22205 #endif
22206
22207 set_nx();
22208- if (nx_enabled)
22209+ if (nx_enabled && cpu_has_nx)
22210 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
22211
22212 /* Enable PSE if available */
22213@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
22214 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
22215 * mmio resources as well as potential bios/acpi data regions.
22216 */
22217+
22218 int devmem_is_allowed(unsigned long pagenr)
22219 {
22220+#ifdef CONFIG_GRKERNSEC_KMEM
22221+ /* allow BDA */
22222+ if (!pagenr)
22223+ return 1;
22224+ /* allow EBDA */
22225+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22226+ return 1;
22227+ /* allow ISA/video mem */
22228+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22229+ return 1;
22230+ /* throw out everything else below 1MB */
22231+ if (pagenr <= 256)
22232+ return 0;
22233+#else
22234 if (pagenr <= 256)
22235 return 1;
22236+#endif
22237+
22238 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22239 return 0;
22240 if (!page_is_ram(pagenr))
22241@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
22242
22243 void free_initmem(void)
22244 {
22245+
22246+#ifdef CONFIG_PAX_KERNEXEC
22247+#ifdef CONFIG_X86_32
22248+ /* PaX: limit KERNEL_CS to actual size */
22249+ unsigned long addr, limit;
22250+ struct desc_struct d;
22251+ int cpu;
22252+
22253+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22254+ limit = (limit - 1UL) >> PAGE_SHIFT;
22255+
22256+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22257+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22258+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22259+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22260+ }
22261+
22262+ /* PaX: make KERNEL_CS read-only */
22263+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22264+ if (!paravirt_enabled())
22265+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22266+/*
22267+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22268+ pgd = pgd_offset_k(addr);
22269+ pud = pud_offset(pgd, addr);
22270+ pmd = pmd_offset(pud, addr);
22271+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22272+ }
22273+*/
22274+#ifdef CONFIG_X86_PAE
22275+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22276+/*
22277+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22278+ pgd = pgd_offset_k(addr);
22279+ pud = pud_offset(pgd, addr);
22280+ pmd = pmd_offset(pud, addr);
22281+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22282+ }
22283+*/
22284+#endif
22285+
22286+#ifdef CONFIG_MODULES
22287+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22288+#endif
22289+
22290+#else
22291+ pgd_t *pgd;
22292+ pud_t *pud;
22293+ pmd_t *pmd;
22294+ unsigned long addr, end;
22295+
22296+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22297+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22298+ pgd = pgd_offset_k(addr);
22299+ pud = pud_offset(pgd, addr);
22300+ pmd = pmd_offset(pud, addr);
22301+ if (!pmd_present(*pmd))
22302+ continue;
22303+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22304+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22305+ else
22306+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22307+ }
22308+
22309+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22310+ end = addr + KERNEL_IMAGE_SIZE;
22311+ for (; addr < end; addr += PMD_SIZE) {
22312+ pgd = pgd_offset_k(addr);
22313+ pud = pud_offset(pgd, addr);
22314+ pmd = pmd_offset(pud, addr);
22315+ if (!pmd_present(*pmd))
22316+ continue;
22317+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22318+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22319+ }
22320+#endif
22321+
22322+ flush_tlb_all();
22323+#endif
22324+
22325 free_init_pages("unused kernel memory",
22326 (unsigned long)(&__init_begin),
22327 (unsigned long)(&__init_end));
22328diff -urNp linux-2.6.32.45/arch/x86/mm/iomap_32.c linux-2.6.32.45/arch/x86/mm/iomap_32.c
22329--- linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
22330+++ linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
22331@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
22332 debug_kmap_atomic(type);
22333 idx = type + KM_TYPE_NR * smp_processor_id();
22334 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22335+
22336+ pax_open_kernel();
22337 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22338+ pax_close_kernel();
22339+
22340 arch_flush_lazy_mmu_mode();
22341
22342 return (void *)vaddr;
22343diff -urNp linux-2.6.32.45/arch/x86/mm/ioremap.c linux-2.6.32.45/arch/x86/mm/ioremap.c
22344--- linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
22345+++ linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
22346@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
22347 * Second special case: Some BIOSen report the PC BIOS
22348 * area (640->1Mb) as ram even though it is not.
22349 */
22350- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
22351- pagenr < (BIOS_END >> PAGE_SHIFT))
22352+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
22353+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22354 return 0;
22355
22356 for (i = 0; i < e820.nr_map; i++) {
22357@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
22358 /*
22359 * Don't allow anybody to remap normal RAM that we're using..
22360 */
22361- for (pfn = phys_addr >> PAGE_SHIFT;
22362- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
22363- pfn++) {
22364-
22365+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
22366 int is_ram = page_is_ram(pfn);
22367
22368- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22369+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22370 return NULL;
22371 WARN_ON_ONCE(is_ram);
22372 }
22373@@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
22374 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22375
22376 static __initdata int after_paging_init;
22377-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22378+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22379
22380 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22381 {
22382@@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
22383 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22384
22385 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22386- memset(bm_pte, 0, sizeof(bm_pte));
22387- pmd_populate_kernel(&init_mm, pmd, bm_pte);
22388+ pmd_populate_user(&init_mm, pmd, bm_pte);
22389
22390 /*
22391 * The boot-ioremap range spans multiple pmds, for which
22392diff -urNp linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c
22393--- linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
22394+++ linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
22395@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
22396 * memory (e.g. tracked pages)? For now, we need this to avoid
22397 * invoking kmemcheck for PnP BIOS calls.
22398 */
22399- if (regs->flags & X86_VM_MASK)
22400+ if (v8086_mode(regs))
22401 return false;
22402- if (regs->cs != __KERNEL_CS)
22403+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22404 return false;
22405
22406 pte = kmemcheck_pte_lookup(address);
22407diff -urNp linux-2.6.32.45/arch/x86/mm/mmap.c linux-2.6.32.45/arch/x86/mm/mmap.c
22408--- linux-2.6.32.45/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
22409+++ linux-2.6.32.45/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
22410@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
22411 * Leave an at least ~128 MB hole with possible stack randomization.
22412 */
22413 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22414-#define MAX_GAP (TASK_SIZE/6*5)
22415+#define MAX_GAP (pax_task_size/6*5)
22416
22417 /*
22418 * True on X86_32 or when emulating IA32 on X86_64
22419@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22420 return rnd << PAGE_SHIFT;
22421 }
22422
22423-static unsigned long mmap_base(void)
22424+static unsigned long mmap_base(struct mm_struct *mm)
22425 {
22426 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
22427+ unsigned long pax_task_size = TASK_SIZE;
22428+
22429+#ifdef CONFIG_PAX_SEGMEXEC
22430+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22431+ pax_task_size = SEGMEXEC_TASK_SIZE;
22432+#endif
22433
22434 if (gap < MIN_GAP)
22435 gap = MIN_GAP;
22436 else if (gap > MAX_GAP)
22437 gap = MAX_GAP;
22438
22439- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22440+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22441 }
22442
22443 /*
22444 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22445 * does, but not when emulating X86_32
22446 */
22447-static unsigned long mmap_legacy_base(void)
22448+static unsigned long mmap_legacy_base(struct mm_struct *mm)
22449 {
22450- if (mmap_is_ia32())
22451+ if (mmap_is_ia32()) {
22452+
22453+#ifdef CONFIG_PAX_SEGMEXEC
22454+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22455+ return SEGMEXEC_TASK_UNMAPPED_BASE;
22456+ else
22457+#endif
22458+
22459 return TASK_UNMAPPED_BASE;
22460- else
22461+ } else
22462 return TASK_UNMAPPED_BASE + mmap_rnd();
22463 }
22464
22465@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22466 void arch_pick_mmap_layout(struct mm_struct *mm)
22467 {
22468 if (mmap_is_legacy()) {
22469- mm->mmap_base = mmap_legacy_base();
22470+ mm->mmap_base = mmap_legacy_base(mm);
22471+
22472+#ifdef CONFIG_PAX_RANDMMAP
22473+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22474+ mm->mmap_base += mm->delta_mmap;
22475+#endif
22476+
22477 mm->get_unmapped_area = arch_get_unmapped_area;
22478 mm->unmap_area = arch_unmap_area;
22479 } else {
22480- mm->mmap_base = mmap_base();
22481+ mm->mmap_base = mmap_base(mm);
22482+
22483+#ifdef CONFIG_PAX_RANDMMAP
22484+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22485+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22486+#endif
22487+
22488 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22489 mm->unmap_area = arch_unmap_area_topdown;
22490 }
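
The RANDMMAP branches above offset mmap_base by per-mm deltas (delta_mmap, delta_stack) that stay fixed for the lifetime of the mm. How the deltas are chosen is not part of this hunk; a rough sketch of the assumed scheme at exec time -- bit widths and the helper name are illustrative only:

static void sketch_randomize_layout(struct mm_struct *mm)
{
        if (!(mm->pax_flags & MF_PAX_RANDMMAP))
                return;

        /* page-aligned random offsets applied to mmap_base above */
        mm->delta_mmap  = (get_random_int() & ((1U << 16) - 1)) << PAGE_SHIFT;
        mm->delta_stack = (get_random_int() & ((1U << 8) - 1)) << PAGE_SHIFT;
}
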
22491diff -urNp linux-2.6.32.45/arch/x86/mm/mmio-mod.c linux-2.6.32.45/arch/x86/mm/mmio-mod.c
22492--- linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22493+++ linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22494@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22495 break;
22496 default:
22497 {
22498- unsigned char *ip = (unsigned char *)instptr;
22499+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22500 my_trace->opcode = MMIO_UNKNOWN_OP;
22501 my_trace->width = 0;
22502 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22503@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22504 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22505 void __iomem *addr)
22506 {
22507- static atomic_t next_id;
22508+ static atomic_unchecked_t next_id;
22509 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22510 /* These are page-unaligned. */
22511 struct mmiotrace_map map = {
22512@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22513 .private = trace
22514 },
22515 .phys = offset,
22516- .id = atomic_inc_return(&next_id)
22517+ .id = atomic_inc_return_unchecked(&next_id)
22518 };
22519 map.map_id = trace->id;
22520
22521diff -urNp linux-2.6.32.45/arch/x86/mm/numa_32.c linux-2.6.32.45/arch/x86/mm/numa_32.c
22522--- linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22523+++ linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22524@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22525 }
22526 #endif
22527
22528-extern unsigned long find_max_low_pfn(void);
22529 extern unsigned long highend_pfn, highstart_pfn;
22530
22531 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22532diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr.c linux-2.6.32.45/arch/x86/mm/pageattr.c
22533--- linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22534+++ linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22535@@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22536 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22537 */
22538 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22539- pgprot_val(forbidden) |= _PAGE_NX;
22540+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22541
22542 /*
22543 * The kernel text needs to be executable for obvious reasons
22544 * Does not cover __inittext since that is gone later on. On
22545 * 64bit we do not enforce !NX on the low mapping
22546 */
22547- if (within(address, (unsigned long)_text, (unsigned long)_etext))
22548- pgprot_val(forbidden) |= _PAGE_NX;
22549+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22550+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22551
22552+#ifdef CONFIG_DEBUG_RODATA
22553 /*
22554 * The .rodata section needs to be read-only. Using the pfn
22555 * catches all aliases.
22556@@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22557 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22558 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22559 pgprot_val(forbidden) |= _PAGE_RW;
22560+#endif
22561+
22562+#ifdef CONFIG_PAX_KERNEXEC
22563+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22564+ pgprot_val(forbidden) |= _PAGE_RW;
22565+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22566+ }
22567+#endif
22568
22569 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22570
22571@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22572 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22573 {
22574 /* change init_mm */
22575+ pax_open_kernel();
22576 set_pte_atomic(kpte, pte);
22577+
22578 #ifdef CONFIG_X86_32
22579 if (!SHARED_KERNEL_PMD) {
22580+
22581+#ifdef CONFIG_PAX_PER_CPU_PGD
22582+ unsigned long cpu;
22583+#else
22584 struct page *page;
22585+#endif
22586
22587+#ifdef CONFIG_PAX_PER_CPU_PGD
22588+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22589+ pgd_t *pgd = get_cpu_pgd(cpu);
22590+#else
22591 list_for_each_entry(page, &pgd_list, lru) {
22592- pgd_t *pgd;
22593+ pgd_t *pgd = (pgd_t *)page_address(page);
22594+#endif
22595+
22596 pud_t *pud;
22597 pmd_t *pmd;
22598
22599- pgd = (pgd_t *)page_address(page) + pgd_index(address);
22600+ pgd += pgd_index(address);
22601 pud = pud_offset(pgd, address);
22602 pmd = pmd_offset(pud, address);
22603 set_pte_atomic((pte_t *)pmd, pte);
22604 }
22605 }
22606 #endif
22607+ pax_close_kernel();
22608 }
22609
22610 static int
22611diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr-test.c linux-2.6.32.45/arch/x86/mm/pageattr-test.c
22612--- linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22613+++ linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22614@@ -36,7 +36,7 @@ enum {
22615
22616 static int pte_testbit(pte_t pte)
22617 {
22618- return pte_flags(pte) & _PAGE_UNUSED1;
22619+ return pte_flags(pte) & _PAGE_CPA_TEST;
22620 }
22621
22622 struct split_state {
22623diff -urNp linux-2.6.32.45/arch/x86/mm/pat.c linux-2.6.32.45/arch/x86/mm/pat.c
22624--- linux-2.6.32.45/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22625+++ linux-2.6.32.45/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22626@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22627
22628 conflict:
22629 printk(KERN_INFO "%s:%d conflicting memory types "
22630- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22631+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22632 new->end, cattr_name(new->type), cattr_name(entry->type));
22633 return -EBUSY;
22634 }
22635@@ -559,7 +559,7 @@ unlock_ret:
22636
22637 if (err) {
22638 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22639- current->comm, current->pid, start, end);
22640+ current->comm, task_pid_nr(current), start, end);
22641 }
22642
22643 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22644@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22645 while (cursor < to) {
22646 if (!devmem_is_allowed(pfn)) {
22647 printk(KERN_INFO
22648- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22649- current->comm, from, to);
22650+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22651+ current->comm, from, to, cursor);
22652 return 0;
22653 }
22654 cursor += PAGE_SIZE;
22655@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22656 printk(KERN_INFO
22657 "%s:%d ioremap_change_attr failed %s "
22658 "for %Lx-%Lx\n",
22659- current->comm, current->pid,
22660+ current->comm, task_pid_nr(current),
22661 cattr_name(flags),
22662 base, (unsigned long long)(base + size));
22663 return -EINVAL;
22664@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22665 free_memtype(paddr, paddr + size);
22666 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22667 " for %Lx-%Lx, got %s\n",
22668- current->comm, current->pid,
22669+ current->comm, task_pid_nr(current),
22670 cattr_name(want_flags),
22671 (unsigned long long)paddr,
22672 (unsigned long long)(paddr + size),
22673diff -urNp linux-2.6.32.45/arch/x86/mm/pf_in.c linux-2.6.32.45/arch/x86/mm/pf_in.c
22674--- linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22675+++ linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22676@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22677 int i;
22678 enum reason_type rv = OTHERS;
22679
22680- p = (unsigned char *)ins_addr;
22681+ p = (unsigned char *)ktla_ktva(ins_addr);
22682 p += skip_prefix(p, &prf);
22683 p += get_opcode(p, &opcode);
22684
22685@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22686 struct prefix_bits prf;
22687 int i;
22688
22689- p = (unsigned char *)ins_addr;
22690+ p = (unsigned char *)ktla_ktva(ins_addr);
22691 p += skip_prefix(p, &prf);
22692 p += get_opcode(p, &opcode);
22693
22694@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22695 struct prefix_bits prf;
22696 int i;
22697
22698- p = (unsigned char *)ins_addr;
22699+ p = (unsigned char *)ktla_ktva(ins_addr);
22700 p += skip_prefix(p, &prf);
22701 p += get_opcode(p, &opcode);
22702
22703@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22704 int i;
22705 unsigned long rv;
22706
22707- p = (unsigned char *)ins_addr;
22708+ p = (unsigned char *)ktla_ktva(ins_addr);
22709 p += skip_prefix(p, &prf);
22710 p += get_opcode(p, &opcode);
22711 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22712@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22713 int i;
22714 unsigned long rv;
22715
22716- p = (unsigned char *)ins_addr;
22717+ p = (unsigned char *)ktla_ktva(ins_addr);
22718 p += skip_prefix(p, &prf);
22719 p += get_opcode(p, &opcode);
22720 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22721diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable_32.c linux-2.6.32.45/arch/x86/mm/pgtable_32.c
22722--- linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22723+++ linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22724@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22725 return;
22726 }
22727 pte = pte_offset_kernel(pmd, vaddr);
22728+
22729+ pax_open_kernel();
22730 if (pte_val(pteval))
22731 set_pte_at(&init_mm, vaddr, pte, pteval);
22732 else
22733 pte_clear(&init_mm, vaddr, pte);
22734+ pax_close_kernel();
22735
22736 /*
22737 * It's enough to flush this one mapping.
22738diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable.c linux-2.6.32.45/arch/x86/mm/pgtable.c
22739--- linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22740+++ linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22741@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22742 list_del(&page->lru);
22743 }
22744
22745-#define UNSHARED_PTRS_PER_PGD \
22746- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22747+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22748+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22749
22750+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22751+{
22752+ while (count--)
22753+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22754+}
22755+#endif
22756+
22757+#ifdef CONFIG_PAX_PER_CPU_PGD
22758+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22759+{
22760+ while (count--)
22761+
22762+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22763+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22764+#else
22765+ *dst++ = *src++;
22766+#endif
22767+
22768+}
22769+#endif
22770+
22771+#ifdef CONFIG_X86_64
22772+#define pxd_t pud_t
22773+#define pyd_t pgd_t
22774+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22775+#define pxd_free(mm, pud) pud_free((mm), (pud))
22776+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22777+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22778+#define PYD_SIZE PGDIR_SIZE
22779+#else
22780+#define pxd_t pmd_t
22781+#define pyd_t pud_t
22782+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22783+#define pxd_free(mm, pud) pmd_free((mm), (pud))
22784+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22785+#define pyd_offset(mm ,address) pud_offset((mm), (address))
22786+#define PYD_SIZE PUD_SIZE
22787+#endif
22788+
22789+#ifdef CONFIG_PAX_PER_CPU_PGD
22790+static inline void pgd_ctor(pgd_t *pgd) {}
22791+static inline void pgd_dtor(pgd_t *pgd) {}
22792+#else
22793 static void pgd_ctor(pgd_t *pgd)
22794 {
22795 /* If the pgd points to a shared pagetable level (either the
22796@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22797 pgd_list_del(pgd);
22798 spin_unlock_irqrestore(&pgd_lock, flags);
22799 }
22800+#endif
22801
22802 /*
22803 * List of all pgd's needed for non-PAE so it can invalidate entries
22804@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22805 * -- wli
22806 */
22807
22808-#ifdef CONFIG_X86_PAE
22809+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22810 /*
22811 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22812 * updating the top-level pagetable entries to guarantee the
22813@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22814 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22815 * and initialize the kernel pmds here.
22816 */
22817-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22818+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22819
22820 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22821 {
22822@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22823 */
22824 flush_tlb_mm(mm);
22825 }
22826+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22827+#define PREALLOCATED_PXDS USER_PGD_PTRS
22828 #else /* !CONFIG_X86_PAE */
22829
22830 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22831-#define PREALLOCATED_PMDS 0
22832+#define PREALLOCATED_PXDS 0
22833
22834 #endif /* CONFIG_X86_PAE */
22835
22836-static void free_pmds(pmd_t *pmds[])
22837+static void free_pxds(pxd_t *pxds[])
22838 {
22839 int i;
22840
22841- for(i = 0; i < PREALLOCATED_PMDS; i++)
22842- if (pmds[i])
22843- free_page((unsigned long)pmds[i]);
22844+ for(i = 0; i < PREALLOCATED_PXDS; i++)
22845+ if (pxds[i])
22846+ free_page((unsigned long)pxds[i]);
22847 }
22848
22849-static int preallocate_pmds(pmd_t *pmds[])
22850+static int preallocate_pxds(pxd_t *pxds[])
22851 {
22852 int i;
22853 bool failed = false;
22854
22855- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22856- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22857- if (pmd == NULL)
22858+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22859+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22860+ if (pxd == NULL)
22861 failed = true;
22862- pmds[i] = pmd;
22863+ pxds[i] = pxd;
22864 }
22865
22866 if (failed) {
22867- free_pmds(pmds);
22868+ free_pxds(pxds);
22869 return -ENOMEM;
22870 }
22871
22872@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22873 * preallocate which never got a corresponding vma will need to be
22874 * freed manually.
22875 */
22876-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22877+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22878 {
22879 int i;
22880
22881- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22882+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22883 pgd_t pgd = pgdp[i];
22884
22885 if (pgd_val(pgd) != 0) {
22886- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22887+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22888
22889- pgdp[i] = native_make_pgd(0);
22890+ set_pgd(pgdp + i, native_make_pgd(0));
22891
22892- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22893- pmd_free(mm, pmd);
22894+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22895+ pxd_free(mm, pxd);
22896 }
22897 }
22898 }
22899
22900-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22901+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22902 {
22903- pud_t *pud;
22904+ pyd_t *pyd;
22905 unsigned long addr;
22906 int i;
22907
22908- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22909+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22910 return;
22911
22912- pud = pud_offset(pgd, 0);
22913+#ifdef CONFIG_X86_64
22914+ pyd = pyd_offset(mm, 0L);
22915+#else
22916+ pyd = pyd_offset(pgd, 0L);
22917+#endif
22918
22919- for (addr = i = 0; i < PREALLOCATED_PMDS;
22920- i++, pud++, addr += PUD_SIZE) {
22921- pmd_t *pmd = pmds[i];
22922+ for (addr = i = 0; i < PREALLOCATED_PXDS;
22923+ i++, pyd++, addr += PYD_SIZE) {
22924+ pxd_t *pxd = pxds[i];
22925
22926 if (i >= KERNEL_PGD_BOUNDARY)
22927- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22928- sizeof(pmd_t) * PTRS_PER_PMD);
22929+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22930+ sizeof(pxd_t) * PTRS_PER_PMD);
22931
22932- pud_populate(mm, pud, pmd);
22933+ pyd_populate(mm, pyd, pxd);
22934 }
22935 }
22936
22937 pgd_t *pgd_alloc(struct mm_struct *mm)
22938 {
22939 pgd_t *pgd;
22940- pmd_t *pmds[PREALLOCATED_PMDS];
22941+ pxd_t *pxds[PREALLOCATED_PXDS];
22942+
22943 unsigned long flags;
22944
22945 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22946@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22947
22948 mm->pgd = pgd;
22949
22950- if (preallocate_pmds(pmds) != 0)
22951+ if (preallocate_pxds(pxds) != 0)
22952 goto out_free_pgd;
22953
22954 if (paravirt_pgd_alloc(mm) != 0)
22955- goto out_free_pmds;
22956+ goto out_free_pxds;
22957
22958 /*
22959 * Make sure that pre-populating the pmds is atomic with
22960@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22961 spin_lock_irqsave(&pgd_lock, flags);
22962
22963 pgd_ctor(pgd);
22964- pgd_prepopulate_pmd(mm, pgd, pmds);
22965+ pgd_prepopulate_pxd(mm, pgd, pxds);
22966
22967 spin_unlock_irqrestore(&pgd_lock, flags);
22968
22969 return pgd;
22970
22971-out_free_pmds:
22972- free_pmds(pmds);
22973+out_free_pxds:
22974+ free_pxds(pxds);
22975 out_free_pgd:
22976 free_page((unsigned long)pgd);
22977 out:
22978@@ -287,7 +338,7 @@ out:
22979
22980 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22981 {
22982- pgd_mop_up_pmds(mm, pgd);
22983+ pgd_mop_up_pxds(mm, pgd);
22984 pgd_dtor(pgd);
22985 paravirt_pgd_free(mm, pgd);
22986 free_page((unsigned long)pgd);
22987diff -urNp linux-2.6.32.45/arch/x86/mm/setup_nx.c linux-2.6.32.45/arch/x86/mm/setup_nx.c
22988--- linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22989+++ linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22990@@ -4,11 +4,10 @@
22991
22992 #include <asm/pgtable.h>
22993
22994+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22995 int nx_enabled;
22996
22997-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22998-static int disable_nx __cpuinitdata;
22999-
23000+#ifndef CONFIG_PAX_PAGEEXEC
23001 /*
23002 * noexec = on|off
23003 *
23004@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
23005 if (!str)
23006 return -EINVAL;
23007 if (!strncmp(str, "on", 2)) {
23008- __supported_pte_mask |= _PAGE_NX;
23009- disable_nx = 0;
23010+ nx_enabled = 1;
23011 } else if (!strncmp(str, "off", 3)) {
23012- disable_nx = 1;
23013- __supported_pte_mask &= ~_PAGE_NX;
23014+ nx_enabled = 0;
23015 }
23016 return 0;
23017 }
23018 early_param("noexec", noexec_setup);
23019 #endif
23020+#endif
23021
23022 #ifdef CONFIG_X86_PAE
23023 void __init set_nx(void)
23024 {
23025- unsigned int v[4], l, h;
23026+ if (!nx_enabled && cpu_has_nx) {
23027+ unsigned l, h;
23028
23029- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
23030- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
23031-
23032- if ((v[3] & (1 << 20)) && !disable_nx) {
23033- rdmsr(MSR_EFER, l, h);
23034- l |= EFER_NX;
23035- wrmsr(MSR_EFER, l, h);
23036- nx_enabled = 1;
23037- __supported_pte_mask |= _PAGE_NX;
23038- }
23039+ __supported_pte_mask &= ~_PAGE_NX;
23040+ rdmsr(MSR_EFER, l, h);
23041+ l &= ~EFER_NX;
23042+ wrmsr(MSR_EFER, l, h);
23043 }
23044 }
23045 #else
23046@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
23047 unsigned long efer;
23048
23049 rdmsrl(MSR_EFER, efer);
23050- if (!(efer & EFER_NX) || disable_nx)
23051+ if (!(efer & EFER_NX) || !nx_enabled)
23052 __supported_pte_mask &= ~_PAGE_NX;
23053 }
23054 #endif
23055diff -urNp linux-2.6.32.45/arch/x86/mm/tlb.c linux-2.6.32.45/arch/x86/mm/tlb.c
23056--- linux-2.6.32.45/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
23057+++ linux-2.6.32.45/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
23058@@ -61,7 +61,11 @@ void leave_mm(int cpu)
23059 BUG();
23060 cpumask_clear_cpu(cpu,
23061 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23062+
23063+#ifndef CONFIG_PAX_PER_CPU_PGD
23064 load_cr3(swapper_pg_dir);
23065+#endif
23066+
23067 }
23068 EXPORT_SYMBOL_GPL(leave_mm);
23069
23070diff -urNp linux-2.6.32.45/arch/x86/oprofile/backtrace.c linux-2.6.32.45/arch/x86/oprofile/backtrace.c
23071--- linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
23072+++ linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
23073@@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
23074 struct frame_head bufhead[2];
23075
23076 /* Also check accessibility of one struct frame_head beyond */
23077- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
23078+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
23079 return NULL;
23080 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
23081 return NULL;
23082@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
23083 {
23084 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
23085
23086- if (!user_mode_vm(regs)) {
23087+ if (!user_mode(regs)) {
23088 unsigned long stack = kernel_stack_pointer(regs);
23089 if (depth)
23090 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23091diff -urNp linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c
23092--- linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
23093+++ linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
23094@@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
23095 #endif
23096 }
23097
23098-static int inline addr_increment(void)
23099+static inline int addr_increment(void)
23100 {
23101 #ifdef CONFIG_SMP
23102 return smp_num_siblings == 2 ? 2 : 1;
23103diff -urNp linux-2.6.32.45/arch/x86/pci/common.c linux-2.6.32.45/arch/x86/pci/common.c
23104--- linux-2.6.32.45/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
23105+++ linux-2.6.32.45/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
23106@@ -31,8 +31,8 @@ int noioapicreroute = 1;
23107 int pcibios_last_bus = -1;
23108 unsigned long pirq_table_addr;
23109 struct pci_bus *pci_root_bus;
23110-struct pci_raw_ops *raw_pci_ops;
23111-struct pci_raw_ops *raw_pci_ext_ops;
23112+const struct pci_raw_ops *raw_pci_ops;
23113+const struct pci_raw_ops *raw_pci_ext_ops;
23114
23115 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
23116 int reg, int len, u32 *val)
23117diff -urNp linux-2.6.32.45/arch/x86/pci/direct.c linux-2.6.32.45/arch/x86/pci/direct.c
23118--- linux-2.6.32.45/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
23119+++ linux-2.6.32.45/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
23120@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
23121
23122 #undef PCI_CONF1_ADDRESS
23123
23124-struct pci_raw_ops pci_direct_conf1 = {
23125+const struct pci_raw_ops pci_direct_conf1 = {
23126 .read = pci_conf1_read,
23127 .write = pci_conf1_write,
23128 };
23129@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
23130
23131 #undef PCI_CONF2_ADDRESS
23132
23133-struct pci_raw_ops pci_direct_conf2 = {
23134+const struct pci_raw_ops pci_direct_conf2 = {
23135 .read = pci_conf2_read,
23136 .write = pci_conf2_write,
23137 };
23138@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
23139 * This should be close to trivial, but it isn't, because there are buggy
23140 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
23141 */
23142-static int __init pci_sanity_check(struct pci_raw_ops *o)
23143+static int __init pci_sanity_check(const struct pci_raw_ops *o)
23144 {
23145 u32 x = 0;
23146 int year, devfn;
23147diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_32.c linux-2.6.32.45/arch/x86/pci/mmconfig_32.c
23148--- linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
23149+++ linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
23150@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
23151 return 0;
23152 }
23153
23154-static struct pci_raw_ops pci_mmcfg = {
23155+static const struct pci_raw_ops pci_mmcfg = {
23156 .read = pci_mmcfg_read,
23157 .write = pci_mmcfg_write,
23158 };
23159diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_64.c linux-2.6.32.45/arch/x86/pci/mmconfig_64.c
23160--- linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
23161+++ linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
23162@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
23163 return 0;
23164 }
23165
23166-static struct pci_raw_ops pci_mmcfg = {
23167+static const struct pci_raw_ops pci_mmcfg = {
23168 .read = pci_mmcfg_read,
23169 .write = pci_mmcfg_write,
23170 };
23171diff -urNp linux-2.6.32.45/arch/x86/pci/numaq_32.c linux-2.6.32.45/arch/x86/pci/numaq_32.c
23172--- linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
23173+++ linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
23174@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
23175
23176 #undef PCI_CONF1_MQ_ADDRESS
23177
23178-static struct pci_raw_ops pci_direct_conf1_mq = {
23179+static const struct pci_raw_ops pci_direct_conf1_mq = {
23180 .read = pci_conf1_mq_read,
23181 .write = pci_conf1_mq_write
23182 };
23183diff -urNp linux-2.6.32.45/arch/x86/pci/olpc.c linux-2.6.32.45/arch/x86/pci/olpc.c
23184--- linux-2.6.32.45/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
23185+++ linux-2.6.32.45/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
23186@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
23187 return 0;
23188 }
23189
23190-static struct pci_raw_ops pci_olpc_conf = {
23191+static const struct pci_raw_ops pci_olpc_conf = {
23192 .read = pci_olpc_read,
23193 .write = pci_olpc_write,
23194 };
23195diff -urNp linux-2.6.32.45/arch/x86/pci/pcbios.c linux-2.6.32.45/arch/x86/pci/pcbios.c
23196--- linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
23197+++ linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
23198@@ -56,50 +56,93 @@ union bios32 {
23199 static struct {
23200 unsigned long address;
23201 unsigned short segment;
23202-} bios32_indirect = { 0, __KERNEL_CS };
23203+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23204
23205 /*
23206 * Returns the entry point for the given service, NULL on error
23207 */
23208
23209-static unsigned long bios32_service(unsigned long service)
23210+static unsigned long __devinit bios32_service(unsigned long service)
23211 {
23212 unsigned char return_code; /* %al */
23213 unsigned long address; /* %ebx */
23214 unsigned long length; /* %ecx */
23215 unsigned long entry; /* %edx */
23216 unsigned long flags;
23217+ struct desc_struct d, *gdt;
23218
23219 local_irq_save(flags);
23220- __asm__("lcall *(%%edi); cld"
23221+
23222+ gdt = get_cpu_gdt_table(smp_processor_id());
23223+
23224+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23225+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23226+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23227+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23228+
23229+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23230 : "=a" (return_code),
23231 "=b" (address),
23232 "=c" (length),
23233 "=d" (entry)
23234 : "0" (service),
23235 "1" (0),
23236- "D" (&bios32_indirect));
23237+ "D" (&bios32_indirect),
23238+ "r"(__PCIBIOS_DS)
23239+ : "memory");
23240+
23241+ pax_open_kernel();
23242+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23243+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23244+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23245+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23246+ pax_close_kernel();
23247+
23248 local_irq_restore(flags);
23249
23250 switch (return_code) {
23251- case 0:
23252- return address + entry;
23253- case 0x80: /* Not present */
23254- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23255- return 0;
23256- default: /* Shouldn't happen */
23257- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23258- service, return_code);
23259+ case 0: {
23260+ int cpu;
23261+ unsigned char flags;
23262+
23263+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23264+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23265+ printk(KERN_WARNING "bios32_service: not valid\n");
23266 return 0;
23267+ }
23268+ address = address + PAGE_OFFSET;
23269+ length += 16UL; /* some BIOSs underreport this... */
23270+ flags = 4;
23271+ if (length >= 64*1024*1024) {
23272+ length >>= PAGE_SHIFT;
23273+ flags |= 8;
23274+ }
23275+
23276+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23277+ gdt = get_cpu_gdt_table(cpu);
23278+ pack_descriptor(&d, address, length, 0x9b, flags);
23279+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23280+ pack_descriptor(&d, address, length, 0x93, flags);
23281+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23282+ }
23283+ return entry;
23284+ }
23285+ case 0x80: /* Not present */
23286+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23287+ return 0;
23288+ default: /* Shouldn't happen */
23289+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23290+ service, return_code);
23291+ return 0;
23292 }
23293 }
23294
23295 static struct {
23296 unsigned long address;
23297 unsigned short segment;
23298-} pci_indirect = { 0, __KERNEL_CS };
23299+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23300
23301-static int pci_bios_present;
23302+static int pci_bios_present __read_only;
23303
23304 static int __devinit check_pcibios(void)
23305 {
23306@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
23307 unsigned long flags, pcibios_entry;
23308
23309 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23310- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23311+ pci_indirect.address = pcibios_entry;
23312
23313 local_irq_save(flags);
23314- __asm__(
23315- "lcall *(%%edi); cld\n\t"
23316+ __asm__("movw %w6, %%ds\n\t"
23317+ "lcall *%%ss:(%%edi); cld\n\t"
23318+ "push %%ss\n\t"
23319+ "pop %%ds\n\t"
23320 "jc 1f\n\t"
23321 "xor %%ah, %%ah\n"
23322 "1:"
23323@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
23324 "=b" (ebx),
23325 "=c" (ecx)
23326 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23327- "D" (&pci_indirect)
23328+ "D" (&pci_indirect),
23329+ "r" (__PCIBIOS_DS)
23330 : "memory");
23331 local_irq_restore(flags);
23332
23333@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
23334
23335 switch (len) {
23336 case 1:
23337- __asm__("lcall *(%%esi); cld\n\t"
23338+ __asm__("movw %w6, %%ds\n\t"
23339+ "lcall *%%ss:(%%esi); cld\n\t"
23340+ "push %%ss\n\t"
23341+ "pop %%ds\n\t"
23342 "jc 1f\n\t"
23343 "xor %%ah, %%ah\n"
23344 "1:"
23345@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
23346 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23347 "b" (bx),
23348 "D" ((long)reg),
23349- "S" (&pci_indirect));
23350+ "S" (&pci_indirect),
23351+ "r" (__PCIBIOS_DS));
23352 /*
23353 * Zero-extend the result beyond 8 bits, do not trust the
23354 * BIOS having done it:
23355@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
23356 *value &= 0xff;
23357 break;
23358 case 2:
23359- __asm__("lcall *(%%esi); cld\n\t"
23360+ __asm__("movw %w6, %%ds\n\t"
23361+ "lcall *%%ss:(%%esi); cld\n\t"
23362+ "push %%ss\n\t"
23363+ "pop %%ds\n\t"
23364 "jc 1f\n\t"
23365 "xor %%ah, %%ah\n"
23366 "1:"
23367@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
23368 : "1" (PCIBIOS_READ_CONFIG_WORD),
23369 "b" (bx),
23370 "D" ((long)reg),
23371- "S" (&pci_indirect));
23372+ "S" (&pci_indirect),
23373+ "r" (__PCIBIOS_DS));
23374 /*
23375 * Zero-extend the result beyond 16 bits, do not trust the
23376 * BIOS having done it:
23377@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
23378 *value &= 0xffff;
23379 break;
23380 case 4:
23381- __asm__("lcall *(%%esi); cld\n\t"
23382+ __asm__("movw %w6, %%ds\n\t"
23383+ "lcall *%%ss:(%%esi); cld\n\t"
23384+ "push %%ss\n\t"
23385+ "pop %%ds\n\t"
23386 "jc 1f\n\t"
23387 "xor %%ah, %%ah\n"
23388 "1:"
23389@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
23390 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23391 "b" (bx),
23392 "D" ((long)reg),
23393- "S" (&pci_indirect));
23394+ "S" (&pci_indirect),
23395+ "r" (__PCIBIOS_DS));
23396 break;
23397 }
23398
23399@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
23400
23401 switch (len) {
23402 case 1:
23403- __asm__("lcall *(%%esi); cld\n\t"
23404+ __asm__("movw %w6, %%ds\n\t"
23405+ "lcall *%%ss:(%%esi); cld\n\t"
23406+ "push %%ss\n\t"
23407+ "pop %%ds\n\t"
23408 "jc 1f\n\t"
23409 "xor %%ah, %%ah\n"
23410 "1:"
23411@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
23412 "c" (value),
23413 "b" (bx),
23414 "D" ((long)reg),
23415- "S" (&pci_indirect));
23416+ "S" (&pci_indirect),
23417+ "r" (__PCIBIOS_DS));
23418 break;
23419 case 2:
23420- __asm__("lcall *(%%esi); cld\n\t"
23421+ __asm__("movw %w6, %%ds\n\t"
23422+ "lcall *%%ss:(%%esi); cld\n\t"
23423+ "push %%ss\n\t"
23424+ "pop %%ds\n\t"
23425 "jc 1f\n\t"
23426 "xor %%ah, %%ah\n"
23427 "1:"
23428@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
23429 "c" (value),
23430 "b" (bx),
23431 "D" ((long)reg),
23432- "S" (&pci_indirect));
23433+ "S" (&pci_indirect),
23434+ "r" (__PCIBIOS_DS));
23435 break;
23436 case 4:
23437- __asm__("lcall *(%%esi); cld\n\t"
23438+ __asm__("movw %w6, %%ds\n\t"
23439+ "lcall *%%ss:(%%esi); cld\n\t"
23440+ "push %%ss\n\t"
23441+ "pop %%ds\n\t"
23442 "jc 1f\n\t"
23443 "xor %%ah, %%ah\n"
23444 "1:"
23445@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
23446 "c" (value),
23447 "b" (bx),
23448 "D" ((long)reg),
23449- "S" (&pci_indirect));
23450+ "S" (&pci_indirect),
23451+ "r" (__PCIBIOS_DS));
23452 break;
23453 }
23454
23455@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
23456 * Function table for BIOS32 access
23457 */
23458
23459-static struct pci_raw_ops pci_bios_access = {
23460+static const struct pci_raw_ops pci_bios_access = {
23461 .read = pci_bios_read,
23462 .write = pci_bios_write
23463 };
23464@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23465 * Try to find PCI BIOS.
23466 */
23467
23468-static struct pci_raw_ops * __devinit pci_find_bios(void)
23469+static const struct pci_raw_ops * __devinit pci_find_bios(void)
23470 {
23471 union bios32 *check;
23472 unsigned char sum;
23473@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23474
23475 DBG("PCI: Fetching IRQ routing table... ");
23476 __asm__("push %%es\n\t"
23477+ "movw %w8, %%ds\n\t"
23478 "push %%ds\n\t"
23479 "pop %%es\n\t"
23480- "lcall *(%%esi); cld\n\t"
23481+ "lcall *%%ss:(%%esi); cld\n\t"
23482 "pop %%es\n\t"
23483+ "push %%ss\n\t"
23484+ "pop %%ds\n"
23485 "jc 1f\n\t"
23486 "xor %%ah, %%ah\n"
23487 "1:"
23488@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23489 "1" (0),
23490 "D" ((long) &opt),
23491 "S" (&pci_indirect),
23492- "m" (opt)
23493+ "m" (opt),
23494+ "r" (__PCIBIOS_DS)
23495 : "memory");
23496 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23497 if (ret & 0xff00)
23498@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23499 {
23500 int ret;
23501
23502- __asm__("lcall *(%%esi); cld\n\t"
23503+ __asm__("movw %w5, %%ds\n\t"
23504+ "lcall *%%ss:(%%esi); cld\n\t"
23505+ "push %%ss\n\t"
23506+ "pop %%ds\n"
23507 "jc 1f\n\t"
23508 "xor %%ah, %%ah\n"
23509 "1:"
23510@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23511 : "0" (PCIBIOS_SET_PCI_HW_INT),
23512 "b" ((dev->bus->number << 8) | dev->devfn),
23513 "c" ((irq << 8) | (pin + 10)),
23514- "S" (&pci_indirect));
23515+ "S" (&pci_indirect),
23516+ "r" (__PCIBIOS_DS));
23517 return !(ret & 0xff00);
23518 }
23519 EXPORT_SYMBOL(pcibios_set_irq_routing);
23520diff -urNp linux-2.6.32.45/arch/x86/power/cpu.c linux-2.6.32.45/arch/x86/power/cpu.c
23521--- linux-2.6.32.45/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23522+++ linux-2.6.32.45/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23523@@ -129,7 +129,7 @@ static void do_fpu_end(void)
23524 static void fix_processor_context(void)
23525 {
23526 int cpu = smp_processor_id();
23527- struct tss_struct *t = &per_cpu(init_tss, cpu);
23528+ struct tss_struct *t = init_tss + cpu;
23529
23530 set_tss_desc(cpu, t); /*
23531 * This just modifies memory; should not be
23532@@ -139,7 +139,9 @@ static void fix_processor_context(void)
23533 */
23534
23535 #ifdef CONFIG_X86_64
23536+ pax_open_kernel();
23537 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23538+ pax_close_kernel();
23539
23540 syscall_init(); /* This sets MSR_*STAR and related */
23541 #endif
23542diff -urNp linux-2.6.32.45/arch/x86/vdso/Makefile linux-2.6.32.45/arch/x86/vdso/Makefile
23543--- linux-2.6.32.45/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23544+++ linux-2.6.32.45/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23545@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23546 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23547 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23548
23549-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23550+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23551 GCOV_PROFILE := n
23552
23553 #
23554diff -urNp linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c
23555--- linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23556+++ linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23557@@ -22,24 +22,48 @@
23558 #include <asm/hpet.h>
23559 #include <asm/unistd.h>
23560 #include <asm/io.h>
23561+#include <asm/fixmap.h>
23562 #include "vextern.h"
23563
23564 #define gtod vdso_vsyscall_gtod_data
23565
23566+notrace noinline long __vdso_fallback_time(long *t)
23567+{
23568+ long secs;
23569+ asm volatile("syscall"
23570+ : "=a" (secs)
23571+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23572+ return secs;
23573+}
23574+
23575 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23576 {
23577 long ret;
23578 asm("syscall" : "=a" (ret) :
23579- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23580+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23581 return ret;
23582 }
23583
23584+notrace static inline cycle_t __vdso_vread_hpet(void)
23585+{
23586+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23587+}
23588+
23589+notrace static inline cycle_t __vdso_vread_tsc(void)
23590+{
23591+ cycle_t ret = (cycle_t)vget_cycles();
23592+
23593+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23594+}
23595+
23596 notrace static inline long vgetns(void)
23597 {
23598 long v;
23599- cycles_t (*vread)(void);
23600- vread = gtod->clock.vread;
23601- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23602+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23603+ v = __vdso_vread_tsc();
23604+ else
23605+ v = __vdso_vread_hpet();
23606+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23607 return (v * gtod->clock.mult) >> gtod->clock.shift;
23608 }
23609
23610@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23611
23612 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23613 {
23614- if (likely(gtod->sysctl_enabled))
23615+ if (likely(gtod->sysctl_enabled &&
23616+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23617+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23618 switch (clock) {
23619 case CLOCK_REALTIME:
23620 if (likely(gtod->clock.vread))
23621@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23622 int clock_gettime(clockid_t, struct timespec *)
23623 __attribute__((weak, alias("__vdso_clock_gettime")));
23624
23625-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23626+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23627 {
23628 long ret;
23629- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23630+ asm("syscall" : "=a" (ret) :
23631+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23632+ return ret;
23633+}
23634+
23635+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23636+{
23637+ if (likely(gtod->sysctl_enabled &&
23638+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23639+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23640+ {
23641 if (likely(tv != NULL)) {
23642 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23643 offsetof(struct timespec, tv_nsec) ||
23644@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23645 }
23646 return 0;
23647 }
23648- asm("syscall" : "=a" (ret) :
23649- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23650- return ret;
23651+ return __vdso_fallback_gettimeofday(tv, tz);
23652 }
23653 int gettimeofday(struct timeval *, struct timezone *)
23654 __attribute__((weak, alias("__vdso_gettimeofday")));
23655diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c
23656--- linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23657+++ linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23658@@ -25,6 +25,7 @@
23659 #include <asm/tlbflush.h>
23660 #include <asm/vdso.h>
23661 #include <asm/proto.h>
23662+#include <asm/mman.h>
23663
23664 enum {
23665 VDSO_DISABLED = 0,
23666@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23667 void enable_sep_cpu(void)
23668 {
23669 int cpu = get_cpu();
23670- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23671+ struct tss_struct *tss = init_tss + cpu;
23672
23673 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23674 put_cpu();
23675@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23676 gate_vma.vm_start = FIXADDR_USER_START;
23677 gate_vma.vm_end = FIXADDR_USER_END;
23678 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23679- gate_vma.vm_page_prot = __P101;
23680+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23681 /*
23682 * Make sure the vDSO gets into every core dump.
23683 * Dumping its contents makes post-mortem fully interpretable later
23684@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23685 if (compat)
23686 addr = VDSO_HIGH_BASE;
23687 else {
23688- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23689+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23690 if (IS_ERR_VALUE(addr)) {
23691 ret = addr;
23692 goto up_fail;
23693 }
23694 }
23695
23696- current->mm->context.vdso = (void *)addr;
23697+ current->mm->context.vdso = addr;
23698
23699 if (compat_uses_vma || !compat) {
23700 /*
23701@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23702 }
23703
23704 current_thread_info()->sysenter_return =
23705- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23706+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23707
23708 up_fail:
23709 if (ret)
23710- current->mm->context.vdso = NULL;
23711+ current->mm->context.vdso = 0;
23712
23713 up_write(&mm->mmap_sem);
23714
23715@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23716
23717 const char *arch_vma_name(struct vm_area_struct *vma)
23718 {
23719- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23720+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23721 return "[vdso]";
23722+
23723+#ifdef CONFIG_PAX_SEGMEXEC
23724+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23725+ return "[vdso]";
23726+#endif
23727+
23728 return NULL;
23729 }
23730
23731@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23732 struct mm_struct *mm = tsk->mm;
23733
23734 /* Check to see if this task was created in compat vdso mode */
23735- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23736+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23737 return &gate_vma;
23738 return NULL;
23739 }
23740diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso.lds.S linux-2.6.32.45/arch/x86/vdso/vdso.lds.S
23741--- linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23742+++ linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23743@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23744 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23745 #include "vextern.h"
23746 #undef VEXTERN
23747+
23748+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23749+VEXTERN(fallback_gettimeofday)
23750+VEXTERN(fallback_time)
23751+VEXTERN(getcpu)
23752+#undef VEXTERN
23753diff -urNp linux-2.6.32.45/arch/x86/vdso/vextern.h linux-2.6.32.45/arch/x86/vdso/vextern.h
23754--- linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23755+++ linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23756@@ -11,6 +11,5 @@
23757 put into vextern.h and be referenced as a pointer with vdso prefix.
23758 The main kernel later fills in the values. */
23759
23760-VEXTERN(jiffies)
23761 VEXTERN(vgetcpu_mode)
23762 VEXTERN(vsyscall_gtod_data)
23763diff -urNp linux-2.6.32.45/arch/x86/vdso/vma.c linux-2.6.32.45/arch/x86/vdso/vma.c
23764--- linux-2.6.32.45/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23765+++ linux-2.6.32.45/arch/x86/vdso/vma.c 2011-08-23 20:24:19.000000000 -0400
23766@@ -17,8 +17,6 @@
23767 #include "vextern.h" /* Just for VMAGIC. */
23768 #undef VEXTERN
23769
23770-unsigned int __read_mostly vdso_enabled = 1;
23771-
23772 extern char vdso_start[], vdso_end[];
23773 extern unsigned short vdso_sync_cpuid;
23774
23775@@ -27,10 +25,8 @@ static unsigned vdso_size;
23776
23777 static inline void *var_ref(void *p, char *name)
23778 {
23779- if (*(void **)p != (void *)VMAGIC) {
23780- printk("VDSO: variable %s broken\n", name);
23781- vdso_enabled = 0;
23782- }
23783+ if (*(void **)p != (void *)VMAGIC)
23784+ panic("VDSO: variable %s broken\n", name);
23785 return p;
23786 }
23787
23788@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
23789 if (!vbase)
23790 goto oom;
23791
23792- if (memcmp(vbase, "\177ELF", 4)) {
23793- printk("VDSO: I'm broken; not ELF\n");
23794- vdso_enabled = 0;
23795- }
23796+ if (memcmp(vbase, ELFMAG, SELFMAG))
23797+ panic("VDSO: I'm broken; not ELF\n");
23798
23799 #define VEXTERN(x) \
23800 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23801 #include "vextern.h"
23802 #undef VEXTERN
23803+ vunmap(vbase);
23804 return 0;
23805
23806 oom:
23807- printk("Cannot allocate vdso\n");
23808- vdso_enabled = 0;
23809- return -ENOMEM;
23810+ panic("Cannot allocate vdso\n");
23811 }
23812 __initcall(init_vdso_vars);
23813
23814@@ -105,9 +98,6 @@ int arch_setup_additional_pages(struct l
23815 unsigned long addr;
23816 int ret;
23817
23818- if (!vdso_enabled)
23819- return 0;
23820-
23821 down_write(&mm->mmap_sem);
23822 addr = vdso_addr(mm->start_stack, vdso_size);
23823 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
23824@@ -116,7 +106,7 @@ int arch_setup_additional_pages(struct l
23825 goto up_fail;
23826 }
23827
23828- current->mm->context.vdso = (void *)addr;
23829+ current->mm->context.vdso = addr;
23830
23831 ret = install_special_mapping(mm, addr, vdso_size,
23832 VM_READ|VM_EXEC|
23833@@ -124,7 +114,7 @@ int arch_setup_additional_pages(struct l
23834 VM_ALWAYSDUMP,
23835 vdso_pages);
23836 if (ret) {
23837- current->mm->context.vdso = NULL;
23838+ current->mm->context.vdso = 0;
23839 goto up_fail;
23840 }
23841
23842@@ -132,10 +122,3 @@ up_fail:
23843 up_write(&mm->mmap_sem);
23844 return ret;
23845 }
23846-
23847-static __init int vdso_setup(char *s)
23848-{
23849- vdso_enabled = simple_strtoul(s, NULL, 0);
23850- return 0;
23851-}
23852-__setup("vdso=", vdso_setup);
23853diff -urNp linux-2.6.32.45/arch/x86/xen/enlighten.c linux-2.6.32.45/arch/x86/xen/enlighten.c
23854--- linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23855+++ linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23856@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23857
23858 struct shared_info xen_dummy_shared_info;
23859
23860-void *xen_initial_gdt;
23861-
23862 /*
23863 * Point at some empty memory to start with. We map the real shared_info
23864 * page as soon as fixmap is up and running.
23865@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23866
23867 preempt_disable();
23868
23869- start = __get_cpu_var(idt_desc).address;
23870+ start = (unsigned long)__get_cpu_var(idt_desc).address;
23871 end = start + __get_cpu_var(idt_desc).size + 1;
23872
23873 xen_mc_flush();
23874@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23875 #endif
23876 };
23877
23878-static void xen_reboot(int reason)
23879+static __noreturn void xen_reboot(int reason)
23880 {
23881 struct sched_shutdown r = { .reason = reason };
23882
23883@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23884 BUG();
23885 }
23886
23887-static void xen_restart(char *msg)
23888+static __noreturn void xen_restart(char *msg)
23889 {
23890 xen_reboot(SHUTDOWN_reboot);
23891 }
23892
23893-static void xen_emergency_restart(void)
23894+static __noreturn void xen_emergency_restart(void)
23895 {
23896 xen_reboot(SHUTDOWN_reboot);
23897 }
23898
23899-static void xen_machine_halt(void)
23900+static __noreturn void xen_machine_halt(void)
23901 {
23902 xen_reboot(SHUTDOWN_poweroff);
23903 }
23904@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23905 */
23906 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23907
23908-#ifdef CONFIG_X86_64
23909 /* Work out if we support NX */
23910- check_efer();
23911+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23912+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23913+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23914+ unsigned l, h;
23915+
23916+#ifdef CONFIG_X86_PAE
23917+ nx_enabled = 1;
23918+#endif
23919+ __supported_pte_mask |= _PAGE_NX;
23920+ rdmsr(MSR_EFER, l, h);
23921+ l |= EFER_NX;
23922+ wrmsr(MSR_EFER, l, h);
23923+ }
23924 #endif
23925
23926 xen_setup_features();
23927@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23928
23929 machine_ops = xen_machine_ops;
23930
23931- /*
23932- * The only reliable way to retain the initial address of the
23933- * percpu gdt_page is to remember it here, so we can go and
23934- * mark it RW later, when the initial percpu area is freed.
23935- */
23936- xen_initial_gdt = &per_cpu(gdt_page, 0);
23937-
23938 xen_smp_init();
23939
23940 pgd = (pgd_t *)xen_start_info->pt_base;
23941diff -urNp linux-2.6.32.45/arch/x86/xen/mmu.c linux-2.6.32.45/arch/x86/xen/mmu.c
23942--- linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23943+++ linux-2.6.32.45/arch/x86/xen/mmu.c 2011-08-24 18:35:52.000000000 -0400
23944@@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23945 convert_pfn_mfn(init_level4_pgt);
23946 convert_pfn_mfn(level3_ident_pgt);
23947 convert_pfn_mfn(level3_kernel_pgt);
23948+ convert_pfn_mfn(level3_vmalloc_pgt);
23949+ convert_pfn_mfn(level3_vmemmap_pgt);
23950
23951 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23952 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23953@@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23954 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23955 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23956 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23957+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23958+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23959 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23960+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23961 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23962 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23963
23964@@ -1860,6 +1865,7 @@ static __init void xen_post_allocator_in
23965 pv_mmu_ops.set_pud = xen_set_pud;
23966 #if PAGETABLE_LEVELS == 4
23967 pv_mmu_ops.set_pgd = xen_set_pgd;
23968+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
23969 #endif
23970
23971 /* This will work as long as patching hasn't happened yet
23972@@ -1946,6 +1952,7 @@ static const struct pv_mmu_ops xen_mmu_o
23973 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
23974 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
23975 .set_pgd = xen_set_pgd_hyper,
23976+ .set_pgd_batched = xen_set_pgd_hyper,
23977
23978 .alloc_pud = xen_alloc_pmd_init,
23979 .release_pud = xen_release_pmd_init,
23980diff -urNp linux-2.6.32.45/arch/x86/xen/smp.c linux-2.6.32.45/arch/x86/xen/smp.c
23981--- linux-2.6.32.45/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23982+++ linux-2.6.32.45/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23983@@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23984 {
23985 BUG_ON(smp_processor_id() != 0);
23986 native_smp_prepare_boot_cpu();
23987-
23988- /* We've switched to the "real" per-cpu gdt, so make sure the
23989- old memory can be recycled */
23990- make_lowmem_page_readwrite(xen_initial_gdt);
23991-
23992 xen_setup_vcpu_info_placement();
23993 }
23994
23995@@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23996 gdt = get_cpu_gdt_table(cpu);
23997
23998 ctxt->flags = VGCF_IN_KERNEL;
23999- ctxt->user_regs.ds = __USER_DS;
24000- ctxt->user_regs.es = __USER_DS;
24001+ ctxt->user_regs.ds = __KERNEL_DS;
24002+ ctxt->user_regs.es = __KERNEL_DS;
24003 ctxt->user_regs.ss = __KERNEL_DS;
24004 #ifdef CONFIG_X86_32
24005 ctxt->user_regs.fs = __KERNEL_PERCPU;
24006- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24007+ savesegment(gs, ctxt->user_regs.gs);
24008 #else
24009 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24010 #endif
24011@@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
24012 int rc;
24013
24014 per_cpu(current_task, cpu) = idle;
24015+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
24016 #ifdef CONFIG_X86_32
24017 irq_ctx_init(cpu);
24018 #else
24019 clear_tsk_thread_flag(idle, TIF_FORK);
24020- per_cpu(kernel_stack, cpu) =
24021- (unsigned long)task_stack_page(idle) -
24022- KERNEL_STACK_OFFSET + THREAD_SIZE;
24023+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24024 #endif
24025 xen_setup_runstate_info(cpu);
24026 xen_setup_timer(cpu);
24027diff -urNp linux-2.6.32.45/arch/x86/xen/xen-asm_32.S linux-2.6.32.45/arch/x86/xen/xen-asm_32.S
24028--- linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
24029+++ linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
24030@@ -83,14 +83,14 @@ ENTRY(xen_iret)
24031 ESP_OFFSET=4 # bytes pushed onto stack
24032
24033 /*
24034- * Store vcpu_info pointer for easy access. Do it this way to
24035- * avoid having to reload %fs
24036+ * Store vcpu_info pointer for easy access.
24037 */
24038 #ifdef CONFIG_SMP
24039- GET_THREAD_INFO(%eax)
24040- movl TI_cpu(%eax), %eax
24041- movl __per_cpu_offset(,%eax,4), %eax
24042- mov per_cpu__xen_vcpu(%eax), %eax
24043+ push %fs
24044+ mov $(__KERNEL_PERCPU), %eax
24045+ mov %eax, %fs
24046+ mov PER_CPU_VAR(xen_vcpu), %eax
24047+ pop %fs
24048 #else
24049 movl per_cpu__xen_vcpu, %eax
24050 #endif
24051diff -urNp linux-2.6.32.45/arch/x86/xen/xen-head.S linux-2.6.32.45/arch/x86/xen/xen-head.S
24052--- linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
24053+++ linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
24054@@ -19,6 +19,17 @@ ENTRY(startup_xen)
24055 #ifdef CONFIG_X86_32
24056 mov %esi,xen_start_info
24057 mov $init_thread_union+THREAD_SIZE,%esp
24058+#ifdef CONFIG_SMP
24059+ movl $cpu_gdt_table,%edi
24060+ movl $__per_cpu_load,%eax
24061+ movw %ax,__KERNEL_PERCPU + 2(%edi)
24062+ rorl $16,%eax
24063+ movb %al,__KERNEL_PERCPU + 4(%edi)
24064+ movb %ah,__KERNEL_PERCPU + 7(%edi)
24065+ movl $__per_cpu_end - 1,%eax
24066+ subl $__per_cpu_start,%eax
24067+ movw %ax,__KERNEL_PERCPU + 0(%edi)
24068+#endif
24069 #else
24070 mov %rsi,xen_start_info
24071 mov $init_thread_union+THREAD_SIZE,%rsp
24072diff -urNp linux-2.6.32.45/arch/x86/xen/xen-ops.h linux-2.6.32.45/arch/x86/xen/xen-ops.h
24073--- linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
24074+++ linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
24075@@ -10,8 +10,6 @@
24076 extern const char xen_hypervisor_callback[];
24077 extern const char xen_failsafe_callback[];
24078
24079-extern void *xen_initial_gdt;
24080-
24081 struct trap_info;
24082 void xen_copy_trap_info(struct trap_info *traps);
24083
24084diff -urNp linux-2.6.32.45/block/blk-integrity.c linux-2.6.32.45/block/blk-integrity.c
24085--- linux-2.6.32.45/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
24086+++ linux-2.6.32.45/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
24087@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
24088 NULL,
24089 };
24090
24091-static struct sysfs_ops integrity_ops = {
24092+static const struct sysfs_ops integrity_ops = {
24093 .show = &integrity_attr_show,
24094 .store = &integrity_attr_store,
24095 };
24096diff -urNp linux-2.6.32.45/block/blk-iopoll.c linux-2.6.32.45/block/blk-iopoll.c
24097--- linux-2.6.32.45/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
24098+++ linux-2.6.32.45/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
24099@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
24100 }
24101 EXPORT_SYMBOL(blk_iopoll_complete);
24102
24103-static void blk_iopoll_softirq(struct softirq_action *h)
24104+static void blk_iopoll_softirq(void)
24105 {
24106 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24107 int rearm = 0, budget = blk_iopoll_budget;
24108diff -urNp linux-2.6.32.45/block/blk-map.c linux-2.6.32.45/block/blk-map.c
24109--- linux-2.6.32.45/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
24110+++ linux-2.6.32.45/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
24111@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
24112 * direct dma. else, set up kernel bounce buffers
24113 */
24114 uaddr = (unsigned long) ubuf;
24115- if (blk_rq_aligned(q, ubuf, len) && !map_data)
24116+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
24117 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
24118 else
24119 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
24120@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
24121 for (i = 0; i < iov_count; i++) {
24122 unsigned long uaddr = (unsigned long)iov[i].iov_base;
24123
24124+ if (!iov[i].iov_len)
24125+ return -EINVAL;
24126+
24127 if (uaddr & queue_dma_alignment(q)) {
24128 unaligned = 1;
24129 break;
24130 }
24131- if (!iov[i].iov_len)
24132- return -EINVAL;
24133 }
24134
24135 if (unaligned || (q->dma_pad_mask & len) || map_data)
24136@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
24137 if (!len || !kbuf)
24138 return -EINVAL;
24139
24140- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
24141+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
24142 if (do_copy)
24143 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24144 else
24145diff -urNp linux-2.6.32.45/block/blk-softirq.c linux-2.6.32.45/block/blk-softirq.c
24146--- linux-2.6.32.45/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
24147+++ linux-2.6.32.45/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
24148@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
24149 * Softirq action handler - move entries to local list and loop over them
24150 * while passing them to the queue registered handler.
24151 */
24152-static void blk_done_softirq(struct softirq_action *h)
24153+static void blk_done_softirq(void)
24154 {
24155 struct list_head *cpu_list, local_list;
24156
24157diff -urNp linux-2.6.32.45/block/blk-sysfs.c linux-2.6.32.45/block/blk-sysfs.c
24158--- linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
24159+++ linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
24160@@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
24161 kmem_cache_free(blk_requestq_cachep, q);
24162 }
24163
24164-static struct sysfs_ops queue_sysfs_ops = {
24165+static const struct sysfs_ops queue_sysfs_ops = {
24166 .show = queue_attr_show,
24167 .store = queue_attr_store,
24168 };
24169diff -urNp linux-2.6.32.45/block/bsg.c linux-2.6.32.45/block/bsg.c
24170--- linux-2.6.32.45/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
24171+++ linux-2.6.32.45/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
24172@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
24173 struct sg_io_v4 *hdr, struct bsg_device *bd,
24174 fmode_t has_write_perm)
24175 {
24176+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24177+ unsigned char *cmdptr;
24178+
24179 if (hdr->request_len > BLK_MAX_CDB) {
24180 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24181 if (!rq->cmd)
24182 return -ENOMEM;
24183- }
24184+ cmdptr = rq->cmd;
24185+ } else
24186+ cmdptr = tmpcmd;
24187
24188- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
24189+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
24190 hdr->request_len))
24191 return -EFAULT;
24192
24193+ if (cmdptr != rq->cmd)
24194+ memcpy(rq->cmd, cmdptr, hdr->request_len);
24195+
24196 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24197 if (blk_verify_command(rq->cmd, has_write_perm))
24198 return -EPERM;
24199diff -urNp linux-2.6.32.45/block/elevator.c linux-2.6.32.45/block/elevator.c
24200--- linux-2.6.32.45/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
24201+++ linux-2.6.32.45/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
24202@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
24203 return error;
24204 }
24205
24206-static struct sysfs_ops elv_sysfs_ops = {
24207+static const struct sysfs_ops elv_sysfs_ops = {
24208 .show = elv_attr_show,
24209 .store = elv_attr_store,
24210 };
24211diff -urNp linux-2.6.32.45/block/scsi_ioctl.c linux-2.6.32.45/block/scsi_ioctl.c
24212--- linux-2.6.32.45/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
24213+++ linux-2.6.32.45/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
24214@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
24215 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24216 struct sg_io_hdr *hdr, fmode_t mode)
24217 {
24218- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24219+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24220+ unsigned char *cmdptr;
24221+
24222+ if (rq->cmd != rq->__cmd)
24223+ cmdptr = rq->cmd;
24224+ else
24225+ cmdptr = tmpcmd;
24226+
24227+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24228 return -EFAULT;
24229+
24230+ if (cmdptr != rq->cmd)
24231+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24232+
24233 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24234 return -EPERM;
24235
24236@@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
24237 int err;
24238 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24239 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24240+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24241+ unsigned char *cmdptr;
24242
24243 if (!sic)
24244 return -EINVAL;
24245@@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
24246 */
24247 err = -EFAULT;
24248 rq->cmd_len = cmdlen;
24249- if (copy_from_user(rq->cmd, sic->data, cmdlen))
24250+
24251+ if (rq->cmd != rq->__cmd)
24252+ cmdptr = rq->cmd;
24253+ else
24254+ cmdptr = tmpcmd;
24255+
24256+ if (copy_from_user(cmdptr, sic->data, cmdlen))
24257 goto error;
24258
24259+ if (rq->cmd != cmdptr)
24260+ memcpy(rq->cmd, cmdptr, cmdlen);
24261+
24262 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24263 goto error;
24264
24265diff -urNp linux-2.6.32.45/crypto/cryptd.c linux-2.6.32.45/crypto/cryptd.c
24266--- linux-2.6.32.45/crypto/cryptd.c 2011-03-27 14:31:47.000000000 -0400
24267+++ linux-2.6.32.45/crypto/cryptd.c 2011-08-23 21:22:32.000000000 -0400
24268@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
24269
24270 struct cryptd_blkcipher_request_ctx {
24271 crypto_completion_t complete;
24272-};
24273+} __no_const;
24274
24275 struct cryptd_hash_ctx {
24276 struct crypto_shash *child;
24277diff -urNp linux-2.6.32.45/crypto/gf128mul.c linux-2.6.32.45/crypto/gf128mul.c
24278--- linux-2.6.32.45/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
24279+++ linux-2.6.32.45/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
24280@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
24281 for (i = 0; i < 7; ++i)
24282 gf128mul_x_lle(&p[i + 1], &p[i]);
24283
24284- memset(r, 0, sizeof(r));
24285+ memset(r, 0, sizeof(*r));
24286 for (i = 0;;) {
24287 u8 ch = ((u8 *)b)[15 - i];
24288
24289@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
24290 for (i = 0; i < 7; ++i)
24291 gf128mul_x_bbe(&p[i + 1], &p[i]);
24292
24293- memset(r, 0, sizeof(r));
24294+ memset(r, 0, sizeof(*r));
24295 for (i = 0;;) {
24296 u8 ch = ((u8 *)b)[i];
24297
24298diff -urNp linux-2.6.32.45/crypto/serpent.c linux-2.6.32.45/crypto/serpent.c
24299--- linux-2.6.32.45/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
24300+++ linux-2.6.32.45/crypto/serpent.c 2011-08-18 23:59:56.000000000 -0400
24301@@ -21,6 +21,7 @@
24302 #include <asm/byteorder.h>
24303 #include <linux/crypto.h>
24304 #include <linux/types.h>
24305+#include <linux/sched.h>
24306
24307 /* Key is padded to the maximum of 256 bits before round key generation.
24308 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
24309@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_
24310 u32 r0,r1,r2,r3,r4;
24311 int i;
24312
24313+ pax_track_stack();
24314+
24315 /* Copy key, add padding */
24316
24317 for (i = 0; i < keylen; ++i)
24318diff -urNp linux-2.6.32.45/Documentation/dontdiff linux-2.6.32.45/Documentation/dontdiff
24319--- linux-2.6.32.45/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
24320+++ linux-2.6.32.45/Documentation/dontdiff 2011-08-21 18:59:02.000000000 -0400
24321@@ -1,13 +1,16 @@
24322 *.a
24323 *.aux
24324 *.bin
24325+*.cis
24326 *.cpio
24327 *.csp
24328+*.dbg
24329 *.dsp
24330 *.dvi
24331 *.elf
24332 *.eps
24333 *.fw
24334+*.gcno
24335 *.gen.S
24336 *.gif
24337 *.grep
24338@@ -38,8 +41,10 @@
24339 *.tab.h
24340 *.tex
24341 *.ver
24342+*.vim
24343 *.xml
24344 *_MODULES
24345+*_reg_safe.h
24346 *_vga16.c
24347 *~
24348 *.9
24349@@ -49,11 +54,16 @@
24350 53c700_d.h
24351 CVS
24352 ChangeSet
24353+GPATH
24354+GRTAGS
24355+GSYMS
24356+GTAGS
24357 Image
24358 Kerntypes
24359 Module.markers
24360 Module.symvers
24361 PENDING
24362+PERF*
24363 SCCS
24364 System.map*
24365 TAGS
24366@@ -76,7 +86,11 @@ btfixupprep
24367 build
24368 bvmlinux
24369 bzImage*
24370+capability_names.h
24371+capflags.c
24372 classlist.h*
24373+clut_vga16.c
24374+common-cmds.h
24375 comp*.log
24376 compile.h*
24377 conf
24378@@ -97,19 +111,21 @@ elfconfig.h*
24379 fixdep
24380 fore200e_mkfirm
24381 fore200e_pca_fw.c*
24382+gate.lds
24383 gconf
24384 gen-devlist
24385 gen_crc32table
24386 gen_init_cpio
24387 genksyms
24388 *_gray256.c
24389+hash
24390 ihex2fw
24391 ikconfig.h*
24392 initramfs_data.cpio
24393+initramfs_data.cpio.bz2
24394 initramfs_data.cpio.gz
24395 initramfs_list
24396 kallsyms
24397-kconfig
24398 keywords.c
24399 ksym.c*
24400 ksym.h*
24401@@ -133,7 +149,9 @@ mkboot
24402 mkbugboot
24403 mkcpustr
24404 mkdep
24405+mkpiggy
24406 mkprep
24407+mkregtable
24408 mktables
24409 mktree
24410 modpost
24411@@ -149,6 +167,7 @@ patches*
24412 pca200e.bin
24413 pca200e_ecd.bin2
24414 piggy.gz
24415+piggy.S
24416 piggyback
24417 pnmtologo
24418 ppc_defs.h*
24419@@ -157,12 +176,15 @@ qconf
24420 raid6altivec*.c
24421 raid6int*.c
24422 raid6tables.c
24423+regdb.c
24424 relocs
24425+rlim_names.h
24426 series
24427 setup
24428 setup.bin
24429 setup.elf
24430 sImage
24431+slabinfo
24432 sm_tbl*
24433 split-include
24434 syscalltab.h
24435@@ -186,14 +208,20 @@ version.h*
24436 vmlinux
24437 vmlinux-*
24438 vmlinux.aout
24439+vmlinux.bin.all
24440+vmlinux.bin.bz2
24441 vmlinux.lds
24442+vmlinux.relocs
24443+voffset.h
24444 vsyscall.lds
24445 vsyscall_32.lds
24446 wanxlfw.inc
24447 uImage
24448 unifdef
24449+utsrelease.h
24450 wakeup.bin
24451 wakeup.elf
24452 wakeup.lds
24453 zImage*
24454 zconf.hash.c
24455+zoffset.h
24456diff -urNp linux-2.6.32.45/Documentation/kernel-parameters.txt linux-2.6.32.45/Documentation/kernel-parameters.txt
24457--- linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
24458+++ linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
24459@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
24460 the specified number of seconds. This is to be used if
24461 your oopses keep scrolling off the screen.
24462
24463+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
24464+ virtualization environments that don't cope well with the
24465+ expand down segment used by UDEREF on X86-32 or the frequent
24466+ page table updates on X86-64.
24467+
24468+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
24469+
24470 pcbit= [HW,ISDN]
24471
24472 pcd. [PARIDE]
24473diff -urNp linux-2.6.32.45/drivers/acpi/acpi_pad.c linux-2.6.32.45/drivers/acpi/acpi_pad.c
24474--- linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
24475+++ linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
24476@@ -30,7 +30,7 @@
24477 #include <acpi/acpi_bus.h>
24478 #include <acpi/acpi_drivers.h>
24479
24480-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
24481+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
24482 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
24483 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
24484 static DEFINE_MUTEX(isolated_cpus_lock);
24485diff -urNp linux-2.6.32.45/drivers/acpi/battery.c linux-2.6.32.45/drivers/acpi/battery.c
24486--- linux-2.6.32.45/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
24487+++ linux-2.6.32.45/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
24488@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
24489 }
24490
24491 static struct battery_file {
24492- struct file_operations ops;
24493+ const struct file_operations ops;
24494 mode_t mode;
24495 const char *name;
24496 } acpi_battery_file[] = {
24497diff -urNp linux-2.6.32.45/drivers/acpi/dock.c linux-2.6.32.45/drivers/acpi/dock.c
24498--- linux-2.6.32.45/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
24499+++ linux-2.6.32.45/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
24500@@ -77,7 +77,7 @@ struct dock_dependent_device {
24501 struct list_head list;
24502 struct list_head hotplug_list;
24503 acpi_handle handle;
24504- struct acpi_dock_ops *ops;
24505+ const struct acpi_dock_ops *ops;
24506 void *context;
24507 };
24508
24509@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
24510 * the dock driver after _DCK is executed.
24511 */
24512 int
24513-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
24514+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
24515 void *context)
24516 {
24517 struct dock_dependent_device *dd;
24518diff -urNp linux-2.6.32.45/drivers/acpi/osl.c linux-2.6.32.45/drivers/acpi/osl.c
24519--- linux-2.6.32.45/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
24520+++ linux-2.6.32.45/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
24521@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
24522 void __iomem *virt_addr;
24523
24524 virt_addr = ioremap(phys_addr, width);
24525+ if (!virt_addr)
24526+ return AE_NO_MEMORY;
24527 if (!value)
24528 value = &dummy;
24529
24530@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
24531 void __iomem *virt_addr;
24532
24533 virt_addr = ioremap(phys_addr, width);
24534+ if (!virt_addr)
24535+ return AE_NO_MEMORY;
24536
24537 switch (width) {
24538 case 8:
24539diff -urNp linux-2.6.32.45/drivers/acpi/power_meter.c linux-2.6.32.45/drivers/acpi/power_meter.c
24540--- linux-2.6.32.45/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
24541+++ linux-2.6.32.45/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
24542@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
24543 return res;
24544
24545 temp /= 1000;
24546- if (temp < 0)
24547- return -EINVAL;
24548
24549 mutex_lock(&resource->lock);
24550 resource->trip[attr->index - 7] = temp;
24551diff -urNp linux-2.6.32.45/drivers/acpi/proc.c linux-2.6.32.45/drivers/acpi/proc.c
24552--- linux-2.6.32.45/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24553+++ linux-2.6.32.45/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24554@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24555 size_t count, loff_t * ppos)
24556 {
24557 struct list_head *node, *next;
24558- char strbuf[5];
24559- char str[5] = "";
24560- unsigned int len = count;
24561+ char strbuf[5] = {0};
24562 struct acpi_device *found_dev = NULL;
24563
24564- if (len > 4)
24565- len = 4;
24566- if (len < 0)
24567- return -EFAULT;
24568+ if (count > 4)
24569+ count = 4;
24570
24571- if (copy_from_user(strbuf, buffer, len))
24572+ if (copy_from_user(strbuf, buffer, count))
24573 return -EFAULT;
24574- strbuf[len] = '\0';
24575- sscanf(strbuf, "%s", str);
24576+ strbuf[count] = '\0';
24577
24578 mutex_lock(&acpi_device_lock);
24579 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24580@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24581 if (!dev->wakeup.flags.valid)
24582 continue;
24583
24584- if (!strncmp(dev->pnp.bus_id, str, 4)) {
24585+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24586 dev->wakeup.state.enabled =
24587 dev->wakeup.state.enabled ? 0 : 1;
24588 found_dev = dev;
24589diff -urNp linux-2.6.32.45/drivers/acpi/processor_core.c linux-2.6.32.45/drivers/acpi/processor_core.c
24590--- linux-2.6.32.45/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24591+++ linux-2.6.32.45/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24592@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24593 return 0;
24594 }
24595
24596- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24597+ BUG_ON(pr->id >= nr_cpu_ids);
24598
24599 /*
24600 * Buggy BIOS check
24601diff -urNp linux-2.6.32.45/drivers/acpi/sbshc.c linux-2.6.32.45/drivers/acpi/sbshc.c
24602--- linux-2.6.32.45/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24603+++ linux-2.6.32.45/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24604@@ -17,7 +17,7 @@
24605
24606 #define PREFIX "ACPI: "
24607
24608-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24609+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24610 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24611
24612 struct acpi_smb_hc {
24613diff -urNp linux-2.6.32.45/drivers/acpi/sleep.c linux-2.6.32.45/drivers/acpi/sleep.c
24614--- linux-2.6.32.45/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24615+++ linux-2.6.32.45/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24616@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24617 }
24618 }
24619
24620-static struct platform_suspend_ops acpi_suspend_ops = {
24621+static const struct platform_suspend_ops acpi_suspend_ops = {
24622 .valid = acpi_suspend_state_valid,
24623 .begin = acpi_suspend_begin,
24624 .prepare_late = acpi_pm_prepare,
24625@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24626 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24627 * been requested.
24628 */
24629-static struct platform_suspend_ops acpi_suspend_ops_old = {
24630+static const struct platform_suspend_ops acpi_suspend_ops_old = {
24631 .valid = acpi_suspend_state_valid,
24632 .begin = acpi_suspend_begin_old,
24633 .prepare_late = acpi_pm_disable_gpes,
24634@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24635 acpi_enable_all_runtime_gpes();
24636 }
24637
24638-static struct platform_hibernation_ops acpi_hibernation_ops = {
24639+static const struct platform_hibernation_ops acpi_hibernation_ops = {
24640 .begin = acpi_hibernation_begin,
24641 .end = acpi_pm_end,
24642 .pre_snapshot = acpi_hibernation_pre_snapshot,
24643@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24644 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24645 * been requested.
24646 */
24647-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24648+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24649 .begin = acpi_hibernation_begin_old,
24650 .end = acpi_pm_end,
24651 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24652diff -urNp linux-2.6.32.45/drivers/acpi/video.c linux-2.6.32.45/drivers/acpi/video.c
24653--- linux-2.6.32.45/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24654+++ linux-2.6.32.45/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24655@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24656 vd->brightness->levels[request_level]);
24657 }
24658
24659-static struct backlight_ops acpi_backlight_ops = {
24660+static const struct backlight_ops acpi_backlight_ops = {
24661 .get_brightness = acpi_video_get_brightness,
24662 .update_status = acpi_video_set_brightness,
24663 };
24664diff -urNp linux-2.6.32.45/drivers/ata/ahci.c linux-2.6.32.45/drivers/ata/ahci.c
24665--- linux-2.6.32.45/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24666+++ linux-2.6.32.45/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24667@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24668 .sdev_attrs = ahci_sdev_attrs,
24669 };
24670
24671-static struct ata_port_operations ahci_ops = {
24672+static const struct ata_port_operations ahci_ops = {
24673 .inherits = &sata_pmp_port_ops,
24674
24675 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24676@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24677 .port_stop = ahci_port_stop,
24678 };
24679
24680-static struct ata_port_operations ahci_vt8251_ops = {
24681+static const struct ata_port_operations ahci_vt8251_ops = {
24682 .inherits = &ahci_ops,
24683 .hardreset = ahci_vt8251_hardreset,
24684 };
24685
24686-static struct ata_port_operations ahci_p5wdh_ops = {
24687+static const struct ata_port_operations ahci_p5wdh_ops = {
24688 .inherits = &ahci_ops,
24689 .hardreset = ahci_p5wdh_hardreset,
24690 };
24691
24692-static struct ata_port_operations ahci_sb600_ops = {
24693+static const struct ata_port_operations ahci_sb600_ops = {
24694 .inherits = &ahci_ops,
24695 .softreset = ahci_sb600_softreset,
24696 .pmp_softreset = ahci_sb600_softreset,
24697diff -urNp linux-2.6.32.45/drivers/ata/ata_generic.c linux-2.6.32.45/drivers/ata/ata_generic.c
24698--- linux-2.6.32.45/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24699+++ linux-2.6.32.45/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24700@@ -104,7 +104,7 @@ static struct scsi_host_template generic
24701 ATA_BMDMA_SHT(DRV_NAME),
24702 };
24703
24704-static struct ata_port_operations generic_port_ops = {
24705+static const struct ata_port_operations generic_port_ops = {
24706 .inherits = &ata_bmdma_port_ops,
24707 .cable_detect = ata_cable_unknown,
24708 .set_mode = generic_set_mode,
24709diff -urNp linux-2.6.32.45/drivers/ata/ata_piix.c linux-2.6.32.45/drivers/ata/ata_piix.c
24710--- linux-2.6.32.45/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24711+++ linux-2.6.32.45/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24712@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24713 ATA_BMDMA_SHT(DRV_NAME),
24714 };
24715
24716-static struct ata_port_operations piix_pata_ops = {
24717+static const struct ata_port_operations piix_pata_ops = {
24718 .inherits = &ata_bmdma32_port_ops,
24719 .cable_detect = ata_cable_40wire,
24720 .set_piomode = piix_set_piomode,
24721@@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24722 .prereset = piix_pata_prereset,
24723 };
24724
24725-static struct ata_port_operations piix_vmw_ops = {
24726+static const struct ata_port_operations piix_vmw_ops = {
24727 .inherits = &piix_pata_ops,
24728 .bmdma_status = piix_vmw_bmdma_status,
24729 };
24730
24731-static struct ata_port_operations ich_pata_ops = {
24732+static const struct ata_port_operations ich_pata_ops = {
24733 .inherits = &piix_pata_ops,
24734 .cable_detect = ich_pata_cable_detect,
24735 .set_dmamode = ich_set_dmamode,
24736 };
24737
24738-static struct ata_port_operations piix_sata_ops = {
24739+static const struct ata_port_operations piix_sata_ops = {
24740 .inherits = &ata_bmdma_port_ops,
24741 };
24742
24743-static struct ata_port_operations piix_sidpr_sata_ops = {
24744+static const struct ata_port_operations piix_sidpr_sata_ops = {
24745 .inherits = &piix_sata_ops,
24746 .hardreset = sata_std_hardreset,
24747 .scr_read = piix_sidpr_scr_read,
24748diff -urNp linux-2.6.32.45/drivers/ata/libata-acpi.c linux-2.6.32.45/drivers/ata/libata-acpi.c
24749--- linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24750+++ linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24751@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24752 ata_acpi_uevent(dev->link->ap, dev, event);
24753 }
24754
24755-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24756+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24757 .handler = ata_acpi_dev_notify_dock,
24758 .uevent = ata_acpi_dev_uevent,
24759 };
24760
24761-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24762+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24763 .handler = ata_acpi_ap_notify_dock,
24764 .uevent = ata_acpi_ap_uevent,
24765 };
24766diff -urNp linux-2.6.32.45/drivers/ata/libata-core.c linux-2.6.32.45/drivers/ata/libata-core.c
24767--- linux-2.6.32.45/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24768+++ linux-2.6.32.45/drivers/ata/libata-core.c 2011-08-05 20:33:55.000000000 -0400
24769@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24770 struct ata_port *ap;
24771 unsigned int tag;
24772
24773- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24774+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24775 ap = qc->ap;
24776
24777 qc->flags = 0;
24778@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24779 struct ata_port *ap;
24780 struct ata_link *link;
24781
24782- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24783+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24784 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24785 ap = qc->ap;
24786 link = qc->dev->link;
24787@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24788 * LOCKING:
24789 * None.
24790 */
24791-static void ata_finalize_port_ops(struct ata_port_operations *ops)
24792+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24793 {
24794 static DEFINE_SPINLOCK(lock);
24795 const struct ata_port_operations *cur;
24796@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24797 return;
24798
24799 spin_lock(&lock);
24800+ pax_open_kernel();
24801
24802 for (cur = ops->inherits; cur; cur = cur->inherits) {
24803 void **inherit = (void **)cur;
24804@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24805 if (IS_ERR(*pp))
24806 *pp = NULL;
24807
24808- ops->inherits = NULL;
24809+ *(struct ata_port_operations **)&ops->inherits = NULL;
24810
24811+ pax_close_kernel();
24812 spin_unlock(&lock);
24813 }
24814
24815@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24816 */
24817 /* KILLME - the only user left is ipr */
24818 void ata_host_init(struct ata_host *host, struct device *dev,
24819- unsigned long flags, struct ata_port_operations *ops)
24820+ unsigned long flags, const struct ata_port_operations *ops)
24821 {
24822 spin_lock_init(&host->lock);
24823 host->dev = dev;
24824@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24825 /* truly dummy */
24826 }
24827
24828-struct ata_port_operations ata_dummy_port_ops = {
24829+const struct ata_port_operations ata_dummy_port_ops = {
24830 .qc_prep = ata_noop_qc_prep,
24831 .qc_issue = ata_dummy_qc_issue,
24832 .error_handler = ata_dummy_error_handler,
24833diff -urNp linux-2.6.32.45/drivers/ata/libata-eh.c linux-2.6.32.45/drivers/ata/libata-eh.c
24834--- linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:35:28.000000000 -0400
24835+++ linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:33:59.000000000 -0400
24836@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24837 {
24838 struct ata_link *link;
24839
24840+ pax_track_stack();
24841+
24842 ata_for_each_link(link, ap, HOST_FIRST)
24843 ata_eh_link_report(link);
24844 }
24845@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24846 */
24847 void ata_std_error_handler(struct ata_port *ap)
24848 {
24849- struct ata_port_operations *ops = ap->ops;
24850+ const struct ata_port_operations *ops = ap->ops;
24851 ata_reset_fn_t hardreset = ops->hardreset;
24852
24853 /* ignore built-in hardreset if SCR access is not available */
24854diff -urNp linux-2.6.32.45/drivers/ata/libata-pmp.c linux-2.6.32.45/drivers/ata/libata-pmp.c
24855--- linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24856+++ linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24857@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24858 */
24859 static int sata_pmp_eh_recover(struct ata_port *ap)
24860 {
24861- struct ata_port_operations *ops = ap->ops;
24862+ const struct ata_port_operations *ops = ap->ops;
24863 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24864 struct ata_link *pmp_link = &ap->link;
24865 struct ata_device *pmp_dev = pmp_link->device;
24866diff -urNp linux-2.6.32.45/drivers/ata/pata_acpi.c linux-2.6.32.45/drivers/ata/pata_acpi.c
24867--- linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24868+++ linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24869@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24870 ATA_BMDMA_SHT(DRV_NAME),
24871 };
24872
24873-static struct ata_port_operations pacpi_ops = {
24874+static const struct ata_port_operations pacpi_ops = {
24875 .inherits = &ata_bmdma_port_ops,
24876 .qc_issue = pacpi_qc_issue,
24877 .cable_detect = pacpi_cable_detect,
24878diff -urNp linux-2.6.32.45/drivers/ata/pata_ali.c linux-2.6.32.45/drivers/ata/pata_ali.c
24879--- linux-2.6.32.45/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24880+++ linux-2.6.32.45/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24881@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24882 * Port operations for PIO only ALi
24883 */
24884
24885-static struct ata_port_operations ali_early_port_ops = {
24886+static const struct ata_port_operations ali_early_port_ops = {
24887 .inherits = &ata_sff_port_ops,
24888 .cable_detect = ata_cable_40wire,
24889 .set_piomode = ali_set_piomode,
24890@@ -382,7 +382,7 @@ static const struct ata_port_operations
24891 * Port operations for DMA capable ALi without cable
24892 * detect
24893 */
24894-static struct ata_port_operations ali_20_port_ops = {
24895+static const struct ata_port_operations ali_20_port_ops = {
24896 .inherits = &ali_dma_base_ops,
24897 .cable_detect = ata_cable_40wire,
24898 .mode_filter = ali_20_filter,
24899@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24900 /*
24901 * Port operations for DMA capable ALi with cable detect
24902 */
24903-static struct ata_port_operations ali_c2_port_ops = {
24904+static const struct ata_port_operations ali_c2_port_ops = {
24905 .inherits = &ali_dma_base_ops,
24906 .check_atapi_dma = ali_check_atapi_dma,
24907 .cable_detect = ali_c2_cable_detect,
24908@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24909 /*
24910 * Port operations for DMA capable ALi with cable detect
24911 */
24912-static struct ata_port_operations ali_c4_port_ops = {
24913+static const struct ata_port_operations ali_c4_port_ops = {
24914 .inherits = &ali_dma_base_ops,
24915 .check_atapi_dma = ali_check_atapi_dma,
24916 .cable_detect = ali_c2_cable_detect,
24917@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24918 /*
24919 * Port operations for DMA capable ALi with cable detect and LBA48
24920 */
24921-static struct ata_port_operations ali_c5_port_ops = {
24922+static const struct ata_port_operations ali_c5_port_ops = {
24923 .inherits = &ali_dma_base_ops,
24924 .check_atapi_dma = ali_check_atapi_dma,
24925 .dev_config = ali_warn_atapi_dma,
24926diff -urNp linux-2.6.32.45/drivers/ata/pata_amd.c linux-2.6.32.45/drivers/ata/pata_amd.c
24927--- linux-2.6.32.45/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24928+++ linux-2.6.32.45/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24929@@ -397,28 +397,28 @@ static const struct ata_port_operations
24930 .prereset = amd_pre_reset,
24931 };
24932
24933-static struct ata_port_operations amd33_port_ops = {
24934+static const struct ata_port_operations amd33_port_ops = {
24935 .inherits = &amd_base_port_ops,
24936 .cable_detect = ata_cable_40wire,
24937 .set_piomode = amd33_set_piomode,
24938 .set_dmamode = amd33_set_dmamode,
24939 };
24940
24941-static struct ata_port_operations amd66_port_ops = {
24942+static const struct ata_port_operations amd66_port_ops = {
24943 .inherits = &amd_base_port_ops,
24944 .cable_detect = ata_cable_unknown,
24945 .set_piomode = amd66_set_piomode,
24946 .set_dmamode = amd66_set_dmamode,
24947 };
24948
24949-static struct ata_port_operations amd100_port_ops = {
24950+static const struct ata_port_operations amd100_port_ops = {
24951 .inherits = &amd_base_port_ops,
24952 .cable_detect = ata_cable_unknown,
24953 .set_piomode = amd100_set_piomode,
24954 .set_dmamode = amd100_set_dmamode,
24955 };
24956
24957-static struct ata_port_operations amd133_port_ops = {
24958+static const struct ata_port_operations amd133_port_ops = {
24959 .inherits = &amd_base_port_ops,
24960 .cable_detect = amd_cable_detect,
24961 .set_piomode = amd133_set_piomode,
24962@@ -433,13 +433,13 @@ static const struct ata_port_operations
24963 .host_stop = nv_host_stop,
24964 };
24965
24966-static struct ata_port_operations nv100_port_ops = {
24967+static const struct ata_port_operations nv100_port_ops = {
24968 .inherits = &nv_base_port_ops,
24969 .set_piomode = nv100_set_piomode,
24970 .set_dmamode = nv100_set_dmamode,
24971 };
24972
24973-static struct ata_port_operations nv133_port_ops = {
24974+static const struct ata_port_operations nv133_port_ops = {
24975 .inherits = &nv_base_port_ops,
24976 .set_piomode = nv133_set_piomode,
24977 .set_dmamode = nv133_set_dmamode,
24978diff -urNp linux-2.6.32.45/drivers/ata/pata_artop.c linux-2.6.32.45/drivers/ata/pata_artop.c
24979--- linux-2.6.32.45/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24980+++ linux-2.6.32.45/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24981@@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24982 ATA_BMDMA_SHT(DRV_NAME),
24983 };
24984
24985-static struct ata_port_operations artop6210_ops = {
24986+static const struct ata_port_operations artop6210_ops = {
24987 .inherits = &ata_bmdma_port_ops,
24988 .cable_detect = ata_cable_40wire,
24989 .set_piomode = artop6210_set_piomode,
24990@@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24991 .qc_defer = artop6210_qc_defer,
24992 };
24993
24994-static struct ata_port_operations artop6260_ops = {
24995+static const struct ata_port_operations artop6260_ops = {
24996 .inherits = &ata_bmdma_port_ops,
24997 .cable_detect = artop6260_cable_detect,
24998 .set_piomode = artop6260_set_piomode,
24999diff -urNp linux-2.6.32.45/drivers/ata/pata_at32.c linux-2.6.32.45/drivers/ata/pata_at32.c
25000--- linux-2.6.32.45/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
25001+++ linux-2.6.32.45/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
25002@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
25003 ATA_PIO_SHT(DRV_NAME),
25004 };
25005
25006-static struct ata_port_operations at32_port_ops = {
25007+static const struct ata_port_operations at32_port_ops = {
25008 .inherits = &ata_sff_port_ops,
25009 .cable_detect = ata_cable_40wire,
25010 .set_piomode = pata_at32_set_piomode,
25011diff -urNp linux-2.6.32.45/drivers/ata/pata_at91.c linux-2.6.32.45/drivers/ata/pata_at91.c
25012--- linux-2.6.32.45/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
25013+++ linux-2.6.32.45/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
25014@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
25015 ATA_PIO_SHT(DRV_NAME),
25016 };
25017
25018-static struct ata_port_operations pata_at91_port_ops = {
25019+static const struct ata_port_operations pata_at91_port_ops = {
25020 .inherits = &ata_sff_port_ops,
25021
25022 .sff_data_xfer = pata_at91_data_xfer_noirq,
25023diff -urNp linux-2.6.32.45/drivers/ata/pata_atiixp.c linux-2.6.32.45/drivers/ata/pata_atiixp.c
25024--- linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
25025+++ linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
25026@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
25027 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25028 };
25029
25030-static struct ata_port_operations atiixp_port_ops = {
25031+static const struct ata_port_operations atiixp_port_ops = {
25032 .inherits = &ata_bmdma_port_ops,
25033
25034 .qc_prep = ata_sff_dumb_qc_prep,
25035diff -urNp linux-2.6.32.45/drivers/ata/pata_atp867x.c linux-2.6.32.45/drivers/ata/pata_atp867x.c
25036--- linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
25037+++ linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
25038@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
25039 ATA_BMDMA_SHT(DRV_NAME),
25040 };
25041
25042-static struct ata_port_operations atp867x_ops = {
25043+static const struct ata_port_operations atp867x_ops = {
25044 .inherits = &ata_bmdma_port_ops,
25045 .cable_detect = atp867x_cable_detect,
25046 .set_piomode = atp867x_set_piomode,
25047diff -urNp linux-2.6.32.45/drivers/ata/pata_bf54x.c linux-2.6.32.45/drivers/ata/pata_bf54x.c
25048--- linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
25049+++ linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
25050@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
25051 .dma_boundary = ATA_DMA_BOUNDARY,
25052 };
25053
25054-static struct ata_port_operations bfin_pata_ops = {
25055+static const struct ata_port_operations bfin_pata_ops = {
25056 .inherits = &ata_sff_port_ops,
25057
25058 .set_piomode = bfin_set_piomode,
25059diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd640.c linux-2.6.32.45/drivers/ata/pata_cmd640.c
25060--- linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
25061+++ linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
25062@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
25063 ATA_BMDMA_SHT(DRV_NAME),
25064 };
25065
25066-static struct ata_port_operations cmd640_port_ops = {
25067+static const struct ata_port_operations cmd640_port_ops = {
25068 .inherits = &ata_bmdma_port_ops,
25069 /* In theory xfer_noirq is not needed once we kill the prefetcher */
25070 .sff_data_xfer = ata_sff_data_xfer_noirq,
25071diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd64x.c linux-2.6.32.45/drivers/ata/pata_cmd64x.c
25072--- linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
25073+++ linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
25074@@ -271,18 +271,18 @@ static const struct ata_port_operations
25075 .set_dmamode = cmd64x_set_dmamode,
25076 };
25077
25078-static struct ata_port_operations cmd64x_port_ops = {
25079+static const struct ata_port_operations cmd64x_port_ops = {
25080 .inherits = &cmd64x_base_ops,
25081 .cable_detect = ata_cable_40wire,
25082 };
25083
25084-static struct ata_port_operations cmd646r1_port_ops = {
25085+static const struct ata_port_operations cmd646r1_port_ops = {
25086 .inherits = &cmd64x_base_ops,
25087 .bmdma_stop = cmd646r1_bmdma_stop,
25088 .cable_detect = ata_cable_40wire,
25089 };
25090
25091-static struct ata_port_operations cmd648_port_ops = {
25092+static const struct ata_port_operations cmd648_port_ops = {
25093 .inherits = &cmd64x_base_ops,
25094 .bmdma_stop = cmd648_bmdma_stop,
25095 .cable_detect = cmd648_cable_detect,
25096diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5520.c linux-2.6.32.45/drivers/ata/pata_cs5520.c
25097--- linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
25098+++ linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
25099@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
25100 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25101 };
25102
25103-static struct ata_port_operations cs5520_port_ops = {
25104+static const struct ata_port_operations cs5520_port_ops = {
25105 .inherits = &ata_bmdma_port_ops,
25106 .qc_prep = ata_sff_dumb_qc_prep,
25107 .cable_detect = ata_cable_40wire,
25108diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5530.c linux-2.6.32.45/drivers/ata/pata_cs5530.c
25109--- linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
25110+++ linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
25111@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
25112 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25113 };
25114
25115-static struct ata_port_operations cs5530_port_ops = {
25116+static const struct ata_port_operations cs5530_port_ops = {
25117 .inherits = &ata_bmdma_port_ops,
25118
25119 .qc_prep = ata_sff_dumb_qc_prep,
25120diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5535.c linux-2.6.32.45/drivers/ata/pata_cs5535.c
25121--- linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
25122+++ linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
25123@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
25124 ATA_BMDMA_SHT(DRV_NAME),
25125 };
25126
25127-static struct ata_port_operations cs5535_port_ops = {
25128+static const struct ata_port_operations cs5535_port_ops = {
25129 .inherits = &ata_bmdma_port_ops,
25130 .cable_detect = cs5535_cable_detect,
25131 .set_piomode = cs5535_set_piomode,
25132diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5536.c linux-2.6.32.45/drivers/ata/pata_cs5536.c
25133--- linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
25134+++ linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
25135@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
25136 ATA_BMDMA_SHT(DRV_NAME),
25137 };
25138
25139-static struct ata_port_operations cs5536_port_ops = {
25140+static const struct ata_port_operations cs5536_port_ops = {
25141 .inherits = &ata_bmdma_port_ops,
25142 .cable_detect = cs5536_cable_detect,
25143 .set_piomode = cs5536_set_piomode,
25144diff -urNp linux-2.6.32.45/drivers/ata/pata_cypress.c linux-2.6.32.45/drivers/ata/pata_cypress.c
25145--- linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
25146+++ linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
25147@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
25148 ATA_BMDMA_SHT(DRV_NAME),
25149 };
25150
25151-static struct ata_port_operations cy82c693_port_ops = {
25152+static const struct ata_port_operations cy82c693_port_ops = {
25153 .inherits = &ata_bmdma_port_ops,
25154 .cable_detect = ata_cable_40wire,
25155 .set_piomode = cy82c693_set_piomode,
25156diff -urNp linux-2.6.32.45/drivers/ata/pata_efar.c linux-2.6.32.45/drivers/ata/pata_efar.c
25157--- linux-2.6.32.45/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
25158+++ linux-2.6.32.45/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
25159@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
25160 ATA_BMDMA_SHT(DRV_NAME),
25161 };
25162
25163-static struct ata_port_operations efar_ops = {
25164+static const struct ata_port_operations efar_ops = {
25165 .inherits = &ata_bmdma_port_ops,
25166 .cable_detect = efar_cable_detect,
25167 .set_piomode = efar_set_piomode,
25168diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt366.c linux-2.6.32.45/drivers/ata/pata_hpt366.c
25169--- linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
25170+++ linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
25171@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
25172 * Configuration for HPT366/68
25173 */
25174
25175-static struct ata_port_operations hpt366_port_ops = {
25176+static const struct ata_port_operations hpt366_port_ops = {
25177 .inherits = &ata_bmdma_port_ops,
25178 .cable_detect = hpt36x_cable_detect,
25179 .mode_filter = hpt366_filter,
25180diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt37x.c linux-2.6.32.45/drivers/ata/pata_hpt37x.c
25181--- linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
25182+++ linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
25183@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
25184 * Configuration for HPT370
25185 */
25186
25187-static struct ata_port_operations hpt370_port_ops = {
25188+static const struct ata_port_operations hpt370_port_ops = {
25189 .inherits = &ata_bmdma_port_ops,
25190
25191 .bmdma_stop = hpt370_bmdma_stop,
25192@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
25193 * Configuration for HPT370A. Close to 370 but less filters
25194 */
25195
25196-static struct ata_port_operations hpt370a_port_ops = {
25197+static const struct ata_port_operations hpt370a_port_ops = {
25198 .inherits = &hpt370_port_ops,
25199 .mode_filter = hpt370a_filter,
25200 };
25201@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
25202 * and DMA mode setting functionality.
25203 */
25204
25205-static struct ata_port_operations hpt372_port_ops = {
25206+static const struct ata_port_operations hpt372_port_ops = {
25207 .inherits = &ata_bmdma_port_ops,
25208
25209 .bmdma_stop = hpt37x_bmdma_stop,
25210@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
25211 * but we have a different cable detection procedure for function 1.
25212 */
25213
25214-static struct ata_port_operations hpt374_fn1_port_ops = {
25215+static const struct ata_port_operations hpt374_fn1_port_ops = {
25216 .inherits = &hpt372_port_ops,
25217 .prereset = hpt374_fn1_pre_reset,
25218 };
25219diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c
25220--- linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
25221+++ linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
25222@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
25223 * Configuration for HPT3x2n.
25224 */
25225
25226-static struct ata_port_operations hpt3x2n_port_ops = {
25227+static const struct ata_port_operations hpt3x2n_port_ops = {
25228 .inherits = &ata_bmdma_port_ops,
25229
25230 .bmdma_stop = hpt3x2n_bmdma_stop,
25231diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x3.c linux-2.6.32.45/drivers/ata/pata_hpt3x3.c
25232--- linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
25233+++ linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
25234@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
25235 ATA_BMDMA_SHT(DRV_NAME),
25236 };
25237
25238-static struct ata_port_operations hpt3x3_port_ops = {
25239+static const struct ata_port_operations hpt3x3_port_ops = {
25240 .inherits = &ata_bmdma_port_ops,
25241 .cable_detect = ata_cable_40wire,
25242 .set_piomode = hpt3x3_set_piomode,
25243diff -urNp linux-2.6.32.45/drivers/ata/pata_icside.c linux-2.6.32.45/drivers/ata/pata_icside.c
25244--- linux-2.6.32.45/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
25245+++ linux-2.6.32.45/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
25246@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
25247 }
25248 }
25249
25250-static struct ata_port_operations pata_icside_port_ops = {
25251+static const struct ata_port_operations pata_icside_port_ops = {
25252 .inherits = &ata_sff_port_ops,
25253 /* no need to build any PRD tables for DMA */
25254 .qc_prep = ata_noop_qc_prep,
25255diff -urNp linux-2.6.32.45/drivers/ata/pata_isapnp.c linux-2.6.32.45/drivers/ata/pata_isapnp.c
25256--- linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
25257+++ linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
25258@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
25259 ATA_PIO_SHT(DRV_NAME),
25260 };
25261
25262-static struct ata_port_operations isapnp_port_ops = {
25263+static const struct ata_port_operations isapnp_port_ops = {
25264 .inherits = &ata_sff_port_ops,
25265 .cable_detect = ata_cable_40wire,
25266 };
25267
25268-static struct ata_port_operations isapnp_noalt_port_ops = {
25269+static const struct ata_port_operations isapnp_noalt_port_ops = {
25270 .inherits = &ata_sff_port_ops,
25271 .cable_detect = ata_cable_40wire,
25272 /* No altstatus so we don't want to use the lost interrupt poll */
25273diff -urNp linux-2.6.32.45/drivers/ata/pata_it8213.c linux-2.6.32.45/drivers/ata/pata_it8213.c
25274--- linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
25275+++ linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
25276@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
25277 };
25278
25279
25280-static struct ata_port_operations it8213_ops = {
25281+static const struct ata_port_operations it8213_ops = {
25282 .inherits = &ata_bmdma_port_ops,
25283 .cable_detect = it8213_cable_detect,
25284 .set_piomode = it8213_set_piomode,
25285diff -urNp linux-2.6.32.45/drivers/ata/pata_it821x.c linux-2.6.32.45/drivers/ata/pata_it821x.c
25286--- linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
25287+++ linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
25288@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
25289 ATA_BMDMA_SHT(DRV_NAME),
25290 };
25291
25292-static struct ata_port_operations it821x_smart_port_ops = {
25293+static const struct ata_port_operations it821x_smart_port_ops = {
25294 .inherits = &ata_bmdma_port_ops,
25295
25296 .check_atapi_dma= it821x_check_atapi_dma,
25297@@ -814,7 +814,7 @@ static struct ata_port_operations it821x
25298 .port_start = it821x_port_start,
25299 };
25300
25301-static struct ata_port_operations it821x_passthru_port_ops = {
25302+static const struct ata_port_operations it821x_passthru_port_ops = {
25303 .inherits = &ata_bmdma_port_ops,
25304
25305 .check_atapi_dma= it821x_check_atapi_dma,
25306@@ -830,7 +830,7 @@ static struct ata_port_operations it821x
25307 .port_start = it821x_port_start,
25308 };
25309
25310-static struct ata_port_operations it821x_rdc_port_ops = {
25311+static const struct ata_port_operations it821x_rdc_port_ops = {
25312 .inherits = &ata_bmdma_port_ops,
25313
25314 .check_atapi_dma= it821x_check_atapi_dma,
25315diff -urNp linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c
25316--- linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
25317+++ linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
25318@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
25319 ATA_PIO_SHT(DRV_NAME),
25320 };
25321
25322-static struct ata_port_operations ixp4xx_port_ops = {
25323+static const struct ata_port_operations ixp4xx_port_ops = {
25324 .inherits = &ata_sff_port_ops,
25325 .sff_data_xfer = ixp4xx_mmio_data_xfer,
25326 .cable_detect = ata_cable_40wire,
25327diff -urNp linux-2.6.32.45/drivers/ata/pata_jmicron.c linux-2.6.32.45/drivers/ata/pata_jmicron.c
25328--- linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
25329+++ linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
25330@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
25331 ATA_BMDMA_SHT(DRV_NAME),
25332 };
25333
25334-static struct ata_port_operations jmicron_ops = {
25335+static const struct ata_port_operations jmicron_ops = {
25336 .inherits = &ata_bmdma_port_ops,
25337 .prereset = jmicron_pre_reset,
25338 };
25339diff -urNp linux-2.6.32.45/drivers/ata/pata_legacy.c linux-2.6.32.45/drivers/ata/pata_legacy.c
25340--- linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
25341+++ linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
25342@@ -106,7 +106,7 @@ struct legacy_probe {
25343
25344 struct legacy_controller {
25345 const char *name;
25346- struct ata_port_operations *ops;
25347+ const struct ata_port_operations *ops;
25348 unsigned int pio_mask;
25349 unsigned int flags;
25350 unsigned int pflags;
25351@@ -223,12 +223,12 @@ static const struct ata_port_operations
25352 * pio_mask as well.
25353 */
25354
25355-static struct ata_port_operations simple_port_ops = {
25356+static const struct ata_port_operations simple_port_ops = {
25357 .inherits = &legacy_base_port_ops,
25358 .sff_data_xfer = ata_sff_data_xfer_noirq,
25359 };
25360
25361-static struct ata_port_operations legacy_port_ops = {
25362+static const struct ata_port_operations legacy_port_ops = {
25363 .inherits = &legacy_base_port_ops,
25364 .sff_data_xfer = ata_sff_data_xfer_noirq,
25365 .set_mode = legacy_set_mode,
25366@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
25367 return buflen;
25368 }
25369
25370-static struct ata_port_operations pdc20230_port_ops = {
25371+static const struct ata_port_operations pdc20230_port_ops = {
25372 .inherits = &legacy_base_port_ops,
25373 .set_piomode = pdc20230_set_piomode,
25374 .sff_data_xfer = pdc_data_xfer_vlb,
25375@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
25376 ioread8(ap->ioaddr.status_addr);
25377 }
25378
25379-static struct ata_port_operations ht6560a_port_ops = {
25380+static const struct ata_port_operations ht6560a_port_ops = {
25381 .inherits = &legacy_base_port_ops,
25382 .set_piomode = ht6560a_set_piomode,
25383 };
25384@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
25385 ioread8(ap->ioaddr.status_addr);
25386 }
25387
25388-static struct ata_port_operations ht6560b_port_ops = {
25389+static const struct ata_port_operations ht6560b_port_ops = {
25390 .inherits = &legacy_base_port_ops,
25391 .set_piomode = ht6560b_set_piomode,
25392 };
25393@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
25394 }
25395
25396
25397-static struct ata_port_operations opti82c611a_port_ops = {
25398+static const struct ata_port_operations opti82c611a_port_ops = {
25399 .inherits = &legacy_base_port_ops,
25400 .set_piomode = opti82c611a_set_piomode,
25401 };
25402@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
25403 return ata_sff_qc_issue(qc);
25404 }
25405
25406-static struct ata_port_operations opti82c46x_port_ops = {
25407+static const struct ata_port_operations opti82c46x_port_ops = {
25408 .inherits = &legacy_base_port_ops,
25409 .set_piomode = opti82c46x_set_piomode,
25410 .qc_issue = opti82c46x_qc_issue,
25411@@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
25412 return 0;
25413 }
25414
25415-static struct ata_port_operations qdi6500_port_ops = {
25416+static const struct ata_port_operations qdi6500_port_ops = {
25417 .inherits = &legacy_base_port_ops,
25418 .set_piomode = qdi6500_set_piomode,
25419 .qc_issue = qdi_qc_issue,
25420 .sff_data_xfer = vlb32_data_xfer,
25421 };
25422
25423-static struct ata_port_operations qdi6580_port_ops = {
25424+static const struct ata_port_operations qdi6580_port_ops = {
25425 .inherits = &legacy_base_port_ops,
25426 .set_piomode = qdi6580_set_piomode,
25427 .sff_data_xfer = vlb32_data_xfer,
25428 };
25429
25430-static struct ata_port_operations qdi6580dp_port_ops = {
25431+static const struct ata_port_operations qdi6580dp_port_ops = {
25432 .inherits = &legacy_base_port_ops,
25433 .set_piomode = qdi6580dp_set_piomode,
25434 .sff_data_xfer = vlb32_data_xfer,
25435@@ -855,7 +855,7 @@ static int winbond_port(struct platform_
25436 return 0;
25437 }
25438
25439-static struct ata_port_operations winbond_port_ops = {
25440+static const struct ata_port_operations winbond_port_ops = {
25441 .inherits = &legacy_base_port_ops,
25442 .set_piomode = winbond_set_piomode,
25443 .sff_data_xfer = vlb32_data_xfer,
25444@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
25445 int pio_modes = controller->pio_mask;
25446 unsigned long io = probe->port;
25447 u32 mask = (1 << probe->slot);
25448- struct ata_port_operations *ops = controller->ops;
25449+ const struct ata_port_operations *ops = controller->ops;
25450 struct legacy_data *ld = &legacy_data[probe->slot];
25451 struct ata_host *host = NULL;
25452 struct ata_port *ap;
25453diff -urNp linux-2.6.32.45/drivers/ata/pata_marvell.c linux-2.6.32.45/drivers/ata/pata_marvell.c
25454--- linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
25455+++ linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
25456@@ -100,7 +100,7 @@ static struct scsi_host_template marvell
25457 ATA_BMDMA_SHT(DRV_NAME),
25458 };
25459
25460-static struct ata_port_operations marvell_ops = {
25461+static const struct ata_port_operations marvell_ops = {
25462 .inherits = &ata_bmdma_port_ops,
25463 .cable_detect = marvell_cable_detect,
25464 .prereset = marvell_pre_reset,
25465diff -urNp linux-2.6.32.45/drivers/ata/pata_mpc52xx.c linux-2.6.32.45/drivers/ata/pata_mpc52xx.c
25466--- linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
25467+++ linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
25468@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
25469 ATA_PIO_SHT(DRV_NAME),
25470 };
25471
25472-static struct ata_port_operations mpc52xx_ata_port_ops = {
25473+static const struct ata_port_operations mpc52xx_ata_port_ops = {
25474 .inherits = &ata_bmdma_port_ops,
25475 .sff_dev_select = mpc52xx_ata_dev_select,
25476 .set_piomode = mpc52xx_ata_set_piomode,
25477diff -urNp linux-2.6.32.45/drivers/ata/pata_mpiix.c linux-2.6.32.45/drivers/ata/pata_mpiix.c
25478--- linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
25479+++ linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
25480@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
25481 ATA_PIO_SHT(DRV_NAME),
25482 };
25483
25484-static struct ata_port_operations mpiix_port_ops = {
25485+static const struct ata_port_operations mpiix_port_ops = {
25486 .inherits = &ata_sff_port_ops,
25487 .qc_issue = mpiix_qc_issue,
25488 .cable_detect = ata_cable_40wire,
25489diff -urNp linux-2.6.32.45/drivers/ata/pata_netcell.c linux-2.6.32.45/drivers/ata/pata_netcell.c
25490--- linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
25491+++ linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
25492@@ -34,7 +34,7 @@ static struct scsi_host_template netcell
25493 ATA_BMDMA_SHT(DRV_NAME),
25494 };
25495
25496-static struct ata_port_operations netcell_ops = {
25497+static const struct ata_port_operations netcell_ops = {
25498 .inherits = &ata_bmdma_port_ops,
25499 .cable_detect = ata_cable_80wire,
25500 .read_id = netcell_read_id,
25501diff -urNp linux-2.6.32.45/drivers/ata/pata_ninja32.c linux-2.6.32.45/drivers/ata/pata_ninja32.c
25502--- linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
25503+++ linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
25504@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
25505 ATA_BMDMA_SHT(DRV_NAME),
25506 };
25507
25508-static struct ata_port_operations ninja32_port_ops = {
25509+static const struct ata_port_operations ninja32_port_ops = {
25510 .inherits = &ata_bmdma_port_ops,
25511 .sff_dev_select = ninja32_dev_select,
25512 .cable_detect = ata_cable_40wire,
25513diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87410.c linux-2.6.32.45/drivers/ata/pata_ns87410.c
25514--- linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
25515+++ linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
25516@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
25517 ATA_PIO_SHT(DRV_NAME),
25518 };
25519
25520-static struct ata_port_operations ns87410_port_ops = {
25521+static const struct ata_port_operations ns87410_port_ops = {
25522 .inherits = &ata_sff_port_ops,
25523 .qc_issue = ns87410_qc_issue,
25524 .cable_detect = ata_cable_40wire,
25525diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87415.c linux-2.6.32.45/drivers/ata/pata_ns87415.c
25526--- linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
25527+++ linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
25528@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
25529 }
25530 #endif /* 87560 SuperIO Support */
25531
25532-static struct ata_port_operations ns87415_pata_ops = {
25533+static const struct ata_port_operations ns87415_pata_ops = {
25534 .inherits = &ata_bmdma_port_ops,
25535
25536 .check_atapi_dma = ns87415_check_atapi_dma,
25537@@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
25538 };
25539
25540 #if defined(CONFIG_SUPERIO)
25541-static struct ata_port_operations ns87560_pata_ops = {
25542+static const struct ata_port_operations ns87560_pata_ops = {
25543 .inherits = &ns87415_pata_ops,
25544 .sff_tf_read = ns87560_tf_read,
25545 .sff_check_status = ns87560_check_status,
25546diff -urNp linux-2.6.32.45/drivers/ata/pata_octeon_cf.c linux-2.6.32.45/drivers/ata/pata_octeon_cf.c
25547--- linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25548+++ linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25549@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25550 return 0;
25551 }
25552
25553+/* cannot be const */
25554 static struct ata_port_operations octeon_cf_ops = {
25555 .inherits = &ata_sff_port_ops,
25556 .check_atapi_dma = octeon_cf_check_atapi_dma,
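
The octeon_cf hunk is the one exception: instead of adding const, it adds a "/* cannot be const */" note, presumably because the driver fills in or overrides members of this table at run time (for example during probe, depending on what the hardware offers), which a const object in read-only memory would not allow. A minimal sketch of that situation, with invented names:

	/* Sketch with invented names: a table that is patched during probe cannot
	 * be const, because the assignment below would write to read-only memory. */
	struct demo_ops {
		int (*xfer)(void *ctx);
	};

	static int demo_xfer_pio(void *ctx) { (void)ctx; return 0; }
	static int demo_xfer_dma(void *ctx) { (void)ctx; return 1; }

	static struct demo_ops demo_ops = {	/* deliberately NOT const */
		.xfer = demo_xfer_pio,
	};

	static void demo_probe(int have_dma)
	{
		if (have_dma)
			demo_ops.xfer = demo_xfer_dma;	/* run-time override */
	}
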
25557diff -urNp linux-2.6.32.45/drivers/ata/pata_oldpiix.c linux-2.6.32.45/drivers/ata/pata_oldpiix.c
25558--- linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25559+++ linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25560@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25561 ATA_BMDMA_SHT(DRV_NAME),
25562 };
25563
25564-static struct ata_port_operations oldpiix_pata_ops = {
25565+static const struct ata_port_operations oldpiix_pata_ops = {
25566 .inherits = &ata_bmdma_port_ops,
25567 .qc_issue = oldpiix_qc_issue,
25568 .cable_detect = ata_cable_40wire,
25569diff -urNp linux-2.6.32.45/drivers/ata/pata_opti.c linux-2.6.32.45/drivers/ata/pata_opti.c
25570--- linux-2.6.32.45/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25571+++ linux-2.6.32.45/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25572@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25573 ATA_PIO_SHT(DRV_NAME),
25574 };
25575
25576-static struct ata_port_operations opti_port_ops = {
25577+static const struct ata_port_operations opti_port_ops = {
25578 .inherits = &ata_sff_port_ops,
25579 .cable_detect = ata_cable_40wire,
25580 .set_piomode = opti_set_piomode,
25581diff -urNp linux-2.6.32.45/drivers/ata/pata_optidma.c linux-2.6.32.45/drivers/ata/pata_optidma.c
25582--- linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25583+++ linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25584@@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25585 ATA_BMDMA_SHT(DRV_NAME),
25586 };
25587
25588-static struct ata_port_operations optidma_port_ops = {
25589+static const struct ata_port_operations optidma_port_ops = {
25590 .inherits = &ata_bmdma_port_ops,
25591 .cable_detect = ata_cable_40wire,
25592 .set_piomode = optidma_set_pio_mode,
25593@@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25594 .prereset = optidma_pre_reset,
25595 };
25596
25597-static struct ata_port_operations optiplus_port_ops = {
25598+static const struct ata_port_operations optiplus_port_ops = {
25599 .inherits = &optidma_port_ops,
25600 .set_piomode = optiplus_set_pio_mode,
25601 .set_dmamode = optiplus_set_dma_mode,
25602diff -urNp linux-2.6.32.45/drivers/ata/pata_palmld.c linux-2.6.32.45/drivers/ata/pata_palmld.c
25603--- linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25604+++ linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25605@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25606 ATA_PIO_SHT(DRV_NAME),
25607 };
25608
25609-static struct ata_port_operations palmld_port_ops = {
25610+static const struct ata_port_operations palmld_port_ops = {
25611 .inherits = &ata_sff_port_ops,
25612 .sff_data_xfer = ata_sff_data_xfer_noirq,
25613 .cable_detect = ata_cable_40wire,
25614diff -urNp linux-2.6.32.45/drivers/ata/pata_pcmcia.c linux-2.6.32.45/drivers/ata/pata_pcmcia.c
25615--- linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25616+++ linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25617@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25618 ATA_PIO_SHT(DRV_NAME),
25619 };
25620
25621-static struct ata_port_operations pcmcia_port_ops = {
25622+static const struct ata_port_operations pcmcia_port_ops = {
25623 .inherits = &ata_sff_port_ops,
25624 .sff_data_xfer = ata_sff_data_xfer_noirq,
25625 .cable_detect = ata_cable_40wire,
25626 .set_mode = pcmcia_set_mode,
25627 };
25628
25629-static struct ata_port_operations pcmcia_8bit_port_ops = {
25630+static const struct ata_port_operations pcmcia_8bit_port_ops = {
25631 .inherits = &ata_sff_port_ops,
25632 .sff_data_xfer = ata_data_xfer_8bit,
25633 .cable_detect = ata_cable_40wire,
25634@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25635 unsigned long io_base, ctl_base;
25636 void __iomem *io_addr, *ctl_addr;
25637 int n_ports = 1;
25638- struct ata_port_operations *ops = &pcmcia_port_ops;
25639+ const struct ata_port_operations *ops = &pcmcia_port_ops;
25640
25641 info = kzalloc(sizeof(*info), GFP_KERNEL);
25642 if (info == NULL)
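
Constifying the tables also forces the pointer types that refer to them to change, which is what the pcmcia_init_one hunk above shows: the local "struct ata_port_operations *ops" becomes "const struct ata_port_operations *ops" so it can still be pointed at either of the now read-only tables. A small sketch of the same idiom (names are hypothetical):

	/* Hypothetical names; mirrors pcmcia_init_one choosing between two
	 * read-only ops tables through a pointer-to-const. */
	struct demo_ops { int (*xfer)(void); };

	static int xfer_16bit(void) { return 16; }
	static int xfer_8bit(void)  { return 8; }

	static const struct demo_ops ops_16bit = { .xfer = xfer_16bit };
	static const struct demo_ops ops_8bit  = { .xfer = xfer_8bit };

	static const struct demo_ops *select_ops(int is_8bit)
	{
		/* A plain "struct demo_ops *" would not compile here without
		 * casting away const, so the pointer carries the qualifier too. */
		return is_8bit ? &ops_8bit : &ops_16bit;
	}
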
25643diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc2027x.c linux-2.6.32.45/drivers/ata/pata_pdc2027x.c
25644--- linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25645+++ linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25646@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25647 ATA_BMDMA_SHT(DRV_NAME),
25648 };
25649
25650-static struct ata_port_operations pdc2027x_pata100_ops = {
25651+static const struct ata_port_operations pdc2027x_pata100_ops = {
25652 .inherits = &ata_bmdma_port_ops,
25653 .check_atapi_dma = pdc2027x_check_atapi_dma,
25654 .cable_detect = pdc2027x_cable_detect,
25655 .prereset = pdc2027x_prereset,
25656 };
25657
25658-static struct ata_port_operations pdc2027x_pata133_ops = {
25659+static const struct ata_port_operations pdc2027x_pata133_ops = {
25660 .inherits = &pdc2027x_pata100_ops,
25661 .mode_filter = pdc2027x_mode_filter,
25662 .set_piomode = pdc2027x_set_piomode,
25663diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c
25664--- linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25665+++ linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25666@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25667 ATA_BMDMA_SHT(DRV_NAME),
25668 };
25669
25670-static struct ata_port_operations pdc2024x_port_ops = {
25671+static const struct ata_port_operations pdc2024x_port_ops = {
25672 .inherits = &ata_bmdma_port_ops,
25673
25674 .cable_detect = ata_cable_40wire,
25675@@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25676 .sff_exec_command = pdc202xx_exec_command,
25677 };
25678
25679-static struct ata_port_operations pdc2026x_port_ops = {
25680+static const struct ata_port_operations pdc2026x_port_ops = {
25681 .inherits = &pdc2024x_port_ops,
25682
25683 .check_atapi_dma = pdc2026x_check_atapi_dma,
25684diff -urNp linux-2.6.32.45/drivers/ata/pata_platform.c linux-2.6.32.45/drivers/ata/pata_platform.c
25685--- linux-2.6.32.45/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25686+++ linux-2.6.32.45/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25687@@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25688 ATA_PIO_SHT(DRV_NAME),
25689 };
25690
25691-static struct ata_port_operations pata_platform_port_ops = {
25692+static const struct ata_port_operations pata_platform_port_ops = {
25693 .inherits = &ata_sff_port_ops,
25694 .sff_data_xfer = ata_sff_data_xfer_noirq,
25695 .cable_detect = ata_cable_unknown,
25696diff -urNp linux-2.6.32.45/drivers/ata/pata_qdi.c linux-2.6.32.45/drivers/ata/pata_qdi.c
25697--- linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25698+++ linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25699@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25700 ATA_PIO_SHT(DRV_NAME),
25701 };
25702
25703-static struct ata_port_operations qdi6500_port_ops = {
25704+static const struct ata_port_operations qdi6500_port_ops = {
25705 .inherits = &ata_sff_port_ops,
25706 .qc_issue = qdi_qc_issue,
25707 .sff_data_xfer = qdi_data_xfer,
25708@@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25709 .set_piomode = qdi6500_set_piomode,
25710 };
25711
25712-static struct ata_port_operations qdi6580_port_ops = {
25713+static const struct ata_port_operations qdi6580_port_ops = {
25714 .inherits = &qdi6500_port_ops,
25715 .set_piomode = qdi6580_set_piomode,
25716 };
25717diff -urNp linux-2.6.32.45/drivers/ata/pata_radisys.c linux-2.6.32.45/drivers/ata/pata_radisys.c
25718--- linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25719+++ linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25720@@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25721 ATA_BMDMA_SHT(DRV_NAME),
25722 };
25723
25724-static struct ata_port_operations radisys_pata_ops = {
25725+static const struct ata_port_operations radisys_pata_ops = {
25726 .inherits = &ata_bmdma_port_ops,
25727 .qc_issue = radisys_qc_issue,
25728 .cable_detect = ata_cable_unknown,
25729diff -urNp linux-2.6.32.45/drivers/ata/pata_rb532_cf.c linux-2.6.32.45/drivers/ata/pata_rb532_cf.c
25730--- linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25731+++ linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25732@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25733 return IRQ_HANDLED;
25734 }
25735
25736-static struct ata_port_operations rb532_pata_port_ops = {
25737+static const struct ata_port_operations rb532_pata_port_ops = {
25738 .inherits = &ata_sff_port_ops,
25739 .sff_data_xfer = ata_sff_data_xfer32,
25740 };
25741diff -urNp linux-2.6.32.45/drivers/ata/pata_rdc.c linux-2.6.32.45/drivers/ata/pata_rdc.c
25742--- linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25743+++ linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25744@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25745 pci_write_config_byte(dev, 0x48, udma_enable);
25746 }
25747
25748-static struct ata_port_operations rdc_pata_ops = {
25749+static const struct ata_port_operations rdc_pata_ops = {
25750 .inherits = &ata_bmdma32_port_ops,
25751 .cable_detect = rdc_pata_cable_detect,
25752 .set_piomode = rdc_set_piomode,
25753diff -urNp linux-2.6.32.45/drivers/ata/pata_rz1000.c linux-2.6.32.45/drivers/ata/pata_rz1000.c
25754--- linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25755+++ linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25756@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25757 ATA_PIO_SHT(DRV_NAME),
25758 };
25759
25760-static struct ata_port_operations rz1000_port_ops = {
25761+static const struct ata_port_operations rz1000_port_ops = {
25762 .inherits = &ata_sff_port_ops,
25763 .cable_detect = ata_cable_40wire,
25764 .set_mode = rz1000_set_mode,
25765diff -urNp linux-2.6.32.45/drivers/ata/pata_sc1200.c linux-2.6.32.45/drivers/ata/pata_sc1200.c
25766--- linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25767+++ linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25768@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25769 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25770 };
25771
25772-static struct ata_port_operations sc1200_port_ops = {
25773+static const struct ata_port_operations sc1200_port_ops = {
25774 .inherits = &ata_bmdma_port_ops,
25775 .qc_prep = ata_sff_dumb_qc_prep,
25776 .qc_issue = sc1200_qc_issue,
25777diff -urNp linux-2.6.32.45/drivers/ata/pata_scc.c linux-2.6.32.45/drivers/ata/pata_scc.c
25778--- linux-2.6.32.45/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25779+++ linux-2.6.32.45/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25780@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25781 ATA_BMDMA_SHT(DRV_NAME),
25782 };
25783
25784-static struct ata_port_operations scc_pata_ops = {
25785+static const struct ata_port_operations scc_pata_ops = {
25786 .inherits = &ata_bmdma_port_ops,
25787
25788 .set_piomode = scc_set_piomode,
25789diff -urNp linux-2.6.32.45/drivers/ata/pata_sch.c linux-2.6.32.45/drivers/ata/pata_sch.c
25790--- linux-2.6.32.45/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25791+++ linux-2.6.32.45/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25792@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25793 ATA_BMDMA_SHT(DRV_NAME),
25794 };
25795
25796-static struct ata_port_operations sch_pata_ops = {
25797+static const struct ata_port_operations sch_pata_ops = {
25798 .inherits = &ata_bmdma_port_ops,
25799 .cable_detect = ata_cable_unknown,
25800 .set_piomode = sch_set_piomode,
25801diff -urNp linux-2.6.32.45/drivers/ata/pata_serverworks.c linux-2.6.32.45/drivers/ata/pata_serverworks.c
25802--- linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25803+++ linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25804@@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25805 ATA_BMDMA_SHT(DRV_NAME),
25806 };
25807
25808-static struct ata_port_operations serverworks_osb4_port_ops = {
25809+static const struct ata_port_operations serverworks_osb4_port_ops = {
25810 .inherits = &ata_bmdma_port_ops,
25811 .cable_detect = serverworks_cable_detect,
25812 .mode_filter = serverworks_osb4_filter,
25813@@ -307,7 +307,7 @@ static struct ata_port_operations server
25814 .set_dmamode = serverworks_set_dmamode,
25815 };
25816
25817-static struct ata_port_operations serverworks_csb_port_ops = {
25818+static const struct ata_port_operations serverworks_csb_port_ops = {
25819 .inherits = &serverworks_osb4_port_ops,
25820 .mode_filter = serverworks_csb_filter,
25821 };
25822diff -urNp linux-2.6.32.45/drivers/ata/pata_sil680.c linux-2.6.32.45/drivers/ata/pata_sil680.c
25823--- linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25824+++ linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25825@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25826 ATA_BMDMA_SHT(DRV_NAME),
25827 };
25828
25829-static struct ata_port_operations sil680_port_ops = {
25830+static const struct ata_port_operations sil680_port_ops = {
25831 .inherits = &ata_bmdma32_port_ops,
25832 .cable_detect = sil680_cable_detect,
25833 .set_piomode = sil680_set_piomode,
25834diff -urNp linux-2.6.32.45/drivers/ata/pata_sis.c linux-2.6.32.45/drivers/ata/pata_sis.c
25835--- linux-2.6.32.45/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25836+++ linux-2.6.32.45/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25837@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25838 ATA_BMDMA_SHT(DRV_NAME),
25839 };
25840
25841-static struct ata_port_operations sis_133_for_sata_ops = {
25842+static const struct ata_port_operations sis_133_for_sata_ops = {
25843 .inherits = &ata_bmdma_port_ops,
25844 .set_piomode = sis_133_set_piomode,
25845 .set_dmamode = sis_133_set_dmamode,
25846 .cable_detect = sis_133_cable_detect,
25847 };
25848
25849-static struct ata_port_operations sis_base_ops = {
25850+static const struct ata_port_operations sis_base_ops = {
25851 .inherits = &ata_bmdma_port_ops,
25852 .prereset = sis_pre_reset,
25853 };
25854
25855-static struct ata_port_operations sis_133_ops = {
25856+static const struct ata_port_operations sis_133_ops = {
25857 .inherits = &sis_base_ops,
25858 .set_piomode = sis_133_set_piomode,
25859 .set_dmamode = sis_133_set_dmamode,
25860 .cable_detect = sis_133_cable_detect,
25861 };
25862
25863-static struct ata_port_operations sis_133_early_ops = {
25864+static const struct ata_port_operations sis_133_early_ops = {
25865 .inherits = &sis_base_ops,
25866 .set_piomode = sis_100_set_piomode,
25867 .set_dmamode = sis_133_early_set_dmamode,
25868 .cable_detect = sis_66_cable_detect,
25869 };
25870
25871-static struct ata_port_operations sis_100_ops = {
25872+static const struct ata_port_operations sis_100_ops = {
25873 .inherits = &sis_base_ops,
25874 .set_piomode = sis_100_set_piomode,
25875 .set_dmamode = sis_100_set_dmamode,
25876 .cable_detect = sis_66_cable_detect,
25877 };
25878
25879-static struct ata_port_operations sis_66_ops = {
25880+static const struct ata_port_operations sis_66_ops = {
25881 .inherits = &sis_base_ops,
25882 .set_piomode = sis_old_set_piomode,
25883 .set_dmamode = sis_66_set_dmamode,
25884 .cable_detect = sis_66_cable_detect,
25885 };
25886
25887-static struct ata_port_operations sis_old_ops = {
25888+static const struct ata_port_operations sis_old_ops = {
25889 .inherits = &sis_base_ops,
25890 .set_piomode = sis_old_set_piomode,
25891 .set_dmamode = sis_old_set_dmamode,
25892diff -urNp linux-2.6.32.45/drivers/ata/pata_sl82c105.c linux-2.6.32.45/drivers/ata/pata_sl82c105.c
25893--- linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25894+++ linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25895@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25896 ATA_BMDMA_SHT(DRV_NAME),
25897 };
25898
25899-static struct ata_port_operations sl82c105_port_ops = {
25900+static const struct ata_port_operations sl82c105_port_ops = {
25901 .inherits = &ata_bmdma_port_ops,
25902 .qc_defer = sl82c105_qc_defer,
25903 .bmdma_start = sl82c105_bmdma_start,
25904diff -urNp linux-2.6.32.45/drivers/ata/pata_triflex.c linux-2.6.32.45/drivers/ata/pata_triflex.c
25905--- linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25906+++ linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25907@@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25908 ATA_BMDMA_SHT(DRV_NAME),
25909 };
25910
25911-static struct ata_port_operations triflex_port_ops = {
25912+static const struct ata_port_operations triflex_port_ops = {
25913 .inherits = &ata_bmdma_port_ops,
25914 .bmdma_start = triflex_bmdma_start,
25915 .bmdma_stop = triflex_bmdma_stop,
25916diff -urNp linux-2.6.32.45/drivers/ata/pata_via.c linux-2.6.32.45/drivers/ata/pata_via.c
25917--- linux-2.6.32.45/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25918+++ linux-2.6.32.45/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25919@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25920 ATA_BMDMA_SHT(DRV_NAME),
25921 };
25922
25923-static struct ata_port_operations via_port_ops = {
25924+static const struct ata_port_operations via_port_ops = {
25925 .inherits = &ata_bmdma_port_ops,
25926 .cable_detect = via_cable_detect,
25927 .set_piomode = via_set_piomode,
25928@@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25929 .port_start = via_port_start,
25930 };
25931
25932-static struct ata_port_operations via_port_ops_noirq = {
25933+static const struct ata_port_operations via_port_ops_noirq = {
25934 .inherits = &via_port_ops,
25935 .sff_data_xfer = ata_sff_data_xfer_noirq,
25936 };
25937diff -urNp linux-2.6.32.45/drivers/ata/pata_winbond.c linux-2.6.32.45/drivers/ata/pata_winbond.c
25938--- linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25939+++ linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25940@@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25941 ATA_PIO_SHT(DRV_NAME),
25942 };
25943
25944-static struct ata_port_operations winbond_port_ops = {
25945+static const struct ata_port_operations winbond_port_ops = {
25946 .inherits = &ata_sff_port_ops,
25947 .sff_data_xfer = winbond_data_xfer,
25948 .cable_detect = ata_cable_40wire,
25949diff -urNp linux-2.6.32.45/drivers/ata/pdc_adma.c linux-2.6.32.45/drivers/ata/pdc_adma.c
25950--- linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25951+++ linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25952@@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25953 .dma_boundary = ADMA_DMA_BOUNDARY,
25954 };
25955
25956-static struct ata_port_operations adma_ata_ops = {
25957+static const struct ata_port_operations adma_ata_ops = {
25958 .inherits = &ata_sff_port_ops,
25959
25960 .lost_interrupt = ATA_OP_NULL,
25961diff -urNp linux-2.6.32.45/drivers/ata/sata_fsl.c linux-2.6.32.45/drivers/ata/sata_fsl.c
25962--- linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25963+++ linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25964@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25965 .dma_boundary = ATA_DMA_BOUNDARY,
25966 };
25967
25968-static struct ata_port_operations sata_fsl_ops = {
25969+static const struct ata_port_operations sata_fsl_ops = {
25970 .inherits = &sata_pmp_port_ops,
25971
25972 .qc_defer = ata_std_qc_defer,
25973diff -urNp linux-2.6.32.45/drivers/ata/sata_inic162x.c linux-2.6.32.45/drivers/ata/sata_inic162x.c
25974--- linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25975+++ linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25976@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25977 return 0;
25978 }
25979
25980-static struct ata_port_operations inic_port_ops = {
25981+static const struct ata_port_operations inic_port_ops = {
25982 .inherits = &sata_port_ops,
25983
25984 .check_atapi_dma = inic_check_atapi_dma,
25985diff -urNp linux-2.6.32.45/drivers/ata/sata_mv.c linux-2.6.32.45/drivers/ata/sata_mv.c
25986--- linux-2.6.32.45/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25987+++ linux-2.6.32.45/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25988@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25989 .dma_boundary = MV_DMA_BOUNDARY,
25990 };
25991
25992-static struct ata_port_operations mv5_ops = {
25993+static const struct ata_port_operations mv5_ops = {
25994 .inherits = &ata_sff_port_ops,
25995
25996 .lost_interrupt = ATA_OP_NULL,
25997@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25998 .port_stop = mv_port_stop,
25999 };
26000
26001-static struct ata_port_operations mv6_ops = {
26002+static const struct ata_port_operations mv6_ops = {
26003 .inherits = &mv5_ops,
26004 .dev_config = mv6_dev_config,
26005 .scr_read = mv_scr_read,
26006@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
26007 .bmdma_status = mv_bmdma_status,
26008 };
26009
26010-static struct ata_port_operations mv_iie_ops = {
26011+static const struct ata_port_operations mv_iie_ops = {
26012 .inherits = &mv6_ops,
26013 .dev_config = ATA_OP_NULL,
26014 .qc_prep = mv_qc_prep_iie,
26015diff -urNp linux-2.6.32.45/drivers/ata/sata_nv.c linux-2.6.32.45/drivers/ata/sata_nv.c
26016--- linux-2.6.32.45/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
26017+++ linux-2.6.32.45/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
26018@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
26019 * cases. Define nv_hardreset() which only kicks in for post-boot
26020 * probing and use it for all variants.
26021 */
26022-static struct ata_port_operations nv_generic_ops = {
26023+static const struct ata_port_operations nv_generic_ops = {
26024 .inherits = &ata_bmdma_port_ops,
26025 .lost_interrupt = ATA_OP_NULL,
26026 .scr_read = nv_scr_read,
26027@@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
26028 .hardreset = nv_hardreset,
26029 };
26030
26031-static struct ata_port_operations nv_nf2_ops = {
26032+static const struct ata_port_operations nv_nf2_ops = {
26033 .inherits = &nv_generic_ops,
26034 .freeze = nv_nf2_freeze,
26035 .thaw = nv_nf2_thaw,
26036 };
26037
26038-static struct ata_port_operations nv_ck804_ops = {
26039+static const struct ata_port_operations nv_ck804_ops = {
26040 .inherits = &nv_generic_ops,
26041 .freeze = nv_ck804_freeze,
26042 .thaw = nv_ck804_thaw,
26043 .host_stop = nv_ck804_host_stop,
26044 };
26045
26046-static struct ata_port_operations nv_adma_ops = {
26047+static const struct ata_port_operations nv_adma_ops = {
26048 .inherits = &nv_ck804_ops,
26049
26050 .check_atapi_dma = nv_adma_check_atapi_dma,
26051@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
26052 .host_stop = nv_adma_host_stop,
26053 };
26054
26055-static struct ata_port_operations nv_swncq_ops = {
26056+static const struct ata_port_operations nv_swncq_ops = {
26057 .inherits = &nv_generic_ops,
26058
26059 .qc_defer = ata_std_qc_defer,
26060diff -urNp linux-2.6.32.45/drivers/ata/sata_promise.c linux-2.6.32.45/drivers/ata/sata_promise.c
26061--- linux-2.6.32.45/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
26062+++ linux-2.6.32.45/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
26063@@ -195,7 +195,7 @@ static const struct ata_port_operations
26064 .error_handler = pdc_error_handler,
26065 };
26066
26067-static struct ata_port_operations pdc_sata_ops = {
26068+static const struct ata_port_operations pdc_sata_ops = {
26069 .inherits = &pdc_common_ops,
26070 .cable_detect = pdc_sata_cable_detect,
26071 .freeze = pdc_sata_freeze,
26072@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
26073
26074 /* First-generation chips need a more restrictive ->check_atapi_dma op,
26075 and ->freeze/thaw that ignore the hotplug controls. */
26076-static struct ata_port_operations pdc_old_sata_ops = {
26077+static const struct ata_port_operations pdc_old_sata_ops = {
26078 .inherits = &pdc_sata_ops,
26079 .freeze = pdc_freeze,
26080 .thaw = pdc_thaw,
26081 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
26082 };
26083
26084-static struct ata_port_operations pdc_pata_ops = {
26085+static const struct ata_port_operations pdc_pata_ops = {
26086 .inherits = &pdc_common_ops,
26087 .cable_detect = pdc_pata_cable_detect,
26088 .freeze = pdc_freeze,
26089diff -urNp linux-2.6.32.45/drivers/ata/sata_qstor.c linux-2.6.32.45/drivers/ata/sata_qstor.c
26090--- linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
26091+++ linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
26092@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
26093 .dma_boundary = QS_DMA_BOUNDARY,
26094 };
26095
26096-static struct ata_port_operations qs_ata_ops = {
26097+static const struct ata_port_operations qs_ata_ops = {
26098 .inherits = &ata_sff_port_ops,
26099
26100 .check_atapi_dma = qs_check_atapi_dma,
26101diff -urNp linux-2.6.32.45/drivers/ata/sata_sil24.c linux-2.6.32.45/drivers/ata/sata_sil24.c
26102--- linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
26103+++ linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
26104@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
26105 .dma_boundary = ATA_DMA_BOUNDARY,
26106 };
26107
26108-static struct ata_port_operations sil24_ops = {
26109+static const struct ata_port_operations sil24_ops = {
26110 .inherits = &sata_pmp_port_ops,
26111
26112 .qc_defer = sil24_qc_defer,
26113diff -urNp linux-2.6.32.45/drivers/ata/sata_sil.c linux-2.6.32.45/drivers/ata/sata_sil.c
26114--- linux-2.6.32.45/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
26115+++ linux-2.6.32.45/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
26116@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
26117 .sg_tablesize = ATA_MAX_PRD
26118 };
26119
26120-static struct ata_port_operations sil_ops = {
26121+static const struct ata_port_operations sil_ops = {
26122 .inherits = &ata_bmdma32_port_ops,
26123 .dev_config = sil_dev_config,
26124 .set_mode = sil_set_mode,
26125diff -urNp linux-2.6.32.45/drivers/ata/sata_sis.c linux-2.6.32.45/drivers/ata/sata_sis.c
26126--- linux-2.6.32.45/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
26127+++ linux-2.6.32.45/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
26128@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
26129 ATA_BMDMA_SHT(DRV_NAME),
26130 };
26131
26132-static struct ata_port_operations sis_ops = {
26133+static const struct ata_port_operations sis_ops = {
26134 .inherits = &ata_bmdma_port_ops,
26135 .scr_read = sis_scr_read,
26136 .scr_write = sis_scr_write,
26137diff -urNp linux-2.6.32.45/drivers/ata/sata_svw.c linux-2.6.32.45/drivers/ata/sata_svw.c
26138--- linux-2.6.32.45/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
26139+++ linux-2.6.32.45/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
26140@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
26141 };
26142
26143
26144-static struct ata_port_operations k2_sata_ops = {
26145+static const struct ata_port_operations k2_sata_ops = {
26146 .inherits = &ata_bmdma_port_ops,
26147 .sff_tf_load = k2_sata_tf_load,
26148 .sff_tf_read = k2_sata_tf_read,
26149diff -urNp linux-2.6.32.45/drivers/ata/sata_sx4.c linux-2.6.32.45/drivers/ata/sata_sx4.c
26150--- linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
26151+++ linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
26152@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
26153 };
26154
26155 /* TODO: inherit from base port_ops after converting to new EH */
26156-static struct ata_port_operations pdc_20621_ops = {
26157+static const struct ata_port_operations pdc_20621_ops = {
26158 .inherits = &ata_sff_port_ops,
26159
26160 .check_atapi_dma = pdc_check_atapi_dma,
26161diff -urNp linux-2.6.32.45/drivers/ata/sata_uli.c linux-2.6.32.45/drivers/ata/sata_uli.c
26162--- linux-2.6.32.45/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
26163+++ linux-2.6.32.45/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
26164@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
26165 ATA_BMDMA_SHT(DRV_NAME),
26166 };
26167
26168-static struct ata_port_operations uli_ops = {
26169+static const struct ata_port_operations uli_ops = {
26170 .inherits = &ata_bmdma_port_ops,
26171 .scr_read = uli_scr_read,
26172 .scr_write = uli_scr_write,
26173diff -urNp linux-2.6.32.45/drivers/ata/sata_via.c linux-2.6.32.45/drivers/ata/sata_via.c
26174--- linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
26175+++ linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
26176@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
26177 ATA_BMDMA_SHT(DRV_NAME),
26178 };
26179
26180-static struct ata_port_operations svia_base_ops = {
26181+static const struct ata_port_operations svia_base_ops = {
26182 .inherits = &ata_bmdma_port_ops,
26183 .sff_tf_load = svia_tf_load,
26184 };
26185
26186-static struct ata_port_operations vt6420_sata_ops = {
26187+static const struct ata_port_operations vt6420_sata_ops = {
26188 .inherits = &svia_base_ops,
26189 .freeze = svia_noop_freeze,
26190 .prereset = vt6420_prereset,
26191 .bmdma_start = vt6420_bmdma_start,
26192 };
26193
26194-static struct ata_port_operations vt6421_pata_ops = {
26195+static const struct ata_port_operations vt6421_pata_ops = {
26196 .inherits = &svia_base_ops,
26197 .cable_detect = vt6421_pata_cable_detect,
26198 .set_piomode = vt6421_set_pio_mode,
26199 .set_dmamode = vt6421_set_dma_mode,
26200 };
26201
26202-static struct ata_port_operations vt6421_sata_ops = {
26203+static const struct ata_port_operations vt6421_sata_ops = {
26204 .inherits = &svia_base_ops,
26205 .scr_read = svia_scr_read,
26206 .scr_write = svia_scr_write,
26207 };
26208
26209-static struct ata_port_operations vt8251_ops = {
26210+static const struct ata_port_operations vt8251_ops = {
26211 .inherits = &svia_base_ops,
26212 .hardreset = sata_std_hardreset,
26213 .scr_read = vt8251_scr_read,
26214diff -urNp linux-2.6.32.45/drivers/ata/sata_vsc.c linux-2.6.32.45/drivers/ata/sata_vsc.c
26215--- linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
26216+++ linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
26217@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
26218 };
26219
26220
26221-static struct ata_port_operations vsc_sata_ops = {
26222+static const struct ata_port_operations vsc_sata_ops = {
26223 .inherits = &ata_bmdma_port_ops,
26224 /* The IRQ handling is not quite standard SFF behaviour so we
26225 cannot use the default lost interrupt handler */
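
The libata constification run ends with sata_vsc.c; from drivers/atm/adummy.c onward the pattern changes. Every update of the per-VCC traffic statistics (stats->tx, stats->rx, stats->tx_err, stats->rx_err, stats->rx_drop) is switched from atomic_inc()/atomic_add() to the _unchecked variants. Under PaX's REFCOUNT hardening, plain atomic_t operations are instrumented to detect reference-count overflows; counters that are pure statistics and may legitimately wrap are therefore moved to a separate unchecked type so they do not trip that detection. A conceptual sketch follows the adummy.c hunk below.
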
26226diff -urNp linux-2.6.32.45/drivers/atm/adummy.c linux-2.6.32.45/drivers/atm/adummy.c
26227--- linux-2.6.32.45/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
26228+++ linux-2.6.32.45/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
26229@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
26230 vcc->pop(vcc, skb);
26231 else
26232 dev_kfree_skb_any(skb);
26233- atomic_inc(&vcc->stats->tx);
26234+ atomic_inc_unchecked(&vcc->stats->tx);
26235
26236 return 0;
26237 }
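
The sketch below is only a conceptual illustration of that split, with invented names rather than the PaX implementation: the unchecked counter type behaves like a plain wrapping integer, while overflow detection would be reserved for the checked atomic_t used by real reference counts.

	/* Conceptual sketch, invented names -- not the PaX implementation. */
	typedef struct { int counter; } demo_atomic_unchecked_t;

	/* Statistics counter: allowed to wrap, so no overflow instrumentation. */
	static inline void demo_atomic_inc_unchecked(demo_atomic_unchecked_t *v)
	{
		__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
	}

	struct demo_vcc_stats {
		demo_atomic_unchecked_t tx;	/* frames sent */
		demo_atomic_unchecked_t rx;	/* frames received */
		demo_atomic_unchecked_t tx_err;
		demo_atomic_unchecked_t rx_err;
		demo_atomic_unchecked_t rx_drop;
	};

	static void demo_on_tx_done(struct demo_vcc_stats *stats)
	{
		/* mirrors atomic_inc_unchecked(&vcc->stats->tx) in the hunks */
		demo_atomic_inc_unchecked(&stats->tx);
	}
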
26238diff -urNp linux-2.6.32.45/drivers/atm/ambassador.c linux-2.6.32.45/drivers/atm/ambassador.c
26239--- linux-2.6.32.45/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
26240+++ linux-2.6.32.45/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
26241@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
26242 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
26243
26244 // VC layer stats
26245- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26246+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26247
26248 // free the descriptor
26249 kfree (tx_descr);
26250@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
26251 dump_skb ("<<<", vc, skb);
26252
26253 // VC layer stats
26254- atomic_inc(&atm_vcc->stats->rx);
26255+ atomic_inc_unchecked(&atm_vcc->stats->rx);
26256 __net_timestamp(skb);
26257 // end of our responsability
26258 atm_vcc->push (atm_vcc, skb);
26259@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
26260 } else {
26261 PRINTK (KERN_INFO, "dropped over-size frame");
26262 // should we count this?
26263- atomic_inc(&atm_vcc->stats->rx_drop);
26264+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26265 }
26266
26267 } else {
26268@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
26269 }
26270
26271 if (check_area (skb->data, skb->len)) {
26272- atomic_inc(&atm_vcc->stats->tx_err);
26273+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26274 return -ENOMEM; // ?
26275 }
26276
26277diff -urNp linux-2.6.32.45/drivers/atm/atmtcp.c linux-2.6.32.45/drivers/atm/atmtcp.c
26278--- linux-2.6.32.45/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
26279+++ linux-2.6.32.45/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
26280@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
26281 if (vcc->pop) vcc->pop(vcc,skb);
26282 else dev_kfree_skb(skb);
26283 if (dev_data) return 0;
26284- atomic_inc(&vcc->stats->tx_err);
26285+ atomic_inc_unchecked(&vcc->stats->tx_err);
26286 return -ENOLINK;
26287 }
26288 size = skb->len+sizeof(struct atmtcp_hdr);
26289@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
26290 if (!new_skb) {
26291 if (vcc->pop) vcc->pop(vcc,skb);
26292 else dev_kfree_skb(skb);
26293- atomic_inc(&vcc->stats->tx_err);
26294+ atomic_inc_unchecked(&vcc->stats->tx_err);
26295 return -ENOBUFS;
26296 }
26297 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26298@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
26299 if (vcc->pop) vcc->pop(vcc,skb);
26300 else dev_kfree_skb(skb);
26301 out_vcc->push(out_vcc,new_skb);
26302- atomic_inc(&vcc->stats->tx);
26303- atomic_inc(&out_vcc->stats->rx);
26304+ atomic_inc_unchecked(&vcc->stats->tx);
26305+ atomic_inc_unchecked(&out_vcc->stats->rx);
26306 return 0;
26307 }
26308
26309@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
26310 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26311 read_unlock(&vcc_sklist_lock);
26312 if (!out_vcc) {
26313- atomic_inc(&vcc->stats->tx_err);
26314+ atomic_inc_unchecked(&vcc->stats->tx_err);
26315 goto done;
26316 }
26317 skb_pull(skb,sizeof(struct atmtcp_hdr));
26318@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
26319 __net_timestamp(new_skb);
26320 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26321 out_vcc->push(out_vcc,new_skb);
26322- atomic_inc(&vcc->stats->tx);
26323- atomic_inc(&out_vcc->stats->rx);
26324+ atomic_inc_unchecked(&vcc->stats->tx);
26325+ atomic_inc_unchecked(&out_vcc->stats->rx);
26326 done:
26327 if (vcc->pop) vcc->pop(vcc,skb);
26328 else dev_kfree_skb(skb);
26329diff -urNp linux-2.6.32.45/drivers/atm/eni.c linux-2.6.32.45/drivers/atm/eni.c
26330--- linux-2.6.32.45/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
26331+++ linux-2.6.32.45/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
26332@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26333 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26334 vcc->dev->number);
26335 length = 0;
26336- atomic_inc(&vcc->stats->rx_err);
26337+ atomic_inc_unchecked(&vcc->stats->rx_err);
26338 }
26339 else {
26340 length = ATM_CELL_SIZE-1; /* no HEC */
26341@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26342 size);
26343 }
26344 eff = length = 0;
26345- atomic_inc(&vcc->stats->rx_err);
26346+ atomic_inc_unchecked(&vcc->stats->rx_err);
26347 }
26348 else {
26349 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26350@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26351 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26352 vcc->dev->number,vcc->vci,length,size << 2,descr);
26353 length = eff = 0;
26354- atomic_inc(&vcc->stats->rx_err);
26355+ atomic_inc_unchecked(&vcc->stats->rx_err);
26356 }
26357 }
26358 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26359@@ -770,7 +770,7 @@ rx_dequeued++;
26360 vcc->push(vcc,skb);
26361 pushed++;
26362 }
26363- atomic_inc(&vcc->stats->rx);
26364+ atomic_inc_unchecked(&vcc->stats->rx);
26365 }
26366 wake_up(&eni_dev->rx_wait);
26367 }
26368@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
26369 PCI_DMA_TODEVICE);
26370 if (vcc->pop) vcc->pop(vcc,skb);
26371 else dev_kfree_skb_irq(skb);
26372- atomic_inc(&vcc->stats->tx);
26373+ atomic_inc_unchecked(&vcc->stats->tx);
26374 wake_up(&eni_dev->tx_wait);
26375 dma_complete++;
26376 }
26377diff -urNp linux-2.6.32.45/drivers/atm/firestream.c linux-2.6.32.45/drivers/atm/firestream.c
26378--- linux-2.6.32.45/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
26379+++ linux-2.6.32.45/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
26380@@ -748,7 +748,7 @@ static void process_txdone_queue (struct
26381 }
26382 }
26383
26384- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26385+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26386
26387 fs_dprintk (FS_DEBUG_TXMEM, "i");
26388 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26389@@ -815,7 +815,7 @@ static void process_incoming (struct fs_
26390 #endif
26391 skb_put (skb, qe->p1 & 0xffff);
26392 ATM_SKB(skb)->vcc = atm_vcc;
26393- atomic_inc(&atm_vcc->stats->rx);
26394+ atomic_inc_unchecked(&atm_vcc->stats->rx);
26395 __net_timestamp(skb);
26396 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26397 atm_vcc->push (atm_vcc, skb);
26398@@ -836,12 +836,12 @@ static void process_incoming (struct fs_
26399 kfree (pe);
26400 }
26401 if (atm_vcc)
26402- atomic_inc(&atm_vcc->stats->rx_drop);
26403+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26404 break;
26405 case 0x1f: /* Reassembly abort: no buffers. */
26406 /* Silently increment error counter. */
26407 if (atm_vcc)
26408- atomic_inc(&atm_vcc->stats->rx_drop);
26409+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26410 break;
26411 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26412 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26413diff -urNp linux-2.6.32.45/drivers/atm/fore200e.c linux-2.6.32.45/drivers/atm/fore200e.c
26414--- linux-2.6.32.45/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
26415+++ linux-2.6.32.45/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
26416@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
26417 #endif
26418 /* check error condition */
26419 if (*entry->status & STATUS_ERROR)
26420- atomic_inc(&vcc->stats->tx_err);
26421+ atomic_inc_unchecked(&vcc->stats->tx_err);
26422 else
26423- atomic_inc(&vcc->stats->tx);
26424+ atomic_inc_unchecked(&vcc->stats->tx);
26425 }
26426 }
26427
26428@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
26429 if (skb == NULL) {
26430 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26431
26432- atomic_inc(&vcc->stats->rx_drop);
26433+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26434 return -ENOMEM;
26435 }
26436
26437@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
26438
26439 dev_kfree_skb_any(skb);
26440
26441- atomic_inc(&vcc->stats->rx_drop);
26442+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26443 return -ENOMEM;
26444 }
26445
26446 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26447
26448 vcc->push(vcc, skb);
26449- atomic_inc(&vcc->stats->rx);
26450+ atomic_inc_unchecked(&vcc->stats->rx);
26451
26452 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26453
26454@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
26455 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
26456 fore200e->atm_dev->number,
26457 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
26458- atomic_inc(&vcc->stats->rx_err);
26459+ atomic_inc_unchecked(&vcc->stats->rx_err);
26460 }
26461 }
26462
26463@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
26464 goto retry_here;
26465 }
26466
26467- atomic_inc(&vcc->stats->tx_err);
26468+ atomic_inc_unchecked(&vcc->stats->tx_err);
26469
26470 fore200e->tx_sat++;
26471 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
26472diff -urNp linux-2.6.32.45/drivers/atm/he.c linux-2.6.32.45/drivers/atm/he.c
26473--- linux-2.6.32.45/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
26474+++ linux-2.6.32.45/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
26475@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26476
26477 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
26478 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
26479- atomic_inc(&vcc->stats->rx_drop);
26480+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26481 goto return_host_buffers;
26482 }
26483
26484@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26485 RBRQ_LEN_ERR(he_dev->rbrq_head)
26486 ? "LEN_ERR" : "",
26487 vcc->vpi, vcc->vci);
26488- atomic_inc(&vcc->stats->rx_err);
26489+ atomic_inc_unchecked(&vcc->stats->rx_err);
26490 goto return_host_buffers;
26491 }
26492
26493@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26494 vcc->push(vcc, skb);
26495 spin_lock(&he_dev->global_lock);
26496
26497- atomic_inc(&vcc->stats->rx);
26498+ atomic_inc_unchecked(&vcc->stats->rx);
26499
26500 return_host_buffers:
26501 ++pdus_assembled;
26502@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
26503 tpd->vcc->pop(tpd->vcc, tpd->skb);
26504 else
26505 dev_kfree_skb_any(tpd->skb);
26506- atomic_inc(&tpd->vcc->stats->tx_err);
26507+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
26508 }
26509 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
26510 return;
26511@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26512 vcc->pop(vcc, skb);
26513 else
26514 dev_kfree_skb_any(skb);
26515- atomic_inc(&vcc->stats->tx_err);
26516+ atomic_inc_unchecked(&vcc->stats->tx_err);
26517 return -EINVAL;
26518 }
26519
26520@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26521 vcc->pop(vcc, skb);
26522 else
26523 dev_kfree_skb_any(skb);
26524- atomic_inc(&vcc->stats->tx_err);
26525+ atomic_inc_unchecked(&vcc->stats->tx_err);
26526 return -EINVAL;
26527 }
26528 #endif
26529@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26530 vcc->pop(vcc, skb);
26531 else
26532 dev_kfree_skb_any(skb);
26533- atomic_inc(&vcc->stats->tx_err);
26534+ atomic_inc_unchecked(&vcc->stats->tx_err);
26535 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26536 return -ENOMEM;
26537 }
26538@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26539 vcc->pop(vcc, skb);
26540 else
26541 dev_kfree_skb_any(skb);
26542- atomic_inc(&vcc->stats->tx_err);
26543+ atomic_inc_unchecked(&vcc->stats->tx_err);
26544 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26545 return -ENOMEM;
26546 }
26547@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26548 __enqueue_tpd(he_dev, tpd, cid);
26549 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26550
26551- atomic_inc(&vcc->stats->tx);
26552+ atomic_inc_unchecked(&vcc->stats->tx);
26553
26554 return 0;
26555 }
26556diff -urNp linux-2.6.32.45/drivers/atm/horizon.c linux-2.6.32.45/drivers/atm/horizon.c
26557--- linux-2.6.32.45/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26558+++ linux-2.6.32.45/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26559@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26560 {
26561 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26562 // VC layer stats
26563- atomic_inc(&vcc->stats->rx);
26564+ atomic_inc_unchecked(&vcc->stats->rx);
26565 __net_timestamp(skb);
26566 // end of our responsability
26567 vcc->push (vcc, skb);
26568@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26569 dev->tx_iovec = NULL;
26570
26571 // VC layer stats
26572- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26573+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26574
26575 // free the skb
26576 hrz_kfree_skb (skb);
26577diff -urNp linux-2.6.32.45/drivers/atm/idt77252.c linux-2.6.32.45/drivers/atm/idt77252.c
26578--- linux-2.6.32.45/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26579+++ linux-2.6.32.45/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26580@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26581 else
26582 dev_kfree_skb(skb);
26583
26584- atomic_inc(&vcc->stats->tx);
26585+ atomic_inc_unchecked(&vcc->stats->tx);
26586 }
26587
26588 atomic_dec(&scq->used);
26589@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26590 if ((sb = dev_alloc_skb(64)) == NULL) {
26591 printk("%s: Can't allocate buffers for aal0.\n",
26592 card->name);
26593- atomic_add(i, &vcc->stats->rx_drop);
26594+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
26595 break;
26596 }
26597 if (!atm_charge(vcc, sb->truesize)) {
26598 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26599 card->name);
26600- atomic_add(i - 1, &vcc->stats->rx_drop);
26601+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26602 dev_kfree_skb(sb);
26603 break;
26604 }
26605@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26606 ATM_SKB(sb)->vcc = vcc;
26607 __net_timestamp(sb);
26608 vcc->push(vcc, sb);
26609- atomic_inc(&vcc->stats->rx);
26610+ atomic_inc_unchecked(&vcc->stats->rx);
26611
26612 cell += ATM_CELL_PAYLOAD;
26613 }
26614@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26615 "(CDC: %08x)\n",
26616 card->name, len, rpp->len, readl(SAR_REG_CDC));
26617 recycle_rx_pool_skb(card, rpp);
26618- atomic_inc(&vcc->stats->rx_err);
26619+ atomic_inc_unchecked(&vcc->stats->rx_err);
26620 return;
26621 }
26622 if (stat & SAR_RSQE_CRC) {
26623 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26624 recycle_rx_pool_skb(card, rpp);
26625- atomic_inc(&vcc->stats->rx_err);
26626+ atomic_inc_unchecked(&vcc->stats->rx_err);
26627 return;
26628 }
26629 if (skb_queue_len(&rpp->queue) > 1) {
26630@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26631 RXPRINTK("%s: Can't alloc RX skb.\n",
26632 card->name);
26633 recycle_rx_pool_skb(card, rpp);
26634- atomic_inc(&vcc->stats->rx_err);
26635+ atomic_inc_unchecked(&vcc->stats->rx_err);
26636 return;
26637 }
26638 if (!atm_charge(vcc, skb->truesize)) {
26639@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26640 __net_timestamp(skb);
26641
26642 vcc->push(vcc, skb);
26643- atomic_inc(&vcc->stats->rx);
26644+ atomic_inc_unchecked(&vcc->stats->rx);
26645
26646 return;
26647 }
26648@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26649 __net_timestamp(skb);
26650
26651 vcc->push(vcc, skb);
26652- atomic_inc(&vcc->stats->rx);
26653+ atomic_inc_unchecked(&vcc->stats->rx);
26654
26655 if (skb->truesize > SAR_FB_SIZE_3)
26656 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26657@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26658 if (vcc->qos.aal != ATM_AAL0) {
26659 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26660 card->name, vpi, vci);
26661- atomic_inc(&vcc->stats->rx_drop);
26662+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26663 goto drop;
26664 }
26665
26666 if ((sb = dev_alloc_skb(64)) == NULL) {
26667 printk("%s: Can't allocate buffers for AAL0.\n",
26668 card->name);
26669- atomic_inc(&vcc->stats->rx_err);
26670+ atomic_inc_unchecked(&vcc->stats->rx_err);
26671 goto drop;
26672 }
26673
26674@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26675 ATM_SKB(sb)->vcc = vcc;
26676 __net_timestamp(sb);
26677 vcc->push(vcc, sb);
26678- atomic_inc(&vcc->stats->rx);
26679+ atomic_inc_unchecked(&vcc->stats->rx);
26680
26681 drop:
26682 skb_pull(queue, 64);
26683@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26684
26685 if (vc == NULL) {
26686 printk("%s: NULL connection in send().\n", card->name);
26687- atomic_inc(&vcc->stats->tx_err);
26688+ atomic_inc_unchecked(&vcc->stats->tx_err);
26689 dev_kfree_skb(skb);
26690 return -EINVAL;
26691 }
26692 if (!test_bit(VCF_TX, &vc->flags)) {
26693 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26694- atomic_inc(&vcc->stats->tx_err);
26695+ atomic_inc_unchecked(&vcc->stats->tx_err);
26696 dev_kfree_skb(skb);
26697 return -EINVAL;
26698 }
26699@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26700 break;
26701 default:
26702 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26703- atomic_inc(&vcc->stats->tx_err);
26704+ atomic_inc_unchecked(&vcc->stats->tx_err);
26705 dev_kfree_skb(skb);
26706 return -EINVAL;
26707 }
26708
26709 if (skb_shinfo(skb)->nr_frags != 0) {
26710 printk("%s: No scatter-gather yet.\n", card->name);
26711- atomic_inc(&vcc->stats->tx_err);
26712+ atomic_inc_unchecked(&vcc->stats->tx_err);
26713 dev_kfree_skb(skb);
26714 return -EINVAL;
26715 }
26716@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26717
26718 err = queue_skb(card, vc, skb, oam);
26719 if (err) {
26720- atomic_inc(&vcc->stats->tx_err);
26721+ atomic_inc_unchecked(&vcc->stats->tx_err);
26722 dev_kfree_skb(skb);
26723 return err;
26724 }
26725@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26726 skb = dev_alloc_skb(64);
26727 if (!skb) {
26728 printk("%s: Out of memory in send_oam().\n", card->name);
26729- atomic_inc(&vcc->stats->tx_err);
26730+ atomic_inc_unchecked(&vcc->stats->tx_err);
26731 return -ENOMEM;
26732 }
26733 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26734diff -urNp linux-2.6.32.45/drivers/atm/iphase.c linux-2.6.32.45/drivers/atm/iphase.c
26735--- linux-2.6.32.45/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26736+++ linux-2.6.32.45/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26737@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26738 status = (u_short) (buf_desc_ptr->desc_mode);
26739 if (status & (RX_CER | RX_PTE | RX_OFL))
26740 {
26741- atomic_inc(&vcc->stats->rx_err);
26742+ atomic_inc_unchecked(&vcc->stats->rx_err);
26743 IF_ERR(printk("IA: bad packet, dropping it");)
26744 if (status & RX_CER) {
26745 IF_ERR(printk(" cause: packet CRC error\n");)
26746@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26747 len = dma_addr - buf_addr;
26748 if (len > iadev->rx_buf_sz) {
26749 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26750- atomic_inc(&vcc->stats->rx_err);
26751+ atomic_inc_unchecked(&vcc->stats->rx_err);
26752 goto out_free_desc;
26753 }
26754
26755@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26756 ia_vcc = INPH_IA_VCC(vcc);
26757 if (ia_vcc == NULL)
26758 {
26759- atomic_inc(&vcc->stats->rx_err);
26760+ atomic_inc_unchecked(&vcc->stats->rx_err);
26761 dev_kfree_skb_any(skb);
26762 atm_return(vcc, atm_guess_pdu2truesize(len));
26763 goto INCR_DLE;
26764@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26765 if ((length > iadev->rx_buf_sz) || (length >
26766 (skb->len - sizeof(struct cpcs_trailer))))
26767 {
26768- atomic_inc(&vcc->stats->rx_err);
26769+ atomic_inc_unchecked(&vcc->stats->rx_err);
26770 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26771 length, skb->len);)
26772 dev_kfree_skb_any(skb);
26773@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26774
26775 IF_RX(printk("rx_dle_intr: skb push");)
26776 vcc->push(vcc,skb);
26777- atomic_inc(&vcc->stats->rx);
26778+ atomic_inc_unchecked(&vcc->stats->rx);
26779 iadev->rx_pkt_cnt++;
26780 }
26781 INCR_DLE:
26782@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26783 {
26784 struct k_sonet_stats *stats;
26785 stats = &PRIV(_ia_dev[board])->sonet_stats;
26786- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26787- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26788- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26789- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26790- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26791- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26792- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26793- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26794- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26795+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26796+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26797+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26798+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26799+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26800+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26801+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26802+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26803+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26804 }
26805 ia_cmds.status = 0;
26806 break;
26807@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26808 if ((desc == 0) || (desc > iadev->num_tx_desc))
26809 {
26810 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26811- atomic_inc(&vcc->stats->tx);
26812+ atomic_inc_unchecked(&vcc->stats->tx);
26813 if (vcc->pop)
26814 vcc->pop(vcc, skb);
26815 else
26816@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26817 ATM_DESC(skb) = vcc->vci;
26818 skb_queue_tail(&iadev->tx_dma_q, skb);
26819
26820- atomic_inc(&vcc->stats->tx);
26821+ atomic_inc_unchecked(&vcc->stats->tx);
26822 iadev->tx_pkt_cnt++;
26823 /* Increment transaction counter */
26824 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26825
26826 #if 0
26827 /* add flow control logic */
26828- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26829+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26830 if (iavcc->vc_desc_cnt > 10) {
26831 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26832 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
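
The iphase.c hunks show that the conversion covers the whole accessor family, not just atomic_inc(): the SONET statistics dump moves to atomic_read_unchecked(), the per-buffer drop accounting to atomic_add_unchecked(), and the flow-control heuristic reads the tx counter through the unchecked variant. A short sketch of matching read/add helpers, again with invented names:

	/* Invented names, conceptual only. */
	typedef struct { int counter; } demo_atomic_unchecked_t;

	static inline void demo_atomic_add_unchecked(int i, demo_atomic_unchecked_t *v)
	{
		__atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
	}

	static inline int demo_atomic_read_unchecked(const demo_atomic_unchecked_t *v)
	{
		return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
	}
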
26833diff -urNp linux-2.6.32.45/drivers/atm/lanai.c linux-2.6.32.45/drivers/atm/lanai.c
26834--- linux-2.6.32.45/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26835+++ linux-2.6.32.45/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26836@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26837 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26838 lanai_endtx(lanai, lvcc);
26839 lanai_free_skb(lvcc->tx.atmvcc, skb);
26840- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26841+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26842 }
26843
26844 /* Try to fill the buffer - don't call unless there is backlog */
26845@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26846 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26847 __net_timestamp(skb);
26848 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26849- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26850+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26851 out:
26852 lvcc->rx.buf.ptr = end;
26853 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26854@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26855 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26856 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26857 lanai->stats.service_rxnotaal5++;
26858- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26859+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26860 return 0;
26861 }
26862 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26863@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26864 int bytes;
26865 read_unlock(&vcc_sklist_lock);
26866 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26867- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26868+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26869 lvcc->stats.x.aal5.service_trash++;
26870 bytes = (SERVICE_GET_END(s) * 16) -
26871 (((unsigned long) lvcc->rx.buf.ptr) -
26872@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26873 }
26874 if (s & SERVICE_STREAM) {
26875 read_unlock(&vcc_sklist_lock);
26876- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26877+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26878 lvcc->stats.x.aal5.service_stream++;
26879 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26880 "PDU on VCI %d!\n", lanai->number, vci);
26881@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26882 return 0;
26883 }
26884 DPRINTK("got rx crc error on vci %d\n", vci);
26885- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26886+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26887 lvcc->stats.x.aal5.service_rxcrc++;
26888 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26889 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26890diff -urNp linux-2.6.32.45/drivers/atm/nicstar.c linux-2.6.32.45/drivers/atm/nicstar.c
26891--- linux-2.6.32.45/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26892+++ linux-2.6.32.45/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26893@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26894 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26895 {
26896 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26897- atomic_inc(&vcc->stats->tx_err);
26898+ atomic_inc_unchecked(&vcc->stats->tx_err);
26899 dev_kfree_skb_any(skb);
26900 return -EINVAL;
26901 }
26902@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26903 if (!vc->tx)
26904 {
26905 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26906- atomic_inc(&vcc->stats->tx_err);
26907+ atomic_inc_unchecked(&vcc->stats->tx_err);
26908 dev_kfree_skb_any(skb);
26909 return -EINVAL;
26910 }
26911@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26912 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26913 {
26914 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26915- atomic_inc(&vcc->stats->tx_err);
26916+ atomic_inc_unchecked(&vcc->stats->tx_err);
26917 dev_kfree_skb_any(skb);
26918 return -EINVAL;
26919 }
26920@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26921 if (skb_shinfo(skb)->nr_frags != 0)
26922 {
26923 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26924- atomic_inc(&vcc->stats->tx_err);
26925+ atomic_inc_unchecked(&vcc->stats->tx_err);
26926 dev_kfree_skb_any(skb);
26927 return -EINVAL;
26928 }
26929@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26930
26931 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26932 {
26933- atomic_inc(&vcc->stats->tx_err);
26934+ atomic_inc_unchecked(&vcc->stats->tx_err);
26935 dev_kfree_skb_any(skb);
26936 return -EIO;
26937 }
26938- atomic_inc(&vcc->stats->tx);
26939+ atomic_inc_unchecked(&vcc->stats->tx);
26940
26941 return 0;
26942 }
26943@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26944 {
26945 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26946 card->index);
26947- atomic_add(i,&vcc->stats->rx_drop);
26948+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
26949 break;
26950 }
26951 if (!atm_charge(vcc, sb->truesize))
26952 {
26953 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26954 card->index);
26955- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26956+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26957 dev_kfree_skb_any(sb);
26958 break;
26959 }
26960@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26961 ATM_SKB(sb)->vcc = vcc;
26962 __net_timestamp(sb);
26963 vcc->push(vcc, sb);
26964- atomic_inc(&vcc->stats->rx);
26965+ atomic_inc_unchecked(&vcc->stats->rx);
26966 cell += ATM_CELL_PAYLOAD;
26967 }
26968
26969@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26970 if (iovb == NULL)
26971 {
26972 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26973- atomic_inc(&vcc->stats->rx_drop);
26974+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26975 recycle_rx_buf(card, skb);
26976 return;
26977 }
26978@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26979 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26980 {
26981 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26982- atomic_inc(&vcc->stats->rx_err);
26983+ atomic_inc_unchecked(&vcc->stats->rx_err);
26984 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26985 NS_SKB(iovb)->iovcnt = 0;
26986 iovb->len = 0;
26987@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26988 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26989 card->index);
26990 which_list(card, skb);
26991- atomic_inc(&vcc->stats->rx_err);
26992+ atomic_inc_unchecked(&vcc->stats->rx_err);
26993 recycle_rx_buf(card, skb);
26994 vc->rx_iov = NULL;
26995 recycle_iov_buf(card, iovb);
26996@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26997 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26998 card->index);
26999 which_list(card, skb);
27000- atomic_inc(&vcc->stats->rx_err);
27001+ atomic_inc_unchecked(&vcc->stats->rx_err);
27002 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
27003 NS_SKB(iovb)->iovcnt);
27004 vc->rx_iov = NULL;
27005@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
27006 printk(" - PDU size mismatch.\n");
27007 else
27008 printk(".\n");
27009- atomic_inc(&vcc->stats->rx_err);
27010+ atomic_inc_unchecked(&vcc->stats->rx_err);
27011 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
27012 NS_SKB(iovb)->iovcnt);
27013 vc->rx_iov = NULL;
27014@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
27015 if (!atm_charge(vcc, skb->truesize))
27016 {
27017 push_rxbufs(card, skb);
27018- atomic_inc(&vcc->stats->rx_drop);
27019+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27020 }
27021 else
27022 {
27023@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
27024 ATM_SKB(skb)->vcc = vcc;
27025 __net_timestamp(skb);
27026 vcc->push(vcc, skb);
27027- atomic_inc(&vcc->stats->rx);
27028+ atomic_inc_unchecked(&vcc->stats->rx);
27029 }
27030 }
27031 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
27032@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
27033 if (!atm_charge(vcc, sb->truesize))
27034 {
27035 push_rxbufs(card, sb);
27036- atomic_inc(&vcc->stats->rx_drop);
27037+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27038 }
27039 else
27040 {
27041@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
27042 ATM_SKB(sb)->vcc = vcc;
27043 __net_timestamp(sb);
27044 vcc->push(vcc, sb);
27045- atomic_inc(&vcc->stats->rx);
27046+ atomic_inc_unchecked(&vcc->stats->rx);
27047 }
27048
27049 push_rxbufs(card, skb);
27050@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
27051 if (!atm_charge(vcc, skb->truesize))
27052 {
27053 push_rxbufs(card, skb);
27054- atomic_inc(&vcc->stats->rx_drop);
27055+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27056 }
27057 else
27058 {
27059@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
27060 ATM_SKB(skb)->vcc = vcc;
27061 __net_timestamp(skb);
27062 vcc->push(vcc, skb);
27063- atomic_inc(&vcc->stats->rx);
27064+ atomic_inc_unchecked(&vcc->stats->rx);
27065 }
27066
27067 push_rxbufs(card, sb);
27068@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
27069 if (hb == NULL)
27070 {
27071 printk("nicstar%d: Out of huge buffers.\n", card->index);
27072- atomic_inc(&vcc->stats->rx_drop);
27073+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27074 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
27075 NS_SKB(iovb)->iovcnt);
27076 vc->rx_iov = NULL;
27077@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
27078 }
27079 else
27080 dev_kfree_skb_any(hb);
27081- atomic_inc(&vcc->stats->rx_drop);
27082+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27083 }
27084 else
27085 {
27086@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
27087 #endif /* NS_USE_DESTRUCTORS */
27088 __net_timestamp(hb);
27089 vcc->push(vcc, hb);
27090- atomic_inc(&vcc->stats->rx);
27091+ atomic_inc_unchecked(&vcc->stats->rx);
27092 }
27093 }
27094
27095diff -urNp linux-2.6.32.45/drivers/atm/solos-pci.c linux-2.6.32.45/drivers/atm/solos-pci.c
27096--- linux-2.6.32.45/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
27097+++ linux-2.6.32.45/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
27098@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
27099 }
27100 atm_charge(vcc, skb->truesize);
27101 vcc->push(vcc, skb);
27102- atomic_inc(&vcc->stats->rx);
27103+ atomic_inc_unchecked(&vcc->stats->rx);
27104 break;
27105
27106 case PKT_STATUS:
27107@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
27108 char msg[500];
27109 char item[10];
27110
27111+ pax_track_stack();
27112+
27113 len = buf->len;
27114 for (i = 0; i < len; i++){
27115 if(i % 8 == 0)
27116@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
27117 vcc = SKB_CB(oldskb)->vcc;
27118
27119 if (vcc) {
27120- atomic_inc(&vcc->stats->tx);
27121+ atomic_inc_unchecked(&vcc->stats->tx);
27122 solos_pop(vcc, oldskb);
27123 } else
27124 dev_kfree_skb_irq(oldskb);
27125diff -urNp linux-2.6.32.45/drivers/atm/suni.c linux-2.6.32.45/drivers/atm/suni.c
27126--- linux-2.6.32.45/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
27127+++ linux-2.6.32.45/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
27128@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
27129
27130
27131 #define ADD_LIMITED(s,v) \
27132- atomic_add((v),&stats->s); \
27133- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
27134+ atomic_add_unchecked((v),&stats->s); \
27135+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
27136
27137
27138 static void suni_hz(unsigned long from_timer)
27139diff -urNp linux-2.6.32.45/drivers/atm/uPD98402.c linux-2.6.32.45/drivers/atm/uPD98402.c
27140--- linux-2.6.32.45/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
27141+++ linux-2.6.32.45/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
27142@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
27143 struct sonet_stats tmp;
27144 int error = 0;
27145
27146- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27147+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27148 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
27149 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
27150 if (zero && !error) {
27151@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
27152
27153
27154 #define ADD_LIMITED(s,v) \
27155- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
27156- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
27157- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27158+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
27159+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
27160+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27161
27162
27163 static void stat_event(struct atm_dev *dev)
27164@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
27165 if (reason & uPD98402_INT_PFM) stat_event(dev);
27166 if (reason & uPD98402_INT_PCO) {
27167 (void) GET(PCOCR); /* clear interrupt cause */
27168- atomic_add(GET(HECCT),
27169+ atomic_add_unchecked(GET(HECCT),
27170 &PRIV(dev)->sonet_stats.uncorr_hcs);
27171 }
27172 if ((reason & uPD98402_INT_RFO) &&
27173@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
27174 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
27175 uPD98402_INT_LOS),PIMR); /* enable them */
27176 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
27177- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27178- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
27179- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
27180+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27181+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
27182+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
27183 return 0;
27184 }
27185
27186diff -urNp linux-2.6.32.45/drivers/atm/zatm.c linux-2.6.32.45/drivers/atm/zatm.c
27187--- linux-2.6.32.45/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
27188+++ linux-2.6.32.45/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
27189@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27190 }
27191 if (!size) {
27192 dev_kfree_skb_irq(skb);
27193- if (vcc) atomic_inc(&vcc->stats->rx_err);
27194+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
27195 continue;
27196 }
27197 if (!atm_charge(vcc,skb->truesize)) {
27198@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27199 skb->len = size;
27200 ATM_SKB(skb)->vcc = vcc;
27201 vcc->push(vcc,skb);
27202- atomic_inc(&vcc->stats->rx);
27203+ atomic_inc_unchecked(&vcc->stats->rx);
27204 }
27205 zout(pos & 0xffff,MTA(mbx));
27206 #if 0 /* probably a stupid idea */
27207@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
27208 skb_queue_head(&zatm_vcc->backlog,skb);
27209 break;
27210 }
27211- atomic_inc(&vcc->stats->tx);
27212+ atomic_inc_unchecked(&vcc->stats->tx);
27213 wake_up(&zatm_vcc->tx_wait);
27214 }
27215
27216diff -urNp linux-2.6.32.45/drivers/base/bus.c linux-2.6.32.45/drivers/base/bus.c
27217--- linux-2.6.32.45/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
27218+++ linux-2.6.32.45/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
27219@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
27220 return ret;
27221 }
27222
27223-static struct sysfs_ops driver_sysfs_ops = {
27224+static const struct sysfs_ops driver_sysfs_ops = {
27225 .show = drv_attr_show,
27226 .store = drv_attr_store,
27227 };
27228@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
27229 return ret;
27230 }
27231
27232-static struct sysfs_ops bus_sysfs_ops = {
27233+static const struct sysfs_ops bus_sysfs_ops = {
27234 .show = bus_attr_show,
27235 .store = bus_attr_store,
27236 };
27237@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
27238 return 0;
27239 }
27240
27241-static struct kset_uevent_ops bus_uevent_ops = {
27242+static const struct kset_uevent_ops bus_uevent_ops = {
27243 .filter = bus_uevent_filter,
27244 };
27245
27246diff -urNp linux-2.6.32.45/drivers/base/class.c linux-2.6.32.45/drivers/base/class.c
27247--- linux-2.6.32.45/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
27248+++ linux-2.6.32.45/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
27249@@ -63,7 +63,7 @@ static void class_release(struct kobject
27250 kfree(cp);
27251 }
27252
27253-static struct sysfs_ops class_sysfs_ops = {
27254+static const struct sysfs_ops class_sysfs_ops = {
27255 .show = class_attr_show,
27256 .store = class_attr_store,
27257 };
27258diff -urNp linux-2.6.32.45/drivers/base/core.c linux-2.6.32.45/drivers/base/core.c
27259--- linux-2.6.32.45/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
27260+++ linux-2.6.32.45/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
27261@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
27262 return ret;
27263 }
27264
27265-static struct sysfs_ops dev_sysfs_ops = {
27266+static const struct sysfs_ops dev_sysfs_ops = {
27267 .show = dev_attr_show,
27268 .store = dev_attr_store,
27269 };
27270@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
27271 return retval;
27272 }
27273
27274-static struct kset_uevent_ops device_uevent_ops = {
27275+static const struct kset_uevent_ops device_uevent_ops = {
27276 .filter = dev_uevent_filter,
27277 .name = dev_uevent_name,
27278 .uevent = dev_uevent,
27279diff -urNp linux-2.6.32.45/drivers/base/memory.c linux-2.6.32.45/drivers/base/memory.c
27280--- linux-2.6.32.45/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
27281+++ linux-2.6.32.45/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
27282@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
27283 return retval;
27284 }
27285
27286-static struct kset_uevent_ops memory_uevent_ops = {
27287+static const struct kset_uevent_ops memory_uevent_ops = {
27288 .name = memory_uevent_name,
27289 .uevent = memory_uevent,
27290 };
27291diff -urNp linux-2.6.32.45/drivers/base/sys.c linux-2.6.32.45/drivers/base/sys.c
27292--- linux-2.6.32.45/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
27293+++ linux-2.6.32.45/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
27294@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
27295 return -EIO;
27296 }
27297
27298-static struct sysfs_ops sysfs_ops = {
27299+static const struct sysfs_ops sysfs_ops = {
27300 .show = sysdev_show,
27301 .store = sysdev_store,
27302 };
27303@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
27304 return -EIO;
27305 }
27306
27307-static struct sysfs_ops sysfs_class_ops = {
27308+static const struct sysfs_ops sysfs_class_ops = {
27309 .show = sysdev_class_show,
27310 .store = sysdev_class_store,
27311 };
27312diff -urNp linux-2.6.32.45/drivers/block/cciss.c linux-2.6.32.45/drivers/block/cciss.c
27313--- linux-2.6.32.45/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
27314+++ linux-2.6.32.45/drivers/block/cciss.c 2011-08-05 20:33:55.000000000 -0400
27315@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
27316 int err;
27317 u32 cp;
27318
27319+ memset(&arg64, 0, sizeof(arg64));
27320+
27321 err = 0;
27322 err |=
27323 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27324@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ct
27325 /* Wait (up to 20 seconds) for a command to complete */
27326
27327 for (i = 20 * HZ; i > 0; i--) {
27328- done = hba[ctlr]->access.command_completed(hba[ctlr]);
27329+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
27330 if (done == FIFO_EMPTY)
27331 schedule_timeout_uninterruptible(1);
27332 else
27333@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h,
27334 resend_cmd1:
27335
27336 /* Disable interrupt on the board. */
27337- h->access.set_intr_mask(h, CCISS_INTR_OFF);
27338+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
27339
27340 /* Make sure there is room in the command FIFO */
27341 /* Actually it should be completely empty at this time */
27342@@ -2884,13 +2886,13 @@ resend_cmd1:
27343 /* tape side of the driver. */
27344 for (i = 200000; i > 0; i--) {
27345 /* if fifo isn't full go */
27346- if (!(h->access.fifo_full(h)))
27347+ if (!(h->access->fifo_full(h)))
27348 break;
27349 udelay(10);
27350 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
27351 " waiting!\n", h->ctlr);
27352 }
27353- h->access.submit_command(h, c); /* Send the cmd */
27354+ h->access->submit_command(h, c); /* Send the cmd */
27355 do {
27356 complete = pollcomplete(h->ctlr);
27357
27358@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
27359 while (!hlist_empty(&h->reqQ)) {
27360 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
27361 /* can't do anything if fifo is full */
27362- if ((h->access.fifo_full(h))) {
27363+ if ((h->access->fifo_full(h))) {
27364 printk(KERN_WARNING "cciss: fifo full\n");
27365 break;
27366 }
27367@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
27368 h->Qdepth--;
27369
27370 /* Tell the controller execute command */
27371- h->access.submit_command(h, c);
27372+ h->access->submit_command(h, c);
27373
27374 /* Put job onto the completed Q */
27375 addQ(&h->cmpQ, c);
27376@@ -3393,17 +3395,17 @@ startio:
27377
27378 static inline unsigned long get_next_completion(ctlr_info_t *h)
27379 {
27380- return h->access.command_completed(h);
27381+ return h->access->command_completed(h);
27382 }
27383
27384 static inline int interrupt_pending(ctlr_info_t *h)
27385 {
27386- return h->access.intr_pending(h);
27387+ return h->access->intr_pending(h);
27388 }
27389
27390 static inline long interrupt_not_for_us(ctlr_info_t *h)
27391 {
27392- return (((h->access.intr_pending(h) == 0) ||
27393+ return (((h->access->intr_pending(h) == 0) ||
27394 (h->interrupts_enabled == 0)));
27395 }
27396
27397@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr
27398 */
27399 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
27400 c->product_name = products[prod_index].product_name;
27401- c->access = *(products[prod_index].access);
27402+ c->access = products[prod_index].access;
27403 c->nr_cmds = c->max_commands - 4;
27404 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
27405 (readb(&c->cfgtable->Signature[1]) != 'I') ||
27406@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(stru
27407 }
27408
27409 /* make sure the board interrupts are off */
27410- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
27411+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
27412 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
27413 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
27414 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
27415@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(stru
27416 cciss_scsi_setup(i);
27417
27418 /* Turn the interrupts on so we can service requests */
27419- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
27420+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
27421
27422 /* Get the firmware version */
27423 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27424diff -urNp linux-2.6.32.45/drivers/block/cciss.h linux-2.6.32.45/drivers/block/cciss.h
27425--- linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:35:28.000000000 -0400
27426+++ linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:33:59.000000000 -0400
27427@@ -90,7 +90,7 @@ struct ctlr_info
27428 // information about each logical volume
27429 drive_info_struct *drv[CISS_MAX_LUN];
27430
27431- struct access_method access;
27432+ struct access_method *access;
27433
27434 /* queue and queue Info */
27435 struct hlist_head reqQ;
27436diff -urNp linux-2.6.32.45/drivers/block/cpqarray.c linux-2.6.32.45/drivers/block/cpqarray.c
27437--- linux-2.6.32.45/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
27438+++ linux-2.6.32.45/drivers/block/cpqarray.c 2011-08-05 20:33:55.000000000 -0400
27439@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr
27440 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27441 goto Enomem4;
27442 }
27443- hba[i]->access.set_intr_mask(hba[i], 0);
27444+ hba[i]->access->set_intr_mask(hba[i], 0);
27445 if (request_irq(hba[i]->intr, do_ida_intr,
27446 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27447 {
27448@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr
27449 add_timer(&hba[i]->timer);
27450
27451 /* Enable IRQ now that spinlock and rate limit timer are set up */
27452- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27453+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27454
27455 for(j=0; j<NWD; j++) {
27456 struct gendisk *disk = ida_gendisk[i][j];
27457@@ -695,7 +695,7 @@ DBGINFO(
27458 for(i=0; i<NR_PRODUCTS; i++) {
27459 if (board_id == products[i].board_id) {
27460 c->product_name = products[i].product_name;
27461- c->access = *(products[i].access);
27462+ c->access = products[i].access;
27463 break;
27464 }
27465 }
27466@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(v
27467 hba[ctlr]->intr = intr;
27468 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27469 hba[ctlr]->product_name = products[j].product_name;
27470- hba[ctlr]->access = *(products[j].access);
27471+ hba[ctlr]->access = products[j].access;
27472 hba[ctlr]->ctlr = ctlr;
27473 hba[ctlr]->board_id = board_id;
27474 hba[ctlr]->pci_dev = NULL; /* not PCI */
27475@@ -896,6 +896,8 @@ static void do_ida_request(struct reques
27476 struct scatterlist tmp_sg[SG_MAX];
27477 int i, dir, seg;
27478
27479+ pax_track_stack();
27480+
27481 if (blk_queue_plugged(q))
27482 goto startio;
27483
27484@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
27485
27486 while((c = h->reqQ) != NULL) {
27487 /* Can't do anything if we're busy */
27488- if (h->access.fifo_full(h) == 0)
27489+ if (h->access->fifo_full(h) == 0)
27490 return;
27491
27492 /* Get the first entry from the request Q */
27493@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
27494 h->Qdepth--;
27495
27496 /* Tell the controller to do our bidding */
27497- h->access.submit_command(h, c);
27498+ h->access->submit_command(h, c);
27499
27500 /* Get onto the completion Q */
27501 addQ(&h->cmpQ, c);
27502@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq,
27503 unsigned long flags;
27504 __u32 a,a1;
27505
27506- istat = h->access.intr_pending(h);
27507+ istat = h->access->intr_pending(h);
27508 /* Is this interrupt for us? */
27509 if (istat == 0)
27510 return IRQ_NONE;
27511@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq,
27512 */
27513 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
27514 if (istat & FIFO_NOT_EMPTY) {
27515- while((a = h->access.command_completed(h))) {
27516+ while((a = h->access->command_completed(h))) {
27517 a1 = a; a &= ~3;
27518 if ((c = h->cmpQ) == NULL)
27519 {
27520@@ -1434,11 +1436,11 @@ static int sendcmd(
27521 /*
27522 * Disable interrupt
27523 */
27524- info_p->access.set_intr_mask(info_p, 0);
27525+ info_p->access->set_intr_mask(info_p, 0);
27526 /* Make sure there is room in the command FIFO */
27527 /* Actually it should be completely empty at this time. */
27528 for (i = 200000; i > 0; i--) {
27529- temp = info_p->access.fifo_full(info_p);
27530+ temp = info_p->access->fifo_full(info_p);
27531 if (temp != 0) {
27532 break;
27533 }
27534@@ -1451,7 +1453,7 @@ DBG(
27535 /*
27536 * Send the cmd
27537 */
27538- info_p->access.submit_command(info_p, c);
27539+ info_p->access->submit_command(info_p, c);
27540 complete = pollcomplete(ctlr);
27541
27542 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
27543@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t
27544 * we check the new geometry. Then turn interrupts back on when
27545 * we're done.
27546 */
27547- host->access.set_intr_mask(host, 0);
27548+ host->access->set_intr_mask(host, 0);
27549 getgeometry(ctlr);
27550- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
27551+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
27552
27553 for(i=0; i<NWD; i++) {
27554 struct gendisk *disk = ida_gendisk[ctlr][i];
27555@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
27556 /* Wait (up to 2 seconds) for a command to complete */
27557
27558 for (i = 200000; i > 0; i--) {
27559- done = hba[ctlr]->access.command_completed(hba[ctlr]);
27560+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
27561 if (done == 0) {
27562 udelay(10); /* a short fixed delay */
27563 } else
27564diff -urNp linux-2.6.32.45/drivers/block/cpqarray.h linux-2.6.32.45/drivers/block/cpqarray.h
27565--- linux-2.6.32.45/drivers/block/cpqarray.h 2011-03-27 14:31:47.000000000 -0400
27566+++ linux-2.6.32.45/drivers/block/cpqarray.h 2011-08-05 20:33:55.000000000 -0400
27567@@ -99,7 +99,7 @@ struct ctlr_info {
27568 drv_info_t drv[NWD];
27569 struct proc_dir_entry *proc;
27570
27571- struct access_method access;
27572+ struct access_method *access;
27573
27574 cmdlist_t *reqQ;
27575 cmdlist_t *cmpQ;
27576diff -urNp linux-2.6.32.45/drivers/block/DAC960.c linux-2.6.32.45/drivers/block/DAC960.c
27577--- linux-2.6.32.45/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
27578+++ linux-2.6.32.45/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
27579@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
27580 unsigned long flags;
27581 int Channel, TargetID;
27582
27583+ pax_track_stack();
27584+
27585 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
27586 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
27587 sizeof(DAC960_SCSI_Inquiry_T) +
27588diff -urNp linux-2.6.32.45/drivers/block/nbd.c linux-2.6.32.45/drivers/block/nbd.c
27589--- linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
27590+++ linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
27591@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
27592 struct kvec iov;
27593 sigset_t blocked, oldset;
27594
27595+ pax_track_stack();
27596+
27597 if (unlikely(!sock)) {
27598 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
27599 lo->disk->disk_name, (send ? "send" : "recv"));
27600@@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
27601 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
27602 unsigned int cmd, unsigned long arg)
27603 {
27604+ pax_track_stack();
27605+
27606 switch (cmd) {
27607 case NBD_DISCONNECT: {
27608 struct request sreq;
27609diff -urNp linux-2.6.32.45/drivers/block/pktcdvd.c linux-2.6.32.45/drivers/block/pktcdvd.c
27610--- linux-2.6.32.45/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
27611+++ linux-2.6.32.45/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
27612@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
27613 return len;
27614 }
27615
27616-static struct sysfs_ops kobj_pkt_ops = {
27617+static const struct sysfs_ops kobj_pkt_ops = {
27618 .show = kobj_pkt_show,
27619 .store = kobj_pkt_store
27620 };
27621diff -urNp linux-2.6.32.45/drivers/char/agp/frontend.c linux-2.6.32.45/drivers/char/agp/frontend.c
27622--- linux-2.6.32.45/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
27623+++ linux-2.6.32.45/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
27624@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
27625 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27626 return -EFAULT;
27627
27628- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27629+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27630 return -EFAULT;
27631
27632 client = agp_find_client_by_pid(reserve.pid);
27633diff -urNp linux-2.6.32.45/drivers/char/briq_panel.c linux-2.6.32.45/drivers/char/briq_panel.c
27634--- linux-2.6.32.45/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
27635+++ linux-2.6.32.45/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
27636@@ -10,6 +10,7 @@
27637 #include <linux/types.h>
27638 #include <linux/errno.h>
27639 #include <linux/tty.h>
27640+#include <linux/mutex.h>
27641 #include <linux/timer.h>
27642 #include <linux/kernel.h>
27643 #include <linux/wait.h>
27644@@ -36,6 +37,7 @@ static int vfd_is_open;
27645 static unsigned char vfd[40];
27646 static int vfd_cursor;
27647 static unsigned char ledpb, led;
27648+static DEFINE_MUTEX(vfd_mutex);
27649
27650 static void update_vfd(void)
27651 {
27652@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
27653 if (!vfd_is_open)
27654 return -EBUSY;
27655
27656+ mutex_lock(&vfd_mutex);
27657 for (;;) {
27658 char c;
27659 if (!indx)
27660 break;
27661- if (get_user(c, buf))
27662+ if (get_user(c, buf)) {
27663+ mutex_unlock(&vfd_mutex);
27664 return -EFAULT;
27665+ }
27666 if (esc) {
27667 set_led(c);
27668 esc = 0;
27669@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
27670 buf++;
27671 }
27672 update_vfd();
27673+ mutex_unlock(&vfd_mutex);
27674
27675 return len;
27676 }
27677diff -urNp linux-2.6.32.45/drivers/char/genrtc.c linux-2.6.32.45/drivers/char/genrtc.c
27678--- linux-2.6.32.45/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
27679+++ linux-2.6.32.45/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
27680@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
27681 switch (cmd) {
27682
27683 case RTC_PLL_GET:
27684+ memset(&pll, 0, sizeof(pll));
27685 if (get_rtc_pll(&pll))
27686 return -EINVAL;
27687 else
27688diff -urNp linux-2.6.32.45/drivers/char/hpet.c linux-2.6.32.45/drivers/char/hpet.c
27689--- linux-2.6.32.45/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
27690+++ linux-2.6.32.45/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
27691@@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
27692 return 0;
27693 }
27694
27695-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
27696+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
27697
27698 static int
27699 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
27700@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
27701 }
27702
27703 static int
27704-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
27705+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
27706 {
27707 struct hpet_timer __iomem *timer;
27708 struct hpet __iomem *hpet;
27709@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
27710 {
27711 struct hpet_info info;
27712
27713+ memset(&info, 0, sizeof(info));
27714+
27715 if (devp->hd_ireqfreq)
27716 info.hi_ireqfreq =
27717 hpet_time_div(hpetp, devp->hd_ireqfreq);
27718- else
27719- info.hi_ireqfreq = 0;
27720 info.hi_flags =
27721 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
27722 info.hi_hpet = hpetp->hp_which;
27723diff -urNp linux-2.6.32.45/drivers/char/hvc_beat.c linux-2.6.32.45/drivers/char/hvc_beat.c
27724--- linux-2.6.32.45/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
27725+++ linux-2.6.32.45/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
27726@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
27727 return cnt;
27728 }
27729
27730-static struct hv_ops hvc_beat_get_put_ops = {
27731+static const struct hv_ops hvc_beat_get_put_ops = {
27732 .get_chars = hvc_beat_get_chars,
27733 .put_chars = hvc_beat_put_chars,
27734 };
27735diff -urNp linux-2.6.32.45/drivers/char/hvc_console.c linux-2.6.32.45/drivers/char/hvc_console.c
27736--- linux-2.6.32.45/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
27737+++ linux-2.6.32.45/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
27738@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
27739 * console interfaces but can still be used as a tty device. This has to be
27740 * static because kmalloc will not work during early console init.
27741 */
27742-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27743+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27744 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
27745 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
27746
27747@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
27748 * vty adapters do NOT get an hvc_instantiate() callback since they
27749 * appear after early console init.
27750 */
27751-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
27752+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
27753 {
27754 struct hvc_struct *hp;
27755
27756@@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
27757 };
27758
27759 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
27760- struct hv_ops *ops, int outbuf_size)
27761+ const struct hv_ops *ops, int outbuf_size)
27762 {
27763 struct hvc_struct *hp;
27764 int i;
27765diff -urNp linux-2.6.32.45/drivers/char/hvc_console.h linux-2.6.32.45/drivers/char/hvc_console.h
27766--- linux-2.6.32.45/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
27767+++ linux-2.6.32.45/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
27768@@ -55,7 +55,7 @@ struct hvc_struct {
27769 int outbuf_size;
27770 int n_outbuf;
27771 uint32_t vtermno;
27772- struct hv_ops *ops;
27773+ const struct hv_ops *ops;
27774 int irq_requested;
27775 int data;
27776 struct winsize ws;
27777@@ -76,11 +76,11 @@ struct hv_ops {
27778 };
27779
27780 /* Register a vterm and a slot index for use as a console (console_init) */
27781-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
27782+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
27783
27784 /* register a vterm for hvc tty operation (module_init or hotplug add) */
27785 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
27786- struct hv_ops *ops, int outbuf_size);
27787+ const struct hv_ops *ops, int outbuf_size);
27788 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27789 extern int hvc_remove(struct hvc_struct *hp);
27790
27791diff -urNp linux-2.6.32.45/drivers/char/hvc_iseries.c linux-2.6.32.45/drivers/char/hvc_iseries.c
27792--- linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27793+++ linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27794@@ -197,7 +197,7 @@ done:
27795 return sent;
27796 }
27797
27798-static struct hv_ops hvc_get_put_ops = {
27799+static const struct hv_ops hvc_get_put_ops = {
27800 .get_chars = get_chars,
27801 .put_chars = put_chars,
27802 .notifier_add = notifier_add_irq,
27803diff -urNp linux-2.6.32.45/drivers/char/hvc_iucv.c linux-2.6.32.45/drivers/char/hvc_iucv.c
27804--- linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27805+++ linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27806@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27807
27808
27809 /* HVC operations */
27810-static struct hv_ops hvc_iucv_ops = {
27811+static const struct hv_ops hvc_iucv_ops = {
27812 .get_chars = hvc_iucv_get_chars,
27813 .put_chars = hvc_iucv_put_chars,
27814 .notifier_add = hvc_iucv_notifier_add,
27815diff -urNp linux-2.6.32.45/drivers/char/hvc_rtas.c linux-2.6.32.45/drivers/char/hvc_rtas.c
27816--- linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27817+++ linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27818@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27819 return i;
27820 }
27821
27822-static struct hv_ops hvc_rtas_get_put_ops = {
27823+static const struct hv_ops hvc_rtas_get_put_ops = {
27824 .get_chars = hvc_rtas_read_console,
27825 .put_chars = hvc_rtas_write_console,
27826 };
27827diff -urNp linux-2.6.32.45/drivers/char/hvcs.c linux-2.6.32.45/drivers/char/hvcs.c
27828--- linux-2.6.32.45/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27829+++ linux-2.6.32.45/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27830@@ -82,6 +82,7 @@
27831 #include <asm/hvcserver.h>
27832 #include <asm/uaccess.h>
27833 #include <asm/vio.h>
27834+#include <asm/local.h>
27835
27836 /*
27837 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27838@@ -269,7 +270,7 @@ struct hvcs_struct {
27839 unsigned int index;
27840
27841 struct tty_struct *tty;
27842- int open_count;
27843+ local_t open_count;
27844
27845 /*
27846 * Used to tell the driver kernel_thread what operations need to take
27847@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27848
27849 spin_lock_irqsave(&hvcsd->lock, flags);
27850
27851- if (hvcsd->open_count > 0) {
27852+ if (local_read(&hvcsd->open_count) > 0) {
27853 spin_unlock_irqrestore(&hvcsd->lock, flags);
27854 printk(KERN_INFO "HVCS: vterm state unchanged. "
27855 "The hvcs device node is still in use.\n");
27856@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27857 if ((retval = hvcs_partner_connect(hvcsd)))
27858 goto error_release;
27859
27860- hvcsd->open_count = 1;
27861+ local_set(&hvcsd->open_count, 1);
27862 hvcsd->tty = tty;
27863 tty->driver_data = hvcsd;
27864
27865@@ -1169,7 +1170,7 @@ fast_open:
27866
27867 spin_lock_irqsave(&hvcsd->lock, flags);
27868 kref_get(&hvcsd->kref);
27869- hvcsd->open_count++;
27870+ local_inc(&hvcsd->open_count);
27871 hvcsd->todo_mask |= HVCS_SCHED_READ;
27872 spin_unlock_irqrestore(&hvcsd->lock, flags);
27873
27874@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27875 hvcsd = tty->driver_data;
27876
27877 spin_lock_irqsave(&hvcsd->lock, flags);
27878- if (--hvcsd->open_count == 0) {
27879+ if (local_dec_and_test(&hvcsd->open_count)) {
27880
27881 vio_disable_interrupts(hvcsd->vdev);
27882
27883@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27884 free_irq(irq, hvcsd);
27885 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27886 return;
27887- } else if (hvcsd->open_count < 0) {
27888+ } else if (local_read(&hvcsd->open_count) < 0) {
27889 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27890 " is missmanaged.\n",
27891- hvcsd->vdev->unit_address, hvcsd->open_count);
27892+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27893 }
27894
27895 spin_unlock_irqrestore(&hvcsd->lock, flags);
27896@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27897
27898 spin_lock_irqsave(&hvcsd->lock, flags);
27899 /* Preserve this so that we know how many kref refs to put */
27900- temp_open_count = hvcsd->open_count;
27901+ temp_open_count = local_read(&hvcsd->open_count);
27902
27903 /*
27904 * Don't kref put inside the spinlock because the destruction
27905@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27906 hvcsd->tty->driver_data = NULL;
27907 hvcsd->tty = NULL;
27908
27909- hvcsd->open_count = 0;
27910+ local_set(&hvcsd->open_count, 0);
27911
27912 /* This will drop any buffered data on the floor which is OK in a hangup
27913 * scenario. */
27914@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27915 * the middle of a write operation? This is a crummy place to do this
27916 * but we want to keep it all in the spinlock.
27917 */
27918- if (hvcsd->open_count <= 0) {
27919+ if (local_read(&hvcsd->open_count) <= 0) {
27920 spin_unlock_irqrestore(&hvcsd->lock, flags);
27921 return -ENODEV;
27922 }
27923@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27924 {
27925 struct hvcs_struct *hvcsd = tty->driver_data;
27926
27927- if (!hvcsd || hvcsd->open_count <= 0)
27928+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27929 return 0;
27930
27931 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
27932diff -urNp linux-2.6.32.45/drivers/char/hvc_udbg.c linux-2.6.32.45/drivers/char/hvc_udbg.c
27933--- linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27934+++ linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27935@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27936 return i;
27937 }
27938
27939-static struct hv_ops hvc_udbg_ops = {
27940+static const struct hv_ops hvc_udbg_ops = {
27941 .get_chars = hvc_udbg_get,
27942 .put_chars = hvc_udbg_put,
27943 };
27944diff -urNp linux-2.6.32.45/drivers/char/hvc_vio.c linux-2.6.32.45/drivers/char/hvc_vio.c
27945--- linux-2.6.32.45/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27946+++ linux-2.6.32.45/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27947@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27948 return got;
27949 }
27950
27951-static struct hv_ops hvc_get_put_ops = {
27952+static const struct hv_ops hvc_get_put_ops = {
27953 .get_chars = filtered_get_chars,
27954 .put_chars = hvc_put_chars,
27955 .notifier_add = notifier_add_irq,
27956diff -urNp linux-2.6.32.45/drivers/char/hvc_xen.c linux-2.6.32.45/drivers/char/hvc_xen.c
27957--- linux-2.6.32.45/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27958+++ linux-2.6.32.45/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27959@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27960 return recv;
27961 }
27962
27963-static struct hv_ops hvc_ops = {
27964+static const struct hv_ops hvc_ops = {
27965 .get_chars = read_console,
27966 .put_chars = write_console,
27967 .notifier_add = notifier_add_irq,
27968diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c
27969--- linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27970+++ linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27971@@ -414,7 +414,7 @@ struct ipmi_smi {
27972 struct proc_dir_entry *proc_dir;
27973 char proc_dir_name[10];
27974
27975- atomic_t stats[IPMI_NUM_STATS];
27976+ atomic_unchecked_t stats[IPMI_NUM_STATS];
27977
27978 /*
27979 * run_to_completion duplicate of smb_info, smi_info
27980@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27981
27982
27983 #define ipmi_inc_stat(intf, stat) \
27984- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27985+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27986 #define ipmi_get_stat(intf, stat) \
27987- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27988+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27989
27990 static int is_lan_addr(struct ipmi_addr *addr)
27991 {
27992@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27993 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27994 init_waitqueue_head(&intf->waitq);
27995 for (i = 0; i < IPMI_NUM_STATS; i++)
27996- atomic_set(&intf->stats[i], 0);
27997+ atomic_set_unchecked(&intf->stats[i], 0);
27998
27999 intf->proc_dir = NULL;
28000
28001@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
28002 struct ipmi_smi_msg smi_msg;
28003 struct ipmi_recv_msg recv_msg;
28004
28005+ pax_track_stack();
28006+
28007 si = (struct ipmi_system_interface_addr *) &addr;
28008 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
28009 si->channel = IPMI_BMC_CHANNEL;
28010diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c
28011--- linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
28012+++ linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
28013@@ -277,7 +277,7 @@ struct smi_info {
28014 unsigned char slave_addr;
28015
28016 /* Counters and things for the proc filesystem. */
28017- atomic_t stats[SI_NUM_STATS];
28018+ atomic_unchecked_t stats[SI_NUM_STATS];
28019
28020 struct task_struct *thread;
28021
28022@@ -285,9 +285,9 @@ struct smi_info {
28023 };
28024
28025 #define smi_inc_stat(smi, stat) \
28026- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
28027+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
28028 #define smi_get_stat(smi, stat) \
28029- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
28030+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
28031
28032 #define SI_MAX_PARMS 4
28033
28034@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
28035 atomic_set(&new_smi->req_events, 0);
28036 new_smi->run_to_completion = 0;
28037 for (i = 0; i < SI_NUM_STATS; i++)
28038- atomic_set(&new_smi->stats[i], 0);
28039+ atomic_set_unchecked(&new_smi->stats[i], 0);
28040
28041 new_smi->interrupt_disabled = 0;
28042 atomic_set(&new_smi->stop_operation, 0);
28043diff -urNp linux-2.6.32.45/drivers/char/istallion.c linux-2.6.32.45/drivers/char/istallion.c
28044--- linux-2.6.32.45/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
28045+++ linux-2.6.32.45/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
28046@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
28047 * re-used for each stats call.
28048 */
28049 static comstats_t stli_comstats;
28050-static combrd_t stli_brdstats;
28051 static struct asystats stli_cdkstats;
28052
28053 /*****************************************************************************/
28054@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
28055 {
28056 struct stlibrd *brdp;
28057 unsigned int i;
28058+ combrd_t stli_brdstats;
28059
28060 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
28061 return -EFAULT;
28062@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
28063 struct stliport stli_dummyport;
28064 struct stliport *portp;
28065
28066+ pax_track_stack();
28067+
28068 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
28069 return -EFAULT;
28070 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
28071@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
28072 struct stlibrd stli_dummybrd;
28073 struct stlibrd *brdp;
28074
28075+ pax_track_stack();
28076+
28077 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
28078 return -EFAULT;
28079 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
28080diff -urNp linux-2.6.32.45/drivers/char/Kconfig linux-2.6.32.45/drivers/char/Kconfig
28081--- linux-2.6.32.45/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
28082+++ linux-2.6.32.45/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
28083@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
28084
28085 config DEVKMEM
28086 bool "/dev/kmem virtual device support"
28087- default y
28088+ default n
28089+ depends on !GRKERNSEC_KMEM
28090 help
28091 Say Y here if you want to support the /dev/kmem device. The
28092 /dev/kmem device is rarely used, but can be used for certain
28093@@ -1114,6 +1115,7 @@ config DEVPORT
28094 bool
28095 depends on !M68K
28096 depends on ISA || PCI
28097+ depends on !GRKERNSEC_KMEM
28098 default y
28099
28100 source "drivers/s390/char/Kconfig"
28101diff -urNp linux-2.6.32.45/drivers/char/keyboard.c linux-2.6.32.45/drivers/char/keyboard.c
28102--- linux-2.6.32.45/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
28103+++ linux-2.6.32.45/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
28104@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
28105 kbd->kbdmode == VC_MEDIUMRAW) &&
28106 value != KVAL(K_SAK))
28107 return; /* SAK is allowed even in raw mode */
28108+
28109+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
28110+ {
28111+ void *func = fn_handler[value];
28112+ if (func == fn_show_state || func == fn_show_ptregs ||
28113+ func == fn_show_mem)
28114+ return;
28115+ }
28116+#endif
28117+
28118 fn_handler[value](vc);
28119 }
28120
28121@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
28122 .evbit = { BIT_MASK(EV_SND) },
28123 },
28124
28125- { }, /* Terminating entry */
28126+ { 0 }, /* Terminating entry */
28127 };
28128
28129 MODULE_DEVICE_TABLE(input, kbd_ids);
28130diff -urNp linux-2.6.32.45/drivers/char/mem.c linux-2.6.32.45/drivers/char/mem.c
28131--- linux-2.6.32.45/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
28132+++ linux-2.6.32.45/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
28133@@ -18,6 +18,7 @@
28134 #include <linux/raw.h>
28135 #include <linux/tty.h>
28136 #include <linux/capability.h>
28137+#include <linux/security.h>
28138 #include <linux/ptrace.h>
28139 #include <linux/device.h>
28140 #include <linux/highmem.h>
28141@@ -35,6 +36,10 @@
28142 # include <linux/efi.h>
28143 #endif
28144
28145+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28146+extern struct file_operations grsec_fops;
28147+#endif
28148+
28149 static inline unsigned long size_inside_page(unsigned long start,
28150 unsigned long size)
28151 {
28152@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
28153
28154 while (cursor < to) {
28155 if (!devmem_is_allowed(pfn)) {
28156+#ifdef CONFIG_GRKERNSEC_KMEM
28157+ gr_handle_mem_readwrite(from, to);
28158+#else
28159 printk(KERN_INFO
28160 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
28161 current->comm, from, to);
28162+#endif
28163 return 0;
28164 }
28165 cursor += PAGE_SIZE;
28166@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
28167 }
28168 return 1;
28169 }
28170+#elif defined(CONFIG_GRKERNSEC_KMEM)
28171+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28172+{
28173+ return 0;
28174+}
28175 #else
28176 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28177 {
28178@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
28179 #endif
28180
28181 while (count > 0) {
28182+ char *temp;
28183+
28184 /*
28185 * Handle first page in case it's not aligned
28186 */
28187@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
28188 if (!ptr)
28189 return -EFAULT;
28190
28191- if (copy_to_user(buf, ptr, sz)) {
28192+#ifdef CONFIG_PAX_USERCOPY
28193+ temp = kmalloc(sz, GFP_KERNEL);
28194+ if (!temp) {
28195+ unxlate_dev_mem_ptr(p, ptr);
28196+ return -ENOMEM;
28197+ }
28198+ memcpy(temp, ptr, sz);
28199+#else
28200+ temp = ptr;
28201+#endif
28202+
28203+ if (copy_to_user(buf, temp, sz)) {
28204+
28205+#ifdef CONFIG_PAX_USERCOPY
28206+ kfree(temp);
28207+#endif
28208+
28209 unxlate_dev_mem_ptr(p, ptr);
28210 return -EFAULT;
28211 }
28212
28213+#ifdef CONFIG_PAX_USERCOPY
28214+ kfree(temp);
28215+#endif
28216+
28217 unxlate_dev_mem_ptr(p, ptr);
28218
28219 buf += sz;
28220@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
28221 size_t count, loff_t *ppos)
28222 {
28223 unsigned long p = *ppos;
28224- ssize_t low_count, read, sz;
28225+ ssize_t low_count, read, sz, err = 0;
28226 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
28227- int err = 0;
28228
28229 read = 0;
28230 if (p < (unsigned long) high_memory) {
28231@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
28232 }
28233 #endif
28234 while (low_count > 0) {
28235+ char *temp;
28236+
28237 sz = size_inside_page(p, low_count);
28238
28239 /*
28240@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
28241 */
28242 kbuf = xlate_dev_kmem_ptr((char *)p);
28243
28244- if (copy_to_user(buf, kbuf, sz))
28245+#ifdef CONFIG_PAX_USERCOPY
28246+ temp = kmalloc(sz, GFP_KERNEL);
28247+ if (!temp)
28248+ return -ENOMEM;
28249+ memcpy(temp, kbuf, sz);
28250+#else
28251+ temp = kbuf;
28252+#endif
28253+
28254+ err = copy_to_user(buf, temp, sz);
28255+
28256+#ifdef CONFIG_PAX_USERCOPY
28257+ kfree(temp);
28258+#endif
28259+
28260+ if (err)
28261 return -EFAULT;
28262 buf += sz;
28263 p += sz;
28264@@ -889,6 +941,9 @@ static const struct memdev {
28265 #ifdef CONFIG_CRASH_DUMP
28266 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28267 #endif
28268+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28269+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28270+#endif
28271 };
28272
28273 static int memory_open(struct inode *inode, struct file *filp)
28274diff -urNp linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c
28275--- linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
28276+++ linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
28277@@ -29,6 +29,7 @@
28278 #include <linux/tty_driver.h>
28279 #include <linux/tty_flip.h>
28280 #include <linux/uaccess.h>
28281+#include <asm/local.h>
28282
28283 #include "tty.h"
28284 #include "network.h"
28285@@ -51,7 +52,7 @@ struct ipw_tty {
28286 int tty_type;
28287 struct ipw_network *network;
28288 struct tty_struct *linux_tty;
28289- int open_count;
28290+ local_t open_count;
28291 unsigned int control_lines;
28292 struct mutex ipw_tty_mutex;
28293 int tx_bytes_queued;
28294@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
28295 mutex_unlock(&tty->ipw_tty_mutex);
28296 return -ENODEV;
28297 }
28298- if (tty->open_count == 0)
28299+ if (local_read(&tty->open_count) == 0)
28300 tty->tx_bytes_queued = 0;
28301
28302- tty->open_count++;
28303+ local_inc(&tty->open_count);
28304
28305 tty->linux_tty = linux_tty;
28306 linux_tty->driver_data = tty;
28307@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
28308
28309 static void do_ipw_close(struct ipw_tty *tty)
28310 {
28311- tty->open_count--;
28312-
28313- if (tty->open_count == 0) {
28314+ if (local_dec_return(&tty->open_count) == 0) {
28315 struct tty_struct *linux_tty = tty->linux_tty;
28316
28317 if (linux_tty != NULL) {
28318@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
28319 return;
28320
28321 mutex_lock(&tty->ipw_tty_mutex);
28322- if (tty->open_count == 0) {
28323+ if (local_read(&tty->open_count) == 0) {
28324 mutex_unlock(&tty->ipw_tty_mutex);
28325 return;
28326 }
28327@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
28328 return;
28329 }
28330
28331- if (!tty->open_count) {
28332+ if (!local_read(&tty->open_count)) {
28333 mutex_unlock(&tty->ipw_tty_mutex);
28334 return;
28335 }
28336@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
28337 return -ENODEV;
28338
28339 mutex_lock(&tty->ipw_tty_mutex);
28340- if (!tty->open_count) {
28341+ if (!local_read(&tty->open_count)) {
28342 mutex_unlock(&tty->ipw_tty_mutex);
28343 return -EINVAL;
28344 }
28345@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
28346 if (!tty)
28347 return -ENODEV;
28348
28349- if (!tty->open_count)
28350+ if (!local_read(&tty->open_count))
28351 return -EINVAL;
28352
28353 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
28354@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
28355 if (!tty)
28356 return 0;
28357
28358- if (!tty->open_count)
28359+ if (!local_read(&tty->open_count))
28360 return 0;
28361
28362 return tty->tx_bytes_queued;
28363@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
28364 if (!tty)
28365 return -ENODEV;
28366
28367- if (!tty->open_count)
28368+ if (!local_read(&tty->open_count))
28369 return -EINVAL;
28370
28371 return get_control_lines(tty);
28372@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
28373 if (!tty)
28374 return -ENODEV;
28375
28376- if (!tty->open_count)
28377+ if (!local_read(&tty->open_count))
28378 return -EINVAL;
28379
28380 return set_control_lines(tty, set, clear);
28381@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
28382 if (!tty)
28383 return -ENODEV;
28384
28385- if (!tty->open_count)
28386+ if (!local_read(&tty->open_count))
28387 return -EINVAL;
28388
28389 /* FIXME: Exactly how is the tty object locked here .. */
28390@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
28391 against a parallel ioctl etc */
28392 mutex_lock(&ttyj->ipw_tty_mutex);
28393 }
28394- while (ttyj->open_count)
28395+ while (local_read(&ttyj->open_count))
28396 do_ipw_close(ttyj);
28397 ipwireless_disassociate_network_ttys(network,
28398 ttyj->channel_idx);
28399diff -urNp linux-2.6.32.45/drivers/char/pty.c linux-2.6.32.45/drivers/char/pty.c
28400--- linux-2.6.32.45/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
28401+++ linux-2.6.32.45/drivers/char/pty.c 2011-08-05 20:33:55.000000000 -0400
28402@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
28403 register_sysctl_table(pty_root_table);
28404
28405 /* Now create the /dev/ptmx special device */
28406+ pax_open_kernel();
28407 tty_default_fops(&ptmx_fops);
28408- ptmx_fops.open = ptmx_open;
28409+ *(void **)&ptmx_fops.open = ptmx_open;
28410+ pax_close_kernel();
28411
28412 cdev_init(&ptmx_cdev, &ptmx_fops);
28413 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
28414diff -urNp linux-2.6.32.45/drivers/char/random.c linux-2.6.32.45/drivers/char/random.c
28415--- linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:37:25.000000000 -0400
28416+++ linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:43:23.000000000 -0400
28417@@ -254,8 +254,13 @@
28418 /*
28419 * Configuration information
28420 */
28421+#ifdef CONFIG_GRKERNSEC_RANDNET
28422+#define INPUT_POOL_WORDS 512
28423+#define OUTPUT_POOL_WORDS 128
28424+#else
28425 #define INPUT_POOL_WORDS 128
28426 #define OUTPUT_POOL_WORDS 32
28427+#endif
28428 #define SEC_XFER_SIZE 512
28429
28430 /*
28431@@ -292,10 +297,17 @@ static struct poolinfo {
28432 int poolwords;
28433 int tap1, tap2, tap3, tap4, tap5;
28434 } poolinfo_table[] = {
28435+#ifdef CONFIG_GRKERNSEC_RANDNET
28436+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28437+ { 512, 411, 308, 208, 104, 1 },
28438+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28439+ { 128, 103, 76, 51, 25, 1 },
28440+#else
28441 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28442 { 128, 103, 76, 51, 25, 1 },
28443 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28444 { 32, 26, 20, 14, 7, 1 },
28445+#endif
28446 #if 0
28447 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28448 { 2048, 1638, 1231, 819, 411, 1 },
28449@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28450 #include <linux/sysctl.h>
28451
28452 static int min_read_thresh = 8, min_write_thresh;
28453-static int max_read_thresh = INPUT_POOL_WORDS * 32;
28454+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28455 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28456 static char sysctl_bootid[16];
28457
28458diff -urNp linux-2.6.32.45/drivers/char/rocket.c linux-2.6.32.45/drivers/char/rocket.c
28459--- linux-2.6.32.45/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
28460+++ linux-2.6.32.45/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
28461@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
28462 struct rocket_ports tmp;
28463 int board;
28464
28465+ pax_track_stack();
28466+
28467 if (!retports)
28468 return -EFAULT;
28469 memset(&tmp, 0, sizeof (tmp));
28470diff -urNp linux-2.6.32.45/drivers/char/sonypi.c linux-2.6.32.45/drivers/char/sonypi.c
28471--- linux-2.6.32.45/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
28472+++ linux-2.6.32.45/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
28473@@ -55,6 +55,7 @@
28474 #include <asm/uaccess.h>
28475 #include <asm/io.h>
28476 #include <asm/system.h>
28477+#include <asm/local.h>
28478
28479 #include <linux/sonypi.h>
28480
28481@@ -491,7 +492,7 @@ static struct sonypi_device {
28482 spinlock_t fifo_lock;
28483 wait_queue_head_t fifo_proc_list;
28484 struct fasync_struct *fifo_async;
28485- int open_count;
28486+ local_t open_count;
28487 int model;
28488 struct input_dev *input_jog_dev;
28489 struct input_dev *input_key_dev;
28490@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
28491 static int sonypi_misc_release(struct inode *inode, struct file *file)
28492 {
28493 mutex_lock(&sonypi_device.lock);
28494- sonypi_device.open_count--;
28495+ local_dec(&sonypi_device.open_count);
28496 mutex_unlock(&sonypi_device.lock);
28497 return 0;
28498 }
28499@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
28500 lock_kernel();
28501 mutex_lock(&sonypi_device.lock);
28502 /* Flush input queue on first open */
28503- if (!sonypi_device.open_count)
28504+ if (!local_read(&sonypi_device.open_count))
28505 kfifo_reset(sonypi_device.fifo);
28506- sonypi_device.open_count++;
28507+ local_inc(&sonypi_device.open_count);
28508 mutex_unlock(&sonypi_device.lock);
28509 unlock_kernel();
28510 return 0;
28511diff -urNp linux-2.6.32.45/drivers/char/stallion.c linux-2.6.32.45/drivers/char/stallion.c
28512--- linux-2.6.32.45/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
28513+++ linux-2.6.32.45/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
28514@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
28515 struct stlport stl_dummyport;
28516 struct stlport *portp;
28517
28518+ pax_track_stack();
28519+
28520 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
28521 return -EFAULT;
28522 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
28523diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm_bios.c linux-2.6.32.45/drivers/char/tpm/tpm_bios.c
28524--- linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
28525+++ linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
28526@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
28527 event = addr;
28528
28529 if ((event->event_type == 0 && event->event_size == 0) ||
28530- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28531+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28532 return NULL;
28533
28534 return addr;
28535@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
28536 return NULL;
28537
28538 if ((event->event_type == 0 && event->event_size == 0) ||
28539- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28540+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28541 return NULL;
28542
28543 (*pos)++;
28544@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
28545 int i;
28546
28547 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28548- seq_putc(m, data[i]);
28549+ if (!seq_putc(m, data[i]))
28550+ return -EFAULT;
28551
28552 return 0;
28553 }
28554@@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
28555 log->bios_event_log_end = log->bios_event_log + len;
28556
28557 virt = acpi_os_map_memory(start, len);
28558+ if (!virt) {
28559+ kfree(log->bios_event_log);
28560+ log->bios_event_log = NULL;
28561+ return -EFAULT;
28562+ }
28563
28564 memcpy(log->bios_event_log, virt, len);
28565
28566diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm.c linux-2.6.32.45/drivers/char/tpm/tpm.c
28567--- linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
28568+++ linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
28569@@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
28570 chip->vendor.req_complete_val)
28571 goto out_recv;
28572
28573- if ((status == chip->vendor.req_canceled)) {
28574+ if (status == chip->vendor.req_canceled) {
28575 dev_err(chip->dev, "Operation Canceled\n");
28576 rc = -ECANCELED;
28577 goto out;
28578@@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
28579
28580 struct tpm_chip *chip = dev_get_drvdata(dev);
28581
28582+ pax_track_stack();
28583+
28584 tpm_cmd.header.in = tpm_readpubek_header;
28585 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
28586 "attempting to read the PUBEK");
28587diff -urNp linux-2.6.32.45/drivers/char/tty_io.c linux-2.6.32.45/drivers/char/tty_io.c
28588--- linux-2.6.32.45/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
28589+++ linux-2.6.32.45/drivers/char/tty_io.c 2011-08-05 20:33:55.000000000 -0400
28590@@ -2582,8 +2582,10 @@ long tty_ioctl(struct file *file, unsign
28591 return retval;
28592 }
28593
28594+EXPORT_SYMBOL(tty_ioctl);
28595+
28596 #ifdef CONFIG_COMPAT
28597-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
28598+long tty_compat_ioctl(struct file *file, unsigned int cmd,
28599 unsigned long arg)
28600 {
28601 struct inode *inode = file->f_dentry->d_inode;
28602@@ -2607,6 +2609,8 @@ static long tty_compat_ioctl(struct file
28603
28604 return retval;
28605 }
28606+
28607+EXPORT_SYMBOL(tty_compat_ioctl);
28608 #endif
28609
28610 /*
28611@@ -3052,7 +3056,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
28612
28613 void tty_default_fops(struct file_operations *fops)
28614 {
28615- *fops = tty_fops;
28616+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
28617 }
28618
28619 /*
28620diff -urNp linux-2.6.32.45/drivers/char/tty_ldisc.c linux-2.6.32.45/drivers/char/tty_ldisc.c
28621--- linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
28622+++ linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
28623@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
28624 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
28625 struct tty_ldisc_ops *ldo = ld->ops;
28626
28627- ldo->refcount--;
28628+ atomic_dec(&ldo->refcount);
28629 module_put(ldo->owner);
28630 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28631
28632@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
28633 spin_lock_irqsave(&tty_ldisc_lock, flags);
28634 tty_ldiscs[disc] = new_ldisc;
28635 new_ldisc->num = disc;
28636- new_ldisc->refcount = 0;
28637+ atomic_set(&new_ldisc->refcount, 0);
28638 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28639
28640 return ret;
28641@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
28642 return -EINVAL;
28643
28644 spin_lock_irqsave(&tty_ldisc_lock, flags);
28645- if (tty_ldiscs[disc]->refcount)
28646+ if (atomic_read(&tty_ldiscs[disc]->refcount))
28647 ret = -EBUSY;
28648 else
28649 tty_ldiscs[disc] = NULL;
28650@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
28651 if (ldops) {
28652 ret = ERR_PTR(-EAGAIN);
28653 if (try_module_get(ldops->owner)) {
28654- ldops->refcount++;
28655+ atomic_inc(&ldops->refcount);
28656 ret = ldops;
28657 }
28658 }
28659@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
28660 unsigned long flags;
28661
28662 spin_lock_irqsave(&tty_ldisc_lock, flags);
28663- ldops->refcount--;
28664+ atomic_dec(&ldops->refcount);
28665 module_put(ldops->owner);
28666 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28667 }
28668diff -urNp linux-2.6.32.45/drivers/char/virtio_console.c linux-2.6.32.45/drivers/char/virtio_console.c
28669--- linux-2.6.32.45/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28670+++ linux-2.6.32.45/drivers/char/virtio_console.c 2011-08-05 20:33:55.000000000 -0400
28671@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *
28672 * virtqueue, so we let the drivers do some boutique early-output thing. */
28673 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
28674 {
28675- virtio_cons.put_chars = put_chars;
28676+ pax_open_kernel();
28677+ *(void **)&virtio_cons.put_chars = put_chars;
28678+ pax_close_kernel();
28679 return hvc_instantiate(0, 0, &virtio_cons);
28680 }
28681
28682@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(stru
28683 out_vq = vqs[1];
28684
28685 /* Start using the new console output. */
28686- virtio_cons.get_chars = get_chars;
28687- virtio_cons.put_chars = put_chars;
28688- virtio_cons.notifier_add = notifier_add_vio;
28689- virtio_cons.notifier_del = notifier_del_vio;
28690- virtio_cons.notifier_hangup = notifier_del_vio;
28691+ pax_open_kernel();
28692+ *(void **)&virtio_cons.get_chars = get_chars;
28693+ *(void **)&virtio_cons.put_chars = put_chars;
28694+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
28695+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
28696+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
28697+ pax_close_kernel();
28698
28699 /* The first argument of hvc_alloc() is the virtual console number, so
28700 * we use zero. The second argument is the parameter for the
28701diff -urNp linux-2.6.32.45/drivers/char/vt.c linux-2.6.32.45/drivers/char/vt.c
28702--- linux-2.6.32.45/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28703+++ linux-2.6.32.45/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28704@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28705
28706 static void notify_write(struct vc_data *vc, unsigned int unicode)
28707 {
28708- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28709+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
28710 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28711 }
28712
28713diff -urNp linux-2.6.32.45/drivers/char/vt_ioctl.c linux-2.6.32.45/drivers/char/vt_ioctl.c
28714--- linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28715+++ linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28716@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28717 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28718 return -EFAULT;
28719
28720- if (!capable(CAP_SYS_TTY_CONFIG))
28721- perm = 0;
28722-
28723 switch (cmd) {
28724 case KDGKBENT:
28725 key_map = key_maps[s];
28726@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28727 val = (i ? K_HOLE : K_NOSUCHMAP);
28728 return put_user(val, &user_kbe->kb_value);
28729 case KDSKBENT:
28730+ if (!capable(CAP_SYS_TTY_CONFIG))
28731+ perm = 0;
28732+
28733 if (!perm)
28734 return -EPERM;
28735+
28736 if (!i && v == K_NOSUCHMAP) {
28737 /* deallocate map */
28738 key_map = key_maps[s];
28739@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28740 int i, j, k;
28741 int ret;
28742
28743- if (!capable(CAP_SYS_TTY_CONFIG))
28744- perm = 0;
28745-
28746 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28747 if (!kbs) {
28748 ret = -ENOMEM;
28749@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28750 kfree(kbs);
28751 return ((p && *p) ? -EOVERFLOW : 0);
28752 case KDSKBSENT:
28753+ if (!capable(CAP_SYS_TTY_CONFIG))
28754+ perm = 0;
28755+
28756 if (!perm) {
28757 ret = -EPERM;
28758 goto reterr;
28759diff -urNp linux-2.6.32.45/drivers/cpufreq/cpufreq.c linux-2.6.32.45/drivers/cpufreq/cpufreq.c
28760--- linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28761+++ linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28762@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28763 complete(&policy->kobj_unregister);
28764 }
28765
28766-static struct sysfs_ops sysfs_ops = {
28767+static const struct sysfs_ops sysfs_ops = {
28768 .show = show,
28769 .store = store,
28770 };
28771diff -urNp linux-2.6.32.45/drivers/cpuidle/sysfs.c linux-2.6.32.45/drivers/cpuidle/sysfs.c
28772--- linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28773+++ linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28774@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28775 return ret;
28776 }
28777
28778-static struct sysfs_ops cpuidle_sysfs_ops = {
28779+static const struct sysfs_ops cpuidle_sysfs_ops = {
28780 .show = cpuidle_show,
28781 .store = cpuidle_store,
28782 };
28783@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28784 return ret;
28785 }
28786
28787-static struct sysfs_ops cpuidle_state_sysfs_ops = {
28788+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28789 .show = cpuidle_state_show,
28790 };
28791
28792@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28793 .release = cpuidle_state_sysfs_release,
28794 };
28795
28796-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28797+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28798 {
28799 kobject_put(&device->kobjs[i]->kobj);
28800 wait_for_completion(&device->kobjs[i]->kobj_unregister);
28801diff -urNp linux-2.6.32.45/drivers/crypto/hifn_795x.c linux-2.6.32.45/drivers/crypto/hifn_795x.c
28802--- linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28803+++ linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28804@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28805 0xCA, 0x34, 0x2B, 0x2E};
28806 struct scatterlist sg;
28807
28808+ pax_track_stack();
28809+
28810 memset(src, 0, sizeof(src));
28811 memset(ctx.key, 0, sizeof(ctx.key));
28812
28813diff -urNp linux-2.6.32.45/drivers/crypto/padlock-aes.c linux-2.6.32.45/drivers/crypto/padlock-aes.c
28814--- linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28815+++ linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28816@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28817 struct crypto_aes_ctx gen_aes;
28818 int cpu;
28819
28820+ pax_track_stack();
28821+
28822 if (key_len % 8) {
28823 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28824 return -EINVAL;
28825diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.c linux-2.6.32.45/drivers/dma/ioat/dma.c
28826--- linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28827+++ linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28828@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28829 return entry->show(&chan->common, page);
28830 }
28831
28832-struct sysfs_ops ioat_sysfs_ops = {
28833+const struct sysfs_ops ioat_sysfs_ops = {
28834 .show = ioat_attr_show,
28835 };
28836
28837diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.h linux-2.6.32.45/drivers/dma/ioat/dma.h
28838--- linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28839+++ linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28840@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28841 unsigned long *phys_complete);
28842 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28843 void ioat_kobject_del(struct ioatdma_device *device);
28844-extern struct sysfs_ops ioat_sysfs_ops;
28845+extern const struct sysfs_ops ioat_sysfs_ops;
28846 extern struct ioat_sysfs_entry ioat_version_attr;
28847 extern struct ioat_sysfs_entry ioat_cap_attr;
28848 #endif /* IOATDMA_H */
28849diff -urNp linux-2.6.32.45/drivers/edac/edac_device_sysfs.c linux-2.6.32.45/drivers/edac/edac_device_sysfs.c
28850--- linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28851+++ linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28852@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28853 }
28854
28855 /* edac_dev file operations for an 'ctl_info' */
28856-static struct sysfs_ops device_ctl_info_ops = {
28857+static const struct sysfs_ops device_ctl_info_ops = {
28858 .show = edac_dev_ctl_info_show,
28859 .store = edac_dev_ctl_info_store
28860 };
28861@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28862 }
28863
28864 /* edac_dev file operations for an 'instance' */
28865-static struct sysfs_ops device_instance_ops = {
28866+static const struct sysfs_ops device_instance_ops = {
28867 .show = edac_dev_instance_show,
28868 .store = edac_dev_instance_store
28869 };
28870@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28871 }
28872
28873 /* edac_dev file operations for a 'block' */
28874-static struct sysfs_ops device_block_ops = {
28875+static const struct sysfs_ops device_block_ops = {
28876 .show = edac_dev_block_show,
28877 .store = edac_dev_block_store
28878 };
28879diff -urNp linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c
28880--- linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28881+++ linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28882@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28883 return -EIO;
28884 }
28885
28886-static struct sysfs_ops csrowfs_ops = {
28887+static const struct sysfs_ops csrowfs_ops = {
28888 .show = csrowdev_show,
28889 .store = csrowdev_store
28890 };
28891@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28892 }
28893
28894 /* Intermediate show/store table */
28895-static struct sysfs_ops mci_ops = {
28896+static const struct sysfs_ops mci_ops = {
28897 .show = mcidev_show,
28898 .store = mcidev_store
28899 };
28900diff -urNp linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c
28901--- linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28902+++ linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28903@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28904 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28905 static int edac_pci_poll_msec = 1000; /* one second workq period */
28906
28907-static atomic_t pci_parity_count = ATOMIC_INIT(0);
28908-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28909+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28910+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28911
28912 static struct kobject *edac_pci_top_main_kobj;
28913 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28914@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28915 }
28916
28917 /* fs_ops table */
28918-static struct sysfs_ops pci_instance_ops = {
28919+static const struct sysfs_ops pci_instance_ops = {
28920 .show = edac_pci_instance_show,
28921 .store = edac_pci_instance_store
28922 };
28923@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28924 return -EIO;
28925 }
28926
28927-static struct sysfs_ops edac_pci_sysfs_ops = {
28928+static const struct sysfs_ops edac_pci_sysfs_ops = {
28929 .show = edac_pci_dev_show,
28930 .store = edac_pci_dev_store
28931 };
28932@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28933 edac_printk(KERN_CRIT, EDAC_PCI,
28934 "Signaled System Error on %s\n",
28935 pci_name(dev));
28936- atomic_inc(&pci_nonparity_count);
28937+ atomic_inc_unchecked(&pci_nonparity_count);
28938 }
28939
28940 if (status & (PCI_STATUS_PARITY)) {
28941@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28942 "Master Data Parity Error on %s\n",
28943 pci_name(dev));
28944
28945- atomic_inc(&pci_parity_count);
28946+ atomic_inc_unchecked(&pci_parity_count);
28947 }
28948
28949 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28950@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28951 "Detected Parity Error on %s\n",
28952 pci_name(dev));
28953
28954- atomic_inc(&pci_parity_count);
28955+ atomic_inc_unchecked(&pci_parity_count);
28956 }
28957 }
28958
28959@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28960 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28961 "Signaled System Error on %s\n",
28962 pci_name(dev));
28963- atomic_inc(&pci_nonparity_count);
28964+ atomic_inc_unchecked(&pci_nonparity_count);
28965 }
28966
28967 if (status & (PCI_STATUS_PARITY)) {
28968@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28969 "Master Data Parity Error on "
28970 "%s\n", pci_name(dev));
28971
28972- atomic_inc(&pci_parity_count);
28973+ atomic_inc_unchecked(&pci_parity_count);
28974 }
28975
28976 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28977@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28978 "Detected Parity Error on %s\n",
28979 pci_name(dev));
28980
28981- atomic_inc(&pci_parity_count);
28982+ atomic_inc_unchecked(&pci_parity_count);
28983 }
28984 }
28985 }
28986@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28987 if (!check_pci_errors)
28988 return;
28989
28990- before_count = atomic_read(&pci_parity_count);
28991+ before_count = atomic_read_unchecked(&pci_parity_count);
28992
28993 /* scan all PCI devices looking for a Parity Error on devices and
28994 * bridges.
28995@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28996 /* Only if operator has selected panic on PCI Error */
28997 if (edac_pci_get_panic_on_pe()) {
28998 /* If the count is different 'after' from 'before' */
28999- if (before_count != atomic_read(&pci_parity_count))
29000+ if (before_count != atomic_read_unchecked(&pci_parity_count))
29001 panic("EDAC: PCI Parity Error");
29002 }
29003 }
29004diff -urNp linux-2.6.32.45/drivers/firewire/core-card.c linux-2.6.32.45/drivers/firewire/core-card.c
29005--- linux-2.6.32.45/drivers/firewire/core-card.c 2011-03-27 14:31:47.000000000 -0400
29006+++ linux-2.6.32.45/drivers/firewire/core-card.c 2011-08-23 21:22:32.000000000 -0400
29007@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
29008
29009 void fw_core_remove_card(struct fw_card *card)
29010 {
29011- struct fw_card_driver dummy_driver = dummy_driver_template;
29012+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
29013
29014 card->driver->update_phy_reg(card, 4,
29015 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29016diff -urNp linux-2.6.32.45/drivers/firewire/core-cdev.c linux-2.6.32.45/drivers/firewire/core-cdev.c
29017--- linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
29018+++ linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
29019@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
29020 int ret;
29021
29022 if ((request->channels == 0 && request->bandwidth == 0) ||
29023- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29024- request->bandwidth < 0)
29025+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29026 return -EINVAL;
29027
29028 r = kmalloc(sizeof(*r), GFP_KERNEL);
29029diff -urNp linux-2.6.32.45/drivers/firewire/core.h linux-2.6.32.45/drivers/firewire/core.h
29030--- linux-2.6.32.45/drivers/firewire/core.h 2011-03-27 14:31:47.000000000 -0400
29031+++ linux-2.6.32.45/drivers/firewire/core.h 2011-08-23 20:24:26.000000000 -0400
29032@@ -86,6 +86,7 @@ struct fw_card_driver {
29033
29034 int (*stop_iso)(struct fw_iso_context *ctx);
29035 };
29036+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29037
29038 void fw_card_initialize(struct fw_card *card,
29039 const struct fw_card_driver *driver, struct device *device);
29040diff -urNp linux-2.6.32.45/drivers/firewire/core-transaction.c linux-2.6.32.45/drivers/firewire/core-transaction.c
29041--- linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
29042+++ linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
29043@@ -36,6 +36,7 @@
29044 #include <linux/string.h>
29045 #include <linux/timer.h>
29046 #include <linux/types.h>
29047+#include <linux/sched.h>
29048
29049 #include <asm/byteorder.h>
29050
29051@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
29052 struct transaction_callback_data d;
29053 struct fw_transaction t;
29054
29055+ pax_track_stack();
29056+
29057 init_completion(&d.done);
29058 d.payload = payload;
29059 fw_send_request(card, &t, tcode, destination_id, generation, speed,
29060diff -urNp linux-2.6.32.45/drivers/firmware/dmi_scan.c linux-2.6.32.45/drivers/firmware/dmi_scan.c
29061--- linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
29062+++ linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
29063@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
29064 }
29065 }
29066 else {
29067- /*
29068- * no iounmap() for that ioremap(); it would be a no-op, but
29069- * it's so early in setup that sucker gets confused into doing
29070- * what it shouldn't if we actually call it.
29071- */
29072 p = dmi_ioremap(0xF0000, 0x10000);
29073 if (p == NULL)
29074 goto error;
29075diff -urNp linux-2.6.32.45/drivers/firmware/edd.c linux-2.6.32.45/drivers/firmware/edd.c
29076--- linux-2.6.32.45/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
29077+++ linux-2.6.32.45/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
29078@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
29079 return ret;
29080 }
29081
29082-static struct sysfs_ops edd_attr_ops = {
29083+static const struct sysfs_ops edd_attr_ops = {
29084 .show = edd_attr_show,
29085 };
29086
29087diff -urNp linux-2.6.32.45/drivers/firmware/efivars.c linux-2.6.32.45/drivers/firmware/efivars.c
29088--- linux-2.6.32.45/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
29089+++ linux-2.6.32.45/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
29090@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
29091 return ret;
29092 }
29093
29094-static struct sysfs_ops efivar_attr_ops = {
29095+static const struct sysfs_ops efivar_attr_ops = {
29096 .show = efivar_attr_show,
29097 .store = efivar_attr_store,
29098 };
29099diff -urNp linux-2.6.32.45/drivers/firmware/iscsi_ibft.c linux-2.6.32.45/drivers/firmware/iscsi_ibft.c
29100--- linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
29101+++ linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
29102@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
29103 return ret;
29104 }
29105
29106-static struct sysfs_ops ibft_attr_ops = {
29107+static const struct sysfs_ops ibft_attr_ops = {
29108 .show = ibft_show_attribute,
29109 };
29110
29111diff -urNp linux-2.6.32.45/drivers/firmware/memmap.c linux-2.6.32.45/drivers/firmware/memmap.c
29112--- linux-2.6.32.45/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
29113+++ linux-2.6.32.45/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
29114@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
29115 NULL
29116 };
29117
29118-static struct sysfs_ops memmap_attr_ops = {
29119+static const struct sysfs_ops memmap_attr_ops = {
29120 .show = memmap_attr_show,
29121 };
29122
29123diff -urNp linux-2.6.32.45/drivers/gpio/vr41xx_giu.c linux-2.6.32.45/drivers/gpio/vr41xx_giu.c
29124--- linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
29125+++ linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
29126@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29127 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29128 maskl, pendl, maskh, pendh);
29129
29130- atomic_inc(&irq_err_count);
29131+ atomic_inc_unchecked(&irq_err_count);
29132
29133 return -EINVAL;
29134 }
29135diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c
29136--- linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
29137+++ linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
29138@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
29139 struct drm_crtc *tmp;
29140 int crtc_mask = 1;
29141
29142- WARN(!crtc, "checking null crtc?");
29143+ BUG_ON(!crtc);
29144
29145 dev = crtc->dev;
29146
29147@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
29148
29149 adjusted_mode = drm_mode_duplicate(dev, mode);
29150
29151+ pax_track_stack();
29152+
29153 crtc->enabled = drm_helper_crtc_in_use(crtc);
29154
29155 if (!crtc->enabled)
29156diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_drv.c linux-2.6.32.45/drivers/gpu/drm/drm_drv.c
29157--- linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
29158+++ linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
29159@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
29160 char *kdata = NULL;
29161
29162 atomic_inc(&dev->ioctl_count);
29163- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29164+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29165 ++file_priv->ioctl_count;
29166
29167 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29168diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_fops.c linux-2.6.32.45/drivers/gpu/drm/drm_fops.c
29169--- linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
29170+++ linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
29171@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
29172 }
29173
29174 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29175- atomic_set(&dev->counts[i], 0);
29176+ atomic_set_unchecked(&dev->counts[i], 0);
29177
29178 dev->sigdata.lock = NULL;
29179
29180@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
29181
29182 retcode = drm_open_helper(inode, filp, dev);
29183 if (!retcode) {
29184- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29185+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29186 spin_lock(&dev->count_lock);
29187- if (!dev->open_count++) {
29188+ if (local_inc_return(&dev->open_count) == 1) {
29189 spin_unlock(&dev->count_lock);
29190 retcode = drm_setup(dev);
29191 goto out;
29192@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
29193
29194 lock_kernel();
29195
29196- DRM_DEBUG("open_count = %d\n", dev->open_count);
29197+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
29198
29199 if (dev->driver->preclose)
29200 dev->driver->preclose(dev, file_priv);
29201@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
29202 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29203 task_pid_nr(current),
29204 (long)old_encode_dev(file_priv->minor->device),
29205- dev->open_count);
29206+ local_read(&dev->open_count));
29207
29208 /* if the master has gone away we can't do anything with the lock */
29209 if (file_priv->minor->master)
29210@@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
29211 * End inline drm_release
29212 */
29213
29214- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29215+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29216 spin_lock(&dev->count_lock);
29217- if (!--dev->open_count) {
29218+ if (local_dec_and_test(&dev->open_count)) {
29219 if (atomic_read(&dev->ioctl_count)) {
29220 DRM_ERROR("Device busy: %d\n",
29221 atomic_read(&dev->ioctl_count));
29222diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_gem.c linux-2.6.32.45/drivers/gpu/drm/drm_gem.c
29223--- linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
29224+++ linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
29225@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
29226 spin_lock_init(&dev->object_name_lock);
29227 idr_init(&dev->object_name_idr);
29228 atomic_set(&dev->object_count, 0);
29229- atomic_set(&dev->object_memory, 0);
29230+ atomic_set_unchecked(&dev->object_memory, 0);
29231 atomic_set(&dev->pin_count, 0);
29232- atomic_set(&dev->pin_memory, 0);
29233+ atomic_set_unchecked(&dev->pin_memory, 0);
29234 atomic_set(&dev->gtt_count, 0);
29235- atomic_set(&dev->gtt_memory, 0);
29236+ atomic_set_unchecked(&dev->gtt_memory, 0);
29237
29238 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
29239 if (!mm) {
29240@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
29241 goto fput;
29242 }
29243 atomic_inc(&dev->object_count);
29244- atomic_add(obj->size, &dev->object_memory);
29245+ atomic_add_unchecked(obj->size, &dev->object_memory);
29246 return obj;
29247 fput:
29248 fput(obj->filp);
29249@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
29250
29251 fput(obj->filp);
29252 atomic_dec(&dev->object_count);
29253- atomic_sub(obj->size, &dev->object_memory);
29254+ atomic_sub_unchecked(obj->size, &dev->object_memory);
29255 kfree(obj);
29256 }
29257 EXPORT_SYMBOL(drm_gem_object_free);
29258diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_info.c linux-2.6.32.45/drivers/gpu/drm/drm_info.c
29259--- linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
29260+++ linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
29261@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
29262 struct drm_local_map *map;
29263 struct drm_map_list *r_list;
29264
29265- /* Hardcoded from _DRM_FRAME_BUFFER,
29266- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29267- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29268- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29269+ static const char * const types[] = {
29270+ [_DRM_FRAME_BUFFER] = "FB",
29271+ [_DRM_REGISTERS] = "REG",
29272+ [_DRM_SHM] = "SHM",
29273+ [_DRM_AGP] = "AGP",
29274+ [_DRM_SCATTER_GATHER] = "SG",
29275+ [_DRM_CONSISTENT] = "PCI",
29276+ [_DRM_GEM] = "GEM" };
29277 const char *type;
29278 int i;
29279
29280@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
29281 map = r_list->map;
29282 if (!map)
29283 continue;
29284- if (map->type < 0 || map->type > 5)
29285+ if (map->type >= ARRAY_SIZE(types))
29286 type = "??";
29287 else
29288 type = types[map->type];
29289@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
29290 struct drm_device *dev = node->minor->dev;
29291
29292 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
29293- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
29294+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
29295 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
29296- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
29297- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
29298+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
29299+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
29300 seq_printf(m, "%d gtt total\n", dev->gtt_total);
29301 return 0;
29302 }
29303@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
29304 mutex_lock(&dev->struct_mutex);
29305 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
29306 atomic_read(&dev->vma_count),
29307+#ifdef CONFIG_GRKERNSEC_HIDESYM
29308+ NULL, 0);
29309+#else
29310 high_memory, (u64)virt_to_phys(high_memory));
29311+#endif
29312
29313 list_for_each_entry(pt, &dev->vmalist, head) {
29314 vma = pt->vma;
29315@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
29316 continue;
29317 seq_printf(m,
29318 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
29319- pt->pid, vma->vm_start, vma->vm_end,
29320+ pt->pid,
29321+#ifdef CONFIG_GRKERNSEC_HIDESYM
29322+ 0, 0,
29323+#else
29324+ vma->vm_start, vma->vm_end,
29325+#endif
29326 vma->vm_flags & VM_READ ? 'r' : '-',
29327 vma->vm_flags & VM_WRITE ? 'w' : '-',
29328 vma->vm_flags & VM_EXEC ? 'x' : '-',
29329 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29330 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29331 vma->vm_flags & VM_IO ? 'i' : '-',
29332+#ifdef CONFIG_GRKERNSEC_HIDESYM
29333+ 0);
29334+#else
29335 vma->vm_pgoff);
29336+#endif
29337
29338 #if defined(__i386__)
29339 pgprot = pgprot_val(vma->vm_page_prot);
29340diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c
29341--- linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
29342+++ linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
29343@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
29344 stats->data[i].value =
29345 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29346 else
29347- stats->data[i].value = atomic_read(&dev->counts[i]);
29348+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29349 stats->data[i].type = dev->types[i];
29350 }
29351
29352diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_lock.c linux-2.6.32.45/drivers/gpu/drm/drm_lock.c
29353--- linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
29354+++ linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
29355@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
29356 if (drm_lock_take(&master->lock, lock->context)) {
29357 master->lock.file_priv = file_priv;
29358 master->lock.lock_time = jiffies;
29359- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29360+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29361 break; /* Got lock */
29362 }
29363
29364@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
29365 return -EINVAL;
29366 }
29367
29368- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29369+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29370
29371 /* kernel_context_switch isn't used by any of the x86 drm
29372 * modules but is required by the Sparc driver.
29373diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c
29374--- linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
29375+++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
29376@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
29377 dma->buflist[vertex->idx],
29378 vertex->discard, vertex->used);
29379
29380- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29381- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29382+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29383+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29384 sarea_priv->last_enqueue = dev_priv->counter - 1;
29385 sarea_priv->last_dispatch = (int)hw_status[5];
29386
29387@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
29388 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29389 mc->last_render);
29390
29391- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29392- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29393+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29394+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29395 sarea_priv->last_enqueue = dev_priv->counter - 1;
29396 sarea_priv->last_dispatch = (int)hw_status[5];
29397
29398diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h
29399--- linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
29400+++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
29401@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29402 int page_flipping;
29403
29404 wait_queue_head_t irq_queue;
29405- atomic_t irq_received;
29406- atomic_t irq_emitted;
29407+ atomic_unchecked_t irq_received;
29408+ atomic_unchecked_t irq_emitted;
29409
29410 int front_offset;
29411 } drm_i810_private_t;
29412diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h
29413--- linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
29414+++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
29415@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
29416 int page_flipping;
29417
29418 wait_queue_head_t irq_queue;
29419- atomic_t irq_received;
29420- atomic_t irq_emitted;
29421+ atomic_unchecked_t irq_received;
29422+ atomic_unchecked_t irq_emitted;
29423
29424 int use_mi_batchbuffer_start;
29425
29426diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c
29427--- linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
29428+++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
29429@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
29430
29431 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
29432
29433- atomic_inc(&dev_priv->irq_received);
29434+ atomic_inc_unchecked(&dev_priv->irq_received);
29435 wake_up_interruptible(&dev_priv->irq_queue);
29436
29437 return IRQ_HANDLED;
29438@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
29439
29440 DRM_DEBUG("%s\n", __func__);
29441
29442- atomic_inc(&dev_priv->irq_emitted);
29443+ atomic_inc_unchecked(&dev_priv->irq_emitted);
29444
29445 BEGIN_LP_RING(2);
29446 OUT_RING(0);
29447 OUT_RING(GFX_OP_USER_INTERRUPT);
29448 ADVANCE_LP_RING();
29449
29450- return atomic_read(&dev_priv->irq_emitted);
29451+ return atomic_read_unchecked(&dev_priv->irq_emitted);
29452 }
29453
29454 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
29455@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
29456
29457 DRM_DEBUG("%s\n", __func__);
29458
29459- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29460+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29461 return 0;
29462
29463 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
29464@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
29465
29466 for (;;) {
29467 __set_current_state(TASK_INTERRUPTIBLE);
29468- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29469+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29470 break;
29471 if ((signed)(end - jiffies) <= 0) {
29472 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
29473@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
29474 I830_WRITE16(I830REG_HWSTAM, 0xffff);
29475 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
29476 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
29477- atomic_set(&dev_priv->irq_received, 0);
29478- atomic_set(&dev_priv->irq_emitted, 0);
29479+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29480+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
29481 init_waitqueue_head(&dev_priv->irq_queue);
29482 }
29483
29484diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c
29485--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
29486+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
29487@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
29488 }
29489 }
29490
29491-struct intel_dvo_dev_ops ch7017_ops = {
29492+const struct intel_dvo_dev_ops ch7017_ops = {
29493 .init = ch7017_init,
29494 .detect = ch7017_detect,
29495 .mode_valid = ch7017_mode_valid,
29496diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c
29497--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
29498+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
29499@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
29500 }
29501 }
29502
29503-struct intel_dvo_dev_ops ch7xxx_ops = {
29504+const struct intel_dvo_dev_ops ch7xxx_ops = {
29505 .init = ch7xxx_init,
29506 .detect = ch7xxx_detect,
29507 .mode_valid = ch7xxx_mode_valid,
29508diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h
29509--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
29510+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
29511@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
29512 *
29513 * \return singly-linked list of modes or NULL if no modes found.
29514 */
29515- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
29516+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
29517
29518 /**
29519 * Clean up driver-specific bits of the output
29520 */
29521- void (*destroy) (struct intel_dvo_device *dvo);
29522+ void (* const destroy) (struct intel_dvo_device *dvo);
29523
29524 /**
29525 * Debugging hook to dump device registers to log file
29526 */
29527- void (*dump_regs)(struct intel_dvo_device *dvo);
29528+ void (* const dump_regs)(struct intel_dvo_device *dvo);
29529 };
29530
29531-extern struct intel_dvo_dev_ops sil164_ops;
29532-extern struct intel_dvo_dev_ops ch7xxx_ops;
29533-extern struct intel_dvo_dev_ops ivch_ops;
29534-extern struct intel_dvo_dev_ops tfp410_ops;
29535-extern struct intel_dvo_dev_ops ch7017_ops;
29536+extern const struct intel_dvo_dev_ops sil164_ops;
29537+extern const struct intel_dvo_dev_ops ch7xxx_ops;
29538+extern const struct intel_dvo_dev_ops ivch_ops;
29539+extern const struct intel_dvo_dev_ops tfp410_ops;
29540+extern const struct intel_dvo_dev_ops ch7017_ops;
29541
29542 #endif /* _INTEL_DVO_H */
29543diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c
29544--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
29545+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
29546@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
29547 }
29548 }
29549
29550-struct intel_dvo_dev_ops ivch_ops= {
29551+const struct intel_dvo_dev_ops ivch_ops= {
29552 .init = ivch_init,
29553 .dpms = ivch_dpms,
29554 .save = ivch_save,
29555diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c
29556--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
29557+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
29558@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
29559 }
29560 }
29561
29562-struct intel_dvo_dev_ops sil164_ops = {
29563+const struct intel_dvo_dev_ops sil164_ops = {
29564 .init = sil164_init,
29565 .detect = sil164_detect,
29566 .mode_valid = sil164_mode_valid,
29567diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c
29568--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
29569+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
29570@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
29571 }
29572 }
29573
29574-struct intel_dvo_dev_ops tfp410_ops = {
29575+const struct intel_dvo_dev_ops tfp410_ops = {
29576 .init = tfp410_init,
29577 .detect = tfp410_detect,
29578 .mode_valid = tfp410_mode_valid,
29579diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c
29580--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
29581+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
29582@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
29583 I915_READ(GTIMR));
29584 }
29585 seq_printf(m, "Interrupts received: %d\n",
29586- atomic_read(&dev_priv->irq_received));
29587+ atomic_read_unchecked(&dev_priv->irq_received));
29588 if (dev_priv->hw_status_page != NULL) {
29589 seq_printf(m, "Current sequence: %d\n",
29590 i915_get_gem_seqno(dev));
29591diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c
29592--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
29593+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
29594@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
29595 return i915_resume(dev);
29596 }
29597
29598-static struct vm_operations_struct i915_gem_vm_ops = {
29599+static const struct vm_operations_struct i915_gem_vm_ops = {
29600 .fault = i915_gem_fault,
29601 .open = drm_gem_vm_open,
29602 .close = drm_gem_vm_close,
29603diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h
29604--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
29605+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:33:55.000000000 -0400
29606@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
29607 /* display clock increase/decrease */
29608 /* pll clock increase/decrease */
29609 /* clock gating init */
29610-};
29611+} __no_const;
29612
29613 typedef struct drm_i915_private {
29614 struct drm_device *dev;
29615@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
29616 int page_flipping;
29617
29618 wait_queue_head_t irq_queue;
29619- atomic_t irq_received;
29620+ atomic_unchecked_t irq_received;
29621 /** Protects user_irq_refcount and irq_mask_reg */
29622 spinlock_t user_irq_lock;
29623 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
29624diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c
29625--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
29626+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
29627@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
29628
29629 args->aper_size = dev->gtt_total;
29630 args->aper_available_size = (args->aper_size -
29631- atomic_read(&dev->pin_memory));
29632+ atomic_read_unchecked(&dev->pin_memory));
29633
29634 return 0;
29635 }
29636@@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
29637 return -EINVAL;
29638 }
29639
29640+ if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29641+ drm_gem_object_unreference(obj);
29642+ return -EFAULT;
29643+ }
29644+
29645 if (i915_gem_object_needs_bit17_swizzle(obj)) {
29646 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
29647 } else {
29648@@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
29649 return -EINVAL;
29650 }
29651
29652+ if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29653+ drm_gem_object_unreference(obj);
29654+ return -EFAULT;
29655+ }
29656+
29657 /* We can only do the GTT pwrite on untiled buffers, as otherwise
29658 * it would end up going through the fenced access, and we'll get
29659 * different detiling behavior between reading and writing.
29660@@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
29661
29662 if (obj_priv->gtt_space) {
29663 atomic_dec(&dev->gtt_count);
29664- atomic_sub(obj->size, &dev->gtt_memory);
29665+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
29666
29667 drm_mm_put_block(obj_priv->gtt_space);
29668 obj_priv->gtt_space = NULL;
29669@@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
29670 goto search_free;
29671 }
29672 atomic_inc(&dev->gtt_count);
29673- atomic_add(obj->size, &dev->gtt_memory);
29674+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
29675
29676 /* Assert that the object is not currently in any GPU domain. As it
29677 * wasn't in the GTT, there shouldn't be any way it could have been in
29678@@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
29679 "%d/%d gtt bytes\n",
29680 atomic_read(&dev->object_count),
29681 atomic_read(&dev->pin_count),
29682- atomic_read(&dev->object_memory),
29683- atomic_read(&dev->pin_memory),
29684- atomic_read(&dev->gtt_memory),
29685+ atomic_read_unchecked(&dev->object_memory),
29686+ atomic_read_unchecked(&dev->pin_memory),
29687+ atomic_read_unchecked(&dev->gtt_memory),
29688 dev->gtt_total);
29689 }
29690 goto err;
29691@@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
29692 */
29693 if (obj_priv->pin_count == 1) {
29694 atomic_inc(&dev->pin_count);
29695- atomic_add(obj->size, &dev->pin_memory);
29696+ atomic_add_unchecked(obj->size, &dev->pin_memory);
29697 if (!obj_priv->active &&
29698 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
29699 !list_empty(&obj_priv->list))
29700@@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
29701 list_move_tail(&obj_priv->list,
29702 &dev_priv->mm.inactive_list);
29703 atomic_dec(&dev->pin_count);
29704- atomic_sub(obj->size, &dev->pin_memory);
29705+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
29706 }
29707 i915_verify_inactive(dev, __FILE__, __LINE__);
29708 }
29709diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c
29710--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
29711+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
29712@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
29713 int irq_received;
29714 int ret = IRQ_NONE;
29715
29716- atomic_inc(&dev_priv->irq_received);
29717+ atomic_inc_unchecked(&dev_priv->irq_received);
29718
29719 if (IS_IGDNG(dev))
29720 return igdng_irq_handler(dev);
29721@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29722 {
29723 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29724
29725- atomic_set(&dev_priv->irq_received, 0);
29726+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29727
29728 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29729 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29730diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c
29731--- linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-03-27 14:31:47.000000000 -0400
29732+++ linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-08-05 20:33:55.000000000 -0400
29733@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *
29734 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
29735
29736 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
29737- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29738+ pax_open_kernel();
29739+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29740+ pax_close_kernel();
29741
29742 /* Read the regs to test if we can talk to the device */
29743 for (i = 0; i < 0x40; i++) {
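
intel_sdvo_i2c_bit_algo is a structure made up of function pointers, which PaX constification places in read-only memory, so the one-time assignment of its .functionality hook has to be bracketed with pax_open_kernel()/pax_close_kernel(), which briefly permit the write. A rough userspace analogue using mprotect() is sketched below; the helper names and the 4096-byte page size are assumptions for illustration, not the PaX implementation.

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    /* Userspace analogue of pax_open_kernel()/pax_close_kernel(): data that is
     * normally read-only becomes writable just long enough for a one-time
     * fixup, then is locked again. */
    static char *ro_page;

    static void open_window(void)  { mprotect(ro_page, 4096, PROT_READ | PROT_WRITE); }
    static void close_window(void) { mprotect(ro_page, 4096, PROT_READ); }

    int main(void)
    {
        ro_page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ro_page == MAP_FAILED)
            return 1;

        strcpy(ro_page, "default hook");
        mprotect(ro_page, 4096, PROT_READ);   /* "constified" from here on */

        open_window();                        /* pax_open_kernel()  */
        strcpy(ro_page, "runtime hook");      /* the one-off assignment */
        close_window();                       /* pax_close_kernel() */

        puts(ro_page);
        return 0;
    }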
29744diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h
29745--- linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29746+++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29747@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29748 u32 clear_cmd;
29749 u32 maccess;
29750
29751- atomic_t vbl_received; /**< Number of vblanks received. */
29752+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29753 wait_queue_head_t fence_queue;
29754- atomic_t last_fence_retired;
29755+ atomic_unchecked_t last_fence_retired;
29756 u32 next_fence_to_post;
29757
29758 unsigned int fb_cpp;
29759diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c
29760--- linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29761+++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29762@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29763 if (crtc != 0)
29764 return 0;
29765
29766- return atomic_read(&dev_priv->vbl_received);
29767+ return atomic_read_unchecked(&dev_priv->vbl_received);
29768 }
29769
29770
29771@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29772 /* VBLANK interrupt */
29773 if (status & MGA_VLINEPEN) {
29774 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29775- atomic_inc(&dev_priv->vbl_received);
29776+ atomic_inc_unchecked(&dev_priv->vbl_received);
29777 drm_handle_vblank(dev, 0);
29778 handled = 1;
29779 }
29780@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29781 MGA_WRITE(MGA_PRIMEND, prim_end);
29782 }
29783
29784- atomic_inc(&dev_priv->last_fence_retired);
29785+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
29786 DRM_WAKEUP(&dev_priv->fence_queue);
29787 handled = 1;
29788 }
29789@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29790 * using fences.
29791 */
29792 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29793- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29794+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29795 - *sequence) <= (1 << 23)));
29796
29797 *sequence = cur_fence;
29798diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c
29799--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29800+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29801@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29802
29803 /* GH: Simple idle check.
29804 */
29805- atomic_set(&dev_priv->idle_count, 0);
29806+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29807
29808 /* We don't support anything other than bus-mastering ring mode,
29809 * but the ring can be in either AGP or PCI space for the ring
29810diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h
29811--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29812+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29813@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29814 int is_pci;
29815 unsigned long cce_buffers_offset;
29816
29817- atomic_t idle_count;
29818+ atomic_unchecked_t idle_count;
29819
29820 int page_flipping;
29821 int current_page;
29822 u32 crtc_offset;
29823 u32 crtc_offset_cntl;
29824
29825- atomic_t vbl_received;
29826+ atomic_unchecked_t vbl_received;
29827
29828 u32 color_fmt;
29829 unsigned int front_offset;
29830diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c
29831--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29832+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29833@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29834 if (crtc != 0)
29835 return 0;
29836
29837- return atomic_read(&dev_priv->vbl_received);
29838+ return atomic_read_unchecked(&dev_priv->vbl_received);
29839 }
29840
29841 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29842@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29843 /* VBLANK interrupt */
29844 if (status & R128_CRTC_VBLANK_INT) {
29845 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29846- atomic_inc(&dev_priv->vbl_received);
29847+ atomic_inc_unchecked(&dev_priv->vbl_received);
29848 drm_handle_vblank(dev, 0);
29849 return IRQ_HANDLED;
29850 }
29851diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c
29852--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29853+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29854@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29855
29856 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29857 {
29858- if (atomic_read(&dev_priv->idle_count) == 0) {
29859+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29860 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29861 } else {
29862- atomic_set(&dev_priv->idle_count, 0);
29863+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29864 }
29865 }
29866
29867diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c
29868--- linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
29869+++ linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
29870@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
29871 char name[512];
29872 int i;
29873
29874+ pax_track_stack();
29875+
29876 ctx->card = card;
29877 ctx->bios = bios;
29878
29879diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c
29880--- linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29881+++ linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29882@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29883 regex_t mask_rex;
29884 regmatch_t match[4];
29885 char buf[1024];
29886- size_t end;
29887+ long end;
29888 int len;
29889 int done = 0;
29890 int r;
29891 unsigned o;
29892 struct offset *offset;
29893 char last_reg_s[10];
29894- int last_reg;
29895+ unsigned long last_reg;
29896
29897 if (regcomp
29898 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29899diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c
29900--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29901+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29902@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29903 bool linkb;
29904 struct radeon_i2c_bus_rec ddc_bus;
29905
29906+ pax_track_stack();
29907+
29908 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29909
29910 if (data_offset == 0)
29911@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29912 }
29913 }
29914
29915-struct bios_connector {
29916+static struct bios_connector {
29917 bool valid;
29918 uint16_t line_mux;
29919 uint16_t devices;
29920 int connector_type;
29921 struct radeon_i2c_bus_rec ddc_bus;
29922-};
29923+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29924
29925 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29926 drm_device
29927@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29928 uint8_t dac;
29929 union atom_supported_devices *supported_devices;
29930 int i, j;
29931- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29932
29933 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29934
29935diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c
29936--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29937+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29938@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29939
29940 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29941 error = freq - current_freq;
29942- error = error < 0 ? 0xffffffff : error;
29943+ error = (int32_t)error < 0 ? 0xffffffff : error;
29944 } else
29945 error = abs(current_freq - freq);
29946 vco_diff = abs(vco - best_vco);
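
In radeon_compute_pll() the error variable is unsigned, so the original test `error < 0` can never fire after `freq - current_freq` wraps around; casting to int32_t restores the intended sign check. A short demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t freq = 100000, current_freq = 100500;
        uint32_t error = freq - current_freq;        /* wraps to a huge value */

        /* Original test: never true, an unsigned value is never below zero. */
        printf("error < 0          : %d\n", error < 0);

        /* Patched test: reinterpret the wrapped value as signed first. */
        printf("(int32_t)error < 0 : %d\n", (int32_t)error < 0);
        return 0;
    }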
29947diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h
29948--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29949+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29950@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29951
29952 /* SW interrupt */
29953 wait_queue_head_t swi_queue;
29954- atomic_t swi_emitted;
29955+ atomic_unchecked_t swi_emitted;
29956 int vblank_crtc;
29957 uint32_t irq_enable_reg;
29958 uint32_t r500_disp_irq_reg;
29959diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c
29960--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29961+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29962@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29963 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29964 return 0;
29965 }
29966- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29967+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29968 if (!rdev->cp.ready) {
29969 /* FIXME: cp is not running assume everythings is done right
29970 * away
29971@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29972 return r;
29973 }
29974 WREG32(rdev->fence_drv.scratch_reg, 0);
29975- atomic_set(&rdev->fence_drv.seq, 0);
29976+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29977 INIT_LIST_HEAD(&rdev->fence_drv.created);
29978 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29979 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29980diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h
29981--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29982+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:33:55.000000000 -0400
29983@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29984 */
29985 struct radeon_fence_driver {
29986 uint32_t scratch_reg;
29987- atomic_t seq;
29988+ atomic_unchecked_t seq;
29989 uint32_t last_seq;
29990 unsigned long count_timeout;
29991 wait_queue_head_t queue;
29992@@ -640,7 +640,7 @@ struct radeon_asic {
29993 uint32_t offset, uint32_t obj_size);
29994 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
29995 void (*bandwidth_update)(struct radeon_device *rdev);
29996-};
29997+} __no_const;
29998
29999 /*
30000 * Asic structures
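
struct radeon_asic consists entirely of function pointers, which PaX constification would normally force into read-only memory; because the driver fills these hooks in per ASIC at init time, the patch tags the struct __no_const instead. The sketch below shows the kind of runtime fixup that a fully const ops table could not take; the names are illustrative.

    #include <stdio.h>

    struct ops { void (*update)(void); };

    static void generic_update(void) { puts("generic"); }
    static void chip_update(void)    { puts("chip-specific"); }

    /* If this table were const (what constification does to structs holding
     * only function pointers), the assignment in main() would be rejected;
     * __no_const opts the radeon table out because it is rewritten per ASIC. */
    static struct ops asic_ops = { .update = generic_update };

    int main(void)
    {
        asic_ops.update();
        asic_ops.update = chip_update;   /* runtime fixup, as radeon does at init */
        asic_ops.update();
        return 0;
    }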
30001diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c
30002--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
30003+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
30004@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
30005 request = compat_alloc_user_space(sizeof(*request));
30006 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30007 || __put_user(req32.param, &request->param)
30008- || __put_user((void __user *)(unsigned long)req32.value,
30009+ || __put_user((unsigned long)req32.value,
30010 &request->value))
30011 return -EFAULT;
30012
30013diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c
30014--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
30015+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
30016@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
30017 unsigned int ret;
30018 RING_LOCALS;
30019
30020- atomic_inc(&dev_priv->swi_emitted);
30021- ret = atomic_read(&dev_priv->swi_emitted);
30022+ atomic_inc_unchecked(&dev_priv->swi_emitted);
30023+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30024
30025 BEGIN_RING(4);
30026 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30027@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
30028 drm_radeon_private_t *dev_priv =
30029 (drm_radeon_private_t *) dev->dev_private;
30030
30031- atomic_set(&dev_priv->swi_emitted, 0);
30032+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30033 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30034
30035 dev->max_vblank_count = 0x001fffff;
30036diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c
30037--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
30038+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
30039@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
30040 {
30041 drm_radeon_private_t *dev_priv = dev->dev_private;
30042 drm_radeon_getparam_t *param = data;
30043- int value;
30044+ int value = 0;
30045
30046 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30047
30048diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c
30049--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
30050+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
30051@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
30052 DRM_INFO("radeon: ttm finalized\n");
30053 }
30054
30055-static struct vm_operations_struct radeon_ttm_vm_ops;
30056-static const struct vm_operations_struct *ttm_vm_ops = NULL;
30057-
30058-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
30059-{
30060- struct ttm_buffer_object *bo;
30061- int r;
30062-
30063- bo = (struct ttm_buffer_object *)vma->vm_private_data;
30064- if (bo == NULL) {
30065- return VM_FAULT_NOPAGE;
30066- }
30067- r = ttm_vm_ops->fault(vma, vmf);
30068- return r;
30069-}
30070-
30071 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30072 {
30073 struct drm_file *file_priv;
30074 struct radeon_device *rdev;
30075- int r;
30076
30077 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
30078 return drm_mmap(filp, vma);
30079@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
30080
30081 file_priv = (struct drm_file *)filp->private_data;
30082 rdev = file_priv->minor->dev->dev_private;
30083- if (rdev == NULL) {
30084+ if (!rdev)
30085 return -EINVAL;
30086- }
30087- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30088- if (unlikely(r != 0)) {
30089- return r;
30090- }
30091- if (unlikely(ttm_vm_ops == NULL)) {
30092- ttm_vm_ops = vma->vm_ops;
30093- radeon_ttm_vm_ops = *ttm_vm_ops;
30094- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30095- }
30096- vma->vm_ops = &radeon_ttm_vm_ops;
30097- return 0;
30098+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30099 }
30100
30101
30102diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c
30103--- linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
30104+++ linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
30105@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
30106 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30107 rdev->pm.sideport_bandwidth.full)
30108 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30109- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
30110+ read_delay_latency.full = rfixed_const(800 * 1000);
30111 read_delay_latency.full = rfixed_div(read_delay_latency,
30112 rdev->pm.igp_sideport_mclk);
30113+ a.full = rfixed_const(370);
30114+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
30115 } else {
30116 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30117 rdev->pm.k8_bandwidth.full)
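
The rs690.c change avoids a fixed-point overflow: rfixed_const() in this kernel builds a 20.12 value (roughly value << 12 stored in 32 bits), and 370 * 800 * 1000 = 296,000,000 no longer fits once shifted, so the patch converts only 800 * 1000, divides by the sideport memory clock, and multiplies by 370 afterwards in fixed point. A quick arithmetic check (the shift-by-12 encoding is an assumption about rfixed_const):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed encoding: rfixed_const(v) ~= (uint32_t)(v << 12), i.e. 20.12. */
    static uint32_t fixed_const(uint32_t v) { return v << 12; }

    int main(void)
    {
        uint64_t old_constant = (uint64_t)(370 * 800 * 1000) << 12;

        printf("370*800*1000 shifted: %llu (overflows 32 bits: %s)\n",
               (unsigned long long)old_constant,
               old_constant > UINT32_MAX ? "yes" : "no");

        /* The patched code converts only 800*1000, which still fits ... */
        printf("800*1000 in 20.12 form: %u\n", fixed_const(800 * 1000));
        /* ... then divides by the memory clock and multiplies by 370 in
         * fixed point, keeping intermediate values in range. */
        return 0;
    }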
30118diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c
30119--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
30120+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
30121@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
30122 NULL
30123 };
30124
30125-static struct sysfs_ops ttm_bo_global_ops = {
30126+static const struct sysfs_ops ttm_bo_global_ops = {
30127 .show = &ttm_bo_global_show
30128 };
30129
30130diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c
30131--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
30132+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
30133@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
30134 {
30135 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
30136 vma->vm_private_data;
30137- struct ttm_bo_device *bdev = bo->bdev;
30138+ struct ttm_bo_device *bdev;
30139 unsigned long bus_base;
30140 unsigned long bus_offset;
30141 unsigned long bus_size;
30142@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
30143 unsigned long address = (unsigned long)vmf->virtual_address;
30144 int retval = VM_FAULT_NOPAGE;
30145
30146+ if (!bo)
30147+ return VM_FAULT_NOPAGE;
30148+ bdev = bo->bdev;
30149+
30150 /*
30151 * Work around locking order reversal in fault / nopfn
30152 * between mmap_sem and bo_reserve: Perform a trylock operation
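
Taken together, the radeon_ttm.c and ttm_bo_vm.c hunks move the NULL check on vma->vm_private_data out of radeon's private fault wrapper and into ttm_bo_vm_fault() itself, which lets the wrapper, the copied vm_operations_struct and the extra indirection be deleted. A compact model of pushing the guard into the shared callee; bo and core_fault() are illustrative names:

    #include <stdio.h>

    struct bo { int id; };

    /* The NULL guard now lives in the shared fault handler, so a per-driver
     * wrapper that existed only to add it can be removed. */
    static int core_fault(struct bo *bo)
    {
        if (!bo)
            return -1;                 /* VM_FAULT_NOPAGE in the real handler */
        printf("fault handled for bo %d\n", bo->id);
        return 0;
    }

    int main(void)
    {
        struct bo b = { 42 };

        core_fault(&b);                /* normal path  */
        core_fault(NULL);              /* guarded path */
        return 0;
    }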
30153diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c
30154--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
30155+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
30156@@ -36,7 +36,7 @@
30157 struct ttm_global_item {
30158 struct mutex mutex;
30159 void *object;
30160- int refcount;
30161+ atomic_t refcount;
30162 };
30163
30164 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
30165@@ -49,7 +49,7 @@ void ttm_global_init(void)
30166 struct ttm_global_item *item = &glob[i];
30167 mutex_init(&item->mutex);
30168 item->object = NULL;
30169- item->refcount = 0;
30170+ atomic_set(&item->refcount, 0);
30171 }
30172 }
30173
30174@@ -59,7 +59,7 @@ void ttm_global_release(void)
30175 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
30176 struct ttm_global_item *item = &glob[i];
30177 BUG_ON(item->object != NULL);
30178- BUG_ON(item->refcount != 0);
30179+ BUG_ON(atomic_read(&item->refcount) != 0);
30180 }
30181 }
30182
30183@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
30184 void *object;
30185
30186 mutex_lock(&item->mutex);
30187- if (item->refcount == 0) {
30188+ if (atomic_read(&item->refcount) == 0) {
30189 item->object = kzalloc(ref->size, GFP_KERNEL);
30190 if (unlikely(item->object == NULL)) {
30191 ret = -ENOMEM;
30192@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
30193 goto out_err;
30194
30195 }
30196- ++item->refcount;
30197+ atomic_inc(&item->refcount);
30198 ref->object = item->object;
30199 object = item->object;
30200 mutex_unlock(&item->mutex);
30201@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
30202 struct ttm_global_item *item = &glob[ref->global_type];
30203
30204 mutex_lock(&item->mutex);
30205- BUG_ON(item->refcount == 0);
30206+ BUG_ON(atomic_read(&item->refcount) == 0);
30207 BUG_ON(ref->object != item->object);
30208- if (--item->refcount == 0) {
30209+ if (atomic_dec_and_test(&item->refcount)) {
30210 ref->release(ref);
30211 item->object = NULL;
30212 }
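
The ttm_global hunks replace a plain int refcount, previously only touched under item->mutex, with an atomic_t and an atomic_dec_and_test() on the release path, presumably so the PaX REFCOUNT overflow protection also covers it. A C11 sketch of the same shape, using stdatomic rather than the kernel API, with kzalloc() and ref->release() reduced to pointer assignments:

    #include <stdatomic.h>
    #include <stdio.h>

    struct item {
        atomic_int refcount;
        const char *object;
    };

    static void item_ref(struct item *it)
    {
        if (atomic_fetch_add(&it->refcount, 1) == 0)
            it->object = "allocated";             /* first user allocates */
    }

    static void item_unref(struct item *it)
    {
        /* atomic_dec_and_test() in the kernel: true when the count hits 0 */
        if (atomic_fetch_sub(&it->refcount, 1) == 1)
            it->object = NULL;                    /* last user releases */
    }

    int main(void)
    {
        struct item it = { .refcount = 0, .object = NULL };

        item_ref(&it);
        printf("after ref:   %s\n", it.object ? it.object : "(null)");
        item_unref(&it);
        printf("after unref: %s\n", it.object ? it.object : "(null)");
        return 0;
    }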
30213diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c
30214--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
30215+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
30216@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
30217 NULL
30218 };
30219
30220-static struct sysfs_ops ttm_mem_zone_ops = {
30221+static const struct sysfs_ops ttm_mem_zone_ops = {
30222 .show = &ttm_mem_zone_show,
30223 .store = &ttm_mem_zone_store
30224 };
30225diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h
30226--- linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
30227+++ linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
30228@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30229 typedef uint32_t maskarray_t[5];
30230
30231 typedef struct drm_via_irq {
30232- atomic_t irq_received;
30233+ atomic_unchecked_t irq_received;
30234 uint32_t pending_mask;
30235 uint32_t enable_mask;
30236 wait_queue_head_t irq_queue;
30237@@ -75,7 +75,7 @@ typedef struct drm_via_private {
30238 struct timeval last_vblank;
30239 int last_vblank_valid;
30240 unsigned usec_per_vblank;
30241- atomic_t vbl_received;
30242+ atomic_unchecked_t vbl_received;
30243 drm_via_state_t hc_state;
30244 char pci_buf[VIA_PCI_BUF_SIZE];
30245 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30246diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c
30247--- linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
30248+++ linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
30249@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
30250 if (crtc != 0)
30251 return 0;
30252
30253- return atomic_read(&dev_priv->vbl_received);
30254+ return atomic_read_unchecked(&dev_priv->vbl_received);
30255 }
30256
30257 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30258@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
30259
30260 status = VIA_READ(VIA_REG_INTERRUPT);
30261 if (status & VIA_IRQ_VBLANK_PENDING) {
30262- atomic_inc(&dev_priv->vbl_received);
30263- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30264+ atomic_inc_unchecked(&dev_priv->vbl_received);
30265+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30266 do_gettimeofday(&cur_vblank);
30267 if (dev_priv->last_vblank_valid) {
30268 dev_priv->usec_per_vblank =
30269@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30270 dev_priv->last_vblank = cur_vblank;
30271 dev_priv->last_vblank_valid = 1;
30272 }
30273- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30274+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30275 DRM_DEBUG("US per vblank is: %u\n",
30276 dev_priv->usec_per_vblank);
30277 }
30278@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30279
30280 for (i = 0; i < dev_priv->num_irqs; ++i) {
30281 if (status & cur_irq->pending_mask) {
30282- atomic_inc(&cur_irq->irq_received);
30283+ atomic_inc_unchecked(&cur_irq->irq_received);
30284 DRM_WAKEUP(&cur_irq->irq_queue);
30285 handled = 1;
30286 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
30287@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
30288 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30289 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30290 masks[irq][4]));
30291- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30292+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30293 } else {
30294 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30295 (((cur_irq_sequence =
30296- atomic_read(&cur_irq->irq_received)) -
30297+ atomic_read_unchecked(&cur_irq->irq_received)) -
30298 *sequence) <= (1 << 23)));
30299 }
30300 *sequence = cur_irq_sequence;
30301@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
30302 }
30303
30304 for (i = 0; i < dev_priv->num_irqs; ++i) {
30305- atomic_set(&cur_irq->irq_received, 0);
30306+ atomic_set_unchecked(&cur_irq->irq_received, 0);
30307 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30308 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30309 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30310@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
30311 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30312 case VIA_IRQ_RELATIVE:
30313 irqwait->request.sequence +=
30314- atomic_read(&cur_irq->irq_received);
30315+ atomic_read_unchecked(&cur_irq->irq_received);
30316 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30317 case VIA_IRQ_ABSOLUTE:
30318 break;
30319diff -urNp linux-2.6.32.45/drivers/hid/hid-core.c linux-2.6.32.45/drivers/hid/hid-core.c
30320--- linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
30321+++ linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
30322@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
30323
30324 int hid_add_device(struct hid_device *hdev)
30325 {
30326- static atomic_t id = ATOMIC_INIT(0);
30327+ static atomic_unchecked_t id = ATOMIC_INIT(0);
30328 int ret;
30329
30330 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30331@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
30332 /* XXX hack, any other cleaner solution after the driver core
30333 * is converted to allow more than 20 bytes as the device name? */
30334 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30335- hdev->vendor, hdev->product, atomic_inc_return(&id));
30336+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30337
30338 ret = device_add(&hdev->dev);
30339 if (!ret)
30340diff -urNp linux-2.6.32.45/drivers/hid/usbhid/hiddev.c linux-2.6.32.45/drivers/hid/usbhid/hiddev.c
30341--- linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
30342+++ linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
30343@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
30344 return put_user(HID_VERSION, (int __user *)arg);
30345
30346 case HIDIOCAPPLICATION:
30347- if (arg < 0 || arg >= hid->maxapplication)
30348+ if (arg >= hid->maxapplication)
30349 return -EINVAL;
30350
30351 for (i = 0; i < hid->maxcollection; i++)
30352diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.c linux-2.6.32.45/drivers/hwmon/lis3lv02d.c
30353--- linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
30354+++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
30355@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
30356 * the lid is closed. This leads to interrupts as soon as a little move
30357 * is done.
30358 */
30359- atomic_inc(&lis3_dev.count);
30360+ atomic_inc_unchecked(&lis3_dev.count);
30361
30362 wake_up_interruptible(&lis3_dev.misc_wait);
30363 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30364@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
30365 if (test_and_set_bit(0, &lis3_dev.misc_opened))
30366 return -EBUSY; /* already open */
30367
30368- atomic_set(&lis3_dev.count, 0);
30369+ atomic_set_unchecked(&lis3_dev.count, 0);
30370
30371 /*
30372 * The sensor can generate interrupts for free-fall and direction
30373@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
30374 add_wait_queue(&lis3_dev.misc_wait, &wait);
30375 while (true) {
30376 set_current_state(TASK_INTERRUPTIBLE);
30377- data = atomic_xchg(&lis3_dev.count, 0);
30378+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30379 if (data)
30380 break;
30381
30382@@ -244,7 +244,7 @@ out:
30383 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30384 {
30385 poll_wait(file, &lis3_dev.misc_wait, wait);
30386- if (atomic_read(&lis3_dev.count))
30387+ if (atomic_read_unchecked(&lis3_dev.count))
30388 return POLLIN | POLLRDNORM;
30389 return 0;
30390 }
30391diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.h linux-2.6.32.45/drivers/hwmon/lis3lv02d.h
30392--- linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
30393+++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
30394@@ -201,7 +201,7 @@ struct lis3lv02d {
30395
30396 struct input_polled_dev *idev; /* input device */
30397 struct platform_device *pdev; /* platform device */
30398- atomic_t count; /* interrupt count after last read */
30399+ atomic_unchecked_t count; /* interrupt count after last read */
30400 int xcalib; /* calibrated null value for x */
30401 int ycalib; /* calibrated null value for y */
30402 int zcalib; /* calibrated null value for z */
30403diff -urNp linux-2.6.32.45/drivers/hwmon/sht15.c linux-2.6.32.45/drivers/hwmon/sht15.c
30404--- linux-2.6.32.45/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
30405+++ linux-2.6.32.45/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
30406@@ -112,7 +112,7 @@ struct sht15_data {
30407 int supply_uV;
30408 int supply_uV_valid;
30409 struct work_struct update_supply_work;
30410- atomic_t interrupt_handled;
30411+ atomic_unchecked_t interrupt_handled;
30412 };
30413
30414 /**
30415@@ -245,13 +245,13 @@ static inline int sht15_update_single_va
30416 return ret;
30417
30418 gpio_direction_input(data->pdata->gpio_data);
30419- atomic_set(&data->interrupt_handled, 0);
30420+ atomic_set_unchecked(&data->interrupt_handled, 0);
30421
30422 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30423 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30424 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30425 /* Only relevant if the interrupt hasn't occured. */
30426- if (!atomic_read(&data->interrupt_handled))
30427+ if (!atomic_read_unchecked(&data->interrupt_handled))
30428 schedule_work(&data->read_work);
30429 }
30430 ret = wait_event_timeout(data->wait_queue,
30431@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
30432 struct sht15_data *data = d;
30433 /* First disable the interrupt */
30434 disable_irq_nosync(irq);
30435- atomic_inc(&data->interrupt_handled);
30436+ atomic_inc_unchecked(&data->interrupt_handled);
30437 /* Then schedule a reading work struct */
30438 if (data->flag != SHT15_READING_NOTHING)
30439 schedule_work(&data->read_work);
30440@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
30441 here as could have gone low in meantime so verify
30442 it hasn't!
30443 */
30444- atomic_set(&data->interrupt_handled, 0);
30445+ atomic_set_unchecked(&data->interrupt_handled, 0);
30446 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30447 /* If still not occured or another handler has been scheduled */
30448 if (gpio_get_value(data->pdata->gpio_data)
30449- || atomic_read(&data->interrupt_handled))
30450+ || atomic_read_unchecked(&data->interrupt_handled))
30451 return;
30452 }
30453 /* Read the data back from the device */
30454diff -urNp linux-2.6.32.45/drivers/hwmon/w83791d.c linux-2.6.32.45/drivers/hwmon/w83791d.c
30455--- linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
30456+++ linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
30457@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
30458 struct i2c_board_info *info);
30459 static int w83791d_remove(struct i2c_client *client);
30460
30461-static int w83791d_read(struct i2c_client *client, u8 register);
30462-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
30463+static int w83791d_read(struct i2c_client *client, u8 reg);
30464+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
30465 static struct w83791d_data *w83791d_update_device(struct device *dev);
30466
30467 #ifdef DEBUG
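
The w83791d.c change cleans up the forward declarations: `register` is a C keyword, so `u8 register` appears to parse as an unnamed parameter carrying the register storage class rather than a parameter named register; the patch renames it to reg so the prototypes read naturally and match the definitions. A small demonstration (read_reg and read_reg_fixed are made-up names):

    #include <stdio.h>

    typedef unsigned char u8;

    /* "u8 register" declares an *unnamed* parameter with register storage,
     * because "register" cannot be used as a parameter name. */
    static int read_reg(void *client, u8 register);    /* compiles, parameter unnamed */
    static int read_reg_fixed(void *client, u8 reg);   /* what the patch does */

    static int read_reg(void *client, u8 r)         { (void)client; return r; }
    static int read_reg_fixed(void *client, u8 reg) { (void)client; return reg; }

    int main(void)
    {
        printf("%d %d\n", read_reg(NULL, 3), read_reg_fixed(NULL, 4));
        return 0;
    }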
30468diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c
30469--- linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-03-27 14:31:47.000000000 -0400
30470+++ linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:22:32.000000000 -0400
30471@@ -43,7 +43,7 @@
30472 extern struct i2c_adapter amd756_smbus;
30473
30474 static struct i2c_adapter *s4882_adapter;
30475-static struct i2c_algorithm *s4882_algo;
30476+static i2c_algorithm_no_const *s4882_algo;
30477
30478 /* Wrapper access functions for multiplexed SMBus */
30479 static DEFINE_MUTEX(amd756_lock);
30480diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c
30481--- linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-03-27 14:31:47.000000000 -0400
30482+++ linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:22:32.000000000 -0400
30483@@ -41,7 +41,7 @@
30484 extern struct i2c_adapter *nforce2_smbus;
30485
30486 static struct i2c_adapter *s4985_adapter;
30487-static struct i2c_algorithm *s4985_algo;
30488+static i2c_algorithm_no_const *s4985_algo;
30489
30490 /* Wrapper access functions for multiplexed SMBus */
30491 static DEFINE_MUTEX(nforce2_lock);
30492diff -urNp linux-2.6.32.45/drivers/ide/ide-cd.c linux-2.6.32.45/drivers/ide/ide-cd.c
30493--- linux-2.6.32.45/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
30494+++ linux-2.6.32.45/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
30495@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
30496 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30497 if ((unsigned long)buf & alignment
30498 || blk_rq_bytes(rq) & q->dma_pad_mask
30499- || object_is_on_stack(buf))
30500+ || object_starts_on_stack(buf))
30501 drive->dma = 0;
30502 }
30503 }
30504diff -urNp linux-2.6.32.45/drivers/ide/ide-floppy.c linux-2.6.32.45/drivers/ide/ide-floppy.c
30505--- linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
30506+++ linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
30507@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
30508 u8 pc_buf[256], header_len, desc_cnt;
30509 int i, rc = 1, blocks, length;
30510
30511+ pax_track_stack();
30512+
30513 ide_debug_log(IDE_DBG_FUNC, "enter");
30514
30515 drive->bios_cyl = 0;
30516diff -urNp linux-2.6.32.45/drivers/ide/setup-pci.c linux-2.6.32.45/drivers/ide/setup-pci.c
30517--- linux-2.6.32.45/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
30518+++ linux-2.6.32.45/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
30519@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
30520 int ret, i, n_ports = dev2 ? 4 : 2;
30521 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
30522
30523+ pax_track_stack();
30524+
30525 for (i = 0; i < n_ports / 2; i++) {
30526 ret = ide_setup_pci_controller(pdev[i], d, !i);
30527 if (ret < 0)
30528diff -urNp linux-2.6.32.45/drivers/ieee1394/dv1394.c linux-2.6.32.45/drivers/ieee1394/dv1394.c
30529--- linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
30530+++ linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
30531@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
30532 based upon DIF section and sequence
30533 */
30534
30535-static void inline
30536+static inline void
30537 frame_put_packet (struct frame *f, struct packet *p)
30538 {
30539 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
30540diff -urNp linux-2.6.32.45/drivers/ieee1394/hosts.c linux-2.6.32.45/drivers/ieee1394/hosts.c
30541--- linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
30542+++ linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
30543@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
30544 }
30545
30546 static struct hpsb_host_driver dummy_driver = {
30547+ .name = "dummy",
30548 .transmit_packet = dummy_transmit_packet,
30549 .devctl = dummy_devctl,
30550 .isoctl = dummy_isoctl
30551diff -urNp linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c
30552--- linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
30553+++ linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
30554@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
30555 for (func = 0; func < 8; func++) {
30556 u32 class = read_pci_config(num,slot,func,
30557 PCI_CLASS_REVISION);
30558- if ((class == 0xffffffff))
30559+ if (class == 0xffffffff)
30560 continue; /* No device at this func */
30561
30562 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
30563diff -urNp linux-2.6.32.45/drivers/ieee1394/ohci1394.c linux-2.6.32.45/drivers/ieee1394/ohci1394.c
30564--- linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
30565+++ linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
30566@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
30567 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
30568
30569 /* Module Parameters */
30570-static int phys_dma = 1;
30571+static int phys_dma;
30572 module_param(phys_dma, int, 0444);
30573-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
30574+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
30575
30576 static void dma_trm_tasklet(unsigned long data);
30577 static void dma_trm_reset(struct dma_trm_ctx *d);
30578diff -urNp linux-2.6.32.45/drivers/ieee1394/sbp2.c linux-2.6.32.45/drivers/ieee1394/sbp2.c
30579--- linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
30580+++ linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
30581@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
30582 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
30583 MODULE_LICENSE("GPL");
30584
30585-static int sbp2_module_init(void)
30586+static int __init sbp2_module_init(void)
30587 {
30588 int ret;
30589
30590diff -urNp linux-2.6.32.45/drivers/infiniband/core/cm.c linux-2.6.32.45/drivers/infiniband/core/cm.c
30591--- linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
30592+++ linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
30593@@ -112,7 +112,7 @@ static char const counter_group_names[CM
30594
30595 struct cm_counter_group {
30596 struct kobject obj;
30597- atomic_long_t counter[CM_ATTR_COUNT];
30598+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30599 };
30600
30601 struct cm_counter_attribute {
30602@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
30603 struct ib_mad_send_buf *msg = NULL;
30604 int ret;
30605
30606- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30607+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30608 counter[CM_REQ_COUNTER]);
30609
30610 /* Quick state check to discard duplicate REQs. */
30611@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
30612 if (!cm_id_priv)
30613 return;
30614
30615- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30616+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30617 counter[CM_REP_COUNTER]);
30618 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30619 if (ret)
30620@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
30621 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30622 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30623 spin_unlock_irq(&cm_id_priv->lock);
30624- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30625+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30626 counter[CM_RTU_COUNTER]);
30627 goto out;
30628 }
30629@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
30630 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30631 dreq_msg->local_comm_id);
30632 if (!cm_id_priv) {
30633- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30634+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30635 counter[CM_DREQ_COUNTER]);
30636 cm_issue_drep(work->port, work->mad_recv_wc);
30637 return -EINVAL;
30638@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
30639 case IB_CM_MRA_REP_RCVD:
30640 break;
30641 case IB_CM_TIMEWAIT:
30642- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30643+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30644 counter[CM_DREQ_COUNTER]);
30645 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30646 goto unlock;
30647@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
30648 cm_free_msg(msg);
30649 goto deref;
30650 case IB_CM_DREQ_RCVD:
30651- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30652+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30653 counter[CM_DREQ_COUNTER]);
30654 goto unlock;
30655 default:
30656@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
30657 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30658 cm_id_priv->msg, timeout)) {
30659 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30660- atomic_long_inc(&work->port->
30661+ atomic_long_inc_unchecked(&work->port->
30662 counter_group[CM_RECV_DUPLICATES].
30663 counter[CM_MRA_COUNTER]);
30664 goto out;
30665@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
30666 break;
30667 case IB_CM_MRA_REQ_RCVD:
30668 case IB_CM_MRA_REP_RCVD:
30669- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30670+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30671 counter[CM_MRA_COUNTER]);
30672 /* fall through */
30673 default:
30674@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
30675 case IB_CM_LAP_IDLE:
30676 break;
30677 case IB_CM_MRA_LAP_SENT:
30678- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30679+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30680 counter[CM_LAP_COUNTER]);
30681 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30682 goto unlock;
30683@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
30684 cm_free_msg(msg);
30685 goto deref;
30686 case IB_CM_LAP_RCVD:
30687- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30688+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30689 counter[CM_LAP_COUNTER]);
30690 goto unlock;
30691 default:
30692@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
30693 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30694 if (cur_cm_id_priv) {
30695 spin_unlock_irq(&cm.lock);
30696- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30697+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30698 counter[CM_SIDR_REQ_COUNTER]);
30699 goto out; /* Duplicate message. */
30700 }
30701@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
30702 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30703 msg->retries = 1;
30704
30705- atomic_long_add(1 + msg->retries,
30706+ atomic_long_add_unchecked(1 + msg->retries,
30707 &port->counter_group[CM_XMIT].counter[attr_index]);
30708 if (msg->retries)
30709- atomic_long_add(msg->retries,
30710+ atomic_long_add_unchecked(msg->retries,
30711 &port->counter_group[CM_XMIT_RETRIES].
30712 counter[attr_index]);
30713
30714@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
30715 }
30716
30717 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30718- atomic_long_inc(&port->counter_group[CM_RECV].
30719+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30720 counter[attr_id - CM_ATTR_ID_OFFSET]);
30721
30722 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30723@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
30724 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30725
30726 return sprintf(buf, "%ld\n",
30727- atomic_long_read(&group->counter[cm_attr->index]));
30728+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30729 }
30730
30731-static struct sysfs_ops cm_counter_ops = {
30732+static const struct sysfs_ops cm_counter_ops = {
30733 .show = cm_show_counter
30734 };
30735
30736diff -urNp linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c
30737--- linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
30738+++ linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
30739@@ -97,8 +97,8 @@ struct ib_fmr_pool {
30740
30741 struct task_struct *thread;
30742
30743- atomic_t req_ser;
30744- atomic_t flush_ser;
30745+ atomic_unchecked_t req_ser;
30746+ atomic_unchecked_t flush_ser;
30747
30748 wait_queue_head_t force_wait;
30749 };
30750@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
30751 struct ib_fmr_pool *pool = pool_ptr;
30752
30753 do {
30754- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30755+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30756 ib_fmr_batch_release(pool);
30757
30758- atomic_inc(&pool->flush_ser);
30759+ atomic_inc_unchecked(&pool->flush_ser);
30760 wake_up_interruptible(&pool->force_wait);
30761
30762 if (pool->flush_function)
30763@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30764 }
30765
30766 set_current_state(TASK_INTERRUPTIBLE);
30767- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30768+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30769 !kthread_should_stop())
30770 schedule();
30771 __set_current_state(TASK_RUNNING);
30772@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
30773 pool->dirty_watermark = params->dirty_watermark;
30774 pool->dirty_len = 0;
30775 spin_lock_init(&pool->pool_lock);
30776- atomic_set(&pool->req_ser, 0);
30777- atomic_set(&pool->flush_ser, 0);
30778+ atomic_set_unchecked(&pool->req_ser, 0);
30779+ atomic_set_unchecked(&pool->flush_ser, 0);
30780 init_waitqueue_head(&pool->force_wait);
30781
30782 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30783@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
30784 }
30785 spin_unlock_irq(&pool->pool_lock);
30786
30787- serial = atomic_inc_return(&pool->req_ser);
30788+ serial = atomic_inc_return_unchecked(&pool->req_ser);
30789 wake_up_process(pool->thread);
30790
30791 if (wait_event_interruptible(pool->force_wait,
30792- atomic_read(&pool->flush_ser) - serial >= 0))
30793+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30794 return -EINTR;
30795
30796 return 0;
30797@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30798 } else {
30799 list_add_tail(&fmr->list, &pool->dirty_list);
30800 if (++pool->dirty_len >= pool->dirty_watermark) {
30801- atomic_inc(&pool->req_ser);
30802+ atomic_inc_unchecked(&pool->req_ser);
30803 wake_up_process(pool->thread);
30804 }
30805 }
30806diff -urNp linux-2.6.32.45/drivers/infiniband/core/sysfs.c linux-2.6.32.45/drivers/infiniband/core/sysfs.c
30807--- linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30808+++ linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30809@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30810 return port_attr->show(p, port_attr, buf);
30811 }
30812
30813-static struct sysfs_ops port_sysfs_ops = {
30814+static const struct sysfs_ops port_sysfs_ops = {
30815 .show = port_attr_show
30816 };
30817
30818diff -urNp linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c
30819--- linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30820+++ linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30821@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
30822 dst->grh.sgid_index = src->grh.sgid_index;
30823 dst->grh.hop_limit = src->grh.hop_limit;
30824 dst->grh.traffic_class = src->grh.traffic_class;
30825+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
30826 dst->dlid = src->dlid;
30827 dst->sl = src->sl;
30828 dst->src_path_bits = src->src_path_bits;
30829 dst->static_rate = src->static_rate;
30830 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
30831 dst->port_num = src->port_num;
30832+ dst->reserved = 0;
30833 }
30834 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
30835
30836 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
30837 struct ib_qp_attr *src)
30838 {
30839+ dst->qp_state = src->qp_state;
30840 dst->cur_qp_state = src->cur_qp_state;
30841 dst->path_mtu = src->path_mtu;
30842 dst->path_mig_state = src->path_mig_state;
30843@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
30844 dst->rnr_retry = src->rnr_retry;
30845 dst->alt_port_num = src->alt_port_num;
30846 dst->alt_timeout = src->alt_timeout;
30847+ memset(dst->reserved, 0, sizeof(dst->reserved));
30848 }
30849 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
30850
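
The uverbs_marshall.c hunks explicitly zero reserved fields (and copy qp_state) in structures that are later copied out to userspace, so padding and never-written members cannot carry stale kernel stack bytes with them. A reduced sketch of the pattern; struct user_visible and marshal() are illustrative, not the ib_uverbs types:

    #include <stdint.h>
    #include <stdio.h>

    struct user_visible {
        uint32_t dlid;
        uint32_t reserved;     /* previously left untouched */
    };

    static void marshal(struct user_visible *dst, uint32_t dlid)
    {
        dst->dlid = dlid;
        dst->reserved = 0;     /* the role of the added memset()/"= 0" lines */
    }

    int main(void)
    {
        struct user_visible out;          /* deliberately uninitialized */

        marshal(&out, 7);
        printf("dlid=%u reserved=%u\n", out.dlid, out.reserved);
        return 0;
    }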
30851diff -urNp linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c
30852--- linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
30853+++ linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
30854@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
30855 struct infinipath_counters counters;
30856 struct ipath_devdata *dd;
30857
30858+ pax_track_stack();
30859+
30860 dd = file->f_path.dentry->d_inode->i_private;
30861 dd->ipath_f_read_counters(dd, &counters);
30862
30863diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c
30864--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
30865+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
30866@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
30867 LIST_HEAD(nes_adapter_list);
30868 static LIST_HEAD(nes_dev_list);
30869
30870-atomic_t qps_destroyed;
30871+atomic_unchecked_t qps_destroyed;
30872
30873 static unsigned int ee_flsh_adapter;
30874 static unsigned int sysfs_nonidx_addr;
30875@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
30876 struct nes_adapter *nesadapter = nesdev->nesadapter;
30877 u32 qp_id;
30878
30879- atomic_inc(&qps_destroyed);
30880+ atomic_inc_unchecked(&qps_destroyed);
30881
30882 /* Free the control structures */
30883
30884diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c
30885--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
30886+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
30887@@ -69,11 +69,11 @@ u32 cm_packets_received;
30888 u32 cm_listens_created;
30889 u32 cm_listens_destroyed;
30890 u32 cm_backlog_drops;
30891-atomic_t cm_loopbacks;
30892-atomic_t cm_nodes_created;
30893-atomic_t cm_nodes_destroyed;
30894-atomic_t cm_accel_dropped_pkts;
30895-atomic_t cm_resets_recvd;
30896+atomic_unchecked_t cm_loopbacks;
30897+atomic_unchecked_t cm_nodes_created;
30898+atomic_unchecked_t cm_nodes_destroyed;
30899+atomic_unchecked_t cm_accel_dropped_pkts;
30900+atomic_unchecked_t cm_resets_recvd;
30901
30902 static inline int mini_cm_accelerated(struct nes_cm_core *,
30903 struct nes_cm_node *);
30904@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
30905
30906 static struct nes_cm_core *g_cm_core;
30907
30908-atomic_t cm_connects;
30909-atomic_t cm_accepts;
30910-atomic_t cm_disconnects;
30911-atomic_t cm_closes;
30912-atomic_t cm_connecteds;
30913-atomic_t cm_connect_reqs;
30914-atomic_t cm_rejects;
30915+atomic_unchecked_t cm_connects;
30916+atomic_unchecked_t cm_accepts;
30917+atomic_unchecked_t cm_disconnects;
30918+atomic_unchecked_t cm_closes;
30919+atomic_unchecked_t cm_connecteds;
30920+atomic_unchecked_t cm_connect_reqs;
30921+atomic_unchecked_t cm_rejects;
30922
30923
30924 /**
30925@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30926 cm_node->rem_mac);
30927
30928 add_hte_node(cm_core, cm_node);
30929- atomic_inc(&cm_nodes_created);
30930+ atomic_inc_unchecked(&cm_nodes_created);
30931
30932 return cm_node;
30933 }
30934@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30935 }
30936
30937 atomic_dec(&cm_core->node_cnt);
30938- atomic_inc(&cm_nodes_destroyed);
30939+ atomic_inc_unchecked(&cm_nodes_destroyed);
30940 nesqp = cm_node->nesqp;
30941 if (nesqp) {
30942 nesqp->cm_node = NULL;
30943@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30944
30945 static void drop_packet(struct sk_buff *skb)
30946 {
30947- atomic_inc(&cm_accel_dropped_pkts);
30948+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30949 dev_kfree_skb_any(skb);
30950 }
30951
30952@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30953
30954 int reset = 0; /* whether to send reset in case of err.. */
30955 int passive_state;
30956- atomic_inc(&cm_resets_recvd);
30957+ atomic_inc_unchecked(&cm_resets_recvd);
30958 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30959 " refcnt=%d\n", cm_node, cm_node->state,
30960 atomic_read(&cm_node->ref_count));
30961@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30962 rem_ref_cm_node(cm_node->cm_core, cm_node);
30963 return NULL;
30964 }
30965- atomic_inc(&cm_loopbacks);
30966+ atomic_inc_unchecked(&cm_loopbacks);
30967 loopbackremotenode->loopbackpartner = cm_node;
30968 loopbackremotenode->tcp_cntxt.rcv_wscale =
30969 NES_CM_DEFAULT_RCV_WND_SCALE;
30970@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30971 add_ref_cm_node(cm_node);
30972 } else if (cm_node->state == NES_CM_STATE_TSA) {
30973 rem_ref_cm_node(cm_core, cm_node);
30974- atomic_inc(&cm_accel_dropped_pkts);
30975+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30976 dev_kfree_skb_any(skb);
30977 break;
30978 }
30979@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30980
30981 if ((cm_id) && (cm_id->event_handler)) {
30982 if (issue_disconn) {
30983- atomic_inc(&cm_disconnects);
30984+ atomic_inc_unchecked(&cm_disconnects);
30985 cm_event.event = IW_CM_EVENT_DISCONNECT;
30986 cm_event.status = disconn_status;
30987 cm_event.local_addr = cm_id->local_addr;
30988@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30989 }
30990
30991 if (issue_close) {
30992- atomic_inc(&cm_closes);
30993+ atomic_inc_unchecked(&cm_closes);
30994 nes_disconnect(nesqp, 1);
30995
30996 cm_id->provider_data = nesqp;
30997@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30998
30999 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
31000 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
31001- atomic_inc(&cm_accepts);
31002+ atomic_inc_unchecked(&cm_accepts);
31003
31004 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
31005 atomic_read(&nesvnic->netdev->refcnt));
31006@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
31007
31008 struct nes_cm_core *cm_core;
31009
31010- atomic_inc(&cm_rejects);
31011+ atomic_inc_unchecked(&cm_rejects);
31012 cm_node = (struct nes_cm_node *) cm_id->provider_data;
31013 loopback = cm_node->loopbackpartner;
31014 cm_core = cm_node->cm_core;
31015@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
31016 ntohl(cm_id->local_addr.sin_addr.s_addr),
31017 ntohs(cm_id->local_addr.sin_port));
31018
31019- atomic_inc(&cm_connects);
31020+ atomic_inc_unchecked(&cm_connects);
31021 nesqp->active_conn = 1;
31022
31023 /* cache the cm_id in the qp */
31024@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
31025 if (nesqp->destroyed) {
31026 return;
31027 }
31028- atomic_inc(&cm_connecteds);
31029+ atomic_inc_unchecked(&cm_connecteds);
31030 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
31031 " local port 0x%04X. jiffies = %lu.\n",
31032 nesqp->hwqp.qp_id,
31033@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
31034
31035 ret = cm_id->event_handler(cm_id, &cm_event);
31036 cm_id->add_ref(cm_id);
31037- atomic_inc(&cm_closes);
31038+ atomic_inc_unchecked(&cm_closes);
31039 cm_event.event = IW_CM_EVENT_CLOSE;
31040 cm_event.status = IW_CM_EVENT_STATUS_OK;
31041 cm_event.provider_data = cm_id->provider_data;
31042@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
31043 return;
31044 cm_id = cm_node->cm_id;
31045
31046- atomic_inc(&cm_connect_reqs);
31047+ atomic_inc_unchecked(&cm_connect_reqs);
31048 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31049 cm_node, cm_id, jiffies);
31050
31051@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
31052 return;
31053 cm_id = cm_node->cm_id;
31054
31055- atomic_inc(&cm_connect_reqs);
31056+ atomic_inc_unchecked(&cm_connect_reqs);
31057 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31058 cm_node, cm_id, jiffies);
31059
31060diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h
31061--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
31062+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
31063@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
31064 extern unsigned int wqm_quanta;
31065 extern struct list_head nes_adapter_list;
31066
31067-extern atomic_t cm_connects;
31068-extern atomic_t cm_accepts;
31069-extern atomic_t cm_disconnects;
31070-extern atomic_t cm_closes;
31071-extern atomic_t cm_connecteds;
31072-extern atomic_t cm_connect_reqs;
31073-extern atomic_t cm_rejects;
31074-extern atomic_t mod_qp_timouts;
31075-extern atomic_t qps_created;
31076-extern atomic_t qps_destroyed;
31077-extern atomic_t sw_qps_destroyed;
31078+extern atomic_unchecked_t cm_connects;
31079+extern atomic_unchecked_t cm_accepts;
31080+extern atomic_unchecked_t cm_disconnects;
31081+extern atomic_unchecked_t cm_closes;
31082+extern atomic_unchecked_t cm_connecteds;
31083+extern atomic_unchecked_t cm_connect_reqs;
31084+extern atomic_unchecked_t cm_rejects;
31085+extern atomic_unchecked_t mod_qp_timouts;
31086+extern atomic_unchecked_t qps_created;
31087+extern atomic_unchecked_t qps_destroyed;
31088+extern atomic_unchecked_t sw_qps_destroyed;
31089 extern u32 mh_detected;
31090 extern u32 mh_pauses_sent;
31091 extern u32 cm_packets_sent;
31092@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
31093 extern u32 cm_listens_created;
31094 extern u32 cm_listens_destroyed;
31095 extern u32 cm_backlog_drops;
31096-extern atomic_t cm_loopbacks;
31097-extern atomic_t cm_nodes_created;
31098-extern atomic_t cm_nodes_destroyed;
31099-extern atomic_t cm_accel_dropped_pkts;
31100-extern atomic_t cm_resets_recvd;
31101+extern atomic_unchecked_t cm_loopbacks;
31102+extern atomic_unchecked_t cm_nodes_created;
31103+extern atomic_unchecked_t cm_nodes_destroyed;
31104+extern atomic_unchecked_t cm_accel_dropped_pkts;
31105+extern atomic_unchecked_t cm_resets_recvd;
31106
31107 extern u32 int_mod_timer_init;
31108 extern u32 int_mod_cq_depth_256;
31109diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c
31110--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
31111+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
31112@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
31113 target_stat_values[++index] = mh_detected;
31114 target_stat_values[++index] = mh_pauses_sent;
31115 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
31116- target_stat_values[++index] = atomic_read(&cm_connects);
31117- target_stat_values[++index] = atomic_read(&cm_accepts);
31118- target_stat_values[++index] = atomic_read(&cm_disconnects);
31119- target_stat_values[++index] = atomic_read(&cm_connecteds);
31120- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
31121- target_stat_values[++index] = atomic_read(&cm_rejects);
31122- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
31123- target_stat_values[++index] = atomic_read(&qps_created);
31124- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
31125- target_stat_values[++index] = atomic_read(&qps_destroyed);
31126- target_stat_values[++index] = atomic_read(&cm_closes);
31127+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
31128+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
31129+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
31130+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
31131+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
31132+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
31133+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
31134+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
31135+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
31136+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
31137+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
31138 target_stat_values[++index] = cm_packets_sent;
31139 target_stat_values[++index] = cm_packets_bounced;
31140 target_stat_values[++index] = cm_packets_created;
31141@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
31142 target_stat_values[++index] = cm_listens_created;
31143 target_stat_values[++index] = cm_listens_destroyed;
31144 target_stat_values[++index] = cm_backlog_drops;
31145- target_stat_values[++index] = atomic_read(&cm_loopbacks);
31146- target_stat_values[++index] = atomic_read(&cm_nodes_created);
31147- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
31148- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
31149- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
31150+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
31151+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
31152+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
31153+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
31154+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
31155 target_stat_values[++index] = int_mod_timer_init;
31156 target_stat_values[++index] = int_mod_cq_depth_1;
31157 target_stat_values[++index] = int_mod_cq_depth_4;
31158diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c
31159--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
31160+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
31161@@ -45,9 +45,9 @@
31162
31163 #include <rdma/ib_umem.h>
31164
31165-atomic_t mod_qp_timouts;
31166-atomic_t qps_created;
31167-atomic_t sw_qps_destroyed;
31168+atomic_unchecked_t mod_qp_timouts;
31169+atomic_unchecked_t qps_created;
31170+atomic_unchecked_t sw_qps_destroyed;
31171
31172 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
31173
31174@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
31175 if (init_attr->create_flags)
31176 return ERR_PTR(-EINVAL);
31177
31178- atomic_inc(&qps_created);
31179+ atomic_inc_unchecked(&qps_created);
31180 switch (init_attr->qp_type) {
31181 case IB_QPT_RC:
31182 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
31183@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
31184 struct iw_cm_event cm_event;
31185 int ret;
31186
31187- atomic_inc(&sw_qps_destroyed);
31188+ atomic_inc_unchecked(&sw_qps_destroyed);
31189 nesqp->destroyed = 1;
31190
31191 /* Blow away the connection if it exists. */
31192diff -urNp linux-2.6.32.45/drivers/input/gameport/gameport.c linux-2.6.32.45/drivers/input/gameport/gameport.c
31193--- linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
31194+++ linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
31195@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
31196 */
31197 static void gameport_init_port(struct gameport *gameport)
31198 {
31199- static atomic_t gameport_no = ATOMIC_INIT(0);
31200+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
31201
31202 __module_get(THIS_MODULE);
31203
31204 mutex_init(&gameport->drv_mutex);
31205 device_initialize(&gameport->dev);
31206- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
31207+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
31208 gameport->dev.bus = &gameport_bus;
31209 gameport->dev.release = gameport_release_port;
31210 if (gameport->parent)
31211diff -urNp linux-2.6.32.45/drivers/input/input.c linux-2.6.32.45/drivers/input/input.c
31212--- linux-2.6.32.45/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
31213+++ linux-2.6.32.45/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
31214@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
31215 */
31216 int input_register_device(struct input_dev *dev)
31217 {
31218- static atomic_t input_no = ATOMIC_INIT(0);
31219+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
31220 struct input_handler *handler;
31221 const char *path;
31222 int error;
31223@@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
31224 dev->setkeycode = input_default_setkeycode;
31225
31226 dev_set_name(&dev->dev, "input%ld",
31227- (unsigned long) atomic_inc_return(&input_no) - 1);
31228+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
31229
31230 error = device_add(&dev->dev);
31231 if (error)
31232diff -urNp linux-2.6.32.45/drivers/input/joystick/sidewinder.c linux-2.6.32.45/drivers/input/joystick/sidewinder.c
31233--- linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
31234+++ linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
31235@@ -30,6 +30,7 @@
31236 #include <linux/kernel.h>
31237 #include <linux/module.h>
31238 #include <linux/slab.h>
31239+#include <linux/sched.h>
31240 #include <linux/init.h>
31241 #include <linux/input.h>
31242 #include <linux/gameport.h>
31243@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
31244 unsigned char buf[SW_LENGTH];
31245 int i;
31246
31247+ pax_track_stack();
31248+
31249 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
31250
31251 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
31252diff -urNp linux-2.6.32.45/drivers/input/joystick/xpad.c linux-2.6.32.45/drivers/input/joystick/xpad.c
31253--- linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
31254+++ linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
31255@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
31256
31257 static int xpad_led_probe(struct usb_xpad *xpad)
31258 {
31259- static atomic_t led_seq = ATOMIC_INIT(0);
31260+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
31261 long led_no;
31262 struct xpad_led *led;
31263 struct led_classdev *led_cdev;
31264@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
31265 if (!led)
31266 return -ENOMEM;
31267
31268- led_no = (long)atomic_inc_return(&led_seq) - 1;
31269+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
31270
31271 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
31272 led->xpad = xpad;
31273diff -urNp linux-2.6.32.45/drivers/input/serio/serio.c linux-2.6.32.45/drivers/input/serio/serio.c
31274--- linux-2.6.32.45/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
31275+++ linux-2.6.32.45/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
31276@@ -527,7 +527,7 @@ static void serio_release_port(struct de
31277 */
31278 static void serio_init_port(struct serio *serio)
31279 {
31280- static atomic_t serio_no = ATOMIC_INIT(0);
31281+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
31282
31283 __module_get(THIS_MODULE);
31284
31285@@ -536,7 +536,7 @@ static void serio_init_port(struct serio
31286 mutex_init(&serio->drv_mutex);
31287 device_initialize(&serio->dev);
31288 dev_set_name(&serio->dev, "serio%ld",
31289- (long)atomic_inc_return(&serio_no) - 1);
31290+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
31291 serio->dev.bus = &serio_bus;
31292 serio->dev.release = serio_release_port;
31293 if (serio->parent) {
31294diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/common.c linux-2.6.32.45/drivers/isdn/gigaset/common.c
31295--- linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
31296+++ linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
31297@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
31298 cs->commands_pending = 0;
31299 cs->cur_at_seq = 0;
31300 cs->gotfwver = -1;
31301- cs->open_count = 0;
31302+ local_set(&cs->open_count, 0);
31303 cs->dev = NULL;
31304 cs->tty = NULL;
31305 cs->tty_dev = NULL;
31306diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h
31307--- linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
31308+++ linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
31309@@ -34,6 +34,7 @@
31310 #include <linux/tty_driver.h>
31311 #include <linux/list.h>
31312 #include <asm/atomic.h>
31313+#include <asm/local.h>
31314
31315 #define GIG_VERSION {0,5,0,0}
31316 #define GIG_COMPAT {0,4,0,0}
31317@@ -446,7 +447,7 @@ struct cardstate {
31318 spinlock_t cmdlock;
31319 unsigned curlen, cmdbytes;
31320
31321- unsigned open_count;
31322+ local_t open_count;
31323 struct tty_struct *tty;
31324 struct tasklet_struct if_wake_tasklet;
31325 unsigned control_state;
31326diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/interface.c linux-2.6.32.45/drivers/isdn/gigaset/interface.c
31327--- linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
31328+++ linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
31329@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
31330 return -ERESTARTSYS; // FIXME -EINTR?
31331 tty->driver_data = cs;
31332
31333- ++cs->open_count;
31334-
31335- if (cs->open_count == 1) {
31336+ if (local_inc_return(&cs->open_count) == 1) {
31337 spin_lock_irqsave(&cs->lock, flags);
31338 cs->tty = tty;
31339 spin_unlock_irqrestore(&cs->lock, flags);
31340@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
31341
31342 if (!cs->connected)
31343 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31344- else if (!cs->open_count)
31345+ else if (!local_read(&cs->open_count))
31346 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31347 else {
31348- if (!--cs->open_count) {
31349+ if (!local_dec_return(&cs->open_count)) {
31350 spin_lock_irqsave(&cs->lock, flags);
31351 cs->tty = NULL;
31352 spin_unlock_irqrestore(&cs->lock, flags);
31353@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
31354 if (!cs->connected) {
31355 gig_dbg(DEBUG_IF, "not connected");
31356 retval = -ENODEV;
31357- } else if (!cs->open_count)
31358+ } else if (!local_read(&cs->open_count))
31359 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31360 else {
31361 retval = 0;
31362@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
31363 if (!cs->connected) {
31364 gig_dbg(DEBUG_IF, "not connected");
31365 retval = -ENODEV;
31366- } else if (!cs->open_count)
31367+ } else if (!local_read(&cs->open_count))
31368 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31369 else if (cs->mstate != MS_LOCKED) {
31370 dev_warn(cs->dev, "can't write to unlocked device\n");
31371@@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
31372 if (!cs->connected) {
31373 gig_dbg(DEBUG_IF, "not connected");
31374 retval = -ENODEV;
31375- } else if (!cs->open_count)
31376+ } else if (!local_read(&cs->open_count))
31377 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31378 else if (cs->mstate != MS_LOCKED) {
31379 dev_warn(cs->dev, "can't write to unlocked device\n");
31380@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
31381
31382 if (!cs->connected)
31383 gig_dbg(DEBUG_IF, "not connected");
31384- else if (!cs->open_count)
31385+ else if (!local_read(&cs->open_count))
31386 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31387 else if (cs->mstate != MS_LOCKED)
31388 dev_warn(cs->dev, "can't write to unlocked device\n");
31389@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
31390
31391 if (!cs->connected)
31392 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31393- else if (!cs->open_count)
31394+ else if (!local_read(&cs->open_count))
31395 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31396 else {
31397 //FIXME
31398@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
31399
31400 if (!cs->connected)
31401 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31402- else if (!cs->open_count)
31403+ else if (!local_read(&cs->open_count))
31404 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31405 else {
31406 //FIXME
31407@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
31408 goto out;
31409 }
31410
31411- if (!cs->open_count) {
31412+ if (!local_read(&cs->open_count)) {
31413 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31414 goto out;
31415 }
31416diff -urNp linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c
31417--- linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
31418+++ linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
31419@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
31420 }
31421 if (left) {
31422 if (t4file->user) {
31423- if (copy_from_user(buf, dp, left))
31424+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31425 return -EFAULT;
31426 } else {
31427 memcpy(buf, dp, left);
31428@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
31429 }
31430 if (left) {
31431 if (config->user) {
31432- if (copy_from_user(buf, dp, left))
31433+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31434 return -EFAULT;
31435 } else {
31436 memcpy(buf, dp, left);
31437diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c
31438--- linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
31439+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
31440@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
31441 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
31442 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
31443
31444+ pax_track_stack();
31445
31446 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
31447 {
31448diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c
31449--- linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
31450+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
31451@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
31452 IDI_SYNC_REQ req;
31453 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31454
31455+ pax_track_stack();
31456+
31457 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31458
31459 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31460diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c
31461--- linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
31462+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
31463@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
31464 IDI_SYNC_REQ req;
31465 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31466
31467+ pax_track_stack();
31468+
31469 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31470
31471 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31472diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c
31473--- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
31474+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
31475@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
31476 IDI_SYNC_REQ req;
31477 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31478
31479+ pax_track_stack();
31480+
31481 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31482
31483 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31484diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h
31485--- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-03-27 14:31:47.000000000 -0400
31486+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:33:55.000000000 -0400
31487@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31488 } diva_didd_add_adapter_t;
31489 typedef struct _diva_didd_remove_adapter {
31490 IDI_CALL p_request;
31491-} diva_didd_remove_adapter_t;
31492+} __no_const diva_didd_remove_adapter_t;
31493 typedef struct _diva_didd_read_adapter_array {
31494 void * buffer;
31495 dword length;
31496diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c
31497--- linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
31498+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
31499@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
31500 IDI_SYNC_REQ req;
31501 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31502
31503+ pax_track_stack();
31504+
31505 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31506
31507 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31508diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c
31509--- linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
31510+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
31511@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
31512 dword d;
31513 word w;
31514
31515+ pax_track_stack();
31516+
31517 a = plci->adapter;
31518 Id = ((word)plci->Id<<8)|a->Id;
31519 PUT_WORD(&SS_Ind[4],0x0000);
31520@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
31521 word j, n, w;
31522 dword d;
31523
31524+ pax_track_stack();
31525+
31526
31527 for(i=0;i<8;i++) bp_parms[i].length = 0;
31528 for(i=0;i<2;i++) global_config[i].length = 0;
31529@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
31530 const byte llc3[] = {4,3,2,2,6,6,0};
31531 const byte header[] = {0,2,3,3,0,0,0};
31532
31533+ pax_track_stack();
31534+
31535 for(i=0;i<8;i++) bp_parms[i].length = 0;
31536 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
31537 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
31538@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
31539 word appl_number_group_type[MAX_APPL];
31540 PLCI *auxplci;
31541
31542+ pax_track_stack();
31543+
31544 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
31545
31546 if(!a->group_optimization_enabled)
31547diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c
31548--- linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
31549+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
31550@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
31551 IDI_SYNC_REQ req;
31552 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31553
31554+ pax_track_stack();
31555+
31556 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31557
31558 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31559diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h
31560--- linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-03-27 14:31:47.000000000 -0400
31561+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:33:55.000000000 -0400
31562@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31563 typedef struct _diva_os_idi_adapter_interface {
31564 diva_init_card_proc_t cleanup_adapter_proc;
31565 diva_cmd_card_proc_t cmd_proc;
31566-} diva_os_idi_adapter_interface_t;
31567+} __no_const diva_os_idi_adapter_interface_t;
31568
31569 typedef struct _diva_os_xdi_adapter {
31570 struct list_head link;
31571diff -urNp linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c
31572--- linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
31573+++ linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
31574@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
31575 } iocpar;
31576 void __user *argp = (void __user *)arg;
31577
31578+ pax_track_stack();
31579+
31580 #define name iocpar.name
31581 #define bname iocpar.bname
31582 #define iocts iocpar.iocts
31583diff -urNp linux-2.6.32.45/drivers/isdn/icn/icn.c linux-2.6.32.45/drivers/isdn/icn/icn.c
31584--- linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
31585+++ linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
31586@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
31587 if (count > len)
31588 count = len;
31589 if (user) {
31590- if (copy_from_user(msg, buf, count))
31591+ if (count > sizeof msg || copy_from_user(msg, buf, count))
31592 return -EFAULT;
31593 } else
31594 memcpy(msg, buf, count);
31595diff -urNp linux-2.6.32.45/drivers/isdn/mISDN/socket.c linux-2.6.32.45/drivers/isdn/mISDN/socket.c
31596--- linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
31597+++ linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
31598@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
31599 if (dev) {
31600 struct mISDN_devinfo di;
31601
31602+ memset(&di, 0, sizeof(di));
31603 di.id = dev->id;
31604 di.Dprotocols = dev->Dprotocols;
31605 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31606@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
31607 if (dev) {
31608 struct mISDN_devinfo di;
31609
31610+ memset(&di, 0, sizeof(di));
31611 di.id = dev->id;
31612 di.Dprotocols = dev->Dprotocols;
31613 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31614diff -urNp linux-2.6.32.45/drivers/isdn/sc/interrupt.c linux-2.6.32.45/drivers/isdn/sc/interrupt.c
31615--- linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
31616+++ linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
31617@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
31618 }
31619 else if(callid>=0x0000 && callid<=0x7FFF)
31620 {
31621+ int len;
31622+
31623 pr_debug("%s: Got Incoming Call\n",
31624 sc_adapter[card]->devicename);
31625- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
31626- strcpy(setup.eazmsn,
31627- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
31628+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
31629+ sizeof(setup.phone));
31630+ if (len >= sizeof(setup.phone))
31631+ continue;
31632+ len = strlcpy(setup.eazmsn,
31633+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31634+ sizeof(setup.eazmsn));
31635+ if (len >= sizeof(setup.eazmsn))
31636+ continue;
31637 setup.si1 = 7;
31638 setup.si2 = 0;
31639 setup.plan = 0;
31640@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
31641 * Handle a GetMyNumber Rsp
31642 */
31643 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
31644- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
31645+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31646+ rcvmsg.msg_data.byte_array,
31647+ sizeof(rcvmsg.msg_data.byte_array));
31648 continue;
31649 }
31650
31651diff -urNp linux-2.6.32.45/drivers/lguest/core.c linux-2.6.32.45/drivers/lguest/core.c
31652--- linux-2.6.32.45/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
31653+++ linux-2.6.32.45/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
31654@@ -91,9 +91,17 @@ static __init int map_switcher(void)
31655 * it's worked so far. The end address needs +1 because __get_vm_area
31656 * allocates an extra guard page, so we need space for that.
31657 */
31658+
31659+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31660+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31661+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31662+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31663+#else
31664 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31665 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31666 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31667+#endif
31668+
31669 if (!switcher_vma) {
31670 err = -ENOMEM;
31671 printk("lguest: could not map switcher pages high\n");
31672@@ -118,7 +126,7 @@ static __init int map_switcher(void)
31673 * Now the Switcher is mapped at the right address, we can't fail!
31674 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
31675 */
31676- memcpy(switcher_vma->addr, start_switcher_text,
31677+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31678 end_switcher_text - start_switcher_text);
31679
31680 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31681diff -urNp linux-2.6.32.45/drivers/lguest/x86/core.c linux-2.6.32.45/drivers/lguest/x86/core.c
31682--- linux-2.6.32.45/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
31683+++ linux-2.6.32.45/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
31684@@ -59,7 +59,7 @@ static struct {
31685 /* Offset from where switcher.S was compiled to where we've copied it */
31686 static unsigned long switcher_offset(void)
31687 {
31688- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31689+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31690 }
31691
31692 /* This cpu's struct lguest_pages. */
31693@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
31694 * These copies are pretty cheap, so we do them unconditionally: */
31695 /* Save the current Host top-level page directory.
31696 */
31697+
31698+#ifdef CONFIG_PAX_PER_CPU_PGD
31699+ pages->state.host_cr3 = read_cr3();
31700+#else
31701 pages->state.host_cr3 = __pa(current->mm->pgd);
31702+#endif
31703+
31704 /*
31705 * Set up the Guest's page tables to see this CPU's pages (and no
31706 * other CPU's pages).
31707@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
31708 * compiled-in switcher code and the high-mapped copy we just made.
31709 */
31710 for (i = 0; i < IDT_ENTRIES; i++)
31711- default_idt_entries[i] += switcher_offset();
31712+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31713
31714 /*
31715 * Set up the Switcher's per-cpu areas.
31716@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
31717 * it will be undisturbed when we switch. To change %cs and jump we
31718 * need this structure to feed to Intel's "lcall" instruction.
31719 */
31720- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31721+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31722 lguest_entry.segment = LGUEST_CS;
31723
31724 /*
31725diff -urNp linux-2.6.32.45/drivers/lguest/x86/switcher_32.S linux-2.6.32.45/drivers/lguest/x86/switcher_32.S
31726--- linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
31727+++ linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
31728@@ -87,6 +87,7 @@
31729 #include <asm/page.h>
31730 #include <asm/segment.h>
31731 #include <asm/lguest.h>
31732+#include <asm/processor-flags.h>
31733
31734 // We mark the start of the code to copy
31735 // It's placed in .text tho it's never run here
31736@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31737 // Changes type when we load it: damn Intel!
31738 // For after we switch over our page tables
31739 // That entry will be read-only: we'd crash.
31740+
31741+#ifdef CONFIG_PAX_KERNEXEC
31742+ mov %cr0, %edx
31743+ xor $X86_CR0_WP, %edx
31744+ mov %edx, %cr0
31745+#endif
31746+
31747 movl $(GDT_ENTRY_TSS*8), %edx
31748 ltr %dx
31749
31750@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31751 // Let's clear it again for our return.
31752 // The GDT descriptor of the Host
31753 // Points to the table after two "size" bytes
31754- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31755+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31756 // Clear "used" from type field (byte 5, bit 2)
31757- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31758+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31759+
31760+#ifdef CONFIG_PAX_KERNEXEC
31761+ mov %cr0, %eax
31762+ xor $X86_CR0_WP, %eax
31763+ mov %eax, %cr0
31764+#endif
31765
31766 // Once our page table's switched, the Guest is live!
31767 // The Host fades as we run this final step.
31768@@ -295,13 +309,12 @@ deliver_to_host:
31769 // I consulted gcc, and it gave
31770 // These instructions, which I gladly credit:
31771 leal (%edx,%ebx,8), %eax
31772- movzwl (%eax),%edx
31773- movl 4(%eax), %eax
31774- xorw %ax, %ax
31775- orl %eax, %edx
31776+ movl 4(%eax), %edx
31777+ movw (%eax), %dx
31778 // Now the address of the handler's in %edx
31779 // We call it now: its "iret" drops us home.
31780- jmp *%edx
31781+ ljmp $__KERNEL_CS, $1f
31782+1: jmp *%edx
31783
31784 // Every interrupt can come to us here
31785 // But we must truly tell each apart.
31786diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c
31787--- linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
31788+++ linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
31789@@ -15,7 +15,7 @@
31790
31791 #define MAX_PMU_LEVEL 0xFF
31792
31793-static struct backlight_ops pmu_backlight_data;
31794+static const struct backlight_ops pmu_backlight_data;
31795 static DEFINE_SPINLOCK(pmu_backlight_lock);
31796 static int sleeping, uses_pmu_bl;
31797 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
31798@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
31799 return bd->props.brightness;
31800 }
31801
31802-static struct backlight_ops pmu_backlight_data = {
31803+static const struct backlight_ops pmu_backlight_data = {
31804 .get_brightness = pmu_backlight_get_brightness,
31805 .update_status = pmu_backlight_update_status,
31806
31807diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu.c linux-2.6.32.45/drivers/macintosh/via-pmu.c
31808--- linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
31809+++ linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
31810@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
31811 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
31812 }
31813
31814-static struct platform_suspend_ops pmu_pm_ops = {
31815+static const struct platform_suspend_ops pmu_pm_ops = {
31816 .enter = powerbook_sleep,
31817 .valid = pmu_sleep_valid,
31818 };
31819diff -urNp linux-2.6.32.45/drivers/md/dm.c linux-2.6.32.45/drivers/md/dm.c
31820--- linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:35:29.000000000 -0400
31821+++ linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:33:59.000000000 -0400
31822@@ -165,9 +165,9 @@ struct mapped_device {
31823 /*
31824 * Event handling.
31825 */
31826- atomic_t event_nr;
31827+ atomic_unchecked_t event_nr;
31828 wait_queue_head_t eventq;
31829- atomic_t uevent_seq;
31830+ atomic_unchecked_t uevent_seq;
31831 struct list_head uevent_list;
31832 spinlock_t uevent_lock; /* Protect access to uevent_list */
31833
31834@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(i
31835 rwlock_init(&md->map_lock);
31836 atomic_set(&md->holders, 1);
31837 atomic_set(&md->open_count, 0);
31838- atomic_set(&md->event_nr, 0);
31839- atomic_set(&md->uevent_seq, 0);
31840+ atomic_set_unchecked(&md->event_nr, 0);
31841+ atomic_set_unchecked(&md->uevent_seq, 0);
31842 INIT_LIST_HEAD(&md->uevent_list);
31843 spin_lock_init(&md->uevent_lock);
31844
31845@@ -1927,7 +1927,7 @@ static void event_callback(void *context
31846
31847 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31848
31849- atomic_inc(&md->event_nr);
31850+ atomic_inc_unchecked(&md->event_nr);
31851 wake_up(&md->eventq);
31852 }
31853
31854@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_dev
31855
31856 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31857 {
31858- return atomic_add_return(1, &md->uevent_seq);
31859+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31860 }
31861
31862 uint32_t dm_get_event_nr(struct mapped_device *md)
31863 {
31864- return atomic_read(&md->event_nr);
31865+ return atomic_read_unchecked(&md->event_nr);
31866 }
31867
31868 int dm_wait_event(struct mapped_device *md, int event_nr)
31869 {
31870 return wait_event_interruptible(md->eventq,
31871- (event_nr != atomic_read(&md->event_nr)));
31872+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31873 }
31874
31875 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31876diff -urNp linux-2.6.32.45/drivers/md/dm-ioctl.c linux-2.6.32.45/drivers/md/dm-ioctl.c
31877--- linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
31878+++ linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
31879@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
31880 cmd == DM_LIST_VERSIONS_CMD)
31881 return 0;
31882
31883- if ((cmd == DM_DEV_CREATE_CMD)) {
31884+ if (cmd == DM_DEV_CREATE_CMD) {
31885 if (!*param->name) {
31886 DMWARN("name not supplied when creating device");
31887 return -EINVAL;
31888diff -urNp linux-2.6.32.45/drivers/md/dm-raid1.c linux-2.6.32.45/drivers/md/dm-raid1.c
31889--- linux-2.6.32.45/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
31890+++ linux-2.6.32.45/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
31891@@ -41,7 +41,7 @@ enum dm_raid1_error {
31892
31893 struct mirror {
31894 struct mirror_set *ms;
31895- atomic_t error_count;
31896+ atomic_unchecked_t error_count;
31897 unsigned long error_type;
31898 struct dm_dev *dev;
31899 sector_t offset;
31900@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
31901 * simple way to tell if a device has encountered
31902 * errors.
31903 */
31904- atomic_inc(&m->error_count);
31905+ atomic_inc_unchecked(&m->error_count);
31906
31907 if (test_and_set_bit(error_type, &m->error_type))
31908 return;
31909@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
31910 }
31911
31912 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
31913- if (!atomic_read(&new->error_count)) {
31914+ if (!atomic_read_unchecked(&new->error_count)) {
31915 set_default_mirror(new);
31916 break;
31917 }
31918@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
31919 struct mirror *m = get_default_mirror(ms);
31920
31921 do {
31922- if (likely(!atomic_read(&m->error_count)))
31923+ if (likely(!atomic_read_unchecked(&m->error_count)))
31924 return m;
31925
31926 if (m-- == ms->mirror)
31927@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
31928 {
31929 struct mirror *default_mirror = get_default_mirror(m->ms);
31930
31931- return !atomic_read(&default_mirror->error_count);
31932+ return !atomic_read_unchecked(&default_mirror->error_count);
31933 }
31934
31935 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31936@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31937 */
31938 if (likely(region_in_sync(ms, region, 1)))
31939 m = choose_mirror(ms, bio->bi_sector);
31940- else if (m && atomic_read(&m->error_count))
31941+ else if (m && atomic_read_unchecked(&m->error_count))
31942 m = NULL;
31943
31944 if (likely(m))
31945@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31946 }
31947
31948 ms->mirror[mirror].ms = ms;
31949- atomic_set(&(ms->mirror[mirror].error_count), 0);
31950+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31951 ms->mirror[mirror].error_type = 0;
31952 ms->mirror[mirror].offset = offset;
31953
31954@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31955 */
31956 static char device_status_char(struct mirror *m)
31957 {
31958- if (!atomic_read(&(m->error_count)))
31959+ if (!atomic_read_unchecked(&(m->error_count)))
31960 return 'A';
31961
31962 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31963diff -urNp linux-2.6.32.45/drivers/md/dm-stripe.c linux-2.6.32.45/drivers/md/dm-stripe.c
31964--- linux-2.6.32.45/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31965+++ linux-2.6.32.45/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31966@@ -20,7 +20,7 @@ struct stripe {
31967 struct dm_dev *dev;
31968 sector_t physical_start;
31969
31970- atomic_t error_count;
31971+ atomic_unchecked_t error_count;
31972 };
31973
31974 struct stripe_c {
31975@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31976 kfree(sc);
31977 return r;
31978 }
31979- atomic_set(&(sc->stripe[i].error_count), 0);
31980+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31981 }
31982
31983 ti->private = sc;
31984@@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31985 DMEMIT("%d ", sc->stripes);
31986 for (i = 0; i < sc->stripes; i++) {
31987 DMEMIT("%s ", sc->stripe[i].dev->name);
31988- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31989+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31990 'D' : 'A';
31991 }
31992 buffer[i] = '\0';
31993@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31994 */
31995 for (i = 0; i < sc->stripes; i++)
31996 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31997- atomic_inc(&(sc->stripe[i].error_count));
31998- if (atomic_read(&(sc->stripe[i].error_count)) <
31999+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
32000+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
32001 DM_IO_ERROR_THRESHOLD)
32002 queue_work(kstriped, &sc->kstriped_ws);
32003 }
32004diff -urNp linux-2.6.32.45/drivers/md/dm-sysfs.c linux-2.6.32.45/drivers/md/dm-sysfs.c
32005--- linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
32006+++ linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
32007@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
32008 NULL,
32009 };
32010
32011-static struct sysfs_ops dm_sysfs_ops = {
32012+static const struct sysfs_ops dm_sysfs_ops = {
32013 .show = dm_attr_show,
32014 };
32015
32016diff -urNp linux-2.6.32.45/drivers/md/dm-table.c linux-2.6.32.45/drivers/md/dm-table.c
32017--- linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
32018+++ linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
32019@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
32020 if (!dev_size)
32021 return 0;
32022
32023- if ((start >= dev_size) || (start + len > dev_size)) {
32024+ if ((start >= dev_size) || (len > dev_size - start)) {
32025 DMWARN("%s: %s too small for target: "
32026 "start=%llu, len=%llu, dev_size=%llu",
32027 dm_device_name(ti->table->md), bdevname(bdev, b),
32028diff -urNp linux-2.6.32.45/drivers/md/md.c linux-2.6.32.45/drivers/md/md.c
32029--- linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
32030+++ linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
32031@@ -153,10 +153,10 @@ static int start_readonly;
32032 * start build, activate spare
32033 */
32034 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
32035-static atomic_t md_event_count;
32036+static atomic_unchecked_t md_event_count;
32037 void md_new_event(mddev_t *mddev)
32038 {
32039- atomic_inc(&md_event_count);
32040+ atomic_inc_unchecked(&md_event_count);
32041 wake_up(&md_event_waiters);
32042 }
32043 EXPORT_SYMBOL_GPL(md_new_event);
32044@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
32045 */
32046 static void md_new_event_inintr(mddev_t *mddev)
32047 {
32048- atomic_inc(&md_event_count);
32049+ atomic_inc_unchecked(&md_event_count);
32050 wake_up(&md_event_waiters);
32051 }
32052
32053@@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
32054
32055 rdev->preferred_minor = 0xffff;
32056 rdev->data_offset = le64_to_cpu(sb->data_offset);
32057- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32058+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32059
32060 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
32061 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
32062@@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
32063 else
32064 sb->resync_offset = cpu_to_le64(0);
32065
32066- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
32067+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
32068
32069 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
32070 sb->size = cpu_to_le64(mddev->dev_sectors);
32071@@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
32072 static ssize_t
32073 errors_show(mdk_rdev_t *rdev, char *page)
32074 {
32075- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
32076+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
32077 }
32078
32079 static ssize_t
32080@@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
32081 char *e;
32082 unsigned long n = simple_strtoul(buf, &e, 10);
32083 if (*buf && (*e == 0 || *e == '\n')) {
32084- atomic_set(&rdev->corrected_errors, n);
32085+ atomic_set_unchecked(&rdev->corrected_errors, n);
32086 return len;
32087 }
32088 return -EINVAL;
32089@@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
32090 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
32091 kfree(rdev);
32092 }
32093-static struct sysfs_ops rdev_sysfs_ops = {
32094+static const struct sysfs_ops rdev_sysfs_ops = {
32095 .show = rdev_attr_show,
32096 .store = rdev_attr_store,
32097 };
32098@@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
32099 rdev->data_offset = 0;
32100 rdev->sb_events = 0;
32101 atomic_set(&rdev->nr_pending, 0);
32102- atomic_set(&rdev->read_errors, 0);
32103- atomic_set(&rdev->corrected_errors, 0);
32104+ atomic_set_unchecked(&rdev->read_errors, 0);
32105+ atomic_set_unchecked(&rdev->corrected_errors, 0);
32106
32107 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
32108 if (!size) {
32109@@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
32110 kfree(mddev);
32111 }
32112
32113-static struct sysfs_ops md_sysfs_ops = {
32114+static const struct sysfs_ops md_sysfs_ops = {
32115 .show = md_attr_show,
32116 .store = md_attr_store,
32117 };
32118@@ -4474,7 +4474,8 @@ out:
32119 err = 0;
32120 blk_integrity_unregister(disk);
32121 md_new_event(mddev);
32122- sysfs_notify_dirent(mddev->sysfs_state);
32123+ if (mddev->sysfs_state)
32124+ sysfs_notify_dirent(mddev->sysfs_state);
32125 return err;
32126 }
32127
32128@@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
32129
32130 spin_unlock(&pers_lock);
32131 seq_printf(seq, "\n");
32132- mi->event = atomic_read(&md_event_count);
32133+ mi->event = atomic_read_unchecked(&md_event_count);
32134 return 0;
32135 }
32136 if (v == (void*)2) {
32137@@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
32138 chunk_kb ? "KB" : "B");
32139 if (bitmap->file) {
32140 seq_printf(seq, ", file: ");
32141- seq_path(seq, &bitmap->file->f_path, " \t\n");
32142+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32143 }
32144
32145 seq_printf(seq, "\n");
32146@@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
32147 else {
32148 struct seq_file *p = file->private_data;
32149 p->private = mi;
32150- mi->event = atomic_read(&md_event_count);
32151+ mi->event = atomic_read_unchecked(&md_event_count);
32152 }
32153 return error;
32154 }
32155@@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
32156 /* always allow read */
32157 mask = POLLIN | POLLRDNORM;
32158
32159- if (mi->event != atomic_read(&md_event_count))
32160+ if (mi->event != atomic_read_unchecked(&md_event_count))
32161 mask |= POLLERR | POLLPRI;
32162 return mask;
32163 }
32164@@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
32165 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
32166 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
32167 (int)part_stat_read(&disk->part0, sectors[1]) -
32168- atomic_read(&disk->sync_io);
32169+ atomic_read_unchecked(&disk->sync_io);
32170 /* sync IO will cause sync_io to increase before the disk_stats
32171 * as sync_io is counted when a request starts, and
32172 * disk_stats is counted when it completes.
32173diff -urNp linux-2.6.32.45/drivers/md/md.h linux-2.6.32.45/drivers/md/md.h
32174--- linux-2.6.32.45/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
32175+++ linux-2.6.32.45/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
32176@@ -94,10 +94,10 @@ struct mdk_rdev_s
32177 * only maintained for arrays that
32178 * support hot removal
32179 */
32180- atomic_t read_errors; /* number of consecutive read errors that
32181+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
32182 * we have tried to ignore.
32183 */
32184- atomic_t corrected_errors; /* number of corrected read errors,
32185+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
32186 * for reporting to userspace and storing
32187 * in superblock.
32188 */
32189@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
32190
32191 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
32192 {
32193- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32194+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32195 }
32196
32197 struct mdk_personality
32198diff -urNp linux-2.6.32.45/drivers/md/raid10.c linux-2.6.32.45/drivers/md/raid10.c
32199--- linux-2.6.32.45/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
32200+++ linux-2.6.32.45/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
32201@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
32202 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
32203 set_bit(R10BIO_Uptodate, &r10_bio->state);
32204 else {
32205- atomic_add(r10_bio->sectors,
32206+ atomic_add_unchecked(r10_bio->sectors,
32207 &conf->mirrors[d].rdev->corrected_errors);
32208 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
32209 md_error(r10_bio->mddev,
32210@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
32211 test_bit(In_sync, &rdev->flags)) {
32212 atomic_inc(&rdev->nr_pending);
32213 rcu_read_unlock();
32214- atomic_add(s, &rdev->corrected_errors);
32215+ atomic_add_unchecked(s, &rdev->corrected_errors);
32216 if (sync_page_io(rdev->bdev,
32217 r10_bio->devs[sl].addr +
32218 sect + rdev->data_offset,
32219diff -urNp linux-2.6.32.45/drivers/md/raid1.c linux-2.6.32.45/drivers/md/raid1.c
32220--- linux-2.6.32.45/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
32221+++ linux-2.6.32.45/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
32222@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
32223 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
32224 continue;
32225 rdev = conf->mirrors[d].rdev;
32226- atomic_add(s, &rdev->corrected_errors);
32227+ atomic_add_unchecked(s, &rdev->corrected_errors);
32228 if (sync_page_io(rdev->bdev,
32229 sect + rdev->data_offset,
32230 s<<9,
32231@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
32232 /* Well, this device is dead */
32233 md_error(mddev, rdev);
32234 else {
32235- atomic_add(s, &rdev->corrected_errors);
32236+ atomic_add_unchecked(s, &rdev->corrected_errors);
32237 printk(KERN_INFO
32238 "raid1:%s: read error corrected "
32239 "(%d sectors at %llu on %s)\n",
32240diff -urNp linux-2.6.32.45/drivers/md/raid5.c linux-2.6.32.45/drivers/md/raid5.c
32241--- linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
32242+++ linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
32243@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
32244 bi->bi_next = NULL;
32245 if ((rw & WRITE) &&
32246 test_bit(R5_ReWrite, &sh->dev[i].flags))
32247- atomic_add(STRIPE_SECTORS,
32248+ atomic_add_unchecked(STRIPE_SECTORS,
32249 &rdev->corrected_errors);
32250 generic_make_request(bi);
32251 } else {
32252@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
32253 clear_bit(R5_ReadError, &sh->dev[i].flags);
32254 clear_bit(R5_ReWrite, &sh->dev[i].flags);
32255 }
32256- if (atomic_read(&conf->disks[i].rdev->read_errors))
32257- atomic_set(&conf->disks[i].rdev->read_errors, 0);
32258+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
32259+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
32260 } else {
32261 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
32262 int retry = 0;
32263 rdev = conf->disks[i].rdev;
32264
32265 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32266- atomic_inc(&rdev->read_errors);
32267+ atomic_inc_unchecked(&rdev->read_errors);
32268 if (conf->mddev->degraded >= conf->max_degraded)
32269 printk_rl(KERN_WARNING
32270 "raid5:%s: read error not correctable "
32271@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
32272 (unsigned long long)(sh->sector
32273 + rdev->data_offset),
32274 bdn);
32275- else if (atomic_read(&rdev->read_errors)
32276+ else if (atomic_read_unchecked(&rdev->read_errors)
32277 > conf->max_nr_stripes)
32278 printk(KERN_WARNING
32279 "raid5:%s: Too many read errors, failing device %s.\n",
32280@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
32281 sector_t r_sector;
32282 struct stripe_head sh2;
32283
32284+ pax_track_stack();
32285
32286 chunk_offset = sector_div(new_sector, sectors_per_chunk);
32287 stripe = new_sector;
32288diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_hlp.c linux-2.6.32.45/drivers/media/common/saa7146_hlp.c
32289--- linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
32290+++ linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
32291@@ -353,6 +353,8 @@ static void calculate_clipping_registers
32292
32293 int x[32], y[32], w[32], h[32];
32294
32295+ pax_track_stack();
32296+
32297 /* clear out memory */
32298 memset(&line_list[0], 0x00, sizeof(u32)*32);
32299 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
32300diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
32301--- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
32302+++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
32303@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
32304 u8 buf[HOST_LINK_BUF_SIZE];
32305 int i;
32306
32307+ pax_track_stack();
32308+
32309 dprintk("%s\n", __func__);
32310
32311 /* check if we have space for a link buf in the rx_buffer */
32312@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
32313 unsigned long timeout;
32314 int written;
32315
32316+ pax_track_stack();
32317+
32318 dprintk("%s\n", __func__);
32319
32320 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
32321diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h
32322--- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-03-27 14:31:47.000000000 -0400
32323+++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:33:55.000000000 -0400
32324@@ -71,7 +71,7 @@ struct dvb_demux_feed {
32325 union {
32326 dmx_ts_cb ts;
32327 dmx_section_cb sec;
32328- } cb;
32329+ } __no_const cb;
32330
32331 struct dvb_demux *demux;
32332 void *priv;
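
Marking the cb union __no_const ties into grsecurity's constification machinery: structures that consist only of function pointers are normally turned const at build time, but this union is assigned either a TS or a section callback at runtime, so it has to be exempted. A small sketch of the underlying distinction, with __no_const reduced to a no-op macro and hypothetical demux_ops/ts_cb/sec_cb names:

    #include <stdio.h>

    #define __no_const   /* plugin attribute in the real tree; a no-op here */

    struct demux_ops {
        void (*feed_ts)(const char *pkt);
        void (*feed_section)(const char *sec);
    } __no_const;

    static void ts_cb(const char *pkt)  { printf("TS packet: %s\n", pkt); }
    static void sec_cb(const char *sec) { printf("section: %s\n", sec); }

    /* A table fixed at build time can simply be const ... */
    static const struct demux_ops fixed_ops = { ts_cb, sec_cb };

    int main(void)
    {
        /* ... whereas this one is rebound per feed type at runtime, which
         * is exactly why the dvb_demux_feed callback union above cannot
         * be constified. */
        struct demux_ops runtime_ops = { 0 };

        runtime_ops.feed_ts = ts_cb;
        runtime_ops.feed_ts("packet");
        fixed_ops.feed_section("section");
        return 0;
    }
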
32333diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c
32334--- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
32335+++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-23 21:22:32.000000000 -0400
32336@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapt
32337 const struct dvb_device *template, void *priv, int type)
32338 {
32339 struct dvb_device *dvbdev;
32340- struct file_operations *dvbdevfops;
32341+ file_operations_no_const *dvbdevfops;
32342 struct device *clsdev;
32343 int minor;
32344 int id;
32345diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c
32346--- linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-03-27 14:31:47.000000000 -0400
32347+++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:33:55.000000000 -0400
32348@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_di
32349 struct dib0700_adapter_state {
32350 int (*set_param_save) (struct dvb_frontend *,
32351 struct dvb_frontend_parameters *);
32352-};
32353+} __no_const;
32354
32355 static int dib7070_set_param_override(struct dvb_frontend *fe,
32356 struct dvb_frontend_parameters *fep)
32357diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c
32358--- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
32359+++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
32360@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
32361
32362 u8 buf[260];
32363
32364+ pax_track_stack();
32365+
32366 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
32367 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
32368
32369diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c
32370--- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-05-10 22:12:01.000000000 -0400
32371+++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-08-05 20:33:55.000000000 -0400
32372@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "
32373
32374 struct dib0700_adapter_state {
32375 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
32376-};
32377+} __no_const;
32378
32379 /* Hauppauge Nova-T 500 (aka Bristol)
32380 * has a LNA on GPIO0 which is enabled by setting 1 */
32381diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h
32382--- linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-03-27 14:31:47.000000000 -0400
32383+++ linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:33:55.000000000 -0400
32384@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
32385 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
32386 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
32387 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
32388-};
32389+} __no_const;
32390
32391 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
32392 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
32393diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c
32394--- linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
32395+++ linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
32396@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
32397 u8 tudata[585];
32398 int i;
32399
32400+ pax_track_stack();
32401+
32402 dprintk("Firmware is %zd bytes\n",fw->size);
32403
32404 /* Get eprom data */
32405diff -urNp linux-2.6.32.45/drivers/media/radio/radio-cadet.c linux-2.6.32.45/drivers/media/radio/radio-cadet.c
32406--- linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
32407+++ linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
32408@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
32409 while (i < count && dev->rdsin != dev->rdsout)
32410 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
32411
32412- if (copy_to_user(data, readbuf, i))
32413+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
32414 return -EFAULT;
32415 return i;
32416 }
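
The extra "i > sizeof readbuf" test makes the copy_to_user() defensive: whatever the RDS bookkeeping did beforehand, no more bytes than the on-stack buffer holds can ever be copied out to user space. A standalone sketch of that clamp-before-copy pattern; read_records(), copy_out() and the buffer sizes are invented for the example:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    #define RDS_BUF 32

    /* Pretend copy_to_user(): here just a bounds-aware memcpy wrapper. */
    static int copy_out(char *dst, size_t dst_len, const char *src, size_t n)
    {
        if (n > dst_len)
            return -EFAULT;
        memcpy(dst, src, n);
        return 0;
    }

    static ssize_t read_records(char *user_buf, size_t user_len, size_t avail)
    {
        char readbuf[RDS_BUF];
        size_t i = 0;

        while (i < user_len && i < avail && i < sizeof(readbuf))
            readbuf[i++] = 'r';            /* stand-in for rdsbuf data */

        /* Even if the bookkeeping above were wrong, never copy more than
         * the stack buffer actually holds -- the same clamp the patch adds. */
        if (i > sizeof(readbuf) || copy_out(user_buf, user_len, readbuf, i))
            return -EFAULT;
        return (ssize_t)i;
    }

    int main(void)
    {
        char out[64];

        printf("read %zd bytes\n", read_records(out, sizeof(out), 10));
        return 0;
    }
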
32417diff -urNp linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c
32418--- linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
32419+++ linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
32420@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
32421
32422 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
32423
32424-static atomic_t cx18_instance = ATOMIC_INIT(0);
32425+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
32426
32427 /* Parameter declarations */
32428 static int cardtype[CX18_MAX_CARDS];
32429@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
32430 struct i2c_client c;
32431 u8 eedata[256];
32432
32433+ pax_track_stack();
32434+
32435 memset(&c, 0, sizeof(c));
32436 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
32437 c.adapter = &cx->i2c_adap[0];
32438@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
32439 struct cx18 *cx;
32440
32441 /* FIXME - module parameter arrays constrain max instances */
32442- i = atomic_inc_return(&cx18_instance) - 1;
32443+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
32444 if (i >= CX18_MAX_CARDS) {
32445 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
32446 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
32447diff -urNp linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c
32448--- linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
32449+++ linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
32450@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
32451 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
32452
32453 /* ivtv instance counter */
32454-static atomic_t ivtv_instance = ATOMIC_INIT(0);
32455+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
32456
32457 /* Parameter declarations */
32458 static int cardtype[IVTV_MAX_CARDS];
32459diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.c linux-2.6.32.45/drivers/media/video/omap24xxcam.c
32460--- linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
32461+++ linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
32462@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
32463 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
32464
32465 do_gettimeofday(&vb->ts);
32466- vb->field_count = atomic_add_return(2, &fh->field_count);
32467+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
32468 if (csr & csr_error) {
32469 vb->state = VIDEOBUF_ERROR;
32470 if (!atomic_read(&fh->cam->in_reset)) {
32471diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.h linux-2.6.32.45/drivers/media/video/omap24xxcam.h
32472--- linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
32473+++ linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
32474@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
32475 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
32476 struct videobuf_queue vbq;
32477 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
32478- atomic_t field_count; /* field counter for videobuf_buffer */
32479+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
32480 /* accessing cam here doesn't need serialisation: it's constant */
32481 struct omap24xxcam_device *cam;
32482 };
32483diff -urNp linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32484--- linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
32485+++ linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
32486@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
32487 u8 *eeprom;
32488 struct tveeprom tvdata;
32489
32490+ pax_track_stack();
32491+
32492 memset(&tvdata,0,sizeof(tvdata));
32493
32494 eeprom = pvr2_eeprom_fetch(hdw);
32495diff -urNp linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
32496--- linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-03-27 14:31:47.000000000 -0400
32497+++ linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-08-23 21:22:38.000000000 -0400
32498@@ -195,7 +195,7 @@ struct pvr2_hdw {
32499
32500 /* I2C stuff */
32501 struct i2c_adapter i2c_adap;
32502- struct i2c_algorithm i2c_algo;
32503+ i2c_algorithm_no_const i2c_algo;
32504 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
32505 int i2c_cx25840_hack_state;
32506 int i2c_linked;
32507diff -urNp linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c
32508--- linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
32509+++ linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
32510@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
32511 unsigned char localPAT[256];
32512 unsigned char localPMT[256];
32513
32514+ pax_track_stack();
32515+
32516 /* Set video format - must be done first as it resets other settings */
32517 set_reg8(client, 0x41, h->video_format);
32518
32519diff -urNp linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c
32520--- linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
32521+++ linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
32522@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
32523 wait_queue_head_t *q = 0;
32524 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32525
32526+ pax_track_stack();
32527+
32528 /* While any outstand message on the bus exists... */
32529 do {
32530
32531@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
32532 u8 tmp[512];
32533 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32534
32535+ pax_track_stack();
32536+
32537 while (loop) {
32538
32539 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
32540diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c
32541--- linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-03-27 14:31:47.000000000 -0400
32542+++ linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-08-05 20:33:55.000000000 -0400
32543@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] =
32544 static int __init ibmcam_init(void)
32545 {
32546 struct usbvideo_cb cbTbl;
32547- memset(&cbTbl, 0, sizeof(cbTbl));
32548- cbTbl.probe = ibmcam_probe;
32549- cbTbl.setupOnOpen = ibmcam_setup_on_open;
32550- cbTbl.videoStart = ibmcam_video_start;
32551- cbTbl.videoStop = ibmcam_video_stop;
32552- cbTbl.processData = ibmcam_ProcessIsocData;
32553- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32554- cbTbl.adjustPicture = ibmcam_adjust_picture;
32555- cbTbl.getFPS = ibmcam_calculate_fps;
32556+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
32557+ *(void **)&cbTbl.probe = ibmcam_probe;
32558+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
32559+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
32560+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
32561+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
32562+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32563+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
32564+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
32565 return usbvideo_register(
32566 &cams,
32567 MAX_IBMCAM,
32568diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c
32569--- linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
32570+++ linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-08-05 20:33:55.000000000 -0400
32571@@ -225,7 +225,7 @@ static void konicawc_register_input(stru
32572 int error;
32573
32574 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32575- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32576+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32577
32578 cam->input = input_dev = input_allocate_device();
32579 if (!input_dev) {
32580@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
32581 struct usbvideo_cb cbTbl;
32582 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
32583 DRIVER_DESC "\n");
32584- memset(&cbTbl, 0, sizeof(cbTbl));
32585- cbTbl.probe = konicawc_probe;
32586- cbTbl.setupOnOpen = konicawc_setup_on_open;
32587- cbTbl.processData = konicawc_process_isoc;
32588- cbTbl.getFPS = konicawc_calculate_fps;
32589- cbTbl.setVideoMode = konicawc_set_video_mode;
32590- cbTbl.startDataPump = konicawc_start_data;
32591- cbTbl.stopDataPump = konicawc_stop_data;
32592- cbTbl.adjustPicture = konicawc_adjust_picture;
32593- cbTbl.userFree = konicawc_free_uvd;
32594+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
32595+ *(void **)&cbTbl.probe = konicawc_probe;
32596+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
32597+ *(void **)&cbTbl.processData = konicawc_process_isoc;
32598+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
32599+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
32600+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
32601+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
32602+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
32603+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
32604 return usbvideo_register(
32605 &cams,
32606 MAX_CAMERAS,
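
The strncat() to strlcat() changes here and in quickcam_messenger.c fix a common misuse: strncat's third argument limits how many bytes are appended, not how large the destination is, so passing sizeof(dst) can still overflow a buffer that is already partly filled. strlcat() takes the destination's total size and truncates safely. A compilable illustration; strlcat is not in glibc, so a local my_strlcat with the usual BSD semantics is supplied, and the sample path is made up:

    #include <stdio.h>
    #include <string.h>

    static size_t my_strlcat(char *dst, const char *src, size_t size)
    {
        size_t dlen = strlen(dst);
        size_t slen = strlen(src);
        size_t copy;

        if (dlen >= size)
            return size + slen;              /* nothing fits, report intent   */
        copy = size - dlen - 1;              /* room left for src bytes       */
        if (copy > slen)
            copy = slen;
        memcpy(dst + dlen, src, copy);
        dst[dlen + copy] = '\0';
        return dlen + slen;                  /* length it tried to create     */
    }

    int main(void)
    {
        char phys[16] = "usb-0000:00:1d";    /* 14 chars already used         */

        /* strncat(phys, "/input0", sizeof(phys)) could append up to 16 more
         * bytes here and run past the 16-byte array; the strlcat form stays
         * in bounds and just truncates. */
        my_strlcat(phys, "/input0", sizeof(phys));
        printf("%s\n", phys);                /* prints "usb-0000:00:1d/"      */
        return 0;
    }
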
32607diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c
32608--- linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
32609+++ linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
32610@@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
32611 int error;
32612
32613 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32614- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32615+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32616
32617 cam->input = input_dev = input_allocate_device();
32618 if (!input_dev) {
32619diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c
32620--- linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-03-27 14:31:47.000000000 -0400
32621+++ linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-08-05 20:33:55.000000000 -0400
32622@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
32623 {
32624 struct usbvideo_cb cbTbl;
32625 memset(&cbTbl, 0, sizeof(cbTbl));
32626- cbTbl.probe = ultracam_probe;
32627- cbTbl.setupOnOpen = ultracam_setup_on_open;
32628- cbTbl.videoStart = ultracam_video_start;
32629- cbTbl.videoStop = ultracam_video_stop;
32630- cbTbl.processData = ultracam_ProcessIsocData;
32631- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32632- cbTbl.adjustPicture = ultracam_adjust_picture;
32633- cbTbl.getFPS = ultracam_calculate_fps;
32634+ *(void **)&cbTbl.probe = ultracam_probe;
32635+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
32636+ *(void **)&cbTbl.videoStart = ultracam_video_start;
32637+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
32638+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
32639+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32640+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
32641+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
32642 return usbvideo_register(
32643 &cams,
32644 MAX_CAMERAS,
32645diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c
32646--- linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-03-27 14:31:47.000000000 -0400
32647+++ linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-08-05 20:33:55.000000000 -0400
32648@@ -697,15 +697,15 @@ int usbvideo_register(
32649 __func__, cams, base_size, num_cams);
32650
32651 /* Copy callbacks, apply defaults for those that are not set */
32652- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
32653+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
32654 if (cams->cb.getFrame == NULL)
32655- cams->cb.getFrame = usbvideo_GetFrame;
32656+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
32657 if (cams->cb.disconnect == NULL)
32658- cams->cb.disconnect = usbvideo_Disconnect;
32659+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
32660 if (cams->cb.startDataPump == NULL)
32661- cams->cb.startDataPump = usbvideo_StartDataPump;
32662+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
32663 if (cams->cb.stopDataPump == NULL)
32664- cams->cb.stopDataPump = usbvideo_StopDataPump;
32665+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
32666
32667 cams->num_cameras = num_cams;
32668 cams->cam = (struct uvd *) &cams[1];
32669diff -urNp linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c
32670--- linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
32671+++ linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
32672@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
32673 unsigned char rv, gv, bv;
32674 static unsigned char *Y, *U, *V;
32675
32676+ pax_track_stack();
32677+
32678 frame = usbvision->curFrame;
32679 imageSize = frame->frmwidth * frame->frmheight;
32680 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
32681diff -urNp linux-2.6.32.45/drivers/media/video/v4l2-device.c linux-2.6.32.45/drivers/media/video/v4l2-device.c
32682--- linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
32683+++ linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
32684@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
32685 EXPORT_SYMBOL_GPL(v4l2_device_register);
32686
32687 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
32688- atomic_t *instance)
32689+ atomic_unchecked_t *instance)
32690 {
32691- int num = atomic_inc_return(instance) - 1;
32692+ int num = atomic_inc_return_unchecked(instance) - 1;
32693 int len = strlen(basename);
32694
32695 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
32696diff -urNp linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c
32697--- linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
32698+++ linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
32699@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
32700 {
32701 struct videobuf_queue q;
32702
32703+ pax_track_stack();
32704+
32705 /* Required to make generic handler to call __videobuf_alloc */
32706 q.int_ops = &sg_ops;
32707
32708diff -urNp linux-2.6.32.45/drivers/message/fusion/mptbase.c linux-2.6.32.45/drivers/message/fusion/mptbase.c
32709--- linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
32710+++ linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
32711@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
32712 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
32713 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
32714
32715+#ifdef CONFIG_GRKERNSEC_HIDESYM
32716+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32717+ NULL, NULL);
32718+#else
32719 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32720 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
32721+#endif
32722+
32723 /*
32724 * Rounding UP to nearest 4-kB boundary here...
32725 */
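
Under CONFIG_GRKERNSEC_HIDESYM the procfs dump above prints NULL instead of the real request-frame addresses, since raw kernel pointers in world-readable files give an attacker a cheap way to locate kernel objects. A miniature sketch of the same switch; HIDE_ADDRESSES and printable_ptr() are invented names standing in for the config option and the #ifdef:

    #include <stdio.h>

    #define HIDE_ADDRESSES 1   /* stand-in for CONFIG_GRKERNSEC_HIDESYM */

    static const void *printable_ptr(const void *p)
    {
    #if HIDE_ADDRESSES
        (void)p;
        return NULL;           /* censor the pointer, as the patched
                                  procmpt_iocinfo_read() now does      */
    #else
        return p;
    #endif
    }

    int main(void)
    {
        int frames[4];

        printf("RequestFrames @ %p\n", printable_ptr(frames));
        return 0;
    }
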
32726diff -urNp linux-2.6.32.45/drivers/message/fusion/mptsas.c linux-2.6.32.45/drivers/message/fusion/mptsas.c
32727--- linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
32728+++ linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
32729@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
32730 return 0;
32731 }
32732
32733+static inline void
32734+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32735+{
32736+ if (phy_info->port_details) {
32737+ phy_info->port_details->rphy = rphy;
32738+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32739+ ioc->name, rphy));
32740+ }
32741+
32742+ if (rphy) {
32743+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32744+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32745+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32746+ ioc->name, rphy, rphy->dev.release));
32747+ }
32748+}
32749+
32750 /* no mutex */
32751 static void
32752 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
32753@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
32754 return NULL;
32755 }
32756
32757-static inline void
32758-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32759-{
32760- if (phy_info->port_details) {
32761- phy_info->port_details->rphy = rphy;
32762- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32763- ioc->name, rphy));
32764- }
32765-
32766- if (rphy) {
32767- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32768- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32769- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32770- ioc->name, rphy, rphy->dev.release));
32771- }
32772-}
32773-
32774 static inline struct sas_port *
32775 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32776 {
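
The mptsas hunks do not alter the helper at all: they relocate the static inline mptsas_set_rphy() earlier in the file, ahead of code that needs to see it. In C a static function must be declared before it is called, and defining it earlier avoids adding a separate forward declaration. A trivial illustration of that ordering rule, with made-up names:

    #include <stdio.h>

    /* Defining the static helper ahead of its callers (as the relocated
     * mptsas_set_rphy() now is) means no forward declaration is needed. */
    static void set_rphy(int phy)
    {
        printf("rphy bound to phy %d\n", phy);
    }

    static void add_port(int phy)
    {
        set_rphy(phy);   /* already declared by its definition above */
    }

    int main(void)
    {
        add_port(3);
        return 0;
    }
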
32777diff -urNp linux-2.6.32.45/drivers/message/fusion/mptscsih.c linux-2.6.32.45/drivers/message/fusion/mptscsih.c
32778--- linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
32779+++ linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
32780@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32781
32782 h = shost_priv(SChost);
32783
32784- if (h) {
32785- if (h->info_kbuf == NULL)
32786- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32787- return h->info_kbuf;
32788- h->info_kbuf[0] = '\0';
32789+ if (!h)
32790+ return NULL;
32791
32792- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32793- h->info_kbuf[size-1] = '\0';
32794- }
32795+ if (h->info_kbuf == NULL)
32796+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32797+ return h->info_kbuf;
32798+ h->info_kbuf[0] = '\0';
32799+
32800+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32801+ h->info_kbuf[size-1] = '\0';
32802
32803 return h->info_kbuf;
32804 }
32805diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_config.c linux-2.6.32.45/drivers/message/i2o/i2o_config.c
32806--- linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
32807+++ linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
32808@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
32809 struct i2o_message *msg;
32810 unsigned int iop;
32811
32812+ pax_track_stack();
32813+
32814 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32815 return -EFAULT;
32816
32817diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_proc.c linux-2.6.32.45/drivers/message/i2o/i2o_proc.c
32818--- linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
32819+++ linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
32820@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
32821 "Array Controller Device"
32822 };
32823
32824-static char *chtostr(u8 * chars, int n)
32825-{
32826- char tmp[256];
32827- tmp[0] = 0;
32828- return strncat(tmp, (char *)chars, n);
32829-}
32830-
32831 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32832 char *group)
32833 {
32834@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
32835
32836 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32837 seq_printf(seq, "%-#8x", ddm_table.module_id);
32838- seq_printf(seq, "%-29s",
32839- chtostr(ddm_table.module_name_version, 28));
32840+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32841 seq_printf(seq, "%9d ", ddm_table.data_size);
32842 seq_printf(seq, "%8d", ddm_table.code_size);
32843
32844@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
32845
32846 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32847 seq_printf(seq, "%-#8x", dst->module_id);
32848- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32849- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32850+ seq_printf(seq, "%-.28s", dst->module_name_version);
32851+ seq_printf(seq, "%-.8s", dst->date);
32852 seq_printf(seq, "%8d ", dst->module_size);
32853 seq_printf(seq, "%8d ", dst->mpb_size);
32854 seq_printf(seq, "0x%04x", dst->module_flags);
32855@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
32856 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32857 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32858 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32859- seq_printf(seq, "Vendor info : %s\n",
32860- chtostr((u8 *) (work32 + 2), 16));
32861- seq_printf(seq, "Product info : %s\n",
32862- chtostr((u8 *) (work32 + 6), 16));
32863- seq_printf(seq, "Description : %s\n",
32864- chtostr((u8 *) (work32 + 10), 16));
32865- seq_printf(seq, "Product rev. : %s\n",
32866- chtostr((u8 *) (work32 + 14), 8));
32867+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32868+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32869+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32870+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32871
32872 seq_printf(seq, "Serial number : ");
32873 print_serial_number(seq, (u8 *) (work32 + 16),
32874@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
32875 }
32876
32877 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32878- seq_printf(seq, "Module name : %s\n",
32879- chtostr(result.module_name, 24));
32880- seq_printf(seq, "Module revision : %s\n",
32881- chtostr(result.module_rev, 8));
32882+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
32883+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32884
32885 seq_printf(seq, "Serial number : ");
32886 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32887@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
32888 return 0;
32889 }
32890
32891- seq_printf(seq, "Device name : %s\n",
32892- chtostr(result.device_name, 64));
32893- seq_printf(seq, "Service name : %s\n",
32894- chtostr(result.service_name, 64));
32895- seq_printf(seq, "Physical name : %s\n",
32896- chtostr(result.physical_location, 64));
32897- seq_printf(seq, "Instance number : %s\n",
32898- chtostr(result.instance_number, 4));
32899+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
32900+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
32901+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32902+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32903
32904 return 0;
32905 }
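
Dropping chtostr() is both a cleanup and a fix: the helper returned a pointer to its own 256-byte local buffer (undefined behaviour once it returned) and relied on strncat for bounding. The replacement prints the fixed-width, possibly unterminated firmware fields directly with a "%.Ns" precision, which never reads past N bytes and needs no bounce buffer. A minimal demonstration of that printf property; the sample field contents are made up:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* A fixed-width field as it might sit in a DDM table: 28 bytes,
         * space padded, with no terminating NUL anywhere. */
        char module_name_version[28];

        memset(module_name_version, ' ', sizeof(module_name_version));
        memcpy(module_name_version, "i2o_block v1.0", 14);

        /* "%.28s" stops after at most 28 bytes, so the missing terminator
         * is harmless. */
        printf("%-.28s|\n", module_name_version);
        return 0;
    }
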
32906diff -urNp linux-2.6.32.45/drivers/message/i2o/iop.c linux-2.6.32.45/drivers/message/i2o/iop.c
32907--- linux-2.6.32.45/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
32908+++ linux-2.6.32.45/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
32909@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
32910
32911 spin_lock_irqsave(&c->context_list_lock, flags);
32912
32913- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32914- atomic_inc(&c->context_list_counter);
32915+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32916+ atomic_inc_unchecked(&c->context_list_counter);
32917
32918- entry->context = atomic_read(&c->context_list_counter);
32919+ entry->context = atomic_read_unchecked(&c->context_list_counter);
32920
32921 list_add(&entry->list, &c->context_list);
32922
32923@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
32924
32925 #if BITS_PER_LONG == 64
32926 spin_lock_init(&c->context_list_lock);
32927- atomic_set(&c->context_list_counter, 0);
32928+ atomic_set_unchecked(&c->context_list_counter, 0);
32929 INIT_LIST_HEAD(&c->context_list);
32930 #endif
32931
32932diff -urNp linux-2.6.32.45/drivers/mfd/wm8350-i2c.c linux-2.6.32.45/drivers/mfd/wm8350-i2c.c
32933--- linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
32934+++ linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
32935@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
32936 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32937 int ret;
32938
32939+ pax_track_stack();
32940+
32941 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32942 return -EINVAL;
32943
32944diff -urNp linux-2.6.32.45/drivers/misc/kgdbts.c linux-2.6.32.45/drivers/misc/kgdbts.c
32945--- linux-2.6.32.45/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
32946+++ linux-2.6.32.45/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
32947@@ -118,7 +118,7 @@
32948 } while (0)
32949 #define MAX_CONFIG_LEN 40
32950
32951-static struct kgdb_io kgdbts_io_ops;
32952+static const struct kgdb_io kgdbts_io_ops;
32953 static char get_buf[BUFMAX];
32954 static int get_buf_cnt;
32955 static char put_buf[BUFMAX];
32956@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
32957 module_put(THIS_MODULE);
32958 }
32959
32960-static struct kgdb_io kgdbts_io_ops = {
32961+static const struct kgdb_io kgdbts_io_ops = {
32962 .name = "kgdbts",
32963 .read_char = kgdbts_get_char,
32964 .write_char = kgdbts_put_char,
32965diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c
32966--- linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
32967+++ linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
32968@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
32969
32970 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32971 {
32972- atomic_long_inc(&mcs_op_statistics[op].count);
32973- atomic_long_add(clks, &mcs_op_statistics[op].total);
32974+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32975+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
32976 if (mcs_op_statistics[op].max < clks)
32977 mcs_op_statistics[op].max = clks;
32978 }
32979diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c
32980--- linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
32981+++ linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
32982@@ -32,9 +32,9 @@
32983
32984 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32985
32986-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32987+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32988 {
32989- unsigned long val = atomic_long_read(v);
32990+ unsigned long val = atomic_long_read_unchecked(v);
32991
32992 if (val)
32993 seq_printf(s, "%16lu %s\n", val, id);
32994@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
32995 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
32996
32997 for (op = 0; op < mcsop_last; op++) {
32998- count = atomic_long_read(&mcs_op_statistics[op].count);
32999- total = atomic_long_read(&mcs_op_statistics[op].total);
33000+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
33001+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
33002 max = mcs_op_statistics[op].max;
33003 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
33004 count ? total / count : 0, max);
33005diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h
33006--- linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
33007+++ linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
33008@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
33009 * GRU statistics.
33010 */
33011 struct gru_stats_s {
33012- atomic_long_t vdata_alloc;
33013- atomic_long_t vdata_free;
33014- atomic_long_t gts_alloc;
33015- atomic_long_t gts_free;
33016- atomic_long_t vdata_double_alloc;
33017- atomic_long_t gts_double_allocate;
33018- atomic_long_t assign_context;
33019- atomic_long_t assign_context_failed;
33020- atomic_long_t free_context;
33021- atomic_long_t load_user_context;
33022- atomic_long_t load_kernel_context;
33023- atomic_long_t lock_kernel_context;
33024- atomic_long_t unlock_kernel_context;
33025- atomic_long_t steal_user_context;
33026- atomic_long_t steal_kernel_context;
33027- atomic_long_t steal_context_failed;
33028- atomic_long_t nopfn;
33029- atomic_long_t break_cow;
33030- atomic_long_t asid_new;
33031- atomic_long_t asid_next;
33032- atomic_long_t asid_wrap;
33033- atomic_long_t asid_reuse;
33034- atomic_long_t intr;
33035- atomic_long_t intr_mm_lock_failed;
33036- atomic_long_t call_os;
33037- atomic_long_t call_os_offnode_reference;
33038- atomic_long_t call_os_check_for_bug;
33039- atomic_long_t call_os_wait_queue;
33040- atomic_long_t user_flush_tlb;
33041- atomic_long_t user_unload_context;
33042- atomic_long_t user_exception;
33043- atomic_long_t set_context_option;
33044- atomic_long_t migrate_check;
33045- atomic_long_t migrated_retarget;
33046- atomic_long_t migrated_unload;
33047- atomic_long_t migrated_unload_delay;
33048- atomic_long_t migrated_nopfn_retarget;
33049- atomic_long_t migrated_nopfn_unload;
33050- atomic_long_t tlb_dropin;
33051- atomic_long_t tlb_dropin_fail_no_asid;
33052- atomic_long_t tlb_dropin_fail_upm;
33053- atomic_long_t tlb_dropin_fail_invalid;
33054- atomic_long_t tlb_dropin_fail_range_active;
33055- atomic_long_t tlb_dropin_fail_idle;
33056- atomic_long_t tlb_dropin_fail_fmm;
33057- atomic_long_t tlb_dropin_fail_no_exception;
33058- atomic_long_t tlb_dropin_fail_no_exception_war;
33059- atomic_long_t tfh_stale_on_fault;
33060- atomic_long_t mmu_invalidate_range;
33061- atomic_long_t mmu_invalidate_page;
33062- atomic_long_t mmu_clear_flush_young;
33063- atomic_long_t flush_tlb;
33064- atomic_long_t flush_tlb_gru;
33065- atomic_long_t flush_tlb_gru_tgh;
33066- atomic_long_t flush_tlb_gru_zero_asid;
33067-
33068- atomic_long_t copy_gpa;
33069-
33070- atomic_long_t mesq_receive;
33071- atomic_long_t mesq_receive_none;
33072- atomic_long_t mesq_send;
33073- atomic_long_t mesq_send_failed;
33074- atomic_long_t mesq_noop;
33075- atomic_long_t mesq_send_unexpected_error;
33076- atomic_long_t mesq_send_lb_overflow;
33077- atomic_long_t mesq_send_qlimit_reached;
33078- atomic_long_t mesq_send_amo_nacked;
33079- atomic_long_t mesq_send_put_nacked;
33080- atomic_long_t mesq_qf_not_full;
33081- atomic_long_t mesq_qf_locked;
33082- atomic_long_t mesq_qf_noop_not_full;
33083- atomic_long_t mesq_qf_switch_head_failed;
33084- atomic_long_t mesq_qf_unexpected_error;
33085- atomic_long_t mesq_noop_unexpected_error;
33086- atomic_long_t mesq_noop_lb_overflow;
33087- atomic_long_t mesq_noop_qlimit_reached;
33088- atomic_long_t mesq_noop_amo_nacked;
33089- atomic_long_t mesq_noop_put_nacked;
33090+ atomic_long_unchecked_t vdata_alloc;
33091+ atomic_long_unchecked_t vdata_free;
33092+ atomic_long_unchecked_t gts_alloc;
33093+ atomic_long_unchecked_t gts_free;
33094+ atomic_long_unchecked_t vdata_double_alloc;
33095+ atomic_long_unchecked_t gts_double_allocate;
33096+ atomic_long_unchecked_t assign_context;
33097+ atomic_long_unchecked_t assign_context_failed;
33098+ atomic_long_unchecked_t free_context;
33099+ atomic_long_unchecked_t load_user_context;
33100+ atomic_long_unchecked_t load_kernel_context;
33101+ atomic_long_unchecked_t lock_kernel_context;
33102+ atomic_long_unchecked_t unlock_kernel_context;
33103+ atomic_long_unchecked_t steal_user_context;
33104+ atomic_long_unchecked_t steal_kernel_context;
33105+ atomic_long_unchecked_t steal_context_failed;
33106+ atomic_long_unchecked_t nopfn;
33107+ atomic_long_unchecked_t break_cow;
33108+ atomic_long_unchecked_t asid_new;
33109+ atomic_long_unchecked_t asid_next;
33110+ atomic_long_unchecked_t asid_wrap;
33111+ atomic_long_unchecked_t asid_reuse;
33112+ atomic_long_unchecked_t intr;
33113+ atomic_long_unchecked_t intr_mm_lock_failed;
33114+ atomic_long_unchecked_t call_os;
33115+ atomic_long_unchecked_t call_os_offnode_reference;
33116+ atomic_long_unchecked_t call_os_check_for_bug;
33117+ atomic_long_unchecked_t call_os_wait_queue;
33118+ atomic_long_unchecked_t user_flush_tlb;
33119+ atomic_long_unchecked_t user_unload_context;
33120+ atomic_long_unchecked_t user_exception;
33121+ atomic_long_unchecked_t set_context_option;
33122+ atomic_long_unchecked_t migrate_check;
33123+ atomic_long_unchecked_t migrated_retarget;
33124+ atomic_long_unchecked_t migrated_unload;
33125+ atomic_long_unchecked_t migrated_unload_delay;
33126+ atomic_long_unchecked_t migrated_nopfn_retarget;
33127+ atomic_long_unchecked_t migrated_nopfn_unload;
33128+ atomic_long_unchecked_t tlb_dropin;
33129+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33130+ atomic_long_unchecked_t tlb_dropin_fail_upm;
33131+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
33132+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
33133+ atomic_long_unchecked_t tlb_dropin_fail_idle;
33134+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
33135+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33136+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
33137+ atomic_long_unchecked_t tfh_stale_on_fault;
33138+ atomic_long_unchecked_t mmu_invalidate_range;
33139+ atomic_long_unchecked_t mmu_invalidate_page;
33140+ atomic_long_unchecked_t mmu_clear_flush_young;
33141+ atomic_long_unchecked_t flush_tlb;
33142+ atomic_long_unchecked_t flush_tlb_gru;
33143+ atomic_long_unchecked_t flush_tlb_gru_tgh;
33144+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33145+
33146+ atomic_long_unchecked_t copy_gpa;
33147+
33148+ atomic_long_unchecked_t mesq_receive;
33149+ atomic_long_unchecked_t mesq_receive_none;
33150+ atomic_long_unchecked_t mesq_send;
33151+ atomic_long_unchecked_t mesq_send_failed;
33152+ atomic_long_unchecked_t mesq_noop;
33153+ atomic_long_unchecked_t mesq_send_unexpected_error;
33154+ atomic_long_unchecked_t mesq_send_lb_overflow;
33155+ atomic_long_unchecked_t mesq_send_qlimit_reached;
33156+ atomic_long_unchecked_t mesq_send_amo_nacked;
33157+ atomic_long_unchecked_t mesq_send_put_nacked;
33158+ atomic_long_unchecked_t mesq_qf_not_full;
33159+ atomic_long_unchecked_t mesq_qf_locked;
33160+ atomic_long_unchecked_t mesq_qf_noop_not_full;
33161+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
33162+ atomic_long_unchecked_t mesq_qf_unexpected_error;
33163+ atomic_long_unchecked_t mesq_noop_unexpected_error;
33164+ atomic_long_unchecked_t mesq_noop_lb_overflow;
33165+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
33166+ atomic_long_unchecked_t mesq_noop_amo_nacked;
33167+ atomic_long_unchecked_t mesq_noop_put_nacked;
33168
33169 };
33170
33171@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
33172 cchop_deallocate, tghop_invalidate, mcsop_last};
33173
33174 struct mcs_op_statistic {
33175- atomic_long_t count;
33176- atomic_long_t total;
33177+ atomic_long_unchecked_t count;
33178+ atomic_long_unchecked_t total;
33179 unsigned long max;
33180 };
33181
33182@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
33183
33184 #define STAT(id) do { \
33185 if (gru_options & OPT_STATS) \
33186- atomic_long_inc(&gru_stats.id); \
33187+ atomic_long_inc_unchecked(&gru_stats.id); \
33188 } while (0)
33189
33190 #ifdef CONFIG_SGI_GRU_DEBUG
33191diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h
33192--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-03-27 14:31:47.000000000 -0400
33193+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-08-05 20:33:55.000000000 -0400
33194@@ -876,7 +876,7 @@ extern struct xpc_registration xpc_regis
33195 /* found in xpc_main.c */
33196 extern struct device *xpc_part;
33197 extern struct device *xpc_chan;
33198-extern struct xpc_arch_operations xpc_arch_ops;
33199+extern const struct xpc_arch_operations xpc_arch_ops;
33200 extern int xpc_disengage_timelimit;
33201 extern int xpc_disengage_timedout;
33202 extern int xpc_activate_IRQ_rcvd;
33203diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c
33204--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-03-27 14:31:47.000000000 -0400
33205+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-08-05 20:33:55.000000000 -0400
33206@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_not
33207 .notifier_call = xpc_system_die,
33208 };
33209
33210-struct xpc_arch_operations xpc_arch_ops;
33211+const struct xpc_arch_operations xpc_arch_ops;
33212
33213 /*
33214 * Timer function to enforce the timelimit on the partition disengage.
33215diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c
33216--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-03-27 14:31:47.000000000 -0400
33217+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-08-05 20:33:55.000000000 -0400
33218@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_chan
33219 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
33220 }
33221
33222-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
33223+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
33224 .setup_partitions = xpc_setup_partitions_sn2,
33225 .teardown_partitions = xpc_teardown_partitions_sn2,
33226 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
33227@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
33228 int ret;
33229 size_t buf_size;
33230
33231- xpc_arch_ops = xpc_arch_ops_sn2;
33232+ pax_open_kernel();
33233+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
33234+ pax_close_kernel();
33235
33236 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
33237 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
33238diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c
33239--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-03-27 14:31:47.000000000 -0400
33240+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-08-05 20:33:55.000000000 -0400
33241@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_chann
33242 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
33243 }
33244
33245-static struct xpc_arch_operations xpc_arch_ops_uv = {
33246+static const struct xpc_arch_operations xpc_arch_ops_uv = {
33247 .setup_partitions = xpc_setup_partitions_uv,
33248 .teardown_partitions = xpc_teardown_partitions_uv,
33249 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
33250@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_ar
33251 int
33252 xpc_init_uv(void)
33253 {
33254- xpc_arch_ops = xpc_arch_ops_uv;
33255+ pax_open_kernel();
33256+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
33257+ pax_close_kernel();
33258
33259 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
33260 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
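
Making xpc_arch_ops const moves the dispatch table into read-only data; the single legitimate write, selecting the sn2 or uv implementation at init, is done through memcpy() bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection so the table can be filled once. A userspace analogue of that write-once pattern, with mprotect() playing the role of the protection toggle and the xpc names reused purely for illustration:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct xpc_arch_operations {
        void (*setup_partitions)(void);
    };

    static void setup_partitions_uv(void) { puts("uv partition setup"); }

    int main(void)
    {
        size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
        const struct xpc_arch_operations ops_uv = { setup_partitions_uv };
        struct xpc_arch_operations *ops =
            mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (ops == MAP_FAILED)
            return 1;

        mprotect(ops, pagesz, PROT_READ);               /* normal state: RO  */

        mprotect(ops, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
        memcpy(ops, &ops_uv, sizeof(ops_uv));
        mprotect(ops, pagesz, PROT_READ);               /* pax_close_kernel()*/

        ops->setup_partitions();                        /* fixed from now on */
        return 0;
    }
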
33261diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xp.h linux-2.6.32.45/drivers/misc/sgi-xp/xp.h
33262--- linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-03-27 14:31:47.000000000 -0400
33263+++ linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-08-05 20:33:55.000000000 -0400
33264@@ -289,7 +289,7 @@ struct xpc_interface {
33265 xpc_notify_func, void *);
33266 void (*received) (short, int, void *);
33267 enum xp_retval (*partid_to_nasids) (short, void *);
33268-};
33269+} __no_const;
33270
33271 extern struct xpc_interface xpc_interface;
33272
33273diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c
33274--- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
33275+++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
33276@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
33277 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
33278 unsigned long timeo = jiffies + HZ;
33279
33280+ pax_track_stack();
33281+
33282 /* Prevent setting state FL_SYNCING for chip in suspended state. */
33283 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
33284 goto sleep;
33285@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
33286 unsigned long initial_adr;
33287 int initial_len = len;
33288
33289+ pax_track_stack();
33290+
33291 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
33292 adr += chip->start;
33293 initial_adr = adr;
33294@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
33295 int retries = 3;
33296 int ret;
33297
33298+ pax_track_stack();
33299+
33300 adr += chip->start;
33301
33302 retry:
33303diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c
33304--- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
33305+++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
33306@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
33307 unsigned long cmd_addr;
33308 struct cfi_private *cfi = map->fldrv_priv;
33309
33310+ pax_track_stack();
33311+
33312 adr += chip->start;
33313
33314 /* Ensure cmd read/writes are aligned. */
33315@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
33316 DECLARE_WAITQUEUE(wait, current);
33317 int wbufsize, z;
33318
33319+ pax_track_stack();
33320+
33321 /* M58LW064A requires bus alignment for buffer wriets -- saw */
33322 if (adr & (map_bankwidth(map)-1))
33323 return -EINVAL;
33324@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
33325 DECLARE_WAITQUEUE(wait, current);
33326 int ret = 0;
33327
33328+ pax_track_stack();
33329+
33330 adr += chip->start;
33331
33332 /* Let's determine this according to the interleave only once */
33333@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
33334 unsigned long timeo = jiffies + HZ;
33335 DECLARE_WAITQUEUE(wait, current);
33336
33337+ pax_track_stack();
33338+
33339 adr += chip->start;
33340
33341 /* Let's determine this according to the interleave only once */
33342@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
33343 unsigned long timeo = jiffies + HZ;
33344 DECLARE_WAITQUEUE(wait, current);
33345
33346+ pax_track_stack();
33347+
33348 adr += chip->start;
33349
33350 /* Let's determine this according to the interleave only once */
33351diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2000.c linux-2.6.32.45/drivers/mtd/devices/doc2000.c
33352--- linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
33353+++ linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
33354@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
33355
33356 /* The ECC will not be calculated correctly if less than 512 is written */
33357 /* DBB-
33358- if (len != 0x200 && eccbuf)
33359+ if (len != 0x200)
33360 printk(KERN_WARNING
33361 "ECC needs a full sector write (adr: %lx size %lx)\n",
33362 (long) to, (long) len);
33363diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2001.c linux-2.6.32.45/drivers/mtd/devices/doc2001.c
33364--- linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
33365+++ linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
33366@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
33367 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33368
33369 /* Don't allow read past end of device */
33370- if (from >= this->totlen)
33371+ if (from >= this->totlen || !len)
33372 return -EINVAL;
33373
33374 /* Don't allow a single read to cross a 512-byte block boundary */
33375diff -urNp linux-2.6.32.45/drivers/mtd/ftl.c linux-2.6.32.45/drivers/mtd/ftl.c
33376--- linux-2.6.32.45/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
33377+++ linux-2.6.32.45/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
33378@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
33379 loff_t offset;
33380 uint16_t srcunitswap = cpu_to_le16(srcunit);
33381
33382+ pax_track_stack();
33383+
33384 eun = &part->EUNInfo[srcunit];
33385 xfer = &part->XferInfo[xferunit];
33386 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
33387diff -urNp linux-2.6.32.45/drivers/mtd/inftlcore.c linux-2.6.32.45/drivers/mtd/inftlcore.c
33388--- linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
33389+++ linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
33390@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
33391 struct inftl_oob oob;
33392 size_t retlen;
33393
33394+ pax_track_stack();
33395+
33396 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
33397 "pending=%d)\n", inftl, thisVUC, pendingblock);
33398
33399diff -urNp linux-2.6.32.45/drivers/mtd/inftlmount.c linux-2.6.32.45/drivers/mtd/inftlmount.c
33400--- linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
33401+++ linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
33402@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
33403 struct INFTLPartition *ip;
33404 size_t retlen;
33405
33406+ pax_track_stack();
33407+
33408 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
33409
33410 /*
33411diff -urNp linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c
33412--- linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
33413+++ linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
33414@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
33415 {
33416 map_word pfow_val[4];
33417
33418+ pax_track_stack();
33419+
33420 /* Check identification string */
33421 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
33422 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
33423diff -urNp linux-2.6.32.45/drivers/mtd/mtdchar.c linux-2.6.32.45/drivers/mtd/mtdchar.c
33424--- linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
33425+++ linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
33426@@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
33427 u_long size;
33428 struct mtd_info_user info;
33429
33430+ pax_track_stack();
33431+
33432 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
33433
33434 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
33435diff -urNp linux-2.6.32.45/drivers/mtd/nftlcore.c linux-2.6.32.45/drivers/mtd/nftlcore.c
33436--- linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
33437+++ linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
33438@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
33439 int inplace = 1;
33440 size_t retlen;
33441
33442+ pax_track_stack();
33443+
33444 memset(BlockMap, 0xff, sizeof(BlockMap));
33445 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
33446
33447diff -urNp linux-2.6.32.45/drivers/mtd/nftlmount.c linux-2.6.32.45/drivers/mtd/nftlmount.c
33448--- linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
33449+++ linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
33450@@ -23,6 +23,7 @@
33451 #include <asm/errno.h>
33452 #include <linux/delay.h>
33453 #include <linux/slab.h>
33454+#include <linux/sched.h>
33455 #include <linux/mtd/mtd.h>
33456 #include <linux/mtd/nand.h>
33457 #include <linux/mtd/nftl.h>
33458@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
33459 struct mtd_info *mtd = nftl->mbd.mtd;
33460 unsigned int i;
33461
33462+ pax_track_stack();
33463+
33464 /* Assume logical EraseSize == physical erasesize for starting the scan.
33465 We'll sort it out later if we find a MediaHeader which says otherwise */
33466 /* Actually, we won't. The new DiskOnChip driver has already scanned
33467diff -urNp linux-2.6.32.45/drivers/mtd/ubi/build.c linux-2.6.32.45/drivers/mtd/ubi/build.c
33468--- linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
33469+++ linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
33470@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
33471 static int __init bytes_str_to_int(const char *str)
33472 {
33473 char *endp;
33474- unsigned long result;
33475+ unsigned long result, scale = 1;
33476
33477 result = simple_strtoul(str, &endp, 0);
33478 if (str == endp || result >= INT_MAX) {
33479@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
33480
33481 switch (*endp) {
33482 case 'G':
33483- result *= 1024;
33484+ scale *= 1024;
33485 case 'M':
33486- result *= 1024;
33487+ scale *= 1024;
33488 case 'K':
33489- result *= 1024;
33490+ scale *= 1024;
33491 if (endp[1] == 'i' && endp[2] == 'B')
33492 endp += 2;
33493 case '\0':
33494@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
33495 return -EINVAL;
33496 }
33497
33498- return result;
33499+ if ((intoverflow_t)result*scale >= INT_MAX) {
33500+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33501+ str);
33502+ return -EINVAL;
33503+ }
33504+
33505+ return result*scale;
33506 }
33507
33508 /**
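
The bytes_str_to_int() rewrite above keeps the K/M/G multiplier in a separate scale and checks result*scale in a wider type before returning, so a module parameter like "10G" can no longer wrap an int into a small or negative byte count (intoverflow_t is grsecurity's wide helper type for that check). A standalone version of the same parsing, using unsigned long long for the wide comparison and strtoul in place of the kernel's simple_strtoul:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int bytes_str_to_int(const char *str)
    {
        char *endp;
        unsigned long result, scale = 1;

        result = strtoul(str, &endp, 0);
        if (str == endp || result >= INT_MAX)
            return -1;

        switch (*endp) {
        case 'G':
            scale *= 1024;              /* fall through */
        case 'M':
            scale *= 1024;              /* fall through */
        case 'K':
            scale *= 1024;
            if (endp[1] == 'i' && endp[2] == 'B')
                endp += 2;              /* fall through */
        case '\0':
            break;
        default:
            return -1;
        }

        /* Wide multiply before narrowing: "10G" now reports an error
         * instead of silently wrapping. */
        if ((unsigned long long)result * scale >= INT_MAX)
            return -1;

        return (int)(result * scale);
    }

    int main(void)
    {
        printf("%d %d %d\n",
               bytes_str_to_int("512KiB"),
               bytes_str_to_int("16M"),
               bytes_str_to_int("10G"));   /* last one is rejected */
        return 0;
    }
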
33509diff -urNp linux-2.6.32.45/drivers/net/bnx2.c linux-2.6.32.45/drivers/net/bnx2.c
33510--- linux-2.6.32.45/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
33511+++ linux-2.6.32.45/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
33512@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33513 int rc = 0;
33514 u32 magic, csum;
33515
33516+ pax_track_stack();
33517+
33518 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33519 goto test_nvram_done;
33520
33521diff -urNp linux-2.6.32.45/drivers/net/cxgb3/l2t.h linux-2.6.32.45/drivers/net/cxgb3/l2t.h
33522--- linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-03-27 14:31:47.000000000 -0400
33523+++ linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-08-05 20:33:55.000000000 -0400
33524@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
33525 */
33526 struct l2t_skb_cb {
33527 arp_failure_handler_func arp_failure_handler;
33528-};
33529+} __no_const;
33530
33531 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33532
33533diff -urNp linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c
33534--- linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
33535+++ linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
33536@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
33537 int i, addr, ret;
33538 struct t3_vpd vpd;
33539
33540+ pax_track_stack();
33541+
33542 /*
33543 * Card information is normally at VPD_BASE but some early cards had
33544 * it at 0.
33545diff -urNp linux-2.6.32.45/drivers/net/e1000e/82571.c linux-2.6.32.45/drivers/net/e1000e/82571.c
33546--- linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
33547+++ linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-08-23 21:22:32.000000000 -0400
33548@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(s
33549 {
33550 struct e1000_hw *hw = &adapter->hw;
33551 struct e1000_mac_info *mac = &hw->mac;
33552- struct e1000_mac_operations *func = &mac->ops;
33553+ e1000_mac_operations_no_const *func = &mac->ops;
33554 u32 swsm = 0;
33555 u32 swsm2 = 0;
33556 bool force_clear_smbi = false;
33557@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
33558 temp = er32(ICRXDMTC);
33559 }
33560
33561-static struct e1000_mac_operations e82571_mac_ops = {
33562+static const struct e1000_mac_operations e82571_mac_ops = {
33563 /* .check_mng_mode: mac type dependent */
33564 /* .check_for_link: media type dependent */
33565 .id_led_init = e1000e_id_led_init,
33566@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
33567 .setup_led = e1000e_setup_led_generic,
33568 };
33569
33570-static struct e1000_phy_operations e82_phy_ops_igp = {
33571+static const struct e1000_phy_operations e82_phy_ops_igp = {
33572 .acquire_phy = e1000_get_hw_semaphore_82571,
33573 .check_reset_block = e1000e_check_reset_block_generic,
33574 .commit_phy = NULL,
33575@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
33576 .cfg_on_link_up = NULL,
33577 };
33578
33579-static struct e1000_phy_operations e82_phy_ops_m88 = {
33580+static const struct e1000_phy_operations e82_phy_ops_m88 = {
33581 .acquire_phy = e1000_get_hw_semaphore_82571,
33582 .check_reset_block = e1000e_check_reset_block_generic,
33583 .commit_phy = e1000e_phy_sw_reset,
33584@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
33585 .cfg_on_link_up = NULL,
33586 };
33587
33588-static struct e1000_phy_operations e82_phy_ops_bm = {
33589+static const struct e1000_phy_operations e82_phy_ops_bm = {
33590 .acquire_phy = e1000_get_hw_semaphore_82571,
33591 .check_reset_block = e1000e_check_reset_block_generic,
33592 .commit_phy = e1000e_phy_sw_reset,
33593@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
33594 .cfg_on_link_up = NULL,
33595 };
33596
33597-static struct e1000_nvm_operations e82571_nvm_ops = {
33598+static const struct e1000_nvm_operations e82571_nvm_ops = {
33599 .acquire_nvm = e1000_acquire_nvm_82571,
33600 .read_nvm = e1000e_read_nvm_eerd,
33601 .release_nvm = e1000_release_nvm_82571,
33602diff -urNp linux-2.6.32.45/drivers/net/e1000e/e1000.h linux-2.6.32.45/drivers/net/e1000e/e1000.h
33603--- linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
33604+++ linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
33605@@ -375,9 +375,9 @@ struct e1000_info {
33606 u32 pba;
33607 u32 max_hw_frame_size;
33608 s32 (*get_variants)(struct e1000_adapter *);
33609- struct e1000_mac_operations *mac_ops;
33610- struct e1000_phy_operations *phy_ops;
33611- struct e1000_nvm_operations *nvm_ops;
33612+ const struct e1000_mac_operations *mac_ops;
33613+ const struct e1000_phy_operations *phy_ops;
33614+ const struct e1000_nvm_operations *nvm_ops;
33615 };
33616
33617 /* hardware capability, feature, and workaround flags */
33618diff -urNp linux-2.6.32.45/drivers/net/e1000e/es2lan.c linux-2.6.32.45/drivers/net/e1000e/es2lan.c
33619--- linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
33620+++ linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-08-23 21:22:32.000000000 -0400
33621@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es
33622 {
33623 struct e1000_hw *hw = &adapter->hw;
33624 struct e1000_mac_info *mac = &hw->mac;
33625- struct e1000_mac_operations *func = &mac->ops;
33626+ e1000_mac_operations_no_const *func = &mac->ops;
33627
33628 /* Set media type */
33629 switch (adapter->pdev->device) {
33630@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
33631 temp = er32(ICRXDMTC);
33632 }
33633
33634-static struct e1000_mac_operations es2_mac_ops = {
33635+static const struct e1000_mac_operations es2_mac_ops = {
33636 .id_led_init = e1000e_id_led_init,
33637 .check_mng_mode = e1000e_check_mng_mode_generic,
33638 /* check_for_link dependent on media type */
33639@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
33640 .setup_led = e1000e_setup_led_generic,
33641 };
33642
33643-static struct e1000_phy_operations es2_phy_ops = {
33644+static const struct e1000_phy_operations es2_phy_ops = {
33645 .acquire_phy = e1000_acquire_phy_80003es2lan,
33646 .check_reset_block = e1000e_check_reset_block_generic,
33647 .commit_phy = e1000e_phy_sw_reset,
33648@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
33649 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
33650 };
33651
33652-static struct e1000_nvm_operations es2_nvm_ops = {
33653+static const struct e1000_nvm_operations es2_nvm_ops = {
33654 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
33655 .read_nvm = e1000e_read_nvm_eerd,
33656 .release_nvm = e1000_release_nvm_80003es2lan,
33657diff -urNp linux-2.6.32.45/drivers/net/e1000e/hw.h linux-2.6.32.45/drivers/net/e1000e/hw.h
33658--- linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
33659+++ linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-08-23 21:27:38.000000000 -0400
33660@@ -753,6 +753,7 @@ struct e1000_mac_operations {
33661 s32 (*setup_physical_interface)(struct e1000_hw *);
33662 s32 (*setup_led)(struct e1000_hw *);
33663 };
33664+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33665
33666 /* Function pointers for the PHY. */
33667 struct e1000_phy_operations {
33668@@ -774,6 +775,7 @@ struct e1000_phy_operations {
33669 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33670 s32 (*cfg_on_link_up)(struct e1000_hw *);
33671 };
33672+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33673
33674 /* Function pointers for the NVM. */
33675 struct e1000_nvm_operations {
33676@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
33677 s32 (*validate_nvm)(struct e1000_hw *);
33678 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33679 };
33680+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33681
33682 struct e1000_mac_info {
33683- struct e1000_mac_operations ops;
33684+ e1000_mac_operations_no_const ops;
33685
33686 u8 addr[6];
33687 u8 perm_addr[6];
33688@@ -823,7 +826,7 @@ struct e1000_mac_info {
33689 };
33690
33691 struct e1000_phy_info {
33692- struct e1000_phy_operations ops;
33693+ e1000_phy_operations_no_const ops;
33694
33695 enum e1000_phy_type type;
33696
33697@@ -857,7 +860,7 @@ struct e1000_phy_info {
33698 };
33699
33700 struct e1000_nvm_info {
33701- struct e1000_nvm_operations ops;
33702+ e1000_nvm_operations_no_const ops;
33703
33704 enum e1000_nvm_type type;
33705 enum e1000_nvm_override override;
33706diff -urNp linux-2.6.32.45/drivers/net/e1000e/ich8lan.c linux-2.6.32.45/drivers/net/e1000e/ich8lan.c
33707--- linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
33708+++ linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-08-23 21:22:32.000000000 -0400
33709@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
33710 }
33711 }
33712
33713-static struct e1000_mac_operations ich8_mac_ops = {
33714+static const struct e1000_mac_operations ich8_mac_ops = {
33715 .id_led_init = e1000e_id_led_init,
33716 .check_mng_mode = e1000_check_mng_mode_ich8lan,
33717 .check_for_link = e1000_check_for_copper_link_ich8lan,
33718@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
33719 /* id_led_init dependent on mac type */
33720 };
33721
33722-static struct e1000_phy_operations ich8_phy_ops = {
33723+static const struct e1000_phy_operations ich8_phy_ops = {
33724 .acquire_phy = e1000_acquire_swflag_ich8lan,
33725 .check_reset_block = e1000_check_reset_block_ich8lan,
33726 .commit_phy = NULL,
33727@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
33728 .write_phy_reg = e1000e_write_phy_reg_igp,
33729 };
33730
33731-static struct e1000_nvm_operations ich8_nvm_ops = {
33732+static const struct e1000_nvm_operations ich8_nvm_ops = {
33733 .acquire_nvm = e1000_acquire_nvm_ich8lan,
33734 .read_nvm = e1000_read_nvm_ich8lan,
33735 .release_nvm = e1000_release_nvm_ich8lan,
33736diff -urNp linux-2.6.32.45/drivers/net/hamradio/6pack.c linux-2.6.32.45/drivers/net/hamradio/6pack.c
33737--- linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
33738+++ linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
33739@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
33740 unsigned char buf[512];
33741 int count1;
33742
33743+ pax_track_stack();
33744+
33745 if (!count)
33746 return;
33747
33748diff -urNp linux-2.6.32.45/drivers/net/ibmveth.c linux-2.6.32.45/drivers/net/ibmveth.c
33749--- linux-2.6.32.45/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
33750+++ linux-2.6.32.45/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
33751@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
33752 NULL,
33753 };
33754
33755-static struct sysfs_ops veth_pool_ops = {
33756+static const struct sysfs_ops veth_pool_ops = {
33757 .show = veth_pool_show,
33758 .store = veth_pool_store,
33759 };
33760diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_82575.c linux-2.6.32.45/drivers/net/igb/e1000_82575.c
33761--- linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
33762+++ linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-08-23 21:22:32.000000000 -0400
33763@@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
33764 wr32(E1000_VT_CTL, vt_ctl);
33765 }
33766
33767-static struct e1000_mac_operations e1000_mac_ops_82575 = {
33768+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
33769 .reset_hw = igb_reset_hw_82575,
33770 .init_hw = igb_init_hw_82575,
33771 .check_for_link = igb_check_for_link_82575,
33772@@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
33773 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
33774 };
33775
33776-static struct e1000_phy_operations e1000_phy_ops_82575 = {
33777+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
33778 .acquire = igb_acquire_phy_82575,
33779 .get_cfg_done = igb_get_cfg_done_82575,
33780 .release = igb_release_phy_82575,
33781 };
33782
33783-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33784+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33785 .acquire = igb_acquire_nvm_82575,
33786 .read = igb_read_nvm_eerd,
33787 .release = igb_release_nvm_82575,
33788diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_hw.h linux-2.6.32.45/drivers/net/igb/e1000_hw.h
33789--- linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
33790+++ linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-08-23 21:28:01.000000000 -0400
33791@@ -288,6 +288,7 @@ struct e1000_mac_operations {
33792 s32 (*read_mac_addr)(struct e1000_hw *);
33793 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
33794 };
33795+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33796
33797 struct e1000_phy_operations {
33798 s32 (*acquire)(struct e1000_hw *);
33799@@ -303,6 +304,7 @@ struct e1000_phy_operations {
33800 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33801 s32 (*write_reg)(struct e1000_hw *, u32, u16);
33802 };
33803+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33804
33805 struct e1000_nvm_operations {
33806 s32 (*acquire)(struct e1000_hw *);
33807@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
33808 void (*release)(struct e1000_hw *);
33809 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
33810 };
33811+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33812
33813 struct e1000_info {
33814 s32 (*get_invariants)(struct e1000_hw *);
33815@@ -321,7 +324,7 @@ struct e1000_info {
33816 extern const struct e1000_info e1000_82575_info;
33817
33818 struct e1000_mac_info {
33819- struct e1000_mac_operations ops;
33820+ e1000_mac_operations_no_const ops;
33821
33822 u8 addr[6];
33823 u8 perm_addr[6];
33824@@ -365,7 +368,7 @@ struct e1000_mac_info {
33825 };
33826
33827 struct e1000_phy_info {
33828- struct e1000_phy_operations ops;
33829+ e1000_phy_operations_no_const ops;
33830
33831 enum e1000_phy_type type;
33832
33833@@ -400,7 +403,7 @@ struct e1000_phy_info {
33834 };
33835
33836 struct e1000_nvm_info {
33837- struct e1000_nvm_operations ops;
33838+ e1000_nvm_operations_no_const ops;
33839
33840 enum e1000_nvm_type type;
33841 enum e1000_nvm_override override;
33842@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
33843 s32 (*check_for_ack)(struct e1000_hw *, u16);
33844 s32 (*check_for_rst)(struct e1000_hw *, u16);
33845 };
33846+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33847
33848 struct e1000_mbx_stats {
33849 u32 msgs_tx;
33850@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
33851 };
33852
33853 struct e1000_mbx_info {
33854- struct e1000_mbx_operations ops;
33855+ e1000_mbx_operations_no_const ops;
33856 struct e1000_mbx_stats stats;
33857 u32 timeout;
33858 u32 usec_delay;
33859diff -urNp linux-2.6.32.45/drivers/net/igbvf/vf.h linux-2.6.32.45/drivers/net/igbvf/vf.h
33860--- linux-2.6.32.45/drivers/net/igbvf/vf.h 2011-03-27 14:31:47.000000000 -0400
33861+++ linux-2.6.32.45/drivers/net/igbvf/vf.h 2011-08-23 21:22:38.000000000 -0400
33862@@ -187,9 +187,10 @@ struct e1000_mac_operations {
33863 s32 (*read_mac_addr)(struct e1000_hw *);
33864 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
33865 };
33866+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33867
33868 struct e1000_mac_info {
33869- struct e1000_mac_operations ops;
33870+ e1000_mac_operations_no_const ops;
33871 u8 addr[6];
33872 u8 perm_addr[6];
33873
33874@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
33875 s32 (*check_for_ack)(struct e1000_hw *);
33876 s32 (*check_for_rst)(struct e1000_hw *);
33877 };
33878+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33879
33880 struct e1000_mbx_stats {
33881 u32 msgs_tx;
33882@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
33883 };
33884
33885 struct e1000_mbx_info {
33886- struct e1000_mbx_operations ops;
33887+ e1000_mbx_operations_no_const ops;
33888 struct e1000_mbx_stats stats;
33889 u32 timeout;
33890 u32 usec_delay;
33891diff -urNp linux-2.6.32.45/drivers/net/iseries_veth.c linux-2.6.32.45/drivers/net/iseries_veth.c
33892--- linux-2.6.32.45/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
33893+++ linux-2.6.32.45/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
33894@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
33895 NULL
33896 };
33897
33898-static struct sysfs_ops veth_cnx_sysfs_ops = {
33899+static const struct sysfs_ops veth_cnx_sysfs_ops = {
33900 .show = veth_cnx_attribute_show
33901 };
33902
33903@@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
33904 NULL
33905 };
33906
33907-static struct sysfs_ops veth_port_sysfs_ops = {
33908+static const struct sysfs_ops veth_port_sysfs_ops = {
33909 .show = veth_port_attribute_show
33910 };
33911
33912diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c
33913--- linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
33914+++ linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
33915@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
33916 u32 rctl;
33917 int i;
33918
33919+ pax_track_stack();
33920+
33921 /* Check for Promiscuous and All Multicast modes */
33922
33923 rctl = IXGB_READ_REG(hw, RCTL);
33924diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c
33925--- linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
33926+++ linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
33927@@ -260,6 +260,9 @@ void __devinit
33928 ixgb_check_options(struct ixgb_adapter *adapter)
33929 {
33930 int bd = adapter->bd_number;
33931+
33932+ pax_track_stack();
33933+
33934 if (bd >= IXGB_MAX_NIC) {
33935 printk(KERN_NOTICE
33936 "Warning: no configuration for board #%i\n", bd);
33937diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h
33938--- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h 2011-03-27 14:31:47.000000000 -0400
33939+++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:22:38.000000000 -0400
33940@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
33941 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
33942 s32 (*update_checksum)(struct ixgbe_hw *);
33943 };
33944+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
33945
33946 struct ixgbe_mac_operations {
33947 s32 (*init_hw)(struct ixgbe_hw *);
33948@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
33949 /* Flow Control */
33950 s32 (*fc_enable)(struct ixgbe_hw *, s32);
33951 };
33952+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33953
33954 struct ixgbe_phy_operations {
33955 s32 (*identify)(struct ixgbe_hw *);
33956@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
33957 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
33958 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
33959 };
33960+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
33961
33962 struct ixgbe_eeprom_info {
33963- struct ixgbe_eeprom_operations ops;
33964+ ixgbe_eeprom_operations_no_const ops;
33965 enum ixgbe_eeprom_type type;
33966 u32 semaphore_delay;
33967 u16 word_size;
33968@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
33969 };
33970
33971 struct ixgbe_mac_info {
33972- struct ixgbe_mac_operations ops;
33973+ ixgbe_mac_operations_no_const ops;
33974 enum ixgbe_mac_type type;
33975 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33976 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33977@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
33978 };
33979
33980 struct ixgbe_phy_info {
33981- struct ixgbe_phy_operations ops;
33982+ ixgbe_phy_operations_no_const ops;
33983 struct mdio_if_info mdio;
33984 enum ixgbe_phy_type type;
33985 u32 id;
33986diff -urNp linux-2.6.32.45/drivers/net/mlx4/main.c linux-2.6.32.45/drivers/net/mlx4/main.c
33987--- linux-2.6.32.45/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
33988+++ linux-2.6.32.45/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
33989@@ -38,6 +38,7 @@
33990 #include <linux/errno.h>
33991 #include <linux/pci.h>
33992 #include <linux/dma-mapping.h>
33993+#include <linux/sched.h>
33994
33995 #include <linux/mlx4/device.h>
33996 #include <linux/mlx4/doorbell.h>
33997@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
33998 u64 icm_size;
33999 int err;
34000
34001+ pax_track_stack();
34002+
34003 err = mlx4_QUERY_FW(dev);
34004 if (err) {
34005 if (err == -EACCES)
34006diff -urNp linux-2.6.32.45/drivers/net/niu.c linux-2.6.32.45/drivers/net/niu.c
34007--- linux-2.6.32.45/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
34008+++ linux-2.6.32.45/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
34009@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
34010 int i, num_irqs, err;
34011 u8 first_ldg;
34012
34013+ pax_track_stack();
34014+
34015 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
34016 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
34017 ldg_num_map[i] = first_ldg + i;
34018diff -urNp linux-2.6.32.45/drivers/net/pcnet32.c linux-2.6.32.45/drivers/net/pcnet32.c
34019--- linux-2.6.32.45/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
34020+++ linux-2.6.32.45/drivers/net/pcnet32.c 2011-08-05 20:33:55.000000000 -0400
34021@@ -79,7 +79,7 @@ static int cards_found;
34022 /*
34023 * VLB I/O addresses
34024 */
34025-static unsigned int pcnet32_portlist[] __initdata =
34026+static unsigned int pcnet32_portlist[] __devinitdata =
34027 { 0x300, 0x320, 0x340, 0x360, 0 };
34028
34029 static int pcnet32_debug = 0;
34030@@ -267,7 +267,7 @@ struct pcnet32_private {
34031 struct sk_buff **rx_skbuff;
34032 dma_addr_t *tx_dma_addr;
34033 dma_addr_t *rx_dma_addr;
34034- struct pcnet32_access a;
34035+ struct pcnet32_access *a;
34036 spinlock_t lock; /* Guard lock */
34037 unsigned int cur_rx, cur_tx; /* The next free ring entry */
34038 unsigned int rx_ring_size; /* current rx ring size */
34039@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct n
34040 u16 val;
34041
34042 netif_wake_queue(dev);
34043- val = lp->a.read_csr(ioaddr, CSR3);
34044+ val = lp->a->read_csr(ioaddr, CSR3);
34045 val &= 0x00ff;
34046- lp->a.write_csr(ioaddr, CSR3, val);
34047+ lp->a->write_csr(ioaddr, CSR3, val);
34048 napi_enable(&lp->napi);
34049 }
34050
34051@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_d
34052 r = mii_link_ok(&lp->mii_if);
34053 } else if (lp->chip_version >= PCNET32_79C970A) {
34054 ulong ioaddr = dev->base_addr; /* card base I/O address */
34055- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34056+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34057 } else { /* can not detect link on really old chips */
34058 r = 1;
34059 }
34060@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct
34061 pcnet32_netif_stop(dev);
34062
34063 spin_lock_irqsave(&lp->lock, flags);
34064- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34065+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34066
34067 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
34068
34069@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct
34070 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
34071 {
34072 struct pcnet32_private *lp = netdev_priv(dev);
34073- struct pcnet32_access *a = &lp->a; /* access to registers */
34074+ struct pcnet32_access *a = lp->a; /* access to registers */
34075 ulong ioaddr = dev->base_addr; /* card base I/O address */
34076 struct sk_buff *skb; /* sk buff */
34077 int x, i; /* counters */
34078@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct
34079 pcnet32_netif_stop(dev);
34080
34081 spin_lock_irqsave(&lp->lock, flags);
34082- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34083+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34084
34085 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
34086
34087 /* Reset the PCNET32 */
34088- lp->a.reset(ioaddr);
34089- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34090+ lp->a->reset(ioaddr);
34091+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34092
34093 /* switch pcnet32 to 32bit mode */
34094- lp->a.write_bcr(ioaddr, 20, 2);
34095+ lp->a->write_bcr(ioaddr, 20, 2);
34096
34097 /* purge & init rings but don't actually restart */
34098 pcnet32_restart(dev, 0x0000);
34099
34100- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34101+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34102
34103 /* Initialize Transmit buffers. */
34104 size = data_len + 15;
34105@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct
34106
34107 /* set int loopback in CSR15 */
34108 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
34109- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
34110+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
34111
34112 teststatus = cpu_to_le16(0x8000);
34113- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34114+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34115
34116 /* Check status of descriptors */
34117 for (x = 0; x < numbuffs; x++) {
34118@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct
34119 }
34120 }
34121
34122- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34123+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34124 wmb();
34125 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
34126 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
34127@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct
34128 pcnet32_restart(dev, CSR0_NORMAL);
34129 } else {
34130 pcnet32_purge_rx_ring(dev);
34131- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34132+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34133 }
34134 spin_unlock_irqrestore(&lp->lock, flags);
34135
34136@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct
34137 static void pcnet32_led_blink_callback(struct net_device *dev)
34138 {
34139 struct pcnet32_private *lp = netdev_priv(dev);
34140- struct pcnet32_access *a = &lp->a;
34141+ struct pcnet32_access *a = lp->a;
34142 ulong ioaddr = dev->base_addr;
34143 unsigned long flags;
34144 int i;
34145@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(s
34146 static int pcnet32_phys_id(struct net_device *dev, u32 data)
34147 {
34148 struct pcnet32_private *lp = netdev_priv(dev);
34149- struct pcnet32_access *a = &lp->a;
34150+ struct pcnet32_access *a = lp->a;
34151 ulong ioaddr = dev->base_addr;
34152 unsigned long flags;
34153 int i, regs[4];
34154@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_de
34155 {
34156 int csr5;
34157 struct pcnet32_private *lp = netdev_priv(dev);
34158- struct pcnet32_access *a = &lp->a;
34159+ struct pcnet32_access *a = lp->a;
34160 ulong ioaddr = dev->base_addr;
34161 int ticks;
34162
34163@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_stru
34164 spin_lock_irqsave(&lp->lock, flags);
34165 if (pcnet32_tx(dev)) {
34166 /* reset the chip to clear the error condition, then restart */
34167- lp->a.reset(ioaddr);
34168- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34169+ lp->a->reset(ioaddr);
34170+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34171 pcnet32_restart(dev, CSR0_START);
34172 netif_wake_queue(dev);
34173 }
34174@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_stru
34175 __napi_complete(napi);
34176
34177 /* clear interrupt masks */
34178- val = lp->a.read_csr(ioaddr, CSR3);
34179+ val = lp->a->read_csr(ioaddr, CSR3);
34180 val &= 0x00ff;
34181- lp->a.write_csr(ioaddr, CSR3, val);
34182+ lp->a->write_csr(ioaddr, CSR3, val);
34183
34184 /* Set interrupt enable. */
34185- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
34186+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
34187
34188 spin_unlock_irqrestore(&lp->lock, flags);
34189 }
34190@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_
34191 int i, csr0;
34192 u16 *buff = ptr;
34193 struct pcnet32_private *lp = netdev_priv(dev);
34194- struct pcnet32_access *a = &lp->a;
34195+ struct pcnet32_access *a = lp->a;
34196 ulong ioaddr = dev->base_addr;
34197 unsigned long flags;
34198
34199@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_
34200 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
34201 if (lp->phymask & (1 << j)) {
34202 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
34203- lp->a.write_bcr(ioaddr, 33,
34204+ lp->a->write_bcr(ioaddr, 33,
34205 (j << 5) | i);
34206- *buff++ = lp->a.read_bcr(ioaddr, 34);
34207+ *buff++ = lp->a->read_bcr(ioaddr, 34);
34208 }
34209 }
34210 }
34211@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34212 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
34213 lp->options |= PCNET32_PORT_FD;
34214
34215- lp->a = *a;
34216+ lp->a = a;
34217
34218 /* prior to register_netdev, dev->name is not yet correct */
34219 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
34220@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34221 if (lp->mii) {
34222 /* lp->phycount and lp->phymask are set to 0 by memset above */
34223
34224- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34225+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34226 /* scan for PHYs */
34227 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34228 unsigned short id1, id2;
34229@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34230 "Found PHY %04x:%04x at address %d.\n",
34231 id1, id2, i);
34232 }
34233- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34234+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34235 if (lp->phycount > 1) {
34236 lp->options |= PCNET32_PORT_MII;
34237 }
34238@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_devic
34239 }
34240
34241 /* Reset the PCNET32 */
34242- lp->a.reset(ioaddr);
34243+ lp->a->reset(ioaddr);
34244
34245 /* switch pcnet32 to 32bit mode */
34246- lp->a.write_bcr(ioaddr, 20, 2);
34247+ lp->a->write_bcr(ioaddr, 20, 2);
34248
34249 if (netif_msg_ifup(lp))
34250 printk(KERN_DEBUG
34251@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_devic
34252 (u32) (lp->init_dma_addr));
34253
34254 /* set/reset autoselect bit */
34255- val = lp->a.read_bcr(ioaddr, 2) & ~2;
34256+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
34257 if (lp->options & PCNET32_PORT_ASEL)
34258 val |= 2;
34259- lp->a.write_bcr(ioaddr, 2, val);
34260+ lp->a->write_bcr(ioaddr, 2, val);
34261
34262 /* handle full duplex setting */
34263 if (lp->mii_if.full_duplex) {
34264- val = lp->a.read_bcr(ioaddr, 9) & ~3;
34265+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
34266 if (lp->options & PCNET32_PORT_FD) {
34267 val |= 1;
34268 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
34269@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_devic
34270 if (lp->chip_version == 0x2627)
34271 val |= 3;
34272 }
34273- lp->a.write_bcr(ioaddr, 9, val);
34274+ lp->a->write_bcr(ioaddr, 9, val);
34275 }
34276
34277 /* set/reset GPSI bit in test register */
34278- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
34279+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
34280 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
34281 val |= 0x10;
34282- lp->a.write_csr(ioaddr, 124, val);
34283+ lp->a->write_csr(ioaddr, 124, val);
34284
34285 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
34286 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
34287@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_devic
34288 * duplex, and/or enable auto negotiation, and clear DANAS
34289 */
34290 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
34291- lp->a.write_bcr(ioaddr, 32,
34292- lp->a.read_bcr(ioaddr, 32) | 0x0080);
34293+ lp->a->write_bcr(ioaddr, 32,
34294+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
34295 /* disable Auto Negotiation, set 10Mpbs, HD */
34296- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
34297+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
34298 if (lp->options & PCNET32_PORT_FD)
34299 val |= 0x10;
34300 if (lp->options & PCNET32_PORT_100)
34301 val |= 0x08;
34302- lp->a.write_bcr(ioaddr, 32, val);
34303+ lp->a->write_bcr(ioaddr, 32, val);
34304 } else {
34305 if (lp->options & PCNET32_PORT_ASEL) {
34306- lp->a.write_bcr(ioaddr, 32,
34307- lp->a.read_bcr(ioaddr,
34308+ lp->a->write_bcr(ioaddr, 32,
34309+ lp->a->read_bcr(ioaddr,
34310 32) | 0x0080);
34311 /* enable auto negotiate, setup, disable fd */
34312- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
34313+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
34314 val |= 0x20;
34315- lp->a.write_bcr(ioaddr, 32, val);
34316+ lp->a->write_bcr(ioaddr, 32, val);
34317 }
34318 }
34319 } else {
34320@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_devic
34321 * There is really no good other way to handle multiple PHYs
34322 * other than turning off all automatics
34323 */
34324- val = lp->a.read_bcr(ioaddr, 2);
34325- lp->a.write_bcr(ioaddr, 2, val & ~2);
34326- val = lp->a.read_bcr(ioaddr, 32);
34327- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34328+ val = lp->a->read_bcr(ioaddr, 2);
34329+ lp->a->write_bcr(ioaddr, 2, val & ~2);
34330+ val = lp->a->read_bcr(ioaddr, 32);
34331+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34332
34333 if (!(lp->options & PCNET32_PORT_ASEL)) {
34334 /* setup ecmd */
34335@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_devic
34336 ecmd.speed =
34337 lp->
34338 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
34339- bcr9 = lp->a.read_bcr(ioaddr, 9);
34340+ bcr9 = lp->a->read_bcr(ioaddr, 9);
34341
34342 if (lp->options & PCNET32_PORT_FD) {
34343 ecmd.duplex = DUPLEX_FULL;
34344@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_devic
34345 ecmd.duplex = DUPLEX_HALF;
34346 bcr9 |= ~(1 << 0);
34347 }
34348- lp->a.write_bcr(ioaddr, 9, bcr9);
34349+ lp->a->write_bcr(ioaddr, 9, bcr9);
34350 }
34351
34352 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34353@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_devic
34354
34355 #ifdef DO_DXSUFLO
34356 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
34357- val = lp->a.read_csr(ioaddr, CSR3);
34358+ val = lp->a->read_csr(ioaddr, CSR3);
34359 val |= 0x40;
34360- lp->a.write_csr(ioaddr, CSR3, val);
34361+ lp->a->write_csr(ioaddr, CSR3, val);
34362 }
34363 #endif
34364
34365@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_devic
34366 napi_enable(&lp->napi);
34367
34368 /* Re-initialize the PCNET32, and start it when done. */
34369- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34370- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34371+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34372+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34373
34374- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34375- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34376+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34377+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34378
34379 netif_start_queue(dev);
34380
34381@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_devic
34382
34383 i = 0;
34384 while (i++ < 100)
34385- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34386+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34387 break;
34388 /*
34389 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
34390 * reports that doing so triggers a bug in the '974.
34391 */
34392- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
34393+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
34394
34395 if (netif_msg_ifup(lp))
34396 printk(KERN_DEBUG
34397 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
34398 dev->name, i,
34399 (u32) (lp->init_dma_addr),
34400- lp->a.read_csr(ioaddr, CSR0));
34401+ lp->a->read_csr(ioaddr, CSR0));
34402
34403 spin_unlock_irqrestore(&lp->lock, flags);
34404
34405@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_devic
34406 * Switch back to 16bit mode to avoid problems with dumb
34407 * DOS packet driver after a warm reboot
34408 */
34409- lp->a.write_bcr(ioaddr, 20, 4);
34410+ lp->a->write_bcr(ioaddr, 20, 4);
34411
34412 err_free_irq:
34413 spin_unlock_irqrestore(&lp->lock, flags);
34414@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_d
34415
34416 /* wait for stop */
34417 for (i = 0; i < 100; i++)
34418- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
34419+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
34420 break;
34421
34422 if (i >= 100 && netif_msg_drv(lp))
34423@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_d
34424 return;
34425
34426 /* ReInit Ring */
34427- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34428+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34429 i = 0;
34430 while (i++ < 1000)
34431- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34432+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34433 break;
34434
34435- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
34436+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
34437 }
34438
34439 static void pcnet32_tx_timeout(struct net_device *dev)
34440@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct ne
34441 if (pcnet32_debug & NETIF_MSG_DRV)
34442 printk(KERN_ERR
34443 "%s: transmit timed out, status %4.4x, resetting.\n",
34444- dev->name, lp->a.read_csr(ioaddr, CSR0));
34445- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34446+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34447+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34448 dev->stats.tx_errors++;
34449 if (netif_msg_tx_err(lp)) {
34450 int i;
34451@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34452 if (netif_msg_tx_queued(lp)) {
34453 printk(KERN_DEBUG
34454 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
34455- dev->name, lp->a.read_csr(ioaddr, CSR0));
34456+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34457 }
34458
34459 /* Default status -- will not enable Successful-TxDone
34460@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34461 dev->stats.tx_bytes += skb->len;
34462
34463 /* Trigger an immediate send poll. */
34464- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34465+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34466
34467 dev->trans_start = jiffies;
34468
34469@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
34470
34471 spin_lock(&lp->lock);
34472
34473- csr0 = lp->a.read_csr(ioaddr, CSR0);
34474+ csr0 = lp->a->read_csr(ioaddr, CSR0);
34475 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
34476 if (csr0 == 0xffff) {
34477 break; /* PCMCIA remove happened */
34478 }
34479 /* Acknowledge all of the current interrupt sources ASAP. */
34480- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34481+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34482
34483 if (netif_msg_intr(lp))
34484 printk(KERN_DEBUG
34485 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
34486- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
34487+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
34488
34489 /* Log misc errors. */
34490 if (csr0 & 0x4000)
34491@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
34492 if (napi_schedule_prep(&lp->napi)) {
34493 u16 val;
34494 /* set interrupt masks */
34495- val = lp->a.read_csr(ioaddr, CSR3);
34496+ val = lp->a->read_csr(ioaddr, CSR3);
34497 val |= 0x5f00;
34498- lp->a.write_csr(ioaddr, CSR3, val);
34499+ lp->a->write_csr(ioaddr, CSR3, val);
34500
34501 __napi_schedule(&lp->napi);
34502 break;
34503 }
34504- csr0 = lp->a.read_csr(ioaddr, CSR0);
34505+ csr0 = lp->a->read_csr(ioaddr, CSR0);
34506 }
34507
34508 if (netif_msg_intr(lp))
34509 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
34510- dev->name, lp->a.read_csr(ioaddr, CSR0));
34511+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34512
34513 spin_unlock(&lp->lock);
34514
34515@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_devi
34516
34517 spin_lock_irqsave(&lp->lock, flags);
34518
34519- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34520+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34521
34522 if (netif_msg_ifdown(lp))
34523 printk(KERN_DEBUG
34524 "%s: Shutting down ethercard, status was %2.2x.\n",
34525- dev->name, lp->a.read_csr(ioaddr, CSR0));
34526+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34527
34528 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
34529- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34530+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34531
34532 /*
34533 * Switch back to 16bit mode to avoid problems with dumb
34534 * DOS packet driver after a warm reboot
34535 */
34536- lp->a.write_bcr(ioaddr, 20, 4);
34537+ lp->a->write_bcr(ioaddr, 20, 4);
34538
34539 spin_unlock_irqrestore(&lp->lock, flags);
34540
34541@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_
34542 unsigned long flags;
34543
34544 spin_lock_irqsave(&lp->lock, flags);
34545- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34546+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34547 spin_unlock_irqrestore(&lp->lock, flags);
34548
34549 return &dev->stats;
34550@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struc
34551 if (dev->flags & IFF_ALLMULTI) {
34552 ib->filter[0] = cpu_to_le32(~0U);
34553 ib->filter[1] = cpu_to_le32(~0U);
34554- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34555- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34556- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34557- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34558+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34559+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34560+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34561+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34562 return;
34563 }
34564 /* clear the multicast filter */
34565@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struc
34566 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
34567 }
34568 for (i = 0; i < 4; i++)
34569- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
34570+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
34571 le16_to_cpu(mcast_table[i]));
34572 return;
34573 }
34574@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(s
34575
34576 spin_lock_irqsave(&lp->lock, flags);
34577 suspended = pcnet32_suspend(dev, &flags, 0);
34578- csr15 = lp->a.read_csr(ioaddr, CSR15);
34579+ csr15 = lp->a->read_csr(ioaddr, CSR15);
34580 if (dev->flags & IFF_PROMISC) {
34581 /* Log any net taps. */
34582 if (netif_msg_hw(lp))
34583@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(s
34584 lp->init_block->mode =
34585 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
34586 7);
34587- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
34588+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
34589 } else {
34590 lp->init_block->mode =
34591 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
34592- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34593+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34594 pcnet32_load_multicast(dev);
34595 }
34596
34597 if (suspended) {
34598 int csr5;
34599 /* clear SUSPEND (SPND) - CSR5 bit 0 */
34600- csr5 = lp->a.read_csr(ioaddr, CSR5);
34601- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34602+ csr5 = lp->a->read_csr(ioaddr, CSR5);
34603+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34604 } else {
34605- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34606+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34607 pcnet32_restart(dev, CSR0_NORMAL);
34608 netif_wake_queue(dev);
34609 }
34610@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *
34611 if (!lp->mii)
34612 return 0;
34613
34614- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34615- val_out = lp->a.read_bcr(ioaddr, 34);
34616+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34617+ val_out = lp->a->read_bcr(ioaddr, 34);
34618
34619 return val_out;
34620 }
34621@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device
34622 if (!lp->mii)
34623 return;
34624
34625- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34626- lp->a.write_bcr(ioaddr, 34, val);
34627+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34628+ lp->a->write_bcr(ioaddr, 34, val);
34629 }
34630
34631 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34632@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct n
34633 curr_link = mii_link_ok(&lp->mii_if);
34634 } else {
34635 ulong ioaddr = dev->base_addr; /* card base I/O address */
34636- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34637+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34638 }
34639 if (!curr_link) {
34640 if (prev_link || verbose) {
34641@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct n
34642 (ecmd.duplex ==
34643 DUPLEX_FULL) ? "full" : "half");
34644 }
34645- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
34646+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
34647 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
34648 if (lp->mii_if.full_duplex)
34649 bcr9 |= (1 << 0);
34650 else
34651 bcr9 &= ~(1 << 0);
34652- lp->a.write_bcr(dev->base_addr, 9, bcr9);
34653+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
34654 }
34655 } else {
34656 if (netif_msg_link(lp))
34657diff -urNp linux-2.6.32.45/drivers/net/tg3.h linux-2.6.32.45/drivers/net/tg3.h
34658--- linux-2.6.32.45/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
34659+++ linux-2.6.32.45/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
34660@@ -95,6 +95,7 @@
34661 #define CHIPREV_ID_5750_A0 0x4000
34662 #define CHIPREV_ID_5750_A1 0x4001
34663 #define CHIPREV_ID_5750_A3 0x4003
34664+#define CHIPREV_ID_5750_C1 0x4201
34665 #define CHIPREV_ID_5750_C2 0x4202
34666 #define CHIPREV_ID_5752_A0_HW 0x5000
34667 #define CHIPREV_ID_5752_A0 0x6000
34668diff -urNp linux-2.6.32.45/drivers/net/tokenring/abyss.c linux-2.6.32.45/drivers/net/tokenring/abyss.c
34669--- linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-03-27 14:31:47.000000000 -0400
34670+++ linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-08-05 20:33:55.000000000 -0400
34671@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
34672
34673 static int __init abyss_init (void)
34674 {
34675- abyss_netdev_ops = tms380tr_netdev_ops;
34676+ pax_open_kernel();
34677+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34678
34679- abyss_netdev_ops.ndo_open = abyss_open;
34680- abyss_netdev_ops.ndo_stop = abyss_close;
34681+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34682+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34683+ pax_close_kernel();
34684
34685 return pci_register_driver(&abyss_driver);
34686 }
34687diff -urNp linux-2.6.32.45/drivers/net/tokenring/madgemc.c linux-2.6.32.45/drivers/net/tokenring/madgemc.c
34688--- linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-03-27 14:31:47.000000000 -0400
34689+++ linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-08-05 20:33:55.000000000 -0400
34690@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver
34691
34692 static int __init madgemc_init (void)
34693 {
34694- madgemc_netdev_ops = tms380tr_netdev_ops;
34695- madgemc_netdev_ops.ndo_open = madgemc_open;
34696- madgemc_netdev_ops.ndo_stop = madgemc_close;
34697+ pax_open_kernel();
34698+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34699+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34700+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34701+ pax_close_kernel();
34702
34703 return mca_register_driver (&madgemc_driver);
34704 }
34705diff -urNp linux-2.6.32.45/drivers/net/tokenring/proteon.c linux-2.6.32.45/drivers/net/tokenring/proteon.c
34706--- linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-03-27 14:31:47.000000000 -0400
34707+++ linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-08-05 20:33:55.000000000 -0400
34708@@ -353,9 +353,11 @@ static int __init proteon_init(void)
34709 struct platform_device *pdev;
34710 int i, num = 0, err = 0;
34711
34712- proteon_netdev_ops = tms380tr_netdev_ops;
34713- proteon_netdev_ops.ndo_open = proteon_open;
34714- proteon_netdev_ops.ndo_stop = tms380tr_close;
34715+ pax_open_kernel();
34716+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34717+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34718+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34719+ pax_close_kernel();
34720
34721 err = platform_driver_register(&proteon_driver);
34722 if (err)
34723diff -urNp linux-2.6.32.45/drivers/net/tokenring/skisa.c linux-2.6.32.45/drivers/net/tokenring/skisa.c
34724--- linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-03-27 14:31:47.000000000 -0400
34725+++ linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-08-05 20:33:55.000000000 -0400
34726@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34727 struct platform_device *pdev;
34728 int i, num = 0, err = 0;
34729
34730- sk_isa_netdev_ops = tms380tr_netdev_ops;
34731- sk_isa_netdev_ops.ndo_open = sk_isa_open;
34732- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34733+ pax_open_kernel();
34734+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34735+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34736+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34737+ pax_close_kernel();
34738
34739 err = platform_driver_register(&sk_isa_driver);
34740 if (err)
34741diff -urNp linux-2.6.32.45/drivers/net/tulip/de2104x.c linux-2.6.32.45/drivers/net/tulip/de2104x.c
34742--- linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
34743+++ linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
34744@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
34745 struct de_srom_info_leaf *il;
34746 void *bufp;
34747
34748+ pax_track_stack();
34749+
34750 /* download entire eeprom */
34751 for (i = 0; i < DE_EEPROM_WORDS; i++)
34752 ((__le16 *)ee_data)[i] =
34753diff -urNp linux-2.6.32.45/drivers/net/tulip/de4x5.c linux-2.6.32.45/drivers/net/tulip/de4x5.c
34754--- linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
34755+++ linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
34756@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
34757 for (i=0; i<ETH_ALEN; i++) {
34758 tmp.addr[i] = dev->dev_addr[i];
34759 }
34760- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34761+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34762 break;
34763
34764 case DE4X5_SET_HWADDR: /* Set the hardware address */
34765@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
34766 spin_lock_irqsave(&lp->lock, flags);
34767 memcpy(&statbuf, &lp->pktStats, ioc->len);
34768 spin_unlock_irqrestore(&lp->lock, flags);
34769- if (copy_to_user(ioc->data, &statbuf, ioc->len))
34770+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34771 return -EFAULT;
34772 break;
34773 }
34774diff -urNp linux-2.6.32.45/drivers/net/usb/hso.c linux-2.6.32.45/drivers/net/usb/hso.c
34775--- linux-2.6.32.45/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
34776+++ linux-2.6.32.45/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
34777@@ -71,7 +71,7 @@
34778 #include <asm/byteorder.h>
34779 #include <linux/serial_core.h>
34780 #include <linux/serial.h>
34781-
34782+#include <asm/local.h>
34783
34784 #define DRIVER_VERSION "1.2"
34785 #define MOD_AUTHOR "Option Wireless"
34786@@ -258,7 +258,7 @@ struct hso_serial {
34787
34788 /* from usb_serial_port */
34789 struct tty_struct *tty;
34790- int open_count;
34791+ local_t open_count;
34792 spinlock_t serial_lock;
34793
34794 int (*write_data) (struct hso_serial *serial);
34795@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
34796 struct urb *urb;
34797
34798 urb = serial->rx_urb[0];
34799- if (serial->open_count > 0) {
34800+ if (local_read(&serial->open_count) > 0) {
34801 count = put_rxbuf_data(urb, serial);
34802 if (count == -1)
34803 return;
34804@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
34805 DUMP1(urb->transfer_buffer, urb->actual_length);
34806
34807 /* Anyone listening? */
34808- if (serial->open_count == 0)
34809+ if (local_read(&serial->open_count) == 0)
34810 return;
34811
34812 if (status == 0) {
34813@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
34814 spin_unlock_irq(&serial->serial_lock);
34815
34816 /* check for port already opened, if not set the termios */
34817- serial->open_count++;
34818- if (serial->open_count == 1) {
34819+ if (local_inc_return(&serial->open_count) == 1) {
34820 tty->low_latency = 1;
34821 serial->rx_state = RX_IDLE;
34822 /* Force default termio settings */
34823@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
34824 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34825 if (result) {
34826 hso_stop_serial_device(serial->parent);
34827- serial->open_count--;
34828+ local_dec(&serial->open_count);
34829 kref_put(&serial->parent->ref, hso_serial_ref_free);
34830 }
34831 } else {
34832@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
34833
34834 /* reset the rts and dtr */
34835 /* do the actual close */
34836- serial->open_count--;
34837+ local_dec(&serial->open_count);
34838
34839- if (serial->open_count <= 0) {
34840- serial->open_count = 0;
34841+ if (local_read(&serial->open_count) <= 0) {
34842+ local_set(&serial->open_count, 0);
34843 spin_lock_irq(&serial->serial_lock);
34844 if (serial->tty == tty) {
34845 serial->tty->driver_data = NULL;
34846@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
34847
34848 /* the actual setup */
34849 spin_lock_irqsave(&serial->serial_lock, flags);
34850- if (serial->open_count)
34851+ if (local_read(&serial->open_count))
34852 _hso_serial_set_termios(tty, old);
34853 else
34854 tty->termios = old;
34855@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
34856 /* Start all serial ports */
34857 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34858 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34859- if (dev2ser(serial_table[i])->open_count) {
34860+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
34861 result =
34862 hso_start_serial_device(serial_table[i], GFP_NOIO);
34863 hso_kick_transmit(dev2ser(serial_table[i]));
34864diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-config.h linux-2.6.32.45/drivers/net/vxge/vxge-config.h
34865--- linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-03-27 14:31:47.000000000 -0400
34866+++ linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-08-05 20:33:55.000000000 -0400
34867@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
34868 void (*link_down)(struct __vxge_hw_device *devh);
34869 void (*crit_err)(struct __vxge_hw_device *devh,
34870 enum vxge_hw_event type, u64 ext_data);
34871-};
34872+} __no_const;
34873
34874 /*
34875 * struct __vxge_hw_blockpool_entry - Block private data structure
34876diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-main.c linux-2.6.32.45/drivers/net/vxge/vxge-main.c
34877--- linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
34878+++ linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
34879@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
34880 struct sk_buff *completed[NR_SKB_COMPLETED];
34881 int more;
34882
34883+ pax_track_stack();
34884+
34885 do {
34886 more = 0;
34887 skb_ptr = completed;
34888@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
34889 u8 mtable[256] = {0}; /* CPU to vpath mapping */
34890 int index;
34891
34892+ pax_track_stack();
34893+
34894 /*
34895 * Filling
34896 * - itable with bucket numbers
34897diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h
34898--- linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-03-27 14:31:47.000000000 -0400
34899+++ linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:33:55.000000000 -0400
34900@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
34901 struct vxge_hw_mempool_dma *dma_object,
34902 u32 index,
34903 u32 is_last);
34904-};
34905+} __no_const;
34906
34907 void
34908 __vxge_hw_mempool_destroy(
34909diff -urNp linux-2.6.32.45/drivers/net/wan/cycx_x25.c linux-2.6.32.45/drivers/net/wan/cycx_x25.c
34910--- linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
34911+++ linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
34912@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
34913 unsigned char hex[1024],
34914 * phex = hex;
34915
34916+ pax_track_stack();
34917+
34918 if (len >= (sizeof(hex) / 2))
34919 len = (sizeof(hex) / 2) - 1;
34920
34921diff -urNp linux-2.6.32.45/drivers/net/wan/hdlc_x25.c linux-2.6.32.45/drivers/net/wan/hdlc_x25.c
34922--- linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-03-27 14:31:47.000000000 -0400
34923+++ linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-08-05 20:33:55.000000000 -0400
34924@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
34925
34926 static int x25_open(struct net_device *dev)
34927 {
34928- struct lapb_register_struct cb;
34929+ static struct lapb_register_struct cb = {
34930+ .connect_confirmation = x25_connected,
34931+ .connect_indication = x25_connected,
34932+ .disconnect_confirmation = x25_disconnected,
34933+ .disconnect_indication = x25_disconnected,
34934+ .data_indication = x25_data_indication,
34935+ .data_transmit = x25_data_transmit
34936+ };
34937 int result;
34938
34939- cb.connect_confirmation = x25_connected;
34940- cb.connect_indication = x25_connected;
34941- cb.disconnect_confirmation = x25_disconnected;
34942- cb.disconnect_indication = x25_disconnected;
34943- cb.data_indication = x25_data_indication;
34944- cb.data_transmit = x25_data_transmit;
34945-
34946 result = lapb_register(dev, &cb);
34947 if (result != LAPB_OK)
34948 return result;
34949diff -urNp linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c
34950--- linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
34951+++ linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
34952@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
34953 int do_autopm = 1;
34954 DECLARE_COMPLETION_ONSTACK(notif_completion);
34955
34956+ pax_track_stack();
34957+
34958 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
34959 i2400m, ack, ack_size);
34960 BUG_ON(_ack == i2400m->bm_ack_buf);
34961diff -urNp linux-2.6.32.45/drivers/net/wireless/airo.c linux-2.6.32.45/drivers/net/wireless/airo.c
34962--- linux-2.6.32.45/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
34963+++ linux-2.6.32.45/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
34964@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
34965 BSSListElement * loop_net;
34966 BSSListElement * tmp_net;
34967
34968+ pax_track_stack();
34969+
34970 /* Blow away current list of scan results */
34971 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
34972 list_move_tail (&loop_net->list, &ai->network_free_list);
34973@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
34974 WepKeyRid wkr;
34975 int rc;
34976
34977+ pax_track_stack();
34978+
34979 memset( &mySsid, 0, sizeof( mySsid ) );
34980 kfree (ai->flash);
34981 ai->flash = NULL;
34982@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
34983 __le32 *vals = stats.vals;
34984 int len;
34985
34986+ pax_track_stack();
34987+
34988 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34989 return -ENOMEM;
34990 data = (struct proc_data *)file->private_data;
34991@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
34992 /* If doLoseSync is not 1, we won't do a Lose Sync */
34993 int doLoseSync = -1;
34994
34995+ pax_track_stack();
34996+
34997 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34998 return -ENOMEM;
34999 data = (struct proc_data *)file->private_data;
35000@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
35001 int i;
35002 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
35003
35004+ pax_track_stack();
35005+
35006 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
35007 if (!qual)
35008 return -ENOMEM;
35009@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
35010 CapabilityRid cap_rid;
35011 __le32 *vals = stats_rid.vals;
35012
35013+ pax_track_stack();
35014+
35015 /* Get stats out of the card */
35016 clear_bit(JOB_WSTATS, &local->jobs);
35017 if (local->power.event) {
35018diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c
35019--- linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
35020+++ linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
35021@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
35022 unsigned int v;
35023 u64 tsf;
35024
35025+ pax_track_stack();
35026+
35027 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
35028 len += snprintf(buf+len, sizeof(buf)-len,
35029 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
35030@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
35031 unsigned int len = 0;
35032 unsigned int i;
35033
35034+ pax_track_stack();
35035+
35036 len += snprintf(buf+len, sizeof(buf)-len,
35037 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
35038
35039diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c
35040--- linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
35041+++ linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
35042@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
35043 char buf[512];
35044 unsigned int len = 0;
35045
35046+ pax_track_stack();
35047+
35048 len += snprintf(buf + len, sizeof(buf) - len,
35049 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
35050 len += snprintf(buf + len, sizeof(buf) - len,
35051@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
35052 int i;
35053 u8 addr[ETH_ALEN];
35054
35055+ pax_track_stack();
35056+
35057 len += snprintf(buf + len, sizeof(buf) - len,
35058 "primary: %s (%s chan=%d ht=%d)\n",
35059 wiphy_name(sc->pri_wiphy->hw->wiphy),
35060diff -urNp linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c
35061--- linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35062+++ linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35063@@ -43,7 +43,7 @@ static struct dentry *rootdir;
35064 struct b43_debugfs_fops {
35065 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
35066 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
35067- struct file_operations fops;
35068+ const struct file_operations fops;
35069 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
35070 size_t file_struct_offset;
35071 };
35072diff -urNp linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c
35073--- linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35074+++ linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35075@@ -44,7 +44,7 @@ static struct dentry *rootdir;
35076 struct b43legacy_debugfs_fops {
35077 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
35078 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
35079- struct file_operations fops;
35080+ const struct file_operations fops;
35081 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
35082 size_t file_struct_offset;
35083 /* Take wl->irq_lock before calling read/write? */
35084diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c
35085--- linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
35086+++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
35087@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
35088 int err;
35089 DECLARE_SSID_BUF(ssid);
35090
35091+ pax_track_stack();
35092+
35093 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
35094
35095 if (ssid_len)
35096@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
35097 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
35098 int err;
35099
35100+ pax_track_stack();
35101+
35102 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
35103 idx, keylen, len);
35104
35105diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c
35106--- linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
35107+++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
35108@@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
35109 unsigned long flags;
35110 DECLARE_SSID_BUF(ssid);
35111
35112+ pax_track_stack();
35113+
35114 LIBIPW_DEBUG_SCAN("'%s' (%pM"
35115 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
35116 print_ssid(ssid, info_element->data, info_element->len),
35117diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c
35118--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
35119+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
35120@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
35121 },
35122 };
35123
35124-static struct iwl_ops iwl1000_ops = {
35125+static const struct iwl_ops iwl1000_ops = {
35126 .ucode = &iwl5000_ucode,
35127 .lib = &iwl1000_lib,
35128 .hcmd = &iwl5000_hcmd,
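The iwlwifi hunks in this stretch turn the per-chip dispatch tables (iwl1000_ops, iwl3945_ops, iwl4965_ops, iwl5000_ops, iwl5150_ops, iwl6000_ops) into const objects, which places them in .rodata so the embedded function pointers cannot be redirected at runtime. A small sketch of the pattern with simplified stand-in types, not the real iwlwifi structures.

struct lib_ops  { int (*init)(void); };
struct hcmd_ops { int (*send)(int cmd); };

struct drv_ops {
	const struct lib_ops  *lib;
	const struct hcmd_ops *hcmd;
};

static int demo_init(void)    { return 0; }
static int demo_send(int cmd) { return cmd; }

static const struct lib_ops  demo_lib  = { .init = demo_init };
static const struct hcmd_ops demo_hcmd = { .send = demo_send };

/* const => .rodata: neither a stray write nor an attacker can swap the ops */
static const struct drv_ops demo_ops = {
	.lib  = &demo_lib,
	.hcmd = &demo_hcmd,
};

int probe_device(void)
{
	return demo_ops.lib->init() + demo_ops.hcmd->send(0);
}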
35129diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c
35130--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-03-27 14:31:47.000000000 -0400
35131+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-08-05 20:33:55.000000000 -0400
35132@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_
35133 */
35134 if (iwl3945_mod_params.disable_hw_scan) {
35135 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
35136- iwl3945_hw_ops.hw_scan = NULL;
35137+ pax_open_kernel();
35138+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
35139+ pax_close_kernel();
35140 }
35141
35142
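Once the mac80211 ops table is read-only, the probe path above can no longer write hw_scan = NULL directly; the patch bridges that with pax_open_kernel()/pax_close_kernel() and a write through a void ** cast. A compressed sketch of the pattern, assuming the pax_* helpers come from the patched tree (they briefly lift kernel write protection); the stubs below are placeholders so the fragment stands alone.

#ifndef pax_open_kernel
#define pax_open_kernel()  do { } while (0)   /* stand-ins for the PaX helpers */
#define pax_close_kernel() do { } while (0)
#endif

struct hw_ops {
	int (*hw_scan)(void *hw);
};

static int real_hw_scan(void *hw) { (void)hw; return 0; }

/* in the patched tree this table would be constified / read-only */
static struct hw_ops demo_hw_ops = { .hw_scan = real_hw_scan };

void disable_hw_scan(void)
{
	pax_open_kernel();
	/* the void ** cast mirrors the hunk: it bypasses the const qualifier
	 * that constification puts on the ops structure */
	*(void **)&demo_hw_ops.hw_scan = (void *)0;
	pax_close_kernel();
}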
35143diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c
35144--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
35145+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
35146@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
35147 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
35148 };
35149
35150-static struct iwl_ops iwl3945_ops = {
35151+static const struct iwl_ops iwl3945_ops = {
35152 .ucode = &iwl3945_ucode,
35153 .lib = &iwl3945_lib,
35154 .hcmd = &iwl3945_hcmd,
35155diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c
35156--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
35157+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
35158@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
35159 },
35160 };
35161
35162-static struct iwl_ops iwl4965_ops = {
35163+static const struct iwl_ops iwl4965_ops = {
35164 .ucode = &iwl4965_ucode,
35165 .lib = &iwl4965_lib,
35166 .hcmd = &iwl4965_hcmd,
35167diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c
35168--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
35169+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
35170@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
35171 },
35172 };
35173
35174-struct iwl_ops iwl5000_ops = {
35175+const struct iwl_ops iwl5000_ops = {
35176 .ucode = &iwl5000_ucode,
35177 .lib = &iwl5000_lib,
35178 .hcmd = &iwl5000_hcmd,
35179 .utils = &iwl5000_hcmd_utils,
35180 };
35181
35182-static struct iwl_ops iwl5150_ops = {
35183+static const struct iwl_ops iwl5150_ops = {
35184 .ucode = &iwl5000_ucode,
35185 .lib = &iwl5150_lib,
35186 .hcmd = &iwl5000_hcmd,
35187diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c
35188--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
35189+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
35190@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
35191 .calc_rssi = iwl5000_calc_rssi,
35192 };
35193
35194-static struct iwl_ops iwl6000_ops = {
35195+static const struct iwl_ops iwl6000_ops = {
35196 .ucode = &iwl5000_ucode,
35197 .lib = &iwl6000_lib,
35198 .hcmd = &iwl5000_hcmd,
35199diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c
35200--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-03-27 14:31:47.000000000 -0400
35201+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:33:55.000000000 -0400
35202@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev
35203 if (iwl_debug_level & IWL_DL_INFO)
35204 dev_printk(KERN_DEBUG, &(pdev->dev),
35205 "Disabling hw_scan\n");
35206- iwl_hw_ops.hw_scan = NULL;
35207+ pax_open_kernel();
35208+ *(void **)&iwl_hw_ops.hw_scan = NULL;
35209+ pax_close_kernel();
35210 }
35211
35212 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
35213diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35214--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
35215+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
35216@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
35217 u8 active_index = 0;
35218 s32 tpt = 0;
35219
35220+ pax_track_stack();
35221+
35222 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
35223
35224 if (!ieee80211_is_data(hdr->frame_control) ||
35225@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
35226 u8 valid_tx_ant = 0;
35227 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
35228
35229+ pax_track_stack();
35230+
35231 /* Override starting rate (index 0) if needed for debug purposes */
35232 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
35233
35234diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35235--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
35236+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
35237@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
35238 int pos = 0;
35239 const size_t bufsz = sizeof(buf);
35240
35241+ pax_track_stack();
35242+
35243 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
35244 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
35245 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
35246@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
35247 const size_t bufsz = sizeof(buf);
35248 ssize_t ret;
35249
35250+ pax_track_stack();
35251+
35252 for (i = 0; i < AC_NUM; i++) {
35253 pos += scnprintf(buf + pos, bufsz - pos,
35254 "\tcw_min\tcw_max\taifsn\ttxop\n");
35255diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h
35256--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
35257+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
35258@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
35259 #endif
35260
35261 #else
35262-#define IWL_DEBUG(__priv, level, fmt, args...)
35263-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
35264+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
35265+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
35266 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
35267 void *p, u32 len)
35268 {}
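The iwl-debug.h hunk above gives the disabled IWL_DEBUG macros a do {} while (0) body instead of an empty expansion, so the no-op variant parses exactly like the real one: it must be followed by a semicolon, it forms a single statement inside an unbraced if/else, and it avoids "empty body" warnings. A small self-contained illustration of the idiom.

#include <stdio.h>

#define DEBUG_ENABLED 0

#if DEBUG_ENABLED
#define DBG(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define DBG(fmt, ...) do {} while (0)   /* same statement shape, no output */
#endif

int main(void)
{
	int err = 0;

	if (err)
		DBG("error %d\n", err);      /* one statement, no braces needed */
	else
		puts("ok");
	return 0;
}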
35269diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h
35270--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
35271+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
35272@@ -68,7 +68,7 @@ struct iwl_tx_queue;
35273
35274 /* shared structures from iwl-5000.c */
35275 extern struct iwl_mod_params iwl50_mod_params;
35276-extern struct iwl_ops iwl5000_ops;
35277+extern const struct iwl_ops iwl5000_ops;
35278 extern struct iwl_ucode_ops iwl5000_ucode;
35279 extern struct iwl_lib_ops iwl5000_lib;
35280 extern struct iwl_hcmd_ops iwl5000_hcmd;
35281diff -urNp linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c
35282--- linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35283+++ linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
35284@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
35285 int buf_len = 512;
35286 size_t len = 0;
35287
35288+ pax_track_stack();
35289+
35290 if (*ppos != 0)
35291 return 0;
35292 if (count < sizeof(buf))
35293diff -urNp linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c
35294--- linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35295+++ linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35296@@ -708,7 +708,7 @@ out_unlock:
35297 struct lbs_debugfs_files {
35298 const char *name;
35299 int perm;
35300- struct file_operations fops;
35301+ const struct file_operations fops;
35302 };
35303
35304 static const struct lbs_debugfs_files debugfs_files[] = {
35305diff -urNp linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c
35306--- linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
35307+++ linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
35308@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
35309
35310 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
35311
35312- if (rts_threshold < 0 || rts_threshold > 2347)
35313+ if (rts_threshold > 2347)
35314 rts_threshold = 2347;
35315
35316 tmp = cpu_to_le32(rts_threshold);
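rts_threshold is unsigned in this tree, so the dropped half of the condition (rts_threshold < 0) could never be true; the patch keeps only the meaningful upper bound, and the same dead-comparison cleanup appears again later for *irq and *dma in drivers/pnp/resource.c. A one-function sketch of the before/after.

typedef unsigned int u32;

static u32 clamp_rts_threshold(u32 rts_threshold)
{
	/* was: if (rts_threshold < 0 || rts_threshold > 2347)
	 * the first test is always false for an unsigned value and
	 * only triggers compiler warnings */
	if (rts_threshold > 2347)
		rts_threshold = 2347;
	return rts_threshold;
}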
35317diff -urNp linux-2.6.32.45/drivers/oprofile/buffer_sync.c linux-2.6.32.45/drivers/oprofile/buffer_sync.c
35318--- linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
35319+++ linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
35320@@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
35321 if (cookie == NO_COOKIE)
35322 offset = pc;
35323 if (cookie == INVALID_COOKIE) {
35324- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35325+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35326 offset = pc;
35327 }
35328 if (cookie != last_cookie) {
35329@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
35330 /* add userspace sample */
35331
35332 if (!mm) {
35333- atomic_inc(&oprofile_stats.sample_lost_no_mm);
35334+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35335 return 0;
35336 }
35337
35338 cookie = lookup_dcookie(mm, s->eip, &offset);
35339
35340 if (cookie == INVALID_COOKIE) {
35341- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35342+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35343 return 0;
35344 }
35345
35346@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
35347 /* ignore backtraces if failed to add a sample */
35348 if (state == sb_bt_start) {
35349 state = sb_bt_ignore;
35350- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35351+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35352 }
35353 }
35354 release_mm(mm);
35355diff -urNp linux-2.6.32.45/drivers/oprofile/event_buffer.c linux-2.6.32.45/drivers/oprofile/event_buffer.c
35356--- linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
35357+++ linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
35358@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
35359 }
35360
35361 if (buffer_pos == buffer_size) {
35362- atomic_inc(&oprofile_stats.event_lost_overflow);
35363+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35364 return;
35365 }
35366
35367diff -urNp linux-2.6.32.45/drivers/oprofile/oprof.c linux-2.6.32.45/drivers/oprofile/oprof.c
35368--- linux-2.6.32.45/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
35369+++ linux-2.6.32.45/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
35370@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
35371 if (oprofile_ops.switch_events())
35372 return;
35373
35374- atomic_inc(&oprofile_stats.multiplex_counter);
35375+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35376 start_switch_worker();
35377 }
35378
35379diff -urNp linux-2.6.32.45/drivers/oprofile/oprofilefs.c linux-2.6.32.45/drivers/oprofile/oprofilefs.c
35380--- linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
35381+++ linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
35382@@ -187,7 +187,7 @@ static const struct file_operations atom
35383
35384
35385 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35386- char const *name, atomic_t *val)
35387+ char const *name, atomic_unchecked_t *val)
35388 {
35389 struct dentry *d = __oprofilefs_create_file(sb, root, name,
35390 &atomic_ro_fops, 0444);
35391diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.c linux-2.6.32.45/drivers/oprofile/oprofile_stats.c
35392--- linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
35393+++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
35394@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35395 cpu_buf->sample_invalid_eip = 0;
35396 }
35397
35398- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35399- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35400- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35401- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35402- atomic_set(&oprofile_stats.multiplex_counter, 0);
35403+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35404+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35405+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35406+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35407+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35408 }
35409
35410
35411diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.h linux-2.6.32.45/drivers/oprofile/oprofile_stats.h
35412--- linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
35413+++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
35414@@ -13,11 +13,11 @@
35415 #include <asm/atomic.h>
35416
35417 struct oprofile_stat_struct {
35418- atomic_t sample_lost_no_mm;
35419- atomic_t sample_lost_no_mapping;
35420- atomic_t bt_lost_no_mapping;
35421- atomic_t event_lost_overflow;
35422- atomic_t multiplex_counter;
35423+ atomic_unchecked_t sample_lost_no_mm;
35424+ atomic_unchecked_t sample_lost_no_mapping;
35425+ atomic_unchecked_t bt_lost_no_mapping;
35426+ atomic_unchecked_t event_lost_overflow;
35427+ atomic_unchecked_t multiplex_counter;
35428 };
35429
35430 extern struct oprofile_stat_struct oprofile_stats;
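PaX's REFCOUNT hardening traps atomic_t overflows on the assumption that the counter is a reference count; counters that are allowed to wrap, such as these oprofile statistics (and the qdio_perf_stats counters later in the patch), are therefore moved to atomic_unchecked_t with the matching *_unchecked operations so a legitimate wrap-around is not flagged as an exploit. A sketch under that assumption; the type and increment below are stand-ins that only make the fragment self-contained.

typedef struct { volatile int counter; } atomic_unchecked_t;   /* stand-in */

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);   /* overflow deliberately allowed */
}

struct event_stats {
	atomic_unchecked_t sample_lost;         /* statistic: may wrap harmlessly */
	atomic_unchecked_t event_overflow;
};

static struct event_stats stats;

void note_lost_sample(void)
{
	atomic_inc_unchecked(&stats.sample_lost);
}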
35431diff -urNp linux-2.6.32.45/drivers/parisc/pdc_stable.c linux-2.6.32.45/drivers/parisc/pdc_stable.c
35432--- linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
35433+++ linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
35434@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
35435 return ret;
35436 }
35437
35438-static struct sysfs_ops pdcspath_attr_ops = {
35439+static const struct sysfs_ops pdcspath_attr_ops = {
35440 .show = pdcspath_attr_show,
35441 .store = pdcspath_attr_store,
35442 };
35443diff -urNp linux-2.6.32.45/drivers/parport/procfs.c linux-2.6.32.45/drivers/parport/procfs.c
35444--- linux-2.6.32.45/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
35445+++ linux-2.6.32.45/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
35446@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
35447
35448 *ppos += len;
35449
35450- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35451+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35452 }
35453
35454 #ifdef CONFIG_PARPORT_1284
35455@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
35456
35457 *ppos += len;
35458
35459- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35460+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35461 }
35462 #endif /* IEEE1284.3 support. */
35463
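The parport procfs hunks bound the computed length against the on-stack buffer before handing it to copy_to_user(), so a miscalculated len can never copy adjacent stack memory out to userspace. A userspace-style sketch of the same guard; copy_to_user() is modelled with memcpy() purely to keep the fragment self-contained.

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int report_to_user(char *result, size_t result_size)
{
	char buffer[256];
	size_t len;

	len = (size_t)snprintf(buffer, sizeof(buffer), "active device: %s\n", "lp0");

	/* was: return copy_to_user(result, buffer, len) ? -EFAULT : 0;
	 * the length is now validated against the source buffer first */
	if (len > sizeof(buffer) || result_size < len)
		return -EFAULT;
	memcpy(result, buffer, len);
	return 0;
}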
35464diff -urNp linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c
35465--- linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
35466+++ linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
35467@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
35468 }
35469
35470
35471-static struct acpi_dock_ops acpiphp_dock_ops = {
35472+static const struct acpi_dock_ops acpiphp_dock_ops = {
35473 .handler = handle_hotplug_event_func,
35474 };
35475
35476diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h
35477--- linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-03-27 14:31:47.000000000 -0400
35478+++ linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:33:55.000000000 -0400
35479@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35480 int (*hardware_test) (struct slot* slot, u32 value);
35481 u8 (*get_power) (struct slot* slot);
35482 int (*set_power) (struct slot* slot, int value);
35483-};
35484+} __no_const;
35485
35486 struct cpci_hp_controller {
35487 unsigned int irq;
35488diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c
35489--- linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
35490+++ linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
35491@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
35492
35493 void compaq_nvram_init (void __iomem *rom_start)
35494 {
35495+
35496+#ifndef CONFIG_PAX_KERNEXEC
35497 if (rom_start) {
35498 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35499 }
35500+#endif
35501+
35502 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35503
35504 /* initialize our int15 lock */
35505diff -urNp linux-2.6.32.45/drivers/pci/hotplug/fakephp.c linux-2.6.32.45/drivers/pci/hotplug/fakephp.c
35506--- linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
35507+++ linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
35508@@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
35509 }
35510
35511 static struct kobj_type legacy_ktype = {
35512- .sysfs_ops = &(struct sysfs_ops){
35513+ .sysfs_ops = &(const struct sysfs_ops){
35514 .store = legacy_store, .show = legacy_show
35515 },
35516 .release = &legacy_release,
35517diff -urNp linux-2.6.32.45/drivers/pci/intel-iommu.c linux-2.6.32.45/drivers/pci/intel-iommu.c
35518--- linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
35519+++ linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
35520@@ -2643,7 +2643,7 @@ error:
35521 return 0;
35522 }
35523
35524-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
35525+dma_addr_t intel_map_page(struct device *dev, struct page *page,
35526 unsigned long offset, size_t size,
35527 enum dma_data_direction dir,
35528 struct dma_attrs *attrs)
35529@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
35530 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
35531 }
35532
35533-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35534+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35535 size_t size, enum dma_data_direction dir,
35536 struct dma_attrs *attrs)
35537 {
35538@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
35539 }
35540 }
35541
35542-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
35543+void *intel_alloc_coherent(struct device *hwdev, size_t size,
35544 dma_addr_t *dma_handle, gfp_t flags)
35545 {
35546 void *vaddr;
35547@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
35548 return NULL;
35549 }
35550
35551-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35552+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35553 dma_addr_t dma_handle)
35554 {
35555 int order;
35556@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
35557 free_pages((unsigned long)vaddr, order);
35558 }
35559
35560-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35561+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35562 int nelems, enum dma_data_direction dir,
35563 struct dma_attrs *attrs)
35564 {
35565@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
35566 return nelems;
35567 }
35568
35569-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35570+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35571 enum dma_data_direction dir, struct dma_attrs *attrs)
35572 {
35573 int i;
35574@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
35575 return nelems;
35576 }
35577
35578-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35579+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35580 {
35581 return !dma_addr;
35582 }
35583
35584-struct dma_map_ops intel_dma_ops = {
35585+const struct dma_map_ops intel_dma_ops = {
35586 .alloc_coherent = intel_alloc_coherent,
35587 .free_coherent = intel_free_coherent,
35588 .map_sg = intel_map_sg,
35589diff -urNp linux-2.6.32.45/drivers/pci/pcie/aspm.c linux-2.6.32.45/drivers/pci/pcie/aspm.c
35590--- linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
35591+++ linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
35592@@ -27,9 +27,9 @@
35593 #define MODULE_PARAM_PREFIX "pcie_aspm."
35594
35595 /* Note: those are not register definitions */
35596-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35597-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35598-#define ASPM_STATE_L1 (4) /* L1 state */
35599+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35600+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35601+#define ASPM_STATE_L1 (4U) /* L1 state */
35602 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35603 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35604
35605diff -urNp linux-2.6.32.45/drivers/pci/probe.c linux-2.6.32.45/drivers/pci/probe.c
35606--- linux-2.6.32.45/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
35607+++ linux-2.6.32.45/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
35608@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
35609 return ret;
35610 }
35611
35612-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
35613+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
35614 struct device_attribute *attr,
35615 char *buf)
35616 {
35617 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
35618 }
35619
35620-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
35621+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
35622 struct device_attribute *attr,
35623 char *buf)
35624 {
35625diff -urNp linux-2.6.32.45/drivers/pci/proc.c linux-2.6.32.45/drivers/pci/proc.c
35626--- linux-2.6.32.45/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
35627+++ linux-2.6.32.45/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
35628@@ -480,7 +480,16 @@ static const struct file_operations proc
35629 static int __init pci_proc_init(void)
35630 {
35631 struct pci_dev *dev = NULL;
35632+
35633+#ifdef CONFIG_GRKERNSEC_PROC_ADD
35634+#ifdef CONFIG_GRKERNSEC_PROC_USER
35635+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35636+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35637+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35638+#endif
35639+#else
35640 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35641+#endif
35642 proc_create("devices", 0, proc_bus_pci_dir,
35643 &proc_bus_pci_dev_operations);
35644 proc_initialized = 1;
35645diff -urNp linux-2.6.32.45/drivers/pci/slot.c linux-2.6.32.45/drivers/pci/slot.c
35646--- linux-2.6.32.45/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
35647+++ linux-2.6.32.45/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
35648@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
35649 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
35650 }
35651
35652-static struct sysfs_ops pci_slot_sysfs_ops = {
35653+static const struct sysfs_ops pci_slot_sysfs_ops = {
35654 .show = pci_slot_attr_show,
35655 .store = pci_slot_attr_store,
35656 };
35657diff -urNp linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c
35658--- linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
35659+++ linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
35660@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
35661 return -EFAULT;
35662 }
35663 }
35664- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35665+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35666 if (!buf)
35667 return -ENOMEM;
35668
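The pcmcia_ioctl.c hunk swaps kmalloc() for kzalloc() for a buffer whose contents are later copied back to userspace, so fields the ioctl handler never writes come back as zeroes instead of stale heap data. A userspace sketch of the same idea; calloc() stands in for kzalloc(..., GFP_KERNEL).

#include <stdlib.h>

struct ioctl_arg {
	unsigned int handle;
	char         payload[64];   /* may be only partially filled before copy-out */
};

struct ioctl_arg *alloc_ioctl_arg(void)
{
	/* was: malloc(sizeof(*buf)) -- unwritten bytes would carry old heap data */
	struct ioctl_arg *buf = calloc(1, sizeof(*buf));

	if (!buf)
		return NULL;
	buf->handle = 42;            /* payload intentionally left for later users */
	return buf;
}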
35669diff -urNp linux-2.6.32.45/drivers/platform/x86/acer-wmi.c linux-2.6.32.45/drivers/platform/x86/acer-wmi.c
35670--- linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
35671+++ linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
35672@@ -918,7 +918,7 @@ static int update_bl_status(struct backl
35673 return 0;
35674 }
35675
35676-static struct backlight_ops acer_bl_ops = {
35677+static const struct backlight_ops acer_bl_ops = {
35678 .get_brightness = read_brightness,
35679 .update_status = update_bl_status,
35680 };
35681diff -urNp linux-2.6.32.45/drivers/platform/x86/asus_acpi.c linux-2.6.32.45/drivers/platform/x86/asus_acpi.c
35682--- linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
35683+++ linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
35684@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
35685 return 0;
35686 }
35687
35688-static struct backlight_ops asus_backlight_data = {
35689+static const struct backlight_ops asus_backlight_data = {
35690 .get_brightness = read_brightness,
35691 .update_status = set_brightness_status,
35692 };
35693diff -urNp linux-2.6.32.45/drivers/platform/x86/asus-laptop.c linux-2.6.32.45/drivers/platform/x86/asus-laptop.c
35694--- linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
35695+++ linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
35696@@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
35697 */
35698 static int read_brightness(struct backlight_device *bd);
35699 static int update_bl_status(struct backlight_device *bd);
35700-static struct backlight_ops asusbl_ops = {
35701+static const struct backlight_ops asusbl_ops = {
35702 .get_brightness = read_brightness,
35703 .update_status = update_bl_status,
35704 };
35705diff -urNp linux-2.6.32.45/drivers/platform/x86/compal-laptop.c linux-2.6.32.45/drivers/platform/x86/compal-laptop.c
35706--- linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
35707+++ linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
35708@@ -163,7 +163,7 @@ static int bl_update_status(struct backl
35709 return set_lcd_level(b->props.brightness);
35710 }
35711
35712-static struct backlight_ops compalbl_ops = {
35713+static const struct backlight_ops compalbl_ops = {
35714 .get_brightness = bl_get_brightness,
35715 .update_status = bl_update_status,
35716 };
35717diff -urNp linux-2.6.32.45/drivers/platform/x86/dell-laptop.c linux-2.6.32.45/drivers/platform/x86/dell-laptop.c
35718--- linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
35719+++ linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
35720@@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
35721 return buffer.output[1];
35722 }
35723
35724-static struct backlight_ops dell_ops = {
35725+static const struct backlight_ops dell_ops = {
35726 .get_brightness = dell_get_intensity,
35727 .update_status = dell_send_intensity,
35728 };
35729diff -urNp linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c
35730--- linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
35731+++ linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
35732@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
35733 */
35734 static int read_brightness(struct backlight_device *bd);
35735 static int update_bl_status(struct backlight_device *bd);
35736-static struct backlight_ops eeepcbl_ops = {
35737+static const struct backlight_ops eeepcbl_ops = {
35738 .get_brightness = read_brightness,
35739 .update_status = update_bl_status,
35740 };
35741diff -urNp linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c
35742--- linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
35743+++ linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
35744@@ -436,7 +436,7 @@ static int bl_update_status(struct backl
35745 return ret;
35746 }
35747
35748-static struct backlight_ops fujitsubl_ops = {
35749+static const struct backlight_ops fujitsubl_ops = {
35750 .get_brightness = bl_get_brightness,
35751 .update_status = bl_update_status,
35752 };
35753diff -urNp linux-2.6.32.45/drivers/platform/x86/msi-laptop.c linux-2.6.32.45/drivers/platform/x86/msi-laptop.c
35754--- linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
35755+++ linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
35756@@ -161,7 +161,7 @@ static int bl_update_status(struct backl
35757 return set_lcd_level(b->props.brightness);
35758 }
35759
35760-static struct backlight_ops msibl_ops = {
35761+static const struct backlight_ops msibl_ops = {
35762 .get_brightness = bl_get_brightness,
35763 .update_status = bl_update_status,
35764 };
35765diff -urNp linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c
35766--- linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
35767+++ linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
35768@@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
35769 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
35770 }
35771
35772-static struct backlight_ops pcc_backlight_ops = {
35773+static const struct backlight_ops pcc_backlight_ops = {
35774 .get_brightness = bl_get,
35775 .update_status = bl_set_status,
35776 };
35777diff -urNp linux-2.6.32.45/drivers/platform/x86/sony-laptop.c linux-2.6.32.45/drivers/platform/x86/sony-laptop.c
35778--- linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
35779+++ linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
35780@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
35781 }
35782
35783 static struct backlight_device *sony_backlight_device;
35784-static struct backlight_ops sony_backlight_ops = {
35785+static const struct backlight_ops sony_backlight_ops = {
35786 .update_status = sony_backlight_update_status,
35787 .get_brightness = sony_backlight_get_brightness,
35788 };
35789diff -urNp linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c
35790--- linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
35791+++ linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:33:55.000000000 -0400
35792@@ -2137,7 +2137,7 @@ static int hotkey_mask_get(void)
35793 return 0;
35794 }
35795
35796-void static hotkey_mask_warn_incomplete_mask(void)
35797+static void hotkey_mask_warn_incomplete_mask(void)
35798 {
35799 /* log only what the user can fix... */
35800 const u32 wantedmask = hotkey_driver_mask &
35801@@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
35802 BACKLIGHT_UPDATE_HOTKEY);
35803 }
35804
35805-static struct backlight_ops ibm_backlight_data = {
35806+static const struct backlight_ops ibm_backlight_data = {
35807 .get_brightness = brightness_get,
35808 .update_status = brightness_update_status,
35809 };
35810diff -urNp linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c
35811--- linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
35812+++ linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
35813@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
35814 return AE_OK;
35815 }
35816
35817-static struct backlight_ops toshiba_backlight_data = {
35818+static const struct backlight_ops toshiba_backlight_data = {
35819 .get_brightness = get_lcd,
35820 .update_status = set_lcd_status,
35821 };
35822diff -urNp linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c
35823--- linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
35824+++ linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
35825@@ -60,7 +60,7 @@ do { \
35826 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35827 } while(0)
35828
35829-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35830+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35831 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35832
35833 /*
35834@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
35835
35836 cpu = get_cpu();
35837 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35838+
35839+ pax_open_kernel();
35840 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35841+ pax_close_kernel();
35842
35843 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35844 spin_lock_irqsave(&pnp_bios_lock, flags);
35845@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
35846 :"memory");
35847 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35848
35849+ pax_open_kernel();
35850 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35851+ pax_close_kernel();
35852+
35853 put_cpu();
35854
35855 /* If we get here and this is set then the PnP BIOS faulted on us. */
35856@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
35857 return status;
35858 }
35859
35860-void pnpbios_calls_init(union pnp_bios_install_struct *header)
35861+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35862 {
35863 int i;
35864
35865@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
35866 pnp_bios_callpoint.offset = header->fields.pm16offset;
35867 pnp_bios_callpoint.segment = PNP_CS16;
35868
35869+ pax_open_kernel();
35870+
35871 for_each_possible_cpu(i) {
35872 struct desc_struct *gdt = get_cpu_gdt_table(i);
35873 if (!gdt)
35874@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
35875 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35876 (unsigned long)__va(header->fields.pm16dseg));
35877 }
35878+
35879+ pax_close_kernel();
35880 }
35881diff -urNp linux-2.6.32.45/drivers/pnp/resource.c linux-2.6.32.45/drivers/pnp/resource.c
35882--- linux-2.6.32.45/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
35883+++ linux-2.6.32.45/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
35884@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
35885 return 1;
35886
35887 /* check if the resource is valid */
35888- if (*irq < 0 || *irq > 15)
35889+ if (*irq > 15)
35890 return 0;
35891
35892 /* check if the resource is reserved */
35893@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
35894 return 1;
35895
35896 /* check if the resource is valid */
35897- if (*dma < 0 || *dma == 4 || *dma > 7)
35898+ if (*dma == 4 || *dma > 7)
35899 return 0;
35900
35901 /* check if the resource is reserved */
35902diff -urNp linux-2.6.32.45/drivers/power/bq27x00_battery.c linux-2.6.32.45/drivers/power/bq27x00_battery.c
35903--- linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-03-27 14:31:47.000000000 -0400
35904+++ linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-08-05 20:33:55.000000000 -0400
35905@@ -44,7 +44,7 @@ struct bq27x00_device_info;
35906 struct bq27x00_access_methods {
35907 int (*read)(u8 reg, int *rt_value, int b_single,
35908 struct bq27x00_device_info *di);
35909-};
35910+} __no_const;
35911
35912 struct bq27x00_device_info {
35913 struct device *dev;
35914diff -urNp linux-2.6.32.45/drivers/rtc/rtc-dev.c linux-2.6.32.45/drivers/rtc/rtc-dev.c
35915--- linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
35916+++ linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
35917@@ -14,6 +14,7 @@
35918 #include <linux/module.h>
35919 #include <linux/rtc.h>
35920 #include <linux/sched.h>
35921+#include <linux/grsecurity.h>
35922 #include "rtc-core.h"
35923
35924 static dev_t rtc_devt;
35925@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
35926 if (copy_from_user(&tm, uarg, sizeof(tm)))
35927 return -EFAULT;
35928
35929+ gr_log_timechange();
35930+
35931 return rtc_set_time(rtc, &tm);
35932
35933 case RTC_PIE_ON:
35934diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.c linux-2.6.32.45/drivers/s390/cio/qdio_perf.c
35935--- linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
35936+++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
35937@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
35938 static int qdio_perf_proc_show(struct seq_file *m, void *v)
35939 {
35940 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
35941- (long)atomic_long_read(&perf_stats.qdio_int));
35942+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
35943 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
35944- (long)atomic_long_read(&perf_stats.pci_int));
35945+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
35946 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
35947- (long)atomic_long_read(&perf_stats.thin_int));
35948+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
35949 seq_printf(m, "\n");
35950 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
35951- (long)atomic_long_read(&perf_stats.tasklet_inbound));
35952+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
35953 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
35954- (long)atomic_long_read(&perf_stats.tasklet_outbound));
35955+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
35956 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
35957- (long)atomic_long_read(&perf_stats.tasklet_thinint),
35958- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
35959+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
35960+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
35961 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
35962- (long)atomic_long_read(&perf_stats.thinint_inbound),
35963- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
35964+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
35965+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
35966 seq_printf(m, "\n");
35967 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
35968- (long)atomic_long_read(&perf_stats.siga_in));
35969+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
35970 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
35971- (long)atomic_long_read(&perf_stats.siga_out));
35972+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
35973 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
35974- (long)atomic_long_read(&perf_stats.siga_sync));
35975+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
35976 seq_printf(m, "\n");
35977 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
35978- (long)atomic_long_read(&perf_stats.inbound_handler));
35979+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
35980 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
35981- (long)atomic_long_read(&perf_stats.outbound_handler));
35982+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
35983 seq_printf(m, "\n");
35984 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
35985- (long)atomic_long_read(&perf_stats.fast_requeue));
35986+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
35987 seq_printf(m, "Number of outbound target full condition\t: %li\n",
35988- (long)atomic_long_read(&perf_stats.outbound_target_full));
35989+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
35990 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
35991- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
35992+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
35993 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
35994- (long)atomic_long_read(&perf_stats.debug_stop_polling));
35995+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
35996 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
35997- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
35998+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
35999 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
36000- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
36001- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
36002+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
36003+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
36004 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
36005- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
36006- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
36007+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
36008+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
36009 seq_printf(m, "\n");
36010 return 0;
36011 }
36012diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.h linux-2.6.32.45/drivers/s390/cio/qdio_perf.h
36013--- linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
36014+++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
36015@@ -13,46 +13,46 @@
36016
36017 struct qdio_perf_stats {
36018 /* interrupt handler calls */
36019- atomic_long_t qdio_int;
36020- atomic_long_t pci_int;
36021- atomic_long_t thin_int;
36022+ atomic_long_unchecked_t qdio_int;
36023+ atomic_long_unchecked_t pci_int;
36024+ atomic_long_unchecked_t thin_int;
36025
36026 /* tasklet runs */
36027- atomic_long_t tasklet_inbound;
36028- atomic_long_t tasklet_outbound;
36029- atomic_long_t tasklet_thinint;
36030- atomic_long_t tasklet_thinint_loop;
36031- atomic_long_t thinint_inbound;
36032- atomic_long_t thinint_inbound_loop;
36033- atomic_long_t thinint_inbound_loop2;
36034+ atomic_long_unchecked_t tasklet_inbound;
36035+ atomic_long_unchecked_t tasklet_outbound;
36036+ atomic_long_unchecked_t tasklet_thinint;
36037+ atomic_long_unchecked_t tasklet_thinint_loop;
36038+ atomic_long_unchecked_t thinint_inbound;
36039+ atomic_long_unchecked_t thinint_inbound_loop;
36040+ atomic_long_unchecked_t thinint_inbound_loop2;
36041
36042 /* signal adapter calls */
36043- atomic_long_t siga_out;
36044- atomic_long_t siga_in;
36045- atomic_long_t siga_sync;
36046+ atomic_long_unchecked_t siga_out;
36047+ atomic_long_unchecked_t siga_in;
36048+ atomic_long_unchecked_t siga_sync;
36049
36050 /* misc */
36051- atomic_long_t inbound_handler;
36052- atomic_long_t outbound_handler;
36053- atomic_long_t fast_requeue;
36054- atomic_long_t outbound_target_full;
36055+ atomic_long_unchecked_t inbound_handler;
36056+ atomic_long_unchecked_t outbound_handler;
36057+ atomic_long_unchecked_t fast_requeue;
36058+ atomic_long_unchecked_t outbound_target_full;
36059
36060 /* for debugging */
36061- atomic_long_t debug_tl_out_timer;
36062- atomic_long_t debug_stop_polling;
36063- atomic_long_t debug_eqbs_all;
36064- atomic_long_t debug_eqbs_incomplete;
36065- atomic_long_t debug_sqbs_all;
36066- atomic_long_t debug_sqbs_incomplete;
36067+ atomic_long_unchecked_t debug_tl_out_timer;
36068+ atomic_long_unchecked_t debug_stop_polling;
36069+ atomic_long_unchecked_t debug_eqbs_all;
36070+ atomic_long_unchecked_t debug_eqbs_incomplete;
36071+ atomic_long_unchecked_t debug_sqbs_all;
36072+ atomic_long_unchecked_t debug_sqbs_incomplete;
36073 };
36074
36075 extern struct qdio_perf_stats perf_stats;
36076 extern int qdio_performance_stats;
36077
36078-static inline void qdio_perf_stat_inc(atomic_long_t *count)
36079+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
36080 {
36081 if (qdio_performance_stats)
36082- atomic_long_inc(count);
36083+ atomic_long_inc_unchecked(count);
36084 }
36085
36086 int qdio_setup_perf_stats(void);
36087diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h
36088--- linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-03-27 14:31:47.000000000 -0400
36089+++ linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:33:55.000000000 -0400
36090@@ -471,7 +471,7 @@ struct adapter_ops
36091 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36092 /* Administrative operations */
36093 int (*adapter_comm)(struct aac_dev * dev, int comm);
36094-};
36095+} __no_const;
36096
36097 /*
36098 * Define which interrupt handler needs to be installed
36099diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c
36100--- linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
36101+++ linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
36102@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
36103 u32 actual_fibsize64, actual_fibsize = 0;
36104 int i;
36105
36106+ pax_track_stack();
36107
36108 if (dev->in_reset) {
36109 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
36110diff -urNp linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c
36111--- linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
36112+++ linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
36113@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
36114 flash_error_table[i].reason);
36115 }
36116
36117-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
36118+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
36119 asd_show_update_bios, asd_store_update_bios);
36120
36121 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
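update_bios is a sysfs attribute whose store hook kicks off a firmware flash, so the S_IRUGO|S_IWUGO (0666) mode is tightened above to S_IRUGO|S_IWUSR: still world-readable, writable only by root. A tiny sketch of the mode change using the same macro names (defined here for a userspace build, matching their kernel values).

#include <stdio.h>
#include <sys/stat.h>

#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
#define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)

int main(void)
{
	mode_t before = S_IRUGO | S_IWUGO;   /* 0666: any user may start a flash */
	mode_t after  = S_IRUGO | S_IWUSR;   /* 0644: world-readable, root-writable */

	printf("update_bios: %04o -> %04o\n", (unsigned)before, (unsigned)after);
	return 0;
}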
36122diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h
36123--- linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-03-27 14:31:47.000000000 -0400
36124+++ linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-08-05 20:33:55.000000000 -0400
36125@@ -61,7 +61,7 @@ struct bfa_hwif_s {
36126 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
36127 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
36128 u32 *nvecs, u32 *maxvec);
36129-};
36130+} __no_const;
36131 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36132
36133 struct bfa_iocfc_s {
36134diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h
36135--- linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-03-27 14:31:47.000000000 -0400
36136+++ linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:33:55.000000000 -0400
36137@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
36138 bfa_ioc_disable_cbfn_t disable_cbfn;
36139 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36140 bfa_ioc_reset_cbfn_t reset_cbfn;
36141-};
36142+} __no_const;
36143
36144 /**
36145 * Heartbeat failure notification queue element.
36146diff -urNp linux-2.6.32.45/drivers/scsi/BusLogic.c linux-2.6.32.45/drivers/scsi/BusLogic.c
36147--- linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
36148+++ linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
36149@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
36150 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
36151 *PrototypeHostAdapter)
36152 {
36153+ pax_track_stack();
36154+
36155 /*
36156 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
36157 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
36158diff -urNp linux-2.6.32.45/drivers/scsi/dpt_i2o.c linux-2.6.32.45/drivers/scsi/dpt_i2o.c
36159--- linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
36160+++ linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
36161@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
36162 dma_addr_t addr;
36163 ulong flags = 0;
36164
36165+ pax_track_stack();
36166+
36167 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
36168 // get user msg size in u32s
36169 if(get_user(size, &user_msg[0])){
36170@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
36171 s32 rcode;
36172 dma_addr_t addr;
36173
36174+ pax_track_stack();
36175+
36176 memset(msg, 0 , sizeof(msg));
36177 len = scsi_bufflen(cmd);
36178 direction = 0x00000000;
36179diff -urNp linux-2.6.32.45/drivers/scsi/eata.c linux-2.6.32.45/drivers/scsi/eata.c
36180--- linux-2.6.32.45/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
36181+++ linux-2.6.32.45/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
36182@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
36183 struct hostdata *ha;
36184 char name[16];
36185
36186+ pax_track_stack();
36187+
36188 sprintf(name, "%s%d", driver_name, j);
36189
36190 if (!request_region(port_base, REGION_SIZE, driver_name)) {
36191diff -urNp linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c
36192--- linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
36193+++ linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
36194@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
36195 size_t rlen;
36196 size_t dlen;
36197
36198+ pax_track_stack();
36199+
36200 fiph = (struct fip_header *)skb->data;
36201 sub = fiph->fip_subcode;
36202 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
36203diff -urNp linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c
36204--- linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-03-27 14:31:47.000000000 -0400
36205+++ linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-08-05 20:33:55.000000000 -0400
36206@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct p
36207 /* Start local port initiatialization */
36208
36209 lp->link_up = 0;
36210- lp->tt = fnic_transport_template;
36211+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
36212
36213 lp->max_retry_count = fnic->config.flogi_retries;
36214 lp->max_rport_retry_count = fnic->config.plogi_retries;
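
The fnic change above replaces a plain structure assignment with a memcpy() through a void * cast. The reason is presumably that the libfc function template embedded in struct fc_lport is treated as read-only once the constification work elsewhere in this patch applies, so lp->tt = fnic_transport_template no longer compiles and the one-time seeding has to copy the bytes instead. The idiom, sketched with hypothetical names:

	#include <linux/string.h>

	struct bar_ops {
		int (*xmit)(void *priv);
	};

	struct bar_port {
		const struct bar_ops tt;	/* read-only after initialisation */
	};

	static const struct bar_ops bar_template;	/* real handlers omitted in this sketch */

	static void bar_seed_ops(struct bar_port *port)
	{
		/* direct assignment to a const member will not compile, so the
		 * one-time setup copies the bytes through a non-const pointer */
		memcpy((void *)&port->tt, &bar_template, sizeof(bar_template));
	}
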
36215diff -urNp linux-2.6.32.45/drivers/scsi/gdth.c linux-2.6.32.45/drivers/scsi/gdth.c
36216--- linux-2.6.32.45/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
36217+++ linux-2.6.32.45/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
36218@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
36219 ulong flags;
36220 gdth_ha_str *ha;
36221
36222+ pax_track_stack();
36223+
36224 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
36225 return -EFAULT;
36226 ha = gdth_find_ha(ldrv.ionode);
36227@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
36228 gdth_ha_str *ha;
36229 int rval;
36230
36231+ pax_track_stack();
36232+
36233 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
36234 res.number >= MAX_HDRIVES)
36235 return -EFAULT;
36236@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
36237 gdth_ha_str *ha;
36238 int rval;
36239
36240+ pax_track_stack();
36241+
36242 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
36243 return -EFAULT;
36244 ha = gdth_find_ha(gen.ionode);
36245@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
36246 int i;
36247 gdth_cmd_str gdtcmd;
36248 char cmnd[MAX_COMMAND_SIZE];
36249+
36250+ pax_track_stack();
36251+
36252 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
36253
36254 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
36255diff -urNp linux-2.6.32.45/drivers/scsi/gdth_proc.c linux-2.6.32.45/drivers/scsi/gdth_proc.c
36256--- linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
36257+++ linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
36258@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
36259 ulong64 paddr;
36260
36261 char cmnd[MAX_COMMAND_SIZE];
36262+
36263+ pax_track_stack();
36264+
36265 memset(cmnd, 0xff, 12);
36266 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
36267
36268@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
36269 gdth_hget_str *phg;
36270 char cmnd[MAX_COMMAND_SIZE];
36271
36272+ pax_track_stack();
36273+
36274 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
36275 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
36276 if (!gdtcmd || !estr)
36277diff -urNp linux-2.6.32.45/drivers/scsi/hosts.c linux-2.6.32.45/drivers/scsi/hosts.c
36278--- linux-2.6.32.45/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
36279+++ linux-2.6.32.45/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
36280@@ -40,7 +40,7 @@
36281 #include "scsi_logging.h"
36282
36283
36284-static atomic_t scsi_host_next_hn; /* host_no for next new host */
36285+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36286
36287
36288 static void scsi_host_cls_release(struct device *dev)
36289@@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
36290 * subtract one because we increment first then return, but we need to
36291 * know what the next host number was before increment
36292 */
36293- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36294+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36295 shost->dma_channel = 0xff;
36296
36297 /* These three are default values which can be overridden */
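
The scsi_host_next_hn conversion is representative of the single largest class of changes in this patch: counters that only hand out identifiers or accumulate statistics move from atomic_t to atomic_unchecked_t. Under the PaX REFCOUNT feature, ordinary atomic_t operations are instrumented to catch overflows (the protection targets reference-count wraps); counters for which wrapping is harmless are switched to the *_unchecked variants so they stay out of that detection. A minimal sketch, keeping in mind that the unchecked types and helpers exist only with this patch applied:

	static atomic_unchecked_t next_id;	/* a wrap here would be harmless */

	static unsigned int alloc_id(void)
	{
		/* same semantics as atomic_inc_return(), but exempt from the
		 * REFCOUNT overflow check because an id wrap is not a bug */
		return atomic_inc_return_unchecked(&next_id) - 1;
	}
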
36298diff -urNp linux-2.6.32.45/drivers/scsi/ipr.c linux-2.6.32.45/drivers/scsi/ipr.c
36299--- linux-2.6.32.45/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
36300+++ linux-2.6.32.45/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
36301@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
36302 return true;
36303 }
36304
36305-static struct ata_port_operations ipr_sata_ops = {
36306+static const struct ata_port_operations ipr_sata_ops = {
36307 .phy_reset = ipr_ata_phy_reset,
36308 .hardreset = ipr_sata_reset,
36309 .post_internal_cmd = ipr_ata_post_internal,
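
Constifying ipr_sata_ops is the complement of the __no_const markings earlier in the patch: an operations table whose members are all fixed at build time is declared const so it is placed in a read-only section and is no longer a convenient target for function-pointer overwrites. This assumes the consuming API accepts a const pointer, which the related constification changes in this patch arrange for libata. The shape of the change, with hypothetical names:

	struct baz_ops {
		int (*reset)(void *ctx);
		int (*queue)(void *ctx, unsigned int tag);
	};

	static int baz_reset(void *ctx)                   { return 0; }
	static int baz_queue(void *ctx, unsigned int tag) { return 0; }

	static const struct baz_ops baz_port_ops = {	/* ends up in .rodata */
		.reset = baz_reset,
		.queue = baz_queue,
	};
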
36310diff -urNp linux-2.6.32.45/drivers/scsi/ips.h linux-2.6.32.45/drivers/scsi/ips.h
36311--- linux-2.6.32.45/drivers/scsi/ips.h 2011-03-27 14:31:47.000000000 -0400
36312+++ linux-2.6.32.45/drivers/scsi/ips.h 2011-08-05 20:33:55.000000000 -0400
36313@@ -1027,7 +1027,7 @@ typedef struct {
36314 int (*intr)(struct ips_ha *);
36315 void (*enableint)(struct ips_ha *);
36316 uint32_t (*statupd)(struct ips_ha *);
36317-} ips_hw_func_t;
36318+} __no_const ips_hw_func_t;
36319
36320 typedef struct ips_ha {
36321 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36322diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c
36323--- linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
36324+++ linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:22:32.000000000 -0400
36325@@ -86,12 +86,12 @@ struct fc_exch_mgr {
36326 * all together if not used XXX
36327 */
36328 struct {
36329- atomic_t no_free_exch;
36330- atomic_t no_free_exch_xid;
36331- atomic_t xid_not_found;
36332- atomic_t xid_busy;
36333- atomic_t seq_not_found;
36334- atomic_t non_bls_resp;
36335+ atomic_unchecked_t no_free_exch;
36336+ atomic_unchecked_t no_free_exch_xid;
36337+ atomic_unchecked_t xid_not_found;
36338+ atomic_unchecked_t xid_busy;
36339+ atomic_unchecked_t seq_not_found;
36340+ atomic_unchecked_t non_bls_resp;
36341 } stats;
36342 };
36343 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
36344@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
36345 /* allocate memory for exchange */
36346 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36347 if (!ep) {
36348- atomic_inc(&mp->stats.no_free_exch);
36349+ atomic_inc_unchecked(&mp->stats.no_free_exch);
36350 goto out;
36351 }
36352 memset(ep, 0, sizeof(*ep));
36353@@ -557,7 +557,7 @@ out:
36354 return ep;
36355 err:
36356 spin_unlock_bh(&pool->lock);
36357- atomic_inc(&mp->stats.no_free_exch_xid);
36358+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36359 mempool_free(ep, mp->ep_pool);
36360 return NULL;
36361 }
36362@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36363 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36364 ep = fc_exch_find(mp, xid);
36365 if (!ep) {
36366- atomic_inc(&mp->stats.xid_not_found);
36367+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36368 reject = FC_RJT_OX_ID;
36369 goto out;
36370 }
36371@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36372 ep = fc_exch_find(mp, xid);
36373 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36374 if (ep) {
36375- atomic_inc(&mp->stats.xid_busy);
36376+ atomic_inc_unchecked(&mp->stats.xid_busy);
36377 reject = FC_RJT_RX_ID;
36378 goto rel;
36379 }
36380@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36381 }
36382 xid = ep->xid; /* get our XID */
36383 } else if (!ep) {
36384- atomic_inc(&mp->stats.xid_not_found);
36385+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36386 reject = FC_RJT_RX_ID; /* XID not found */
36387 goto out;
36388 }
36389@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36390 } else {
36391 sp = &ep->seq;
36392 if (sp->id != fh->fh_seq_id) {
36393- atomic_inc(&mp->stats.seq_not_found);
36394+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36395 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
36396 goto rel;
36397 }
36398@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
36399
36400 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36401 if (!ep) {
36402- atomic_inc(&mp->stats.xid_not_found);
36403+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36404 goto out;
36405 }
36406 if (ep->esb_stat & ESB_ST_COMPLETE) {
36407- atomic_inc(&mp->stats.xid_not_found);
36408+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36409 goto out;
36410 }
36411 if (ep->rxid == FC_XID_UNKNOWN)
36412 ep->rxid = ntohs(fh->fh_rx_id);
36413 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36414- atomic_inc(&mp->stats.xid_not_found);
36415+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36416 goto rel;
36417 }
36418 if (ep->did != ntoh24(fh->fh_s_id) &&
36419 ep->did != FC_FID_FLOGI) {
36420- atomic_inc(&mp->stats.xid_not_found);
36421+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36422 goto rel;
36423 }
36424 sof = fr_sof(fp);
36425@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
36426 } else {
36427 sp = &ep->seq;
36428 if (sp->id != fh->fh_seq_id) {
36429- atomic_inc(&mp->stats.seq_not_found);
36430+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36431 goto rel;
36432 }
36433 }
36434@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
36435 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36436
36437 if (!sp)
36438- atomic_inc(&mp->stats.xid_not_found);
36439+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36440 else
36441- atomic_inc(&mp->stats.non_bls_resp);
36442+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
36443
36444 fc_frame_free(fp);
36445 }
36446diff -urNp linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c
36447--- linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
36448+++ linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
36449@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
36450 }
36451 }
36452
36453-static struct ata_port_operations sas_sata_ops = {
36454+static const struct ata_port_operations sas_sata_ops = {
36455 .phy_reset = sas_ata_phy_reset,
36456 .post_internal_cmd = sas_ata_post_internal,
36457 .qc_defer = ata_std_qc_defer,
36458diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c
36459--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
36460+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
36461@@ -124,7 +124,7 @@ struct lpfc_debug {
36462 int len;
36463 };
36464
36465-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36466+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36467 static unsigned long lpfc_debugfs_start_time = 0L;
36468
36469 /**
36470@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
36471 lpfc_debugfs_enable = 0;
36472
36473 len = 0;
36474- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36475+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36476 (lpfc_debugfs_max_disc_trc - 1);
36477 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36478 dtp = vport->disc_trc + i;
36479@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
36480 lpfc_debugfs_enable = 0;
36481
36482 len = 0;
36483- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36484+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36485 (lpfc_debugfs_max_slow_ring_trc - 1);
36486 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36487 dtp = phba->slow_ring_trc + i;
36488@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
36489 uint32_t *ptr;
36490 char buffer[1024];
36491
36492+ pax_track_stack();
36493+
36494 off = 0;
36495 spin_lock_irq(&phba->hbalock);
36496
36497@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
36498 !vport || !vport->disc_trc)
36499 return;
36500
36501- index = atomic_inc_return(&vport->disc_trc_cnt) &
36502+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36503 (lpfc_debugfs_max_disc_trc - 1);
36504 dtp = vport->disc_trc + index;
36505 dtp->fmt = fmt;
36506 dtp->data1 = data1;
36507 dtp->data2 = data2;
36508 dtp->data3 = data3;
36509- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36510+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36511 dtp->jif = jiffies;
36512 #endif
36513 return;
36514@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
36515 !phba || !phba->slow_ring_trc)
36516 return;
36517
36518- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36519+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36520 (lpfc_debugfs_max_slow_ring_trc - 1);
36521 dtp = phba->slow_ring_trc + index;
36522 dtp->fmt = fmt;
36523 dtp->data1 = data1;
36524 dtp->data2 = data2;
36525 dtp->data3 = data3;
36526- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36527+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36528 dtp->jif = jiffies;
36529 #endif
36530 return;
36531@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36532 "slow_ring buffer\n");
36533 goto debug_failed;
36534 }
36535- atomic_set(&phba->slow_ring_trc_cnt, 0);
36536+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36537 memset(phba->slow_ring_trc, 0,
36538 (sizeof(struct lpfc_debugfs_trc) *
36539 lpfc_debugfs_max_slow_ring_trc));
36540@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36541 "buffer\n");
36542 goto debug_failed;
36543 }
36544- atomic_set(&vport->disc_trc_cnt, 0);
36545+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36546
36547 snprintf(name, sizeof(name), "discovery_trace");
36548 vport->debug_disc_trc =
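
The lpfc debugfs hunks above show why the unchecked counters fit here: disc_trc_cnt and slow_ring_trc_cnt are free-running counters masked with (lpfc_debugfs_max_*_trc - 1) to index a power-of-two ring of trace records, so wrapping is expected behaviour rather than an error. The same indexing pattern, reduced to a sketch with hypothetical names:

	#include <linux/jiffies.h>

	#define TRC_DEPTH 256			/* must be a power of two */

	struct trc_entry {
		unsigned long when;
		unsigned int data;
	};

	static struct trc_entry trc_ring[TRC_DEPTH];
	static atomic_unchecked_t trc_cnt;	/* unchecked type from this patch */

	static void trc_log(unsigned int data)
	{
		unsigned int idx = atomic_inc_return_unchecked(&trc_cnt) &
				   (TRC_DEPTH - 1);

		trc_ring[idx].when = jiffies;
		trc_ring[idx].data = data;
	}
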
36549diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h
36550--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
36551+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
36552@@ -400,7 +400,7 @@ struct lpfc_vport {
36553 struct dentry *debug_nodelist;
36554 struct dentry *vport_debugfs_root;
36555 struct lpfc_debugfs_trc *disc_trc;
36556- atomic_t disc_trc_cnt;
36557+ atomic_unchecked_t disc_trc_cnt;
36558 #endif
36559 uint8_t stat_data_enabled;
36560 uint8_t stat_data_blocked;
36561@@ -725,8 +725,8 @@ struct lpfc_hba {
36562 struct timer_list fabric_block_timer;
36563 unsigned long bit_flags;
36564 #define FABRIC_COMANDS_BLOCKED 0
36565- atomic_t num_rsrc_err;
36566- atomic_t num_cmd_success;
36567+ atomic_unchecked_t num_rsrc_err;
36568+ atomic_unchecked_t num_cmd_success;
36569 unsigned long last_rsrc_error_time;
36570 unsigned long last_ramp_down_time;
36571 unsigned long last_ramp_up_time;
36572@@ -740,7 +740,7 @@ struct lpfc_hba {
36573 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
36574 struct dentry *debug_slow_ring_trc;
36575 struct lpfc_debugfs_trc *slow_ring_trc;
36576- atomic_t slow_ring_trc_cnt;
36577+ atomic_unchecked_t slow_ring_trc_cnt;
36578 #endif
36579
36580 /* Used for deferred freeing of ELS data buffers */
36581diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c
36582--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-03-27 14:31:47.000000000 -0400
36583+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:33:55.000000000 -0400
36584@@ -8021,8 +8021,10 @@ lpfc_init(void)
36585 printk(LPFC_COPYRIGHT "\n");
36586
36587 if (lpfc_enable_npiv) {
36588- lpfc_transport_functions.vport_create = lpfc_vport_create;
36589- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36590+ pax_open_kernel();
36591+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36592+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36593+ pax_close_kernel();
36594 }
36595 lpfc_transport_template =
36596 fc_attach_transport(&lpfc_transport_functions);
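
The lpfc_init() hunk above is the companion idiom to constification: when a normally read-only function-pointer table does need a one-time update at init time (here, enabling the NPIV vport callbacks), the writes are bracketed with pax_open_kernel()/pax_close_kernel(), which temporarily lift the kernel's write protection, and the stores go through a *(void **)& cast so the const qualifier does not reject the assignment. Both helpers come from the PaX side of this patch. A sketch with hypothetical names:

	struct qux_template {
		int (*create)(void *arg);
		int (*destroy)(void *arg);
	};

	static int qux_create(void *arg)  { return 0; }
	static int qux_destroy(void *arg) { return 0; }

	static const struct qux_template qux_tmpl;	/* read-only by default */

	static void qux_enable_feature(void)
	{
		pax_open_kernel();		/* allow writes to read-only kernel data */
		*(void **)&qux_tmpl.create  = qux_create;
		*(void **)&qux_tmpl.destroy = qux_destroy;
		pax_close_kernel();		/* restore write protection */
	}
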
36597diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c
36598--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
36599+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
36600@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
36601 uint32_t evt_posted;
36602
36603 spin_lock_irqsave(&phba->hbalock, flags);
36604- atomic_inc(&phba->num_rsrc_err);
36605+ atomic_inc_unchecked(&phba->num_rsrc_err);
36606 phba->last_rsrc_error_time = jiffies;
36607
36608 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36609@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
36610 unsigned long flags;
36611 struct lpfc_hba *phba = vport->phba;
36612 uint32_t evt_posted;
36613- atomic_inc(&phba->num_cmd_success);
36614+ atomic_inc_unchecked(&phba->num_cmd_success);
36615
36616 if (vport->cfg_lun_queue_depth <= queue_depth)
36617 return;
36618@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36619 int i;
36620 struct lpfc_rport_data *rdata;
36621
36622- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36623- num_cmd_success = atomic_read(&phba->num_cmd_success);
36624+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36625+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36626
36627 vports = lpfc_create_vport_work_array(phba);
36628 if (vports != NULL)
36629@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36630 }
36631 }
36632 lpfc_destroy_vport_work_array(phba, vports);
36633- atomic_set(&phba->num_rsrc_err, 0);
36634- atomic_set(&phba->num_cmd_success, 0);
36635+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36636+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36637 }
36638
36639 /**
36640@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
36641 }
36642 }
36643 lpfc_destroy_vport_work_array(phba, vports);
36644- atomic_set(&phba->num_rsrc_err, 0);
36645- atomic_set(&phba->num_cmd_success, 0);
36646+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36647+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36648 }
36649
36650 /**
36651diff -urNp linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c
36652--- linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
36653+++ linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
36654@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
36655 int rval;
36656 int i;
36657
36658+ pax_track_stack();
36659+
36660 // Allocate memory for the base list of scb for management module.
36661 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
36662
36663diff -urNp linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c
36664--- linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
36665+++ linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
36666@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
36667 int nelem = ARRAY_SIZE(get_attrs), a = 0;
36668 int ret;
36669
36670+ pax_track_stack();
36671+
36672 or = osd_start_request(od, GFP_KERNEL);
36673 if (!or)
36674 return -ENOMEM;
36675diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.c linux-2.6.32.45/drivers/scsi/pmcraid.c
36676--- linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:35:29.000000000 -0400
36677+++ linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:33:59.000000000 -0400
36678@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
36679 res->scsi_dev = scsi_dev;
36680 scsi_dev->hostdata = res;
36681 res->change_detected = 0;
36682- atomic_set(&res->read_failures, 0);
36683- atomic_set(&res->write_failures, 0);
36684+ atomic_set_unchecked(&res->read_failures, 0);
36685+ atomic_set_unchecked(&res->write_failures, 0);
36686 rc = 0;
36687 }
36688 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36689@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
36690
36691 /* If this was a SCSI read/write command keep count of errors */
36692 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36693- atomic_inc(&res->read_failures);
36694+ atomic_inc_unchecked(&res->read_failures);
36695 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36696- atomic_inc(&res->write_failures);
36697+ atomic_inc_unchecked(&res->write_failures);
36698
36699 if (!RES_IS_GSCSI(res->cfg_entry) &&
36700 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36701@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(stru
36702
36703 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36704 /* add resources only after host is added into system */
36705- if (!atomic_read(&pinstance->expose_resources))
36706+ if (!atomic_read_unchecked(&pinstance->expose_resources))
36707 return;
36708
36709 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
36710@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instan
36711 init_waitqueue_head(&pinstance->reset_wait_q);
36712
36713 atomic_set(&pinstance->outstanding_cmds, 0);
36714- atomic_set(&pinstance->expose_resources, 0);
36715+ atomic_set_unchecked(&pinstance->expose_resources, 0);
36716
36717 INIT_LIST_HEAD(&pinstance->free_res_q);
36718 INIT_LIST_HEAD(&pinstance->used_res_q);
36719@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
36720 /* Schedule worker thread to handle CCN and take care of adding and
36721 * removing devices to OS
36722 */
36723- atomic_set(&pinstance->expose_resources, 1);
36724+ atomic_set_unchecked(&pinstance->expose_resources, 1);
36725 schedule_work(&pinstance->worker_q);
36726 return rc;
36727
36728diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.h linux-2.6.32.45/drivers/scsi/pmcraid.h
36729--- linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
36730+++ linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
36731@@ -690,7 +690,7 @@ struct pmcraid_instance {
36732 atomic_t outstanding_cmds;
36733
36734 /* should add/delete resources to mid-layer now ?*/
36735- atomic_t expose_resources;
36736+ atomic_unchecked_t expose_resources;
36737
36738 /* Tasklet to handle deferred processing */
36739 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
36740@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
36741 struct list_head queue; /* link to "to be exposed" resources */
36742 struct pmcraid_config_table_entry cfg_entry;
36743 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36744- atomic_t read_failures; /* count of failed READ commands */
36745- atomic_t write_failures; /* count of failed WRITE commands */
36746+ atomic_unchecked_t read_failures; /* count of failed READ commands */
36747+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36748
36749 /* To indicate add/delete/modify during CCN */
36750 u8 change_detected;
36751diff -urNp linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h
36752--- linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-03-27 14:31:47.000000000 -0400
36753+++ linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:33:55.000000000 -0400
36754@@ -2089,7 +2089,7 @@ struct isp_operations {
36755
36756 int (*get_flash_version) (struct scsi_qla_host *, void *);
36757 int (*start_scsi) (srb_t *);
36758-};
36759+} __no_const;
36760
36761 /* MSI-X Support *************************************************************/
36762
36763diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h
36764--- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
36765+++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
36766@@ -240,7 +240,7 @@ struct ddb_entry {
36767 atomic_t retry_relogin_timer; /* Min Time between relogins
36768 * (4000 only) */
36769 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
36770- atomic_t relogin_retry_count; /* Num of times relogin has been
36771+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36772 * retried */
36773
36774 uint16_t port;
36775diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c
36776--- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
36777+++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
36778@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
36779 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
36780 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36781 atomic_set(&ddb_entry->relogin_timer, 0);
36782- atomic_set(&ddb_entry->relogin_retry_count, 0);
36783+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36784 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36785 list_add_tail(&ddb_entry->list, &ha->ddb_list);
36786 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
36787@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
36788 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36789 atomic_set(&ddb_entry->port_down_timer,
36790 ha->port_down_retry_count);
36791- atomic_set(&ddb_entry->relogin_retry_count, 0);
36792+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36793 atomic_set(&ddb_entry->relogin_timer, 0);
36794 clear_bit(DF_RELOGIN, &ddb_entry->flags);
36795 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
36796diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c
36797--- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
36798+++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
36799@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
36800 ddb_entry->fw_ddb_device_state ==
36801 DDB_DS_SESSION_FAILED) {
36802 /* Reset retry relogin timer */
36803- atomic_inc(&ddb_entry->relogin_retry_count);
36804+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36805 DEBUG2(printk("scsi%ld: index[%d] relogin"
36806 " timed out-retrying"
36807 " relogin (%d)\n",
36808 ha->host_no,
36809 ddb_entry->fw_ddb_index,
36810- atomic_read(&ddb_entry->
36811+ atomic_read_unchecked(&ddb_entry->
36812 relogin_retry_count))
36813 );
36814 start_dpc++;
36815diff -urNp linux-2.6.32.45/drivers/scsi/scsi.c linux-2.6.32.45/drivers/scsi/scsi.c
36816--- linux-2.6.32.45/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
36817+++ linux-2.6.32.45/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
36818@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
36819 unsigned long timeout;
36820 int rtn = 0;
36821
36822- atomic_inc(&cmd->device->iorequest_cnt);
36823+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36824
36825 /* check if the device is still usable */
36826 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36827diff -urNp linux-2.6.32.45/drivers/scsi/scsi_debug.c linux-2.6.32.45/drivers/scsi/scsi_debug.c
36828--- linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
36829+++ linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
36830@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
36831 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
36832 unsigned char *cmd = (unsigned char *)scp->cmnd;
36833
36834+ pax_track_stack();
36835+
36836 if ((errsts = check_readiness(scp, 1, devip)))
36837 return errsts;
36838 memset(arr, 0, sizeof(arr));
36839@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
36840 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
36841 unsigned char *cmd = (unsigned char *)scp->cmnd;
36842
36843+ pax_track_stack();
36844+
36845 if ((errsts = check_readiness(scp, 1, devip)))
36846 return errsts;
36847 memset(arr, 0, sizeof(arr));
36848diff -urNp linux-2.6.32.45/drivers/scsi/scsi_lib.c linux-2.6.32.45/drivers/scsi/scsi_lib.c
36849--- linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
36850+++ linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
36851@@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
36852
36853 scsi_init_cmd_errh(cmd);
36854 cmd->result = DID_NO_CONNECT << 16;
36855- atomic_inc(&cmd->device->iorequest_cnt);
36856+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36857
36858 /*
36859 * SCSI request completion path will do scsi_device_unbusy(),
36860@@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
36861 */
36862 cmd->serial_number = 0;
36863
36864- atomic_inc(&cmd->device->iodone_cnt);
36865+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
36866 if (cmd->result)
36867- atomic_inc(&cmd->device->ioerr_cnt);
36868+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36869
36870 disposition = scsi_decide_disposition(cmd);
36871 if (disposition != SUCCESS &&
36872diff -urNp linux-2.6.32.45/drivers/scsi/scsi_sysfs.c linux-2.6.32.45/drivers/scsi/scsi_sysfs.c
36873--- linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
36874+++ linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
36875@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
36876 char *buf) \
36877 { \
36878 struct scsi_device *sdev = to_scsi_device(dev); \
36879- unsigned long long count = atomic_read(&sdev->field); \
36880+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
36881 return snprintf(buf, 20, "0x%llx\n", count); \
36882 } \
36883 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36884diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c
36885--- linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
36886+++ linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
36887@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
36888 * Netlink Infrastructure
36889 */
36890
36891-static atomic_t fc_event_seq;
36892+static atomic_unchecked_t fc_event_seq;
36893
36894 /**
36895 * fc_get_event_number - Obtain the next sequential FC event number
36896@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
36897 u32
36898 fc_get_event_number(void)
36899 {
36900- return atomic_add_return(1, &fc_event_seq);
36901+ return atomic_add_return_unchecked(1, &fc_event_seq);
36902 }
36903 EXPORT_SYMBOL(fc_get_event_number);
36904
36905@@ -641,7 +641,7 @@ static __init int fc_transport_init(void
36906 {
36907 int error;
36908
36909- atomic_set(&fc_event_seq, 0);
36910+ atomic_set_unchecked(&fc_event_seq, 0);
36911
36912 error = transport_class_register(&fc_host_class);
36913 if (error)
36914diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c
36915--- linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
36916+++ linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
36917@@ -81,7 +81,7 @@ struct iscsi_internal {
36918 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
36919 };
36920
36921-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36922+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36923 static struct workqueue_struct *iscsi_eh_timer_workq;
36924
36925 /*
36926@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
36927 int err;
36928
36929 ihost = shost->shost_data;
36930- session->sid = atomic_add_return(1, &iscsi_session_nr);
36931+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36932
36933 if (id == ISCSI_MAX_TARGET) {
36934 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
36935@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
36936 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36937 ISCSI_TRANSPORT_VERSION);
36938
36939- atomic_set(&iscsi_session_nr, 0);
36940+ atomic_set_unchecked(&iscsi_session_nr, 0);
36941
36942 err = class_register(&iscsi_transport_class);
36943 if (err)
36944diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c
36945--- linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
36946+++ linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
36947@@ -33,7 +33,7 @@
36948 #include "scsi_transport_srp_internal.h"
36949
36950 struct srp_host_attrs {
36951- atomic_t next_port_id;
36952+ atomic_unchecked_t next_port_id;
36953 };
36954 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36955
36956@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
36957 struct Scsi_Host *shost = dev_to_shost(dev);
36958 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36959
36960- atomic_set(&srp_host->next_port_id, 0);
36961+ atomic_set_unchecked(&srp_host->next_port_id, 0);
36962 return 0;
36963 }
36964
36965@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
36966 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36967 rport->roles = ids->roles;
36968
36969- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36970+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36971 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36972
36973 transport_setup_device(&rport->dev);
36974diff -urNp linux-2.6.32.45/drivers/scsi/sg.c linux-2.6.32.45/drivers/scsi/sg.c
36975--- linux-2.6.32.45/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
36976+++ linux-2.6.32.45/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
36977@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
36978 const struct file_operations * fops;
36979 };
36980
36981-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36982+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36983 {"allow_dio", &adio_fops},
36984 {"debug", &debug_fops},
36985 {"def_reserved_size", &dressz_fops},
36986@@ -2307,7 +2307,7 @@ sg_proc_init(void)
36987 {
36988 int k, mask;
36989 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
36990- struct sg_proc_leaf * leaf;
36991+ const struct sg_proc_leaf * leaf;
36992
36993 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
36994 if (!sg_proc_sgp)
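
The sg.c change applies the same constification to a plain data table: the array describing the /proc/scsi/sg entries is never modified after build time, so both the array and the pointer that iterates over it become const. The same cheap hardening applies to any static lookup table, sketched here with hypothetical names:

	struct leaf {
		const char *name;
		int (*show)(void);
	};

	static int show_a(void) { return 0; }
	static int show_b(void) { return 0; }

	static const struct leaf leaves[] = {
		{ "a", show_a },
		{ "b", show_b },
	};

	static void register_leaves(void)
	{
		const struct leaf *l;	/* the iterator is const as well */
		unsigned int i;

		for (i = 0; i < sizeof(leaves) / sizeof(leaves[0]); i++) {
			l = &leaves[i];
			l->show();	/* stand-in for registering l->name */
		}
	}
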
36995diff -urNp linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c
36996--- linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
36997+++ linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
36998@@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
36999 int do_iounmap = 0;
37000 int do_disable_device = 1;
37001
37002+ pax_track_stack();
37003+
37004 memset(&sym_dev, 0, sizeof(sym_dev));
37005 memset(&nvram, 0, sizeof(nvram));
37006 sym_dev.pdev = pdev;
37007diff -urNp linux-2.6.32.45/drivers/serial/kgdboc.c linux-2.6.32.45/drivers/serial/kgdboc.c
37008--- linux-2.6.32.45/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
37009+++ linux-2.6.32.45/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
37010@@ -18,7 +18,7 @@
37011
37012 #define MAX_CONFIG_LEN 40
37013
37014-static struct kgdb_io kgdboc_io_ops;
37015+static const struct kgdb_io kgdboc_io_ops;
37016
37017 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37018 static int configured = -1;
37019@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
37020 module_put(THIS_MODULE);
37021 }
37022
37023-static struct kgdb_io kgdboc_io_ops = {
37024+static const struct kgdb_io kgdboc_io_ops = {
37025 .name = "kgdboc",
37026 .read_char = kgdboc_get_char,
37027 .write_char = kgdboc_put_char,
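
The kgdboc change is a small reminder that constifying a static object touches two places when the object is forward-declared: the declaration near the top of the file and the definition at the bottom must carry the same qualifiers, otherwise gcc rejects the redeclaration as conflicting. In sketch form, with hypothetical names:

	struct io_ops {
		int  (*read_char)(void);
		void (*write_char)(char c);
	};

	static const struct io_ops my_io_ops;		/* forward declaration */

	static int poll_one(void)
	{
		return my_io_ops.read_char ? my_io_ops.read_char() : -1;
	}

	static int  my_read(void)    { return 0; }
	static void my_write(char c) { }

	static const struct io_ops my_io_ops = {	/* definition must also be const */
		.read_char  = my_read,
		.write_char = my_write,
	};
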
37028diff -urNp linux-2.6.32.45/drivers/spi/spi.c linux-2.6.32.45/drivers/spi/spi.c
37029--- linux-2.6.32.45/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
37030+++ linux-2.6.32.45/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
37031@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
37032 EXPORT_SYMBOL_GPL(spi_sync);
37033
37034 /* portable code must never pass more than 32 bytes */
37035-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37036+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
37037
37038 static u8 *buf;
37039
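
The SPI_BUFSIZ change is a plain type fix rather than a hardening change: the kernel's max() macro refuses to compare operands of different types, and SMP_CACHE_BYTES evidently expands to an unsigned expression on the affected configurations, so the signed literal 32 has to become 32U for the type check to stay quiet. A simplified illustration of the mechanism (the real macro lives in include/linux/kernel.h; the names below are local to this sketch):

	#define max_tc(x, y) ({					\
		typeof(x) _x = (x);				\
		typeof(y) _y = (y);				\
		(void)(&_x == &_y);	/* warns when the types differ */ \
		_x > _y ? _x : _y; })

	#define CACHE_BYTES 64U		/* stand-in for an unsigned SMP_CACHE_BYTES */

	static unsigned int spi_buf_size(void)
	{
		/* max_tc(32, CACHE_BYTES) mixes int with unsigned int and trips
		 * the pointer-comparison warning; 32U keeps both sides unsigned */
		return max_tc(32U, CACHE_BYTES);
	}
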
37040diff -urNp linux-2.6.32.45/drivers/staging/android/binder.c linux-2.6.32.45/drivers/staging/android/binder.c
37041--- linux-2.6.32.45/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
37042+++ linux-2.6.32.45/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
37043@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
37044 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
37045 }
37046
37047-static struct vm_operations_struct binder_vm_ops = {
37048+static const struct vm_operations_struct binder_vm_ops = {
37049 .open = binder_vma_open,
37050 .close = binder_vma_close,
37051 };
37052diff -urNp linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c
37053--- linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
37054+++ linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
37055@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
37056 return VM_FAULT_NOPAGE;
37057 }
37058
37059-static struct vm_operations_struct b3dfg_vm_ops = {
37060+static const struct vm_operations_struct b3dfg_vm_ops = {
37061 .fault = b3dfg_vma_fault,
37062 };
37063
37064@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
37065 return r;
37066 }
37067
37068-static struct file_operations b3dfg_fops = {
37069+static const struct file_operations b3dfg_fops = {
37070 .owner = THIS_MODULE,
37071 .open = b3dfg_open,
37072 .release = b3dfg_release,
37073diff -urNp linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c
37074--- linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:35:29.000000000 -0400
37075+++ linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:34:00.000000000 -0400
37076@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
37077 mutex_unlock(&dev->mutex);
37078 }
37079
37080-static struct vm_operations_struct comedi_vm_ops = {
37081+static const struct vm_operations_struct comedi_vm_ops = {
37082 .close = comedi_unmap,
37083 };
37084
37085diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c
37086--- linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
37087+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
37088@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
37089 static dev_t adsp_devno;
37090 static struct class *adsp_class;
37091
37092-static struct file_operations adsp_fops = {
37093+static const struct file_operations adsp_fops = {
37094 .owner = THIS_MODULE,
37095 .open = adsp_open,
37096 .unlocked_ioctl = adsp_ioctl,
37097diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c
37098--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
37099+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
37100@@ -1022,7 +1022,7 @@ done:
37101 return rc;
37102 }
37103
37104-static struct file_operations audio_aac_fops = {
37105+static const struct file_operations audio_aac_fops = {
37106 .owner = THIS_MODULE,
37107 .open = audio_open,
37108 .release = audio_release,
37109diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c
37110--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
37111+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
37112@@ -833,7 +833,7 @@ done:
37113 return rc;
37114 }
37115
37116-static struct file_operations audio_amrnb_fops = {
37117+static const struct file_operations audio_amrnb_fops = {
37118 .owner = THIS_MODULE,
37119 .open = audamrnb_open,
37120 .release = audamrnb_release,
37121diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c
37122--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
37123+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
37124@@ -805,7 +805,7 @@ dma_fail:
37125 return rc;
37126 }
37127
37128-static struct file_operations audio_evrc_fops = {
37129+static const struct file_operations audio_evrc_fops = {
37130 .owner = THIS_MODULE,
37131 .open = audevrc_open,
37132 .release = audevrc_release,
37133diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c
37134--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
37135+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
37136@@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
37137 return 0;
37138 }
37139
37140-static struct file_operations audio_fops = {
37141+static const struct file_operations audio_fops = {
37142 .owner = THIS_MODULE,
37143 .open = audio_in_open,
37144 .release = audio_in_release,
37145@@ -922,7 +922,7 @@ static struct file_operations audio_fops
37146 .unlocked_ioctl = audio_in_ioctl,
37147 };
37148
37149-static struct file_operations audpre_fops = {
37150+static const struct file_operations audpre_fops = {
37151 .owner = THIS_MODULE,
37152 .open = audpre_open,
37153 .unlocked_ioctl = audpre_ioctl,
37154diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c
37155--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
37156+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
37157@@ -941,7 +941,7 @@ done:
37158 return rc;
37159 }
37160
37161-static struct file_operations audio_mp3_fops = {
37162+static const struct file_operations audio_mp3_fops = {
37163 .owner = THIS_MODULE,
37164 .open = audio_open,
37165 .release = audio_release,
37166diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c
37167--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
37168+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
37169@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
37170 return 0;
37171 }
37172
37173-static struct file_operations audio_fops = {
37174+static const struct file_operations audio_fops = {
37175 .owner = THIS_MODULE,
37176 .open = audio_open,
37177 .release = audio_release,
37178@@ -819,7 +819,7 @@ static struct file_operations audio_fops
37179 .unlocked_ioctl = audio_ioctl,
37180 };
37181
37182-static struct file_operations audpp_fops = {
37183+static const struct file_operations audpp_fops = {
37184 .owner = THIS_MODULE,
37185 .open = audpp_open,
37186 .unlocked_ioctl = audpp_ioctl,
37187diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c
37188--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
37189+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
37190@@ -816,7 +816,7 @@ err:
37191 return rc;
37192 }
37193
37194-static struct file_operations audio_qcelp_fops = {
37195+static const struct file_operations audio_qcelp_fops = {
37196 .owner = THIS_MODULE,
37197 .open = audqcelp_open,
37198 .release = audqcelp_release,
37199diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c
37200--- linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
37201+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
37202@@ -242,7 +242,7 @@ err:
37203 return rc;
37204 }
37205
37206-static struct file_operations snd_fops = {
37207+static const struct file_operations snd_fops = {
37208 .owner = THIS_MODULE,
37209 .open = snd_open,
37210 .release = snd_release,
37211diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c
37212--- linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
37213+++ linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
37214@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
37215 return 0;
37216 }
37217
37218-static struct file_operations qmi_fops = {
37219+static const struct file_operations qmi_fops = {
37220 .owner = THIS_MODULE,
37221 .read = qmi_read,
37222 .write = qmi_write,
37223diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c
37224--- linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
37225+++ linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
37226@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
37227 return rc;
37228 }
37229
37230-static struct file_operations rpcrouter_server_fops = {
37231+static const struct file_operations rpcrouter_server_fops = {
37232 .owner = THIS_MODULE,
37233 .open = rpcrouter_open,
37234 .release = rpcrouter_release,
37235@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
37236 .unlocked_ioctl = rpcrouter_ioctl,
37237 };
37238
37239-static struct file_operations rpcrouter_router_fops = {
37240+static const struct file_operations rpcrouter_router_fops = {
37241 .owner = THIS_MODULE,
37242 .open = rpcrouter_open,
37243 .release = rpcrouter_release,
37244diff -urNp linux-2.6.32.45/drivers/staging/dst/dcore.c linux-2.6.32.45/drivers/staging/dst/dcore.c
37245--- linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
37246+++ linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
37247@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
37248 return 0;
37249 }
37250
37251-static struct block_device_operations dst_blk_ops = {
37252+static const struct block_device_operations dst_blk_ops = {
37253 .open = dst_bdev_open,
37254 .release = dst_bdev_release,
37255 .owner = THIS_MODULE,
37256@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
37257 n->size = ctl->size;
37258
37259 atomic_set(&n->refcnt, 1);
37260- atomic_long_set(&n->gen, 0);
37261+ atomic_long_set_unchecked(&n->gen, 0);
37262 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
37263
37264 err = dst_node_sysfs_init(n);
37265diff -urNp linux-2.6.32.45/drivers/staging/dst/trans.c linux-2.6.32.45/drivers/staging/dst/trans.c
37266--- linux-2.6.32.45/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
37267+++ linux-2.6.32.45/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
37268@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
37269 t->error = 0;
37270 t->retries = 0;
37271 atomic_set(&t->refcnt, 1);
37272- t->gen = atomic_long_inc_return(&n->gen);
37273+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
37274
37275 t->enc = bio_data_dir(bio);
37276 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
37277diff -urNp linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c
37278--- linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
37279+++ linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
37280@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
37281 struct net_device_stats *stats = &etdev->net_stats;
37282
37283 if (pMpTcb->Flags & fMP_DEST_BROAD)
37284- atomic_inc(&etdev->Stats.brdcstxmt);
37285+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
37286 else if (pMpTcb->Flags & fMP_DEST_MULTI)
37287- atomic_inc(&etdev->Stats.multixmt);
37288+ atomic_inc_unchecked(&etdev->Stats.multixmt);
37289 else
37290- atomic_inc(&etdev->Stats.unixmt);
37291+ atomic_inc_unchecked(&etdev->Stats.unixmt);
37292
37293 if (pMpTcb->Packet) {
37294 stats->tx_bytes += pMpTcb->Packet->len;
37295diff -urNp linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h
37296--- linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
37297+++ linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
37298@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
37299 * operations
37300 */
37301 u32 unircv; /* # multicast packets received */
37302- atomic_t unixmt; /* # multicast packets for Tx */
37303+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
37304 u32 multircv; /* # multicast packets received */
37305- atomic_t multixmt; /* # multicast packets for Tx */
37306+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
37307 u32 brdcstrcv; /* # broadcast packets received */
37308- atomic_t brdcstxmt; /* # broadcast packets for Tx */
37309+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
37310 u32 norcvbuf; /* # Rx packets discarded */
37311 u32 noxmtbuf; /* # Tx packets discarded */
37312
37313diff -urNp linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c
37314--- linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
37315+++ linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
37316@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
37317 return 0;
37318 }
37319
37320-static struct vm_operations_struct go7007_vm_ops = {
37321+static const struct vm_operations_struct go7007_vm_ops = {
37322 .open = go7007_vm_open,
37323 .close = go7007_vm_close,
37324 .fault = go7007_vm_fault,
37325diff -urNp linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c
37326--- linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
37327+++ linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
37328@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
37329 /* The one and only one */
37330 static struct blkvsc_driver_context g_blkvsc_drv;
37331
37332-static struct block_device_operations block_ops = {
37333+static const struct block_device_operations block_ops = {
37334 .owner = THIS_MODULE,
37335 .open = blkvsc_open,
37336 .release = blkvsc_release,
37337diff -urNp linux-2.6.32.45/drivers/staging/hv/Channel.c linux-2.6.32.45/drivers/staging/hv/Channel.c
37338--- linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
37339+++ linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
37340@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
37341
37342 DPRINT_ENTER(VMBUS);
37343
37344- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
37345- atomic_inc(&gVmbusConnection.NextGpadlHandle);
37346+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
37347+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
37348
37349 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
37350 ASSERT(msgInfo != NULL);
37351diff -urNp linux-2.6.32.45/drivers/staging/hv/Hv.c linux-2.6.32.45/drivers/staging/hv/Hv.c
37352--- linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
37353+++ linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
37354@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
37355 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
37356 u32 outputAddressHi = outputAddress >> 32;
37357 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
37358- volatile void *hypercallPage = gHvContext.HypercallPage;
37359+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
37360
37361 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
37362 Control, Input, Output);
37363diff -urNp linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c
37364--- linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
37365+++ linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
37366@@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
37367 to_device_context(root_device_obj);
37368 struct device_context *child_device_ctx =
37369 to_device_context(child_device_obj);
37370- static atomic_t device_num = ATOMIC_INIT(0);
37371+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
37372
37373 DPRINT_ENTER(VMBUS_DRV);
37374
37375@@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
37376
37377 /* Set the device name. Otherwise, device_register() will fail. */
37378 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
37379- atomic_inc_return(&device_num));
37380+ atomic_inc_return_unchecked(&device_num));
37381
37382 /* The new device belongs to this bus */
37383 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
37384diff -urNp linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h
37385--- linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
37386+++ linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
37387@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
37388 struct VMBUS_CONNECTION {
37389 enum VMBUS_CONNECT_STATE ConnectState;
37390
37391- atomic_t NextGpadlHandle;
37392+ atomic_unchecked_t NextGpadlHandle;
37393
37394 /*
37395 * Represents channel interrupts. Each bit position represents a
37396diff -urNp linux-2.6.32.45/drivers/staging/iio/ring_generic.h linux-2.6.32.45/drivers/staging/iio/ring_generic.h
37397--- linux-2.6.32.45/drivers/staging/iio/ring_generic.h 2011-03-27 14:31:47.000000000 -0400
37398+++ linux-2.6.32.45/drivers/staging/iio/ring_generic.h 2011-08-23 20:24:26.000000000 -0400
37399@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
37400
37401 int (*is_enabled)(struct iio_ring_buffer *ring);
37402 int (*enable)(struct iio_ring_buffer *ring);
37403-};
37404+} __no_const;
37405
37406 /**
37407 * struct iio_ring_buffer - general ring buffer structure
37408diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet.c linux-2.6.32.45/drivers/staging/octeon/ethernet.c
37409--- linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
37410+++ linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
37411@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
37412 * since the RX tasklet also increments it.
37413 */
37414 #ifdef CONFIG_64BIT
37415- atomic64_add(rx_status.dropped_packets,
37416- (atomic64_t *)&priv->stats.rx_dropped);
37417+ atomic64_add_unchecked(rx_status.dropped_packets,
37418+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37419 #else
37420- atomic_add(rx_status.dropped_packets,
37421- (atomic_t *)&priv->stats.rx_dropped);
37422+ atomic_add_unchecked(rx_status.dropped_packets,
37423+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
37424 #endif
37425 }
37426
37427diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c
37428--- linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
37429+++ linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
37430@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
37431 /* Increment RX stats for virtual ports */
37432 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37433 #ifdef CONFIG_64BIT
37434- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37435- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37436+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37437+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37438 #else
37439- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37440- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37441+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37442+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37443 #endif
37444 }
37445 netif_receive_skb(skb);
37446@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
37447 dev->name);
37448 */
37449 #ifdef CONFIG_64BIT
37450- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37451+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
37452 #else
37453- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37454+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
37455 #endif
37456 dev_kfree_skb_irq(skb);
37457 }
37458diff -urNp linux-2.6.32.45/drivers/staging/panel/panel.c linux-2.6.32.45/drivers/staging/panel/panel.c
37459--- linux-2.6.32.45/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
37460+++ linux-2.6.32.45/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
37461@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
37462 return 0;
37463 }
37464
37465-static struct file_operations lcd_fops = {
37466+static const struct file_operations lcd_fops = {
37467 .write = lcd_write,
37468 .open = lcd_open,
37469 .release = lcd_release,
37470@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
37471 return 0;
37472 }
37473
37474-static struct file_operations keypad_fops = {
37475+static const struct file_operations keypad_fops = {
37476 .read = keypad_read, /* read */
37477 .open = keypad_open, /* open */
37478 .release = keypad_release, /* close */
37479diff -urNp linux-2.6.32.45/drivers/staging/phison/phison.c linux-2.6.32.45/drivers/staging/phison/phison.c
37480--- linux-2.6.32.45/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
37481+++ linux-2.6.32.45/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
37482@@ -43,7 +43,7 @@ static struct scsi_host_template phison_
37483 ATA_BMDMA_SHT(DRV_NAME),
37484 };
37485
37486-static struct ata_port_operations phison_ops = {
37487+static const struct ata_port_operations phison_ops = {
37488 .inherits = &ata_bmdma_port_ops,
37489 .prereset = phison_pre_reset,
37490 };
37491diff -urNp linux-2.6.32.45/drivers/staging/poch/poch.c linux-2.6.32.45/drivers/staging/poch/poch.c
37492--- linux-2.6.32.45/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
37493+++ linux-2.6.32.45/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
37494@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
37495 return 0;
37496 }
37497
37498-static struct file_operations poch_fops = {
37499+static const struct file_operations poch_fops = {
37500 .owner = THIS_MODULE,
37501 .open = poch_open,
37502 .release = poch_release,
37503diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/inode.c linux-2.6.32.45/drivers/staging/pohmelfs/inode.c
37504--- linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37505+++ linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
37506@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
37507 mutex_init(&psb->mcache_lock);
37508 psb->mcache_root = RB_ROOT;
37509 psb->mcache_timeout = msecs_to_jiffies(5000);
37510- atomic_long_set(&psb->mcache_gen, 0);
37511+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
37512
37513 psb->trans_max_pages = 100;
37514
37515@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
37516 INIT_LIST_HEAD(&psb->crypto_ready_list);
37517 INIT_LIST_HEAD(&psb->crypto_active_list);
37518
37519- atomic_set(&psb->trans_gen, 1);
37520+ atomic_set_unchecked(&psb->trans_gen, 1);
37521 atomic_long_set(&psb->total_inodes, 0);
37522
37523 mutex_init(&psb->state_lock);
37524diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c
37525--- linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
37526+++ linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
37527@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
37528 m->data = data;
37529 m->start = start;
37530 m->size = size;
37531- m->gen = atomic_long_inc_return(&psb->mcache_gen);
37532+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
37533
37534 mutex_lock(&psb->mcache_lock);
37535 err = pohmelfs_mcache_insert(psb, m);
37536diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h
37537--- linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
37538+++ linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
37539@@ -570,14 +570,14 @@ struct pohmelfs_config;
37540 struct pohmelfs_sb {
37541 struct rb_root mcache_root;
37542 struct mutex mcache_lock;
37543- atomic_long_t mcache_gen;
37544+ atomic_long_unchecked_t mcache_gen;
37545 unsigned long mcache_timeout;
37546
37547 unsigned int idx;
37548
37549 unsigned int trans_retries;
37550
37551- atomic_t trans_gen;
37552+ atomic_unchecked_t trans_gen;
37553
37554 unsigned int crypto_attached_size;
37555 unsigned int crypto_align_size;
37556diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/trans.c linux-2.6.32.45/drivers/staging/pohmelfs/trans.c
37557--- linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
37558+++ linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
37559@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
37560 int err;
37561 struct netfs_cmd *cmd = t->iovec.iov_base;
37562
37563- t->gen = atomic_inc_return(&psb->trans_gen);
37564+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
37565
37566 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
37567 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
37568diff -urNp linux-2.6.32.45/drivers/staging/sep/sep_driver.c linux-2.6.32.45/drivers/staging/sep/sep_driver.c
37569--- linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
37570+++ linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
37571@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
37572 static dev_t sep_devno;
37573
37574 /* the files operations structure of the driver */
37575-static struct file_operations sep_file_operations = {
37576+static const struct file_operations sep_file_operations = {
37577 .owner = THIS_MODULE,
37578 .ioctl = sep_ioctl,
37579 .poll = sep_poll,
37580diff -urNp linux-2.6.32.45/drivers/staging/usbip/usbip_common.h linux-2.6.32.45/drivers/staging/usbip/usbip_common.h
37581--- linux-2.6.32.45/drivers/staging/usbip/usbip_common.h 2011-04-17 17:00:52.000000000 -0400
37582+++ linux-2.6.32.45/drivers/staging/usbip/usbip_common.h 2011-08-23 20:24:26.000000000 -0400
37583@@ -374,7 +374,7 @@ struct usbip_device {
37584 void (*shutdown)(struct usbip_device *);
37585 void (*reset)(struct usbip_device *);
37586 void (*unusable)(struct usbip_device *);
37587- } eh_ops;
37588+ } __no_const eh_ops;
37589 };
37590
37591
37592diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci.h linux-2.6.32.45/drivers/staging/usbip/vhci.h
37593--- linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
37594+++ linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
37595@@ -92,7 +92,7 @@ struct vhci_hcd {
37596 unsigned resuming:1;
37597 unsigned long re_timeout;
37598
37599- atomic_t seqnum;
37600+ atomic_unchecked_t seqnum;
37601
37602 /*
37603 * NOTE:
37604diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c
37605--- linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
37606+++ linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
37607@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
37608 return;
37609 }
37610
37611- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37612+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37613 if (priv->seqnum == 0xffff)
37614 usbip_uinfo("seqnum max\n");
37615
37616@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
37617 return -ENOMEM;
37618 }
37619
37620- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37621+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37622 if (unlink->seqnum == 0xffff)
37623 usbip_uinfo("seqnum max\n");
37624
37625@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
37626 vdev->rhport = rhport;
37627 }
37628
37629- atomic_set(&vhci->seqnum, 0);
37630+ atomic_set_unchecked(&vhci->seqnum, 0);
37631 spin_lock_init(&vhci->lock);
37632
37633
37634diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c
37635--- linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
37636+++ linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
37637@@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
37638 usbip_uerr("cannot find a urb of seqnum %u\n",
37639 pdu->base.seqnum);
37640 usbip_uinfo("max seqnum %d\n",
37641- atomic_read(&the_controller->seqnum));
37642+ atomic_read_unchecked(&the_controller->seqnum));
37643 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37644 return;
37645 }
37646diff -urNp linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c
37647--- linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
37648+++ linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
37649@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
37650 static int __init vme_user_probe(struct device *, int, int);
37651 static int __exit vme_user_remove(struct device *, int, int);
37652
37653-static struct file_operations vme_user_fops = {
37654+static const struct file_operations vme_user_fops = {
37655 .open = vme_user_open,
37656 .release = vme_user_release,
37657 .read = vme_user_read,
37658diff -urNp linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c
37659--- linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c 2011-03-27 14:31:47.000000000 -0400
37660+++ linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 20:24:26.000000000 -0400
37661@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hf
37662
37663 struct usbctlx_completor {
37664 int (*complete) (struct usbctlx_completor *);
37665-};
37666+} __no_const;
37667 typedef struct usbctlx_completor usbctlx_completor_t;
37668
37669 static int
37670diff -urNp linux-2.6.32.45/drivers/telephony/ixj.c linux-2.6.32.45/drivers/telephony/ixj.c
37671--- linux-2.6.32.45/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
37672+++ linux-2.6.32.45/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
37673@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
37674 bool mContinue;
37675 char *pIn, *pOut;
37676
37677+ pax_track_stack();
37678+
37679 if (!SCI_Prepare(j))
37680 return 0;
37681
37682diff -urNp linux-2.6.32.45/drivers/uio/uio.c linux-2.6.32.45/drivers/uio/uio.c
37683--- linux-2.6.32.45/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
37684+++ linux-2.6.32.45/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
37685@@ -23,6 +23,7 @@
37686 #include <linux/string.h>
37687 #include <linux/kobject.h>
37688 #include <linux/uio_driver.h>
37689+#include <asm/local.h>
37690
37691 #define UIO_MAX_DEVICES 255
37692
37693@@ -30,10 +31,10 @@ struct uio_device {
37694 struct module *owner;
37695 struct device *dev;
37696 int minor;
37697- atomic_t event;
37698+ atomic_unchecked_t event;
37699 struct fasync_struct *async_queue;
37700 wait_queue_head_t wait;
37701- int vma_count;
37702+ local_t vma_count;
37703 struct uio_info *info;
37704 struct kobject *map_dir;
37705 struct kobject *portio_dir;
37706@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
37707 return entry->show(mem, buf);
37708 }
37709
37710-static struct sysfs_ops map_sysfs_ops = {
37711+static const struct sysfs_ops map_sysfs_ops = {
37712 .show = map_type_show,
37713 };
37714
37715@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
37716 return entry->show(port, buf);
37717 }
37718
37719-static struct sysfs_ops portio_sysfs_ops = {
37720+static const struct sysfs_ops portio_sysfs_ops = {
37721 .show = portio_type_show,
37722 };
37723
37724@@ -255,7 +256,7 @@ static ssize_t show_event(struct device
37725 struct uio_device *idev = dev_get_drvdata(dev);
37726 if (idev)
37727 return sprintf(buf, "%u\n",
37728- (unsigned int)atomic_read(&idev->event));
37729+ (unsigned int)atomic_read_unchecked(&idev->event));
37730 else
37731 return -ENODEV;
37732 }
37733@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
37734 {
37735 struct uio_device *idev = info->uio_dev;
37736
37737- atomic_inc(&idev->event);
37738+ atomic_inc_unchecked(&idev->event);
37739 wake_up_interruptible(&idev->wait);
37740 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37741 }
37742@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
37743 }
37744
37745 listener->dev = idev;
37746- listener->event_count = atomic_read(&idev->event);
37747+ listener->event_count = atomic_read_unchecked(&idev->event);
37748 filep->private_data = listener;
37749
37750 if (idev->info->open) {
37751@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
37752 return -EIO;
37753
37754 poll_wait(filep, &idev->wait, wait);
37755- if (listener->event_count != atomic_read(&idev->event))
37756+ if (listener->event_count != atomic_read_unchecked(&idev->event))
37757 return POLLIN | POLLRDNORM;
37758 return 0;
37759 }
37760@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
37761 do {
37762 set_current_state(TASK_INTERRUPTIBLE);
37763
37764- event_count = atomic_read(&idev->event);
37765+ event_count = atomic_read_unchecked(&idev->event);
37766 if (event_count != listener->event_count) {
37767 if (copy_to_user(buf, &event_count, count))
37768 retval = -EFAULT;
37769@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
37770 static void uio_vma_open(struct vm_area_struct *vma)
37771 {
37772 struct uio_device *idev = vma->vm_private_data;
37773- idev->vma_count++;
37774+ local_inc(&idev->vma_count);
37775 }
37776
37777 static void uio_vma_close(struct vm_area_struct *vma)
37778 {
37779 struct uio_device *idev = vma->vm_private_data;
37780- idev->vma_count--;
37781+ local_dec(&idev->vma_count);
37782 }
37783
37784 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37785@@ -840,7 +841,7 @@ int __uio_register_device(struct module
37786 idev->owner = owner;
37787 idev->info = info;
37788 init_waitqueue_head(&idev->wait);
37789- atomic_set(&idev->event, 0);
37790+ atomic_set_unchecked(&idev->event, 0);
37791
37792 ret = uio_get_minor(idev);
37793 if (ret)
37794diff -urNp linux-2.6.32.45/drivers/usb/atm/usbatm.c linux-2.6.32.45/drivers/usb/atm/usbatm.c
37795--- linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
37796+++ linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
37797@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
37798 if (printk_ratelimit())
37799 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37800 __func__, vpi, vci);
37801- atomic_inc(&vcc->stats->rx_err);
37802+ atomic_inc_unchecked(&vcc->stats->rx_err);
37803 return;
37804 }
37805
37806@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
37807 if (length > ATM_MAX_AAL5_PDU) {
37808 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37809 __func__, length, vcc);
37810- atomic_inc(&vcc->stats->rx_err);
37811+ atomic_inc_unchecked(&vcc->stats->rx_err);
37812 goto out;
37813 }
37814
37815@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
37816 if (sarb->len < pdu_length) {
37817 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37818 __func__, pdu_length, sarb->len, vcc);
37819- atomic_inc(&vcc->stats->rx_err);
37820+ atomic_inc_unchecked(&vcc->stats->rx_err);
37821 goto out;
37822 }
37823
37824 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37825 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37826 __func__, vcc);
37827- atomic_inc(&vcc->stats->rx_err);
37828+ atomic_inc_unchecked(&vcc->stats->rx_err);
37829 goto out;
37830 }
37831
37832@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
37833 if (printk_ratelimit())
37834 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37835 __func__, length);
37836- atomic_inc(&vcc->stats->rx_drop);
37837+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37838 goto out;
37839 }
37840
37841@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
37842
37843 vcc->push(vcc, skb);
37844
37845- atomic_inc(&vcc->stats->rx);
37846+ atomic_inc_unchecked(&vcc->stats->rx);
37847 out:
37848 skb_trim(sarb, 0);
37849 }
37850@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
37851 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37852
37853 usbatm_pop(vcc, skb);
37854- atomic_inc(&vcc->stats->tx);
37855+ atomic_inc_unchecked(&vcc->stats->tx);
37856
37857 skb = skb_dequeue(&instance->sndqueue);
37858 }
37859@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
37860 if (!left--)
37861 return sprintf(page,
37862 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37863- atomic_read(&atm_dev->stats.aal5.tx),
37864- atomic_read(&atm_dev->stats.aal5.tx_err),
37865- atomic_read(&atm_dev->stats.aal5.rx),
37866- atomic_read(&atm_dev->stats.aal5.rx_err),
37867- atomic_read(&atm_dev->stats.aal5.rx_drop));
37868+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37869+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37870+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37871+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37872+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37873
37874 if (!left--) {
37875 if (instance->disconnected)
37876diff -urNp linux-2.6.32.45/drivers/usb/class/cdc-wdm.c linux-2.6.32.45/drivers/usb/class/cdc-wdm.c
37877--- linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
37878+++ linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
37879@@ -314,7 +314,7 @@ static ssize_t wdm_write
37880 if (r < 0)
37881 goto outnp;
37882
37883- if (!file->f_flags && O_NONBLOCK)
37884+ if (!(file->f_flags & O_NONBLOCK))
37885 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
37886 &desc->flags));
37887 else
37888diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.c linux-2.6.32.45/drivers/usb/core/hcd.c
37889--- linux-2.6.32.45/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
37890+++ linux-2.6.32.45/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
37891@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
37892
37893 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
37894
37895-struct usb_mon_operations *mon_ops;
37896+const struct usb_mon_operations *mon_ops;
37897
37898 /*
37899 * The registration is unlocked.
37900@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
37901 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
37902 */
37903
37904-int usb_mon_register (struct usb_mon_operations *ops)
37905+int usb_mon_register (const struct usb_mon_operations *ops)
37906 {
37907
37908 if (mon_ops)
37909diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.h linux-2.6.32.45/drivers/usb/core/hcd.h
37910--- linux-2.6.32.45/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
37911+++ linux-2.6.32.45/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
37912@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
37913 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
37914
37915 struct usb_mon_operations {
37916- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
37917- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
37918- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
37919+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
37920+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
37921+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
37922 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
37923 };
37924
37925-extern struct usb_mon_operations *mon_ops;
37926+extern const struct usb_mon_operations *mon_ops;
37927
37928 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
37929 {
37930@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
37931 (*mon_ops->urb_complete)(bus, urb, status);
37932 }
37933
37934-int usb_mon_register(struct usb_mon_operations *ops);
37935+int usb_mon_register(const struct usb_mon_operations *ops);
37936 void usb_mon_deregister(void);
37937
37938 #else
37939diff -urNp linux-2.6.32.45/drivers/usb/core/message.c linux-2.6.32.45/drivers/usb/core/message.c
37940--- linux-2.6.32.45/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
37941+++ linux-2.6.32.45/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
37942@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
37943 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
37944 if (buf) {
37945 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
37946- if (len > 0) {
37947- smallbuf = kmalloc(++len, GFP_NOIO);
37948+ if (len++ > 0) {
37949+ smallbuf = kmalloc(len, GFP_NOIO);
37950 if (!smallbuf)
37951 return buf;
37952 memcpy(smallbuf, buf, len);
37953diff -urNp linux-2.6.32.45/drivers/usb/misc/appledisplay.c linux-2.6.32.45/drivers/usb/misc/appledisplay.c
37954--- linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
37955+++ linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
37956@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
37957 return pdata->msgdata[1];
37958 }
37959
37960-static struct backlight_ops appledisplay_bl_data = {
37961+static const struct backlight_ops appledisplay_bl_data = {
37962 .get_brightness = appledisplay_bl_get_brightness,
37963 .update_status = appledisplay_bl_update_status,
37964 };
37965diff -urNp linux-2.6.32.45/drivers/usb/mon/mon_main.c linux-2.6.32.45/drivers/usb/mon/mon_main.c
37966--- linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
37967+++ linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
37968@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
37969 /*
37970 * Ops
37971 */
37972-static struct usb_mon_operations mon_ops_0 = {
37973+static const struct usb_mon_operations mon_ops_0 = {
37974 .urb_submit = mon_submit,
37975 .urb_submit_error = mon_submit_error,
37976 .urb_complete = mon_complete,
37977diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h
37978--- linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
37979+++ linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
37980@@ -192,7 +192,7 @@ struct wahc {
37981 struct list_head xfer_delayed_list;
37982 spinlock_t xfer_list_lock;
37983 struct work_struct xfer_work;
37984- atomic_t xfer_id_count;
37985+ atomic_unchecked_t xfer_id_count;
37986 };
37987
37988
37989@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
37990 INIT_LIST_HEAD(&wa->xfer_delayed_list);
37991 spin_lock_init(&wa->xfer_list_lock);
37992 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
37993- atomic_set(&wa->xfer_id_count, 1);
37994+ atomic_set_unchecked(&wa->xfer_id_count, 1);
37995 }
37996
37997 /**
37998diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c
37999--- linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
38000+++ linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
38001@@ -293,7 +293,7 @@ out:
38002 */
38003 static void wa_xfer_id_init(struct wa_xfer *xfer)
38004 {
38005- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38006+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38007 }
38008
38009 /*
38010diff -urNp linux-2.6.32.45/drivers/uwb/wlp/messages.c linux-2.6.32.45/drivers/uwb/wlp/messages.c
38011--- linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
38012+++ linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
38013@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
38014 size_t len = skb->len;
38015 size_t used;
38016 ssize_t result;
38017- struct wlp_nonce enonce, rnonce;
38018+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
38019 enum wlp_assc_error assc_err;
38020 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
38021 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
38022diff -urNp linux-2.6.32.45/drivers/uwb/wlp/sysfs.c linux-2.6.32.45/drivers/uwb/wlp/sysfs.c
38023--- linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
38024+++ linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
38025@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
38026 return ret;
38027 }
38028
38029-static
38030-struct sysfs_ops wss_sysfs_ops = {
38031+static const struct sysfs_ops wss_sysfs_ops = {
38032 .show = wlp_wss_attr_show,
38033 .store = wlp_wss_attr_store,
38034 };
38035diff -urNp linux-2.6.32.45/drivers/video/atmel_lcdfb.c linux-2.6.32.45/drivers/video/atmel_lcdfb.c
38036--- linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
38037+++ linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
38038@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
38039 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
38040 }
38041
38042-static struct backlight_ops atmel_lcdc_bl_ops = {
38043+static const struct backlight_ops atmel_lcdc_bl_ops = {
38044 .update_status = atmel_bl_update_status,
38045 .get_brightness = atmel_bl_get_brightness,
38046 };
38047diff -urNp linux-2.6.32.45/drivers/video/aty/aty128fb.c linux-2.6.32.45/drivers/video/aty/aty128fb.c
38048--- linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
38049+++ linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
38050@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
38051 return bd->props.brightness;
38052 }
38053
38054-static struct backlight_ops aty128_bl_data = {
38055+static const struct backlight_ops aty128_bl_data = {
38056 .get_brightness = aty128_bl_get_brightness,
38057 .update_status = aty128_bl_update_status,
38058 };
38059diff -urNp linux-2.6.32.45/drivers/video/aty/atyfb_base.c linux-2.6.32.45/drivers/video/aty/atyfb_base.c
38060--- linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
38061+++ linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
38062@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
38063 return bd->props.brightness;
38064 }
38065
38066-static struct backlight_ops aty_bl_data = {
38067+static const struct backlight_ops aty_bl_data = {
38068 .get_brightness = aty_bl_get_brightness,
38069 .update_status = aty_bl_update_status,
38070 };
38071diff -urNp linux-2.6.32.45/drivers/video/aty/radeon_backlight.c linux-2.6.32.45/drivers/video/aty/radeon_backlight.c
38072--- linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
38073+++ linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
38074@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
38075 return bd->props.brightness;
38076 }
38077
38078-static struct backlight_ops radeon_bl_data = {
38079+static const struct backlight_ops radeon_bl_data = {
38080 .get_brightness = radeon_bl_get_brightness,
38081 .update_status = radeon_bl_update_status,
38082 };
38083diff -urNp linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c
38084--- linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
38085+++ linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
38086@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
38087 return error ? data->current_brightness : reg_val;
38088 }
38089
38090-static struct backlight_ops adp5520_bl_ops = {
38091+static const struct backlight_ops adp5520_bl_ops = {
38092 .update_status = adp5520_bl_update_status,
38093 .get_brightness = adp5520_bl_get_brightness,
38094 };
38095diff -urNp linux-2.6.32.45/drivers/video/backlight/adx_bl.c linux-2.6.32.45/drivers/video/backlight/adx_bl.c
38096--- linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
38097+++ linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
38098@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
38099 return 1;
38100 }
38101
38102-static struct backlight_ops adx_backlight_ops = {
38103+static const struct backlight_ops adx_backlight_ops = {
38104 .options = 0,
38105 .update_status = adx_backlight_update_status,
38106 .get_brightness = adx_backlight_get_brightness,
38107diff -urNp linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c
38108--- linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
38109+++ linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
38110@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
38111 return pwm_channel_enable(&pwmbl->pwmc);
38112 }
38113
38114-static struct backlight_ops atmel_pwm_bl_ops = {
38115+static const struct backlight_ops atmel_pwm_bl_ops = {
38116 .get_brightness = atmel_pwm_bl_get_intensity,
38117 .update_status = atmel_pwm_bl_set_intensity,
38118 };
38119diff -urNp linux-2.6.32.45/drivers/video/backlight/backlight.c linux-2.6.32.45/drivers/video/backlight/backlight.c
38120--- linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
38121+++ linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
38122@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
38123 * ERR_PTR() or a pointer to the newly allocated device.
38124 */
38125 struct backlight_device *backlight_device_register(const char *name,
38126- struct device *parent, void *devdata, struct backlight_ops *ops)
38127+ struct device *parent, void *devdata, const struct backlight_ops *ops)
38128 {
38129 struct backlight_device *new_bd;
38130 int rc;
38131diff -urNp linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c
38132--- linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
38133+++ linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
38134@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
38135 }
38136 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
38137
38138-static struct backlight_ops corgi_bl_ops = {
38139+static const struct backlight_ops corgi_bl_ops = {
38140 .get_brightness = corgi_bl_get_intensity,
38141 .update_status = corgi_bl_update_status,
38142 };
38143diff -urNp linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c
38144--- linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
38145+++ linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
38146@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
38147 return intensity;
38148 }
38149
38150-static struct backlight_ops cr_backlight_ops = {
38151+static const struct backlight_ops cr_backlight_ops = {
38152 .get_brightness = cr_backlight_get_intensity,
38153 .update_status = cr_backlight_set_intensity,
38154 };
38155diff -urNp linux-2.6.32.45/drivers/video/backlight/da903x_bl.c linux-2.6.32.45/drivers/video/backlight/da903x_bl.c
38156--- linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
38157+++ linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
38158@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
38159 return data->current_brightness;
38160 }
38161
38162-static struct backlight_ops da903x_backlight_ops = {
38163+static const struct backlight_ops da903x_backlight_ops = {
38164 .update_status = da903x_backlight_update_status,
38165 .get_brightness = da903x_backlight_get_brightness,
38166 };
38167diff -urNp linux-2.6.32.45/drivers/video/backlight/generic_bl.c linux-2.6.32.45/drivers/video/backlight/generic_bl.c
38168--- linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
38169+++ linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
38170@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
38171 }
38172 EXPORT_SYMBOL(corgibl_limit_intensity);
38173
38174-static struct backlight_ops genericbl_ops = {
38175+static const struct backlight_ops genericbl_ops = {
38176 .options = BL_CORE_SUSPENDRESUME,
38177 .get_brightness = genericbl_get_intensity,
38178 .update_status = genericbl_send_intensity,
38179diff -urNp linux-2.6.32.45/drivers/video/backlight/hp680_bl.c linux-2.6.32.45/drivers/video/backlight/hp680_bl.c
38180--- linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
38181+++ linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
38182@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
38183 return current_intensity;
38184 }
38185
38186-static struct backlight_ops hp680bl_ops = {
38187+static const struct backlight_ops hp680bl_ops = {
38188 .get_brightness = hp680bl_get_intensity,
38189 .update_status = hp680bl_set_intensity,
38190 };
38191diff -urNp linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c
38192--- linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
38193+++ linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
38194@@ -93,7 +93,7 @@ out:
38195 return ret;
38196 }
38197
38198-static struct backlight_ops jornada_bl_ops = {
38199+static const struct backlight_ops jornada_bl_ops = {
38200 .get_brightness = jornada_bl_get_brightness,
38201 .update_status = jornada_bl_update_status,
38202 .options = BL_CORE_SUSPENDRESUME,
38203diff -urNp linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c
38204--- linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
38205+++ linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
38206@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
38207 return kb3886bl_intensity;
38208 }
38209
38210-static struct backlight_ops kb3886bl_ops = {
38211+static const struct backlight_ops kb3886bl_ops = {
38212 .get_brightness = kb3886bl_get_intensity,
38213 .update_status = kb3886bl_send_intensity,
38214 };
38215diff -urNp linux-2.6.32.45/drivers/video/backlight/locomolcd.c linux-2.6.32.45/drivers/video/backlight/locomolcd.c
38216--- linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
38217+++ linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
38218@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
38219 return current_intensity;
38220 }
38221
38222-static struct backlight_ops locomobl_data = {
38223+static const struct backlight_ops locomobl_data = {
38224 .get_brightness = locomolcd_get_intensity,
38225 .update_status = locomolcd_set_intensity,
38226 };
38227diff -urNp linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c
38228--- linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
38229+++ linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
38230@@ -33,7 +33,7 @@ struct dmi_match_data {
38231 unsigned long iostart;
38232 unsigned long iolen;
38233 /* Backlight operations structure. */
38234- struct backlight_ops backlight_ops;
38235+ const struct backlight_ops backlight_ops;
38236 };
38237
38238 /* Module parameters. */
38239diff -urNp linux-2.6.32.45/drivers/video/backlight/omap1_bl.c linux-2.6.32.45/drivers/video/backlight/omap1_bl.c
38240--- linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
38241+++ linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
38242@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
38243 return bl->current_intensity;
38244 }
38245
38246-static struct backlight_ops omapbl_ops = {
38247+static const struct backlight_ops omapbl_ops = {
38248 .get_brightness = omapbl_get_intensity,
38249 .update_status = omapbl_update_status,
38250 };
38251diff -urNp linux-2.6.32.45/drivers/video/backlight/progear_bl.c linux-2.6.32.45/drivers/video/backlight/progear_bl.c
38252--- linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
38253+++ linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
38254@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
38255 return intensity - HW_LEVEL_MIN;
38256 }
38257
38258-static struct backlight_ops progearbl_ops = {
38259+static const struct backlight_ops progearbl_ops = {
38260 .get_brightness = progearbl_get_intensity,
38261 .update_status = progearbl_set_intensity,
38262 };
38263diff -urNp linux-2.6.32.45/drivers/video/backlight/pwm_bl.c linux-2.6.32.45/drivers/video/backlight/pwm_bl.c
38264--- linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
38265+++ linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
38266@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
38267 return bl->props.brightness;
38268 }
38269
38270-static struct backlight_ops pwm_backlight_ops = {
38271+static const struct backlight_ops pwm_backlight_ops = {
38272 .update_status = pwm_backlight_update_status,
38273 .get_brightness = pwm_backlight_get_brightness,
38274 };
38275diff -urNp linux-2.6.32.45/drivers/video/backlight/tosa_bl.c linux-2.6.32.45/drivers/video/backlight/tosa_bl.c
38276--- linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
38277+++ linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
38278@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
38279 return props->brightness;
38280 }
38281
38282-static struct backlight_ops bl_ops = {
38283+static const struct backlight_ops bl_ops = {
38284 .get_brightness = tosa_bl_get_brightness,
38285 .update_status = tosa_bl_update_status,
38286 };
38287diff -urNp linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c
38288--- linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
38289+++ linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
38290@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
38291 return data->current_brightness;
38292 }
38293
38294-static struct backlight_ops wm831x_backlight_ops = {
38295+static const struct backlight_ops wm831x_backlight_ops = {
38296 .options = BL_CORE_SUSPENDRESUME,
38297 .update_status = wm831x_backlight_update_status,
38298 .get_brightness = wm831x_backlight_get_brightness,
38299diff -urNp linux-2.6.32.45/drivers/video/bf54x-lq043fb.c linux-2.6.32.45/drivers/video/bf54x-lq043fb.c
38300--- linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
38301+++ linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
38302@@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
38303 return 0;
38304 }
38305
38306-static struct backlight_ops bfin_lq043fb_bl_ops = {
38307+static const struct backlight_ops bfin_lq043fb_bl_ops = {
38308 .get_brightness = bl_get_brightness,
38309 };
38310
38311diff -urNp linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c
38312--- linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
38313+++ linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
38314@@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
38315 return 0;
38316 }
38317
38318-static struct backlight_ops bfin_lq043fb_bl_ops = {
38319+static const struct backlight_ops bfin_lq043fb_bl_ops = {
38320 .get_brightness = bl_get_brightness,
38321 };
38322
38323diff -urNp linux-2.6.32.45/drivers/video/fbcmap.c linux-2.6.32.45/drivers/video/fbcmap.c
38324--- linux-2.6.32.45/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
38325+++ linux-2.6.32.45/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
38326@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
38327 rc = -ENODEV;
38328 goto out;
38329 }
38330- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38331- !info->fbops->fb_setcmap)) {
38332+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38333 rc = -EINVAL;
38334 goto out1;
38335 }
38336diff -urNp linux-2.6.32.45/drivers/video/fbmem.c linux-2.6.32.45/drivers/video/fbmem.c
38337--- linux-2.6.32.45/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
38338+++ linux-2.6.32.45/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
38339@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
38340 image->dx += image->width + 8;
38341 }
38342 } else if (rotate == FB_ROTATE_UD) {
38343- for (x = 0; x < num && image->dx >= 0; x++) {
38344+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38345 info->fbops->fb_imageblit(info, image);
38346 image->dx -= image->width + 8;
38347 }
38348@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
38349 image->dy += image->height + 8;
38350 }
38351 } else if (rotate == FB_ROTATE_CCW) {
38352- for (x = 0; x < num && image->dy >= 0; x++) {
38353+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38354 info->fbops->fb_imageblit(info, image);
38355 image->dy -= image->height + 8;
38356 }
38357@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
38358 int flags = info->flags;
38359 int ret = 0;
38360
38361+ pax_track_stack();
38362+
38363 if (var->activate & FB_ACTIVATE_INV_MODE) {
38364 struct fb_videomode mode1, mode2;
38365
38366@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
38367 void __user *argp = (void __user *)arg;
38368 long ret = 0;
38369
38370+ pax_track_stack();
38371+
38372 switch (cmd) {
38373 case FBIOGET_VSCREENINFO:
38374 if (!lock_fb_info(info))
38375@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
38376 return -EFAULT;
38377 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38378 return -EINVAL;
38379- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38380+ if (con2fb.framebuffer >= FB_MAX)
38381 return -EINVAL;
38382 if (!registered_fb[con2fb.framebuffer])
38383 request_module("fb%d", con2fb.framebuffer);
38384diff -urNp linux-2.6.32.45/drivers/video/i810/i810_accel.c linux-2.6.32.45/drivers/video/i810/i810_accel.c
38385--- linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
38386+++ linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
38387@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
38388 }
38389 }
38390 printk("ringbuffer lockup!!!\n");
38391+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38392 i810_report_error(mmio);
38393 par->dev_flags |= LOCKUP;
38394 info->pixmap.scan_align = 1;
38395diff -urNp linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c
38396--- linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
38397+++ linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
38398@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
38399 return bd->props.brightness;
38400 }
38401
38402-static struct backlight_ops nvidia_bl_ops = {
38403+static const struct backlight_ops nvidia_bl_ops = {
38404 .get_brightness = nvidia_bl_get_brightness,
38405 .update_status = nvidia_bl_update_status,
38406 };
38407diff -urNp linux-2.6.32.45/drivers/video/riva/fbdev.c linux-2.6.32.45/drivers/video/riva/fbdev.c
38408--- linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
38409+++ linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
38410@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
38411 return bd->props.brightness;
38412 }
38413
38414-static struct backlight_ops riva_bl_ops = {
38415+static const struct backlight_ops riva_bl_ops = {
38416 .get_brightness = riva_bl_get_brightness,
38417 .update_status = riva_bl_update_status,
38418 };
38419diff -urNp linux-2.6.32.45/drivers/video/uvesafb.c linux-2.6.32.45/drivers/video/uvesafb.c
38420--- linux-2.6.32.45/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
38421+++ linux-2.6.32.45/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
38422@@ -18,6 +18,7 @@
38423 #include <linux/fb.h>
38424 #include <linux/io.h>
38425 #include <linux/mutex.h>
38426+#include <linux/moduleloader.h>
38427 #include <video/edid.h>
38428 #include <video/uvesafb.h>
38429 #ifdef CONFIG_X86
38430@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
38431 NULL,
38432 };
38433
38434- return call_usermodehelper(v86d_path, argv, envp, 1);
38435+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38436 }
38437
38438 /*
38439@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
38440 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38441 par->pmi_setpal = par->ypan = 0;
38442 } else {
38443+
38444+#ifdef CONFIG_PAX_KERNEXEC
38445+#ifdef CONFIG_MODULES
38446+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38447+#endif
38448+ if (!par->pmi_code) {
38449+ par->pmi_setpal = par->ypan = 0;
38450+ return 0;
38451+ }
38452+#endif
38453+
38454 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38455 + task->t.regs.edi);
38456+
38457+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38458+ pax_open_kernel();
38459+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
38460+ pax_close_kernel();
38461+
38462+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
38463+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
38464+#else
38465 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
38466 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
38467+#endif
38468+
38469 printk(KERN_INFO "uvesafb: protected mode interface info at "
38470 "%04x:%04x\n",
38471 (u16)task->t.regs.es, (u16)task->t.regs.edi);
38472@@ -1799,6 +1822,11 @@ out:
38473 if (par->vbe_modes)
38474 kfree(par->vbe_modes);
38475
38476+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38477+ if (par->pmi_code)
38478+ module_free_exec(NULL, par->pmi_code);
38479+#endif
38480+
38481 framebuffer_release(info);
38482 return err;
38483 }
38484@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
38485 kfree(par->vbe_state_orig);
38486 if (par->vbe_state_saved)
38487 kfree(par->vbe_state_saved);
38488+
38489+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38490+ if (par->pmi_code)
38491+ module_free_exec(NULL, par->pmi_code);
38492+#endif
38493+
38494 }
38495
38496 framebuffer_release(info);
38497diff -urNp linux-2.6.32.45/drivers/video/vesafb.c linux-2.6.32.45/drivers/video/vesafb.c
38498--- linux-2.6.32.45/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
38499+++ linux-2.6.32.45/drivers/video/vesafb.c 2011-08-05 20:33:55.000000000 -0400
38500@@ -9,6 +9,7 @@
38501 */
38502
38503 #include <linux/module.h>
38504+#include <linux/moduleloader.h>
38505 #include <linux/kernel.h>
38506 #include <linux/errno.h>
38507 #include <linux/string.h>
38508@@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
38509 static int vram_total __initdata; /* Set total amount of memory */
38510 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
38511 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
38512-static void (*pmi_start)(void) __read_mostly;
38513-static void (*pmi_pal) (void) __read_mostly;
38514+static void (*pmi_start)(void) __read_only;
38515+static void (*pmi_pal) (void) __read_only;
38516 static int depth __read_mostly;
38517 static int vga_compat __read_mostly;
38518 /* --------------------------------------------------------------------- */
38519@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
38520 unsigned int size_vmode;
38521 unsigned int size_remap;
38522 unsigned int size_total;
38523+ void *pmi_code = NULL;
38524
38525 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
38526 return -ENODEV;
38527@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
38528 size_remap = size_total;
38529 vesafb_fix.smem_len = size_remap;
38530
38531-#ifndef __i386__
38532- screen_info.vesapm_seg = 0;
38533-#endif
38534-
38535 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
38536 printk(KERN_WARNING
38537 "vesafb: cannot reserve video memory at 0x%lx\n",
38538@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
38539 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
38540 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
38541
38542+#ifdef __i386__
38543+
38544+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38545+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
38546+ if (!pmi_code)
38547+#elif !defined(CONFIG_PAX_KERNEXEC)
38548+ if (0)
38549+#endif
38550+
38551+#endif
38552+ screen_info.vesapm_seg = 0;
38553+
38554 if (screen_info.vesapm_seg) {
38555- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
38556- screen_info.vesapm_seg,screen_info.vesapm_off);
38557+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
38558+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
38559 }
38560
38561 if (screen_info.vesapm_seg < 0xc000)
38562@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
38563
38564 if (ypan || pmi_setpal) {
38565 unsigned short *pmi_base;
38566+
38567 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
38568- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
38569- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
38570+
38571+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38572+ pax_open_kernel();
38573+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
38574+#else
38575+ pmi_code = pmi_base;
38576+#endif
38577+
38578+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
38579+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
38580+
38581+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38582+ pmi_start = ktva_ktla(pmi_start);
38583+ pmi_pal = ktva_ktla(pmi_pal);
38584+ pax_close_kernel();
38585+#endif
38586+
38587 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
38588 if (pmi_base[3]) {
38589 printk(KERN_INFO "vesafb: pmi: ports = ");
38590@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
38591 info->node, info->fix.id);
38592 return 0;
38593 err:
38594+
38595+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38596+ module_free_exec(NULL, pmi_code);
38597+#endif
38598+
38599 if (info->screen_base)
38600 iounmap(info->screen_base);
38601 framebuffer_release(info);
38602diff -urNp linux-2.6.32.45/drivers/xen/sys-hypervisor.c linux-2.6.32.45/drivers/xen/sys-hypervisor.c
38603--- linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
38604+++ linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
38605@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
38606 return 0;
38607 }
38608
38609-static struct sysfs_ops hyp_sysfs_ops = {
38610+static const struct sysfs_ops hyp_sysfs_ops = {
38611 .show = hyp_sysfs_show,
38612 .store = hyp_sysfs_store,
38613 };
38614diff -urNp linux-2.6.32.45/fs/9p/vfs_inode.c linux-2.6.32.45/fs/9p/vfs_inode.c
38615--- linux-2.6.32.45/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
38616+++ linux-2.6.32.45/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
38617@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
38618 static void
38619 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38620 {
38621- char *s = nd_get_link(nd);
38622+ const char *s = nd_get_link(nd);
38623
38624 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
38625 IS_ERR(s) ? "<error>" : s);
38626diff -urNp linux-2.6.32.45/fs/aio.c linux-2.6.32.45/fs/aio.c
38627--- linux-2.6.32.45/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
38628+++ linux-2.6.32.45/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
38629@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
38630 size += sizeof(struct io_event) * nr_events;
38631 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
38632
38633- if (nr_pages < 0)
38634+ if (nr_pages <= 0)
38635 return -EINVAL;
38636
38637 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
38638@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
38639 struct aio_timeout to;
38640 int retry = 0;
38641
38642+ pax_track_stack();
38643+
38644 /* needed to zero any padding within an entry (there shouldn't be
38645 * any, but C is fun!
38646 */
38647@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
38648 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
38649 {
38650 ssize_t ret;
38651+ struct iovec iovstack;
38652
38653 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
38654 kiocb->ki_nbytes, 1,
38655- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
38656+ &iovstack, &kiocb->ki_iovec);
38657 if (ret < 0)
38658 goto out;
38659
38660+ if (kiocb->ki_iovec == &iovstack) {
38661+ kiocb->ki_inline_vec = iovstack;
38662+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
38663+ }
38664 kiocb->ki_nr_segs = kiocb->ki_nbytes;
38665 kiocb->ki_cur_seg = 0;
38666 /* ki_nbytes/left now reflect bytes instead of segs */
38667diff -urNp linux-2.6.32.45/fs/attr.c linux-2.6.32.45/fs/attr.c
38668--- linux-2.6.32.45/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
38669+++ linux-2.6.32.45/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
38670@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
38671 unsigned long limit;
38672
38673 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
38674+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
38675 if (limit != RLIM_INFINITY && offset > limit)
38676 goto out_sig;
38677 if (offset > inode->i_sb->s_maxbytes)
38678diff -urNp linux-2.6.32.45/fs/autofs/root.c linux-2.6.32.45/fs/autofs/root.c
38679--- linux-2.6.32.45/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
38680+++ linux-2.6.32.45/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
38681@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
38682 set_bit(n,sbi->symlink_bitmap);
38683 sl = &sbi->symlink[n];
38684 sl->len = strlen(symname);
38685- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
38686+ slsize = sl->len+1;
38687+ sl->data = kmalloc(slsize, GFP_KERNEL);
38688 if (!sl->data) {
38689 clear_bit(n,sbi->symlink_bitmap);
38690 unlock_kernel();
38691diff -urNp linux-2.6.32.45/fs/autofs4/symlink.c linux-2.6.32.45/fs/autofs4/symlink.c
38692--- linux-2.6.32.45/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
38693+++ linux-2.6.32.45/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
38694@@ -15,7 +15,7 @@
38695 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
38696 {
38697 struct autofs_info *ino = autofs4_dentry_ino(dentry);
38698- nd_set_link(nd, (char *)ino->u.symlink);
38699+ nd_set_link(nd, ino->u.symlink);
38700 return NULL;
38701 }
38702
38703diff -urNp linux-2.6.32.45/fs/befs/linuxvfs.c linux-2.6.32.45/fs/befs/linuxvfs.c
38704--- linux-2.6.32.45/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
38705+++ linux-2.6.32.45/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
38706@@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
38707 {
38708 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
38709 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
38710- char *link = nd_get_link(nd);
38711+ const char *link = nd_get_link(nd);
38712 if (!IS_ERR(link))
38713 kfree(link);
38714 }
38715diff -urNp linux-2.6.32.45/fs/binfmt_aout.c linux-2.6.32.45/fs/binfmt_aout.c
38716--- linux-2.6.32.45/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
38717+++ linux-2.6.32.45/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
38718@@ -16,6 +16,7 @@
38719 #include <linux/string.h>
38720 #include <linux/fs.h>
38721 #include <linux/file.h>
38722+#include <linux/security.h>
38723 #include <linux/stat.h>
38724 #include <linux/fcntl.h>
38725 #include <linux/ptrace.h>
38726@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
38727 #endif
38728 # define START_STACK(u) (u.start_stack)
38729
38730+ memset(&dump, 0, sizeof(dump));
38731+
38732 fs = get_fs();
38733 set_fs(KERNEL_DS);
38734 has_dumped = 1;
38735@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
38736
38737 /* If the size of the dump file exceeds the rlimit, then see what would happen
38738 if we wrote the stack, but not the data area. */
38739+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
38740 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
38741 dump.u_dsize = 0;
38742
38743 /* Make sure we have enough room to write the stack and data areas. */
38744+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
38745 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
38746 dump.u_ssize = 0;
38747
38748@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
38749 dump_size = dump.u_ssize << PAGE_SHIFT;
38750 DUMP_WRITE(dump_start,dump_size);
38751 }
38752-/* Finally dump the task struct. Not be used by gdb, but could be useful */
38753- set_fs(KERNEL_DS);
38754- DUMP_WRITE(current,sizeof(*current));
38755+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
38756 end_coredump:
38757 set_fs(fs);
38758 return has_dumped;
38759@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
38760 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
38761 if (rlim >= RLIM_INFINITY)
38762 rlim = ~0;
38763+
38764+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
38765 if (ex.a_data + ex.a_bss > rlim)
38766 return -ENOMEM;
38767
38768@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
38769 install_exec_creds(bprm);
38770 current->flags &= ~PF_FORKNOEXEC;
38771
38772+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
38773+ current->mm->pax_flags = 0UL;
38774+#endif
38775+
38776+#ifdef CONFIG_PAX_PAGEEXEC
38777+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
38778+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
38779+
38780+#ifdef CONFIG_PAX_EMUTRAMP
38781+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
38782+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
38783+#endif
38784+
38785+#ifdef CONFIG_PAX_MPROTECT
38786+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
38787+ current->mm->pax_flags |= MF_PAX_MPROTECT;
38788+#endif
38789+
38790+ }
38791+#endif
38792+
38793 if (N_MAGIC(ex) == OMAGIC) {
38794 unsigned long text_addr, map_size;
38795 loff_t pos;
38796@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
38797
38798 down_write(&current->mm->mmap_sem);
38799 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
38800- PROT_READ | PROT_WRITE | PROT_EXEC,
38801+ PROT_READ | PROT_WRITE,
38802 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
38803 fd_offset + ex.a_text);
38804 up_write(&current->mm->mmap_sem);
38805diff -urNp linux-2.6.32.45/fs/binfmt_elf.c linux-2.6.32.45/fs/binfmt_elf.c
38806--- linux-2.6.32.45/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
38807+++ linux-2.6.32.45/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
38808@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
38809 #define elf_core_dump NULL
38810 #endif
38811
38812+#ifdef CONFIG_PAX_MPROTECT
38813+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
38814+#endif
38815+
38816 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
38817 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
38818 #else
38819@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
38820 .load_binary = load_elf_binary,
38821 .load_shlib = load_elf_library,
38822 .core_dump = elf_core_dump,
38823+
38824+#ifdef CONFIG_PAX_MPROTECT
38825+ .handle_mprotect= elf_handle_mprotect,
38826+#endif
38827+
38828 .min_coredump = ELF_EXEC_PAGESIZE,
38829 .hasvdso = 1
38830 };
38831@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
38832
38833 static int set_brk(unsigned long start, unsigned long end)
38834 {
38835+ unsigned long e = end;
38836+
38837 start = ELF_PAGEALIGN(start);
38838 end = ELF_PAGEALIGN(end);
38839 if (end > start) {
38840@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
38841 if (BAD_ADDR(addr))
38842 return addr;
38843 }
38844- current->mm->start_brk = current->mm->brk = end;
38845+ current->mm->start_brk = current->mm->brk = e;
38846 return 0;
38847 }
38848
38849@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
38850 elf_addr_t __user *u_rand_bytes;
38851 const char *k_platform = ELF_PLATFORM;
38852 const char *k_base_platform = ELF_BASE_PLATFORM;
38853- unsigned char k_rand_bytes[16];
38854+ u32 k_rand_bytes[4];
38855 int items;
38856 elf_addr_t *elf_info;
38857 int ei_index = 0;
38858 const struct cred *cred = current_cred();
38859 struct vm_area_struct *vma;
38860+ unsigned long saved_auxv[AT_VECTOR_SIZE];
38861+
38862+ pax_track_stack();
38863
38864 /*
38865 * In some cases (e.g. Hyper-Threading), we want to avoid L1
38866@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
38867 * Generate 16 random bytes for userspace PRNG seeding.
38868 */
38869 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
38870- u_rand_bytes = (elf_addr_t __user *)
38871- STACK_ALLOC(p, sizeof(k_rand_bytes));
38872+ srandom32(k_rand_bytes[0] ^ random32());
38873+ srandom32(k_rand_bytes[1] ^ random32());
38874+ srandom32(k_rand_bytes[2] ^ random32());
38875+ srandom32(k_rand_bytes[3] ^ random32());
38876+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
38877+ u_rand_bytes = (elf_addr_t __user *) p;
38878 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
38879 return -EFAULT;
38880
38881@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
38882 return -EFAULT;
38883 current->mm->env_end = p;
38884
38885+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
38886+
38887 /* Put the elf_info on the stack in the right place. */
38888 sp = (elf_addr_t __user *)envp + 1;
38889- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
38890+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
38891 return -EFAULT;
38892 return 0;
38893 }
38894@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
38895 {
38896 struct elf_phdr *elf_phdata;
38897 struct elf_phdr *eppnt;
38898- unsigned long load_addr = 0;
38899+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
38900 int load_addr_set = 0;
38901 unsigned long last_bss = 0, elf_bss = 0;
38902- unsigned long error = ~0UL;
38903+ unsigned long error = -EINVAL;
38904 unsigned long total_size;
38905 int retval, i, size;
38906
38907@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
38908 goto out_close;
38909 }
38910
38911+#ifdef CONFIG_PAX_SEGMEXEC
38912+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
38913+ pax_task_size = SEGMEXEC_TASK_SIZE;
38914+#endif
38915+
38916 eppnt = elf_phdata;
38917 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
38918 if (eppnt->p_type == PT_LOAD) {
38919@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
38920 k = load_addr + eppnt->p_vaddr;
38921 if (BAD_ADDR(k) ||
38922 eppnt->p_filesz > eppnt->p_memsz ||
38923- eppnt->p_memsz > TASK_SIZE ||
38924- TASK_SIZE - eppnt->p_memsz < k) {
38925+ eppnt->p_memsz > pax_task_size ||
38926+ pax_task_size - eppnt->p_memsz < k) {
38927 error = -ENOMEM;
38928 goto out_close;
38929 }
38930@@ -532,6 +557,194 @@ out:
38931 return error;
38932 }
38933
38934+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
38935+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
38936+{
38937+ unsigned long pax_flags = 0UL;
38938+
38939+#ifdef CONFIG_PAX_PAGEEXEC
38940+ if (elf_phdata->p_flags & PF_PAGEEXEC)
38941+ pax_flags |= MF_PAX_PAGEEXEC;
38942+#endif
38943+
38944+#ifdef CONFIG_PAX_SEGMEXEC
38945+ if (elf_phdata->p_flags & PF_SEGMEXEC)
38946+ pax_flags |= MF_PAX_SEGMEXEC;
38947+#endif
38948+
38949+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38950+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38951+ if (nx_enabled)
38952+ pax_flags &= ~MF_PAX_SEGMEXEC;
38953+ else
38954+ pax_flags &= ~MF_PAX_PAGEEXEC;
38955+ }
38956+#endif
38957+
38958+#ifdef CONFIG_PAX_EMUTRAMP
38959+ if (elf_phdata->p_flags & PF_EMUTRAMP)
38960+ pax_flags |= MF_PAX_EMUTRAMP;
38961+#endif
38962+
38963+#ifdef CONFIG_PAX_MPROTECT
38964+ if (elf_phdata->p_flags & PF_MPROTECT)
38965+ pax_flags |= MF_PAX_MPROTECT;
38966+#endif
38967+
38968+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
38969+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
38970+ pax_flags |= MF_PAX_RANDMMAP;
38971+#endif
38972+
38973+ return pax_flags;
38974+}
38975+#endif
38976+
38977+#ifdef CONFIG_PAX_PT_PAX_FLAGS
38978+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
38979+{
38980+ unsigned long pax_flags = 0UL;
38981+
38982+#ifdef CONFIG_PAX_PAGEEXEC
38983+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
38984+ pax_flags |= MF_PAX_PAGEEXEC;
38985+#endif
38986+
38987+#ifdef CONFIG_PAX_SEGMEXEC
38988+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
38989+ pax_flags |= MF_PAX_SEGMEXEC;
38990+#endif
38991+
38992+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38993+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38994+ if (nx_enabled)
38995+ pax_flags &= ~MF_PAX_SEGMEXEC;
38996+ else
38997+ pax_flags &= ~MF_PAX_PAGEEXEC;
38998+ }
38999+#endif
39000+
39001+#ifdef CONFIG_PAX_EMUTRAMP
39002+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39003+ pax_flags |= MF_PAX_EMUTRAMP;
39004+#endif
39005+
39006+#ifdef CONFIG_PAX_MPROTECT
39007+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39008+ pax_flags |= MF_PAX_MPROTECT;
39009+#endif
39010+
39011+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39012+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39013+ pax_flags |= MF_PAX_RANDMMAP;
39014+#endif
39015+
39016+ return pax_flags;
39017+}
39018+#endif
39019+
39020+#ifdef CONFIG_PAX_EI_PAX
39021+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39022+{
39023+ unsigned long pax_flags = 0UL;
39024+
39025+#ifdef CONFIG_PAX_PAGEEXEC
39026+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39027+ pax_flags |= MF_PAX_PAGEEXEC;
39028+#endif
39029+
39030+#ifdef CONFIG_PAX_SEGMEXEC
39031+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39032+ pax_flags |= MF_PAX_SEGMEXEC;
39033+#endif
39034+
39035+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39036+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39037+ if (nx_enabled)
39038+ pax_flags &= ~MF_PAX_SEGMEXEC;
39039+ else
39040+ pax_flags &= ~MF_PAX_PAGEEXEC;
39041+ }
39042+#endif
39043+
39044+#ifdef CONFIG_PAX_EMUTRAMP
39045+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39046+ pax_flags |= MF_PAX_EMUTRAMP;
39047+#endif
39048+
39049+#ifdef CONFIG_PAX_MPROTECT
39050+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39051+ pax_flags |= MF_PAX_MPROTECT;
39052+#endif
39053+
39054+#ifdef CONFIG_PAX_ASLR
39055+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39056+ pax_flags |= MF_PAX_RANDMMAP;
39057+#endif
39058+
39059+ return pax_flags;
39060+}
39061+#endif
39062+
39063+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39064+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39065+{
39066+ unsigned long pax_flags = 0UL;
39067+
39068+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39069+ unsigned long i;
39070+ int found_flags = 0;
39071+#endif
39072+
39073+#ifdef CONFIG_PAX_EI_PAX
39074+ pax_flags = pax_parse_ei_pax(elf_ex);
39075+#endif
39076+
39077+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39078+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39079+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39080+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39081+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39082+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39083+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39084+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39085+ return -EINVAL;
39086+
39087+#ifdef CONFIG_PAX_SOFTMODE
39088+ if (pax_softmode)
39089+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
39090+ else
39091+#endif
39092+
39093+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39094+ found_flags = 1;
39095+ break;
39096+ }
39097+#endif
39098+
39099+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39100+ if (found_flags == 0) {
39101+ struct elf_phdr phdr;
39102+ memset(&phdr, 0, sizeof(phdr));
39103+ phdr.p_flags = PF_NOEMUTRAMP;
39104+#ifdef CONFIG_PAX_SOFTMODE
39105+ if (pax_softmode)
39106+ pax_flags = pax_parse_softmode(&phdr);
39107+ else
39108+#endif
39109+ pax_flags = pax_parse_hardmode(&phdr);
39110+ }
39111+#endif
39112+
39113+
39114+ if (0 > pax_check_flags(&pax_flags))
39115+ return -EINVAL;
39116+
39117+ current->mm->pax_flags = pax_flags;
39118+ return 0;
39119+}
39120+#endif
39121+
39122 /*
39123 * These are the functions used to load ELF style executables and shared
39124 * libraries. There is no binary dependent code anywhere else.
39125@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
39126 {
39127 unsigned int random_variable = 0;
39128
39129+#ifdef CONFIG_PAX_RANDUSTACK
39130+ if (randomize_va_space)
39131+ return stack_top - current->mm->delta_stack;
39132+#endif
39133+
39134 if ((current->flags & PF_RANDOMIZE) &&
39135 !(current->personality & ADDR_NO_RANDOMIZE)) {
39136 random_variable = get_random_int() & STACK_RND_MASK;
39137@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
39138 unsigned long load_addr = 0, load_bias = 0;
39139 int load_addr_set = 0;
39140 char * elf_interpreter = NULL;
39141- unsigned long error;
39142+ unsigned long error = 0;
39143 struct elf_phdr *elf_ppnt, *elf_phdata;
39144 unsigned long elf_bss, elf_brk;
39145 int retval, i;
39146@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
39147 unsigned long start_code, end_code, start_data, end_data;
39148 unsigned long reloc_func_desc = 0;
39149 int executable_stack = EXSTACK_DEFAULT;
39150- unsigned long def_flags = 0;
39151 struct {
39152 struct elfhdr elf_ex;
39153 struct elfhdr interp_elf_ex;
39154 } *loc;
39155+ unsigned long pax_task_size = TASK_SIZE;
39156
39157 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39158 if (!loc) {
39159@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
39160
39161 /* OK, This is the point of no return */
39162 current->flags &= ~PF_FORKNOEXEC;
39163- current->mm->def_flags = def_flags;
39164+
39165+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39166+ current->mm->pax_flags = 0UL;
39167+#endif
39168+
39169+#ifdef CONFIG_PAX_DLRESOLVE
39170+ current->mm->call_dl_resolve = 0UL;
39171+#endif
39172+
39173+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39174+ current->mm->call_syscall = 0UL;
39175+#endif
39176+
39177+#ifdef CONFIG_PAX_ASLR
39178+ current->mm->delta_mmap = 0UL;
39179+ current->mm->delta_stack = 0UL;
39180+#endif
39181+
39182+ current->mm->def_flags = 0;
39183+
39184+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39185+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39186+ send_sig(SIGKILL, current, 0);
39187+ goto out_free_dentry;
39188+ }
39189+#endif
39190+
39191+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39192+ pax_set_initial_flags(bprm);
39193+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39194+ if (pax_set_initial_flags_func)
39195+ (pax_set_initial_flags_func)(bprm);
39196+#endif
39197+
39198+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39199+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
39200+ current->mm->context.user_cs_limit = PAGE_SIZE;
39201+ current->mm->def_flags |= VM_PAGEEXEC;
39202+ }
39203+#endif
39204+
39205+#ifdef CONFIG_PAX_SEGMEXEC
39206+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39207+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39208+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39209+ pax_task_size = SEGMEXEC_TASK_SIZE;
39210+ }
39211+#endif
39212+
39213+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39214+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39215+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39216+ put_cpu();
39217+ }
39218+#endif
39219
39220 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39221 may depend on the personality. */
39222 SET_PERSONALITY(loc->elf_ex);
39223+
39224+#ifdef CONFIG_PAX_ASLR
39225+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39226+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39227+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39228+ }
39229+#endif
39230+
39231+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39232+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39233+ executable_stack = EXSTACK_DISABLE_X;
39234+ current->personality &= ~READ_IMPLIES_EXEC;
39235+ } else
39236+#endif
39237+
39238 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39239 current->personality |= READ_IMPLIES_EXEC;
39240
39241@@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
39242 #else
39243 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39244 #endif
39245+
39246+#ifdef CONFIG_PAX_RANDMMAP
39247+ /* PaX: randomize base address at the default exe base if requested */
39248+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39249+#ifdef CONFIG_SPARC64
39250+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39251+#else
39252+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39253+#endif
39254+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39255+ elf_flags |= MAP_FIXED;
39256+ }
39257+#endif
39258+
39259 }
39260
39261 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39262@@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
39263 * allowed task size. Note that p_filesz must always be
39264 * <= p_memsz so it is only necessary to check p_memsz.
39265 */
39266- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39267- elf_ppnt->p_memsz > TASK_SIZE ||
39268- TASK_SIZE - elf_ppnt->p_memsz < k) {
39269+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39270+ elf_ppnt->p_memsz > pax_task_size ||
39271+ pax_task_size - elf_ppnt->p_memsz < k) {
39272 /* set_brk can never work. Avoid overflows. */
39273 send_sig(SIGKILL, current, 0);
39274 retval = -EINVAL;
39275@@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
39276 start_data += load_bias;
39277 end_data += load_bias;
39278
39279+#ifdef CONFIG_PAX_RANDMMAP
39280+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39281+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39282+#endif
39283+
39284 /* Calling set_brk effectively mmaps the pages that we need
39285 * for the bss and break sections. We must do this before
39286 * mapping in the interpreter, to make sure it doesn't wind
39287@@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
39288 goto out_free_dentry;
39289 }
39290 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39291- send_sig(SIGSEGV, current, 0);
39292- retval = -EFAULT; /* Nobody gets to see this, but.. */
39293- goto out_free_dentry;
39294+ /*
39295+ * This bss-zeroing can fail if the ELF
39296+ * file specifies odd protections. So
39297+ * we don't check the return value
39298+ */
39299 }
39300
39301 if (elf_interpreter) {
39302@@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
39303 unsigned long n = off;
39304 if (n > PAGE_SIZE)
39305 n = PAGE_SIZE;
39306- if (!dump_write(file, buf, n))
39307+ if (!dump_write(file, buf, n)) {
39308+ free_page((unsigned long)buf);
39309 return 0;
39310+ }
39311 off -= n;
39312 }
39313 free_page((unsigned long)buf);
39314@@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
39315 * Decide what to dump of a segment, part, all or none.
39316 */
39317 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39318- unsigned long mm_flags)
39319+ unsigned long mm_flags, long signr)
39320 {
39321 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39322
39323@@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
39324 if (vma->vm_file == NULL)
39325 return 0;
39326
39327- if (FILTER(MAPPED_PRIVATE))
39328+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39329 goto whole;
39330
39331 /*
39332@@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
39333 #undef DUMP_WRITE
39334
39335 #define DUMP_WRITE(addr, nr) \
39336+ do { \
39337+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
39338 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
39339- goto end_coredump;
39340+ goto end_coredump; \
39341+ } while (0);
39342
39343 static void fill_elf_header(struct elfhdr *elf, int segs,
39344 u16 machine, u32 flags, u8 osabi)
39345@@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
39346 {
39347 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39348 int i = 0;
39349- do
39350+ do {
39351 i += 2;
39352- while (auxv[i - 2] != AT_NULL);
39353+ } while (auxv[i - 2] != AT_NULL);
39354 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39355 }
39356
39357@@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
39358 phdr.p_offset = offset;
39359 phdr.p_vaddr = vma->vm_start;
39360 phdr.p_paddr = 0;
39361- phdr.p_filesz = vma_dump_size(vma, mm_flags);
39362+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
39363 phdr.p_memsz = vma->vm_end - vma->vm_start;
39364 offset += phdr.p_filesz;
39365 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39366@@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
39367 unsigned long addr;
39368 unsigned long end;
39369
39370- end = vma->vm_start + vma_dump_size(vma, mm_flags);
39371+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
39372
39373 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39374 struct page *page;
39375@@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
39376 page = get_dump_page(addr);
39377 if (page) {
39378 void *kaddr = kmap(page);
39379+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39380 stop = ((size += PAGE_SIZE) > limit) ||
39381 !dump_write(file, kaddr, PAGE_SIZE);
39382 kunmap(page);
39383@@ -2042,6 +2356,97 @@ out:
39384
39385 #endif /* USE_ELF_CORE_DUMP */
39386
39387+#ifdef CONFIG_PAX_MPROTECT
39388+/* PaX: non-PIC ELF libraries need relocations on their executable segments
39389+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
39390+ * we'll remove VM_MAYWRITE for good on RELRO segments.
39391+ *
39392+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39393+ * basis because we want to allow the common case and not the special ones.
39394+ */
39395+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39396+{
39397+ struct elfhdr elf_h;
39398+ struct elf_phdr elf_p;
39399+ unsigned long i;
39400+ unsigned long oldflags;
39401+ bool is_textrel_rw, is_textrel_rx, is_relro;
39402+
39403+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39404+ return;
39405+
39406+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39407+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39408+
39409+#ifdef CONFIG_PAX_ELFRELOCS
39410+ /* possible TEXTREL */
39411+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39412+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39413+#else
39414+ is_textrel_rw = false;
39415+ is_textrel_rx = false;
39416+#endif
39417+
39418+ /* possible RELRO */
39419+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
39420+
39421+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
39422+ return;
39423+
39424+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
39425+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
39426+
39427+#ifdef CONFIG_PAX_ETEXECRELOCS
39428+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39429+#else
39430+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
39431+#endif
39432+
39433+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39434+ !elf_check_arch(&elf_h) ||
39435+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
39436+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
39437+ return;
39438+
39439+ for (i = 0UL; i < elf_h.e_phnum; i++) {
39440+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
39441+ return;
39442+ switch (elf_p.p_type) {
39443+ case PT_DYNAMIC:
39444+ if (!is_textrel_rw && !is_textrel_rx)
39445+ continue;
39446+ i = 0UL;
39447+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
39448+ elf_dyn dyn;
39449+
39450+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
39451+ return;
39452+ if (dyn.d_tag == DT_NULL)
39453+ return;
39454+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
39455+ gr_log_textrel(vma);
39456+ if (is_textrel_rw)
39457+ vma->vm_flags |= VM_MAYWRITE;
39458+ else
39459+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
39460+ vma->vm_flags &= ~VM_MAYWRITE;
39461+ return;
39462+ }
39463+ i++;
39464+ }
39465+ return;
39466+
39467+ case PT_GNU_RELRO:
39468+ if (!is_relro)
39469+ continue;
39470+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
39471+ vma->vm_flags &= ~VM_MAYWRITE;
39472+ return;
39473+ }
39474+ }
39475+}
39476+#endif
39477+
39478 static int __init init_elf_binfmt(void)
39479 {
39480 return register_binfmt(&elf_format);
39481diff -urNp linux-2.6.32.45/fs/binfmt_flat.c linux-2.6.32.45/fs/binfmt_flat.c
39482--- linux-2.6.32.45/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
39483+++ linux-2.6.32.45/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
39484@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
39485 realdatastart = (unsigned long) -ENOMEM;
39486 printk("Unable to allocate RAM for process data, errno %d\n",
39487 (int)-realdatastart);
39488+ down_write(&current->mm->mmap_sem);
39489 do_munmap(current->mm, textpos, text_len);
39490+ up_write(&current->mm->mmap_sem);
39491 ret = realdatastart;
39492 goto err;
39493 }
39494@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
39495 }
39496 if (IS_ERR_VALUE(result)) {
39497 printk("Unable to read data+bss, errno %d\n", (int)-result);
39498+ down_write(&current->mm->mmap_sem);
39499 do_munmap(current->mm, textpos, text_len);
39500 do_munmap(current->mm, realdatastart, data_len + extra);
39501+ up_write(&current->mm->mmap_sem);
39502 ret = result;
39503 goto err;
39504 }
39505@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
39506 }
39507 if (IS_ERR_VALUE(result)) {
39508 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
39509+ down_write(&current->mm->mmap_sem);
39510 do_munmap(current->mm, textpos, text_len + data_len + extra +
39511 MAX_SHARED_LIBS * sizeof(unsigned long));
39512+ up_write(&current->mm->mmap_sem);
39513 ret = result;
39514 goto err;
39515 }
39516diff -urNp linux-2.6.32.45/fs/bio.c linux-2.6.32.45/fs/bio.c
39517--- linux-2.6.32.45/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
39518+++ linux-2.6.32.45/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
39519@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
39520
39521 i = 0;
39522 while (i < bio_slab_nr) {
39523- struct bio_slab *bslab = &bio_slabs[i];
39524+ bslab = &bio_slabs[i];
39525
39526 if (!bslab->slab && entry == -1)
39527 entry = i;
39528@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
39529 const int read = bio_data_dir(bio) == READ;
39530 struct bio_map_data *bmd = bio->bi_private;
39531 int i;
39532- char *p = bmd->sgvecs[0].iov_base;
39533+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
39534
39535 __bio_for_each_segment(bvec, bio, i, 0) {
39536 char *addr = page_address(bvec->bv_page);
39537diff -urNp linux-2.6.32.45/fs/block_dev.c linux-2.6.32.45/fs/block_dev.c
39538--- linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:35:29.000000000 -0400
39539+++ linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:34:00.000000000 -0400
39540@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
39541 else if (bdev->bd_contains == bdev)
39542 res = 0; /* is a whole device which isn't held */
39543
39544- else if (bdev->bd_contains->bd_holder == bd_claim)
39545+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
39546 res = 0; /* is a partition of a device that is being partitioned */
39547 else if (bdev->bd_contains->bd_holder != NULL)
39548 res = -EBUSY; /* is a partition of a held device */
39549diff -urNp linux-2.6.32.45/fs/btrfs/ctree.c linux-2.6.32.45/fs/btrfs/ctree.c
39550--- linux-2.6.32.45/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
39551+++ linux-2.6.32.45/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
39552@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
39553 free_extent_buffer(buf);
39554 add_root_to_dirty_list(root);
39555 } else {
39556- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
39557- parent_start = parent->start;
39558- else
39559+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
39560+ if (parent)
39561+ parent_start = parent->start;
39562+ else
39563+ parent_start = 0;
39564+ } else
39565 parent_start = 0;
39566
39567 WARN_ON(trans->transid != btrfs_header_generation(parent));
39568@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
39569
39570 ret = 0;
39571 if (slot == 0) {
39572- struct btrfs_disk_key disk_key;
39573 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
39574 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
39575 }
39576diff -urNp linux-2.6.32.45/fs/btrfs/disk-io.c linux-2.6.32.45/fs/btrfs/disk-io.c
39577--- linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
39578+++ linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
39579@@ -39,7 +39,7 @@
39580 #include "tree-log.h"
39581 #include "free-space-cache.h"
39582
39583-static struct extent_io_ops btree_extent_io_ops;
39584+static const struct extent_io_ops btree_extent_io_ops;
39585 static void end_workqueue_fn(struct btrfs_work *work);
39586 static void free_fs_root(struct btrfs_root *root);
39587
39588@@ -2607,7 +2607,7 @@ out:
39589 return 0;
39590 }
39591
39592-static struct extent_io_ops btree_extent_io_ops = {
39593+static const struct extent_io_ops btree_extent_io_ops = {
39594 .write_cache_pages_lock_hook = btree_lock_page_hook,
39595 .readpage_end_io_hook = btree_readpage_end_io_hook,
39596 .submit_bio_hook = btree_submit_bio_hook,
39597diff -urNp linux-2.6.32.45/fs/btrfs/extent_io.h linux-2.6.32.45/fs/btrfs/extent_io.h
39598--- linux-2.6.32.45/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
39599+++ linux-2.6.32.45/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
39600@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
39601 struct bio *bio, int mirror_num,
39602 unsigned long bio_flags);
39603 struct extent_io_ops {
39604- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
39605+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
39606 u64 start, u64 end, int *page_started,
39607 unsigned long *nr_written);
39608- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
39609- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
39610+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
39611+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
39612 extent_submit_bio_hook_t *submit_bio_hook;
39613- int (*merge_bio_hook)(struct page *page, unsigned long offset,
39614+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
39615 size_t size, struct bio *bio,
39616 unsigned long bio_flags);
39617- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
39618- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
39619+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
39620+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
39621 u64 start, u64 end,
39622 struct extent_state *state);
39623- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
39624+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
39625 u64 start, u64 end,
39626 struct extent_state *state);
39627- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39628+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39629 struct extent_state *state);
39630- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39631+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39632 struct extent_state *state, int uptodate);
39633- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
39634+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
39635 unsigned long old, unsigned long bits);
39636- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
39637+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
39638 unsigned long bits);
39639- int (*merge_extent_hook)(struct inode *inode,
39640+ int (* const merge_extent_hook)(struct inode *inode,
39641 struct extent_state *new,
39642 struct extent_state *other);
39643- int (*split_extent_hook)(struct inode *inode,
39644+ int (* const split_extent_hook)(struct inode *inode,
39645 struct extent_state *orig, u64 split);
39646- int (*write_cache_pages_lock_hook)(struct page *page);
39647+ int (* const write_cache_pages_lock_hook)(struct page *page);
39648 };
39649
39650 struct extent_io_tree {
39651@@ -88,7 +88,7 @@ struct extent_io_tree {
39652 u64 dirty_bytes;
39653 spinlock_t lock;
39654 spinlock_t buffer_lock;
39655- struct extent_io_ops *ops;
39656+ const struct extent_io_ops *ops;
39657 };
39658
39659 struct extent_state {
39660diff -urNp linux-2.6.32.45/fs/btrfs/extent-tree.c linux-2.6.32.45/fs/btrfs/extent-tree.c
39661--- linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
39662+++ linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
39663@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
39664 u64 group_start = group->key.objectid;
39665 new_extents = kmalloc(sizeof(*new_extents),
39666 GFP_NOFS);
39667+ if (!new_extents) {
39668+ ret = -ENOMEM;
39669+ goto out;
39670+ }
39671 nr_extents = 1;
39672 ret = get_new_locations(reloc_inode,
39673 extent_key,
39674diff -urNp linux-2.6.32.45/fs/btrfs/free-space-cache.c linux-2.6.32.45/fs/btrfs/free-space-cache.c
39675--- linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
39676+++ linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
39677@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
39678
39679 while(1) {
39680 if (entry->bytes < bytes || entry->offset < min_start) {
39681- struct rb_node *node;
39682-
39683 node = rb_next(&entry->offset_index);
39684 if (!node)
39685 break;
39686@@ -1226,7 +1224,7 @@ again:
39687 */
39688 while (entry->bitmap || found_bitmap ||
39689 (!entry->bitmap && entry->bytes < min_bytes)) {
39690- struct rb_node *node = rb_next(&entry->offset_index);
39691+ node = rb_next(&entry->offset_index);
39692
39693 if (entry->bitmap && entry->bytes > bytes + empty_size) {
39694 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
39695diff -urNp linux-2.6.32.45/fs/btrfs/inode.c linux-2.6.32.45/fs/btrfs/inode.c
39696--- linux-2.6.32.45/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
39697+++ linux-2.6.32.45/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
39698@@ -63,7 +63,7 @@ static const struct inode_operations btr
39699 static const struct address_space_operations btrfs_aops;
39700 static const struct address_space_operations btrfs_symlink_aops;
39701 static const struct file_operations btrfs_dir_file_operations;
39702-static struct extent_io_ops btrfs_extent_io_ops;
39703+static const struct extent_io_ops btrfs_extent_io_ops;
39704
39705 static struct kmem_cache *btrfs_inode_cachep;
39706 struct kmem_cache *btrfs_trans_handle_cachep;
39707@@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
39708 1, 0, NULL, GFP_NOFS);
39709 while (start < end) {
39710 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
39711+ BUG_ON(!async_cow);
39712 async_cow->inode = inode;
39713 async_cow->root = root;
39714 async_cow->locked_page = locked_page;
39715@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
39716 inline_size = btrfs_file_extent_inline_item_len(leaf,
39717 btrfs_item_nr(leaf, path->slots[0]));
39718 tmp = kmalloc(inline_size, GFP_NOFS);
39719+ if (!tmp)
39720+ return -ENOMEM;
39721 ptr = btrfs_file_extent_inline_start(item);
39722
39723 read_extent_buffer(leaf, tmp, ptr, inline_size);
39724@@ -5410,7 +5413,7 @@ fail:
39725 return -ENOMEM;
39726 }
39727
39728-static int btrfs_getattr(struct vfsmount *mnt,
39729+int btrfs_getattr(struct vfsmount *mnt,
39730 struct dentry *dentry, struct kstat *stat)
39731 {
39732 struct inode *inode = dentry->d_inode;
39733@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
39734 return 0;
39735 }
39736
39737+EXPORT_SYMBOL(btrfs_getattr);
39738+
39739+dev_t get_btrfs_dev_from_inode(struct inode *inode)
39740+{
39741+ return BTRFS_I(inode)->root->anon_super.s_dev;
39742+}
39743+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
39744+
39745 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
39746 struct inode *new_dir, struct dentry *new_dentry)
39747 {
39748@@ -5972,7 +5983,7 @@ static const struct file_operations btrf
39749 .fsync = btrfs_sync_file,
39750 };
39751
39752-static struct extent_io_ops btrfs_extent_io_ops = {
39753+static const struct extent_io_ops btrfs_extent_io_ops = {
39754 .fill_delalloc = run_delalloc_range,
39755 .submit_bio_hook = btrfs_submit_bio_hook,
39756 .merge_bio_hook = btrfs_merge_bio_hook,
39757diff -urNp linux-2.6.32.45/fs/btrfs/relocation.c linux-2.6.32.45/fs/btrfs/relocation.c
39758--- linux-2.6.32.45/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
39759+++ linux-2.6.32.45/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
39760@@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
39761 }
39762 spin_unlock(&rc->reloc_root_tree.lock);
39763
39764- BUG_ON((struct btrfs_root *)node->data != root);
39765+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
39766
39767 if (!del) {
39768 spin_lock(&rc->reloc_root_tree.lock);
39769diff -urNp linux-2.6.32.45/fs/btrfs/sysfs.c linux-2.6.32.45/fs/btrfs/sysfs.c
39770--- linux-2.6.32.45/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
39771+++ linux-2.6.32.45/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
39772@@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
39773 complete(&root->kobj_unregister);
39774 }
39775
39776-static struct sysfs_ops btrfs_super_attr_ops = {
39777+static const struct sysfs_ops btrfs_super_attr_ops = {
39778 .show = btrfs_super_attr_show,
39779 .store = btrfs_super_attr_store,
39780 };
39781
39782-static struct sysfs_ops btrfs_root_attr_ops = {
39783+static const struct sysfs_ops btrfs_root_attr_ops = {
39784 .show = btrfs_root_attr_show,
39785 .store = btrfs_root_attr_store,
39786 };
39787diff -urNp linux-2.6.32.45/fs/buffer.c linux-2.6.32.45/fs/buffer.c
39788--- linux-2.6.32.45/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
39789+++ linux-2.6.32.45/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
39790@@ -25,6 +25,7 @@
39791 #include <linux/percpu.h>
39792 #include <linux/slab.h>
39793 #include <linux/capability.h>
39794+#include <linux/security.h>
39795 #include <linux/blkdev.h>
39796 #include <linux/file.h>
39797 #include <linux/quotaops.h>
39798diff -urNp linux-2.6.32.45/fs/cachefiles/bind.c linux-2.6.32.45/fs/cachefiles/bind.c
39799--- linux-2.6.32.45/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
39800+++ linux-2.6.32.45/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
39801@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
39802 args);
39803
39804 /* start by checking things over */
39805- ASSERT(cache->fstop_percent >= 0 &&
39806- cache->fstop_percent < cache->fcull_percent &&
39807+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
39808 cache->fcull_percent < cache->frun_percent &&
39809 cache->frun_percent < 100);
39810
39811- ASSERT(cache->bstop_percent >= 0 &&
39812- cache->bstop_percent < cache->bcull_percent &&
39813+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
39814 cache->bcull_percent < cache->brun_percent &&
39815 cache->brun_percent < 100);
39816
39817diff -urNp linux-2.6.32.45/fs/cachefiles/daemon.c linux-2.6.32.45/fs/cachefiles/daemon.c
39818--- linux-2.6.32.45/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
39819+++ linux-2.6.32.45/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
39820@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
39821 if (test_bit(CACHEFILES_DEAD, &cache->flags))
39822 return -EIO;
39823
39824- if (datalen < 0 || datalen > PAGE_SIZE - 1)
39825+ if (datalen > PAGE_SIZE - 1)
39826 return -EOPNOTSUPP;
39827
39828 /* drag the command string into the kernel so we can parse it */
39829@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
39830 if (args[0] != '%' || args[1] != '\0')
39831 return -EINVAL;
39832
39833- if (fstop < 0 || fstop >= cache->fcull_percent)
39834+ if (fstop >= cache->fcull_percent)
39835 return cachefiles_daemon_range_error(cache, args);
39836
39837 cache->fstop_percent = fstop;
39838@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
39839 if (args[0] != '%' || args[1] != '\0')
39840 return -EINVAL;
39841
39842- if (bstop < 0 || bstop >= cache->bcull_percent)
39843+ if (bstop >= cache->bcull_percent)
39844 return cachefiles_daemon_range_error(cache, args);
39845
39846 cache->bstop_percent = bstop;
39847diff -urNp linux-2.6.32.45/fs/cachefiles/internal.h linux-2.6.32.45/fs/cachefiles/internal.h
39848--- linux-2.6.32.45/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
39849+++ linux-2.6.32.45/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
39850@@ -56,7 +56,7 @@ struct cachefiles_cache {
39851 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
39852 struct rb_root active_nodes; /* active nodes (can't be culled) */
39853 rwlock_t active_lock; /* lock for active_nodes */
39854- atomic_t gravecounter; /* graveyard uniquifier */
39855+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
39856 unsigned frun_percent; /* when to stop culling (% files) */
39857 unsigned fcull_percent; /* when to start culling (% files) */
39858 unsigned fstop_percent; /* when to stop allocating (% files) */
39859@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
39860 * proc.c
39861 */
39862 #ifdef CONFIG_CACHEFILES_HISTOGRAM
39863-extern atomic_t cachefiles_lookup_histogram[HZ];
39864-extern atomic_t cachefiles_mkdir_histogram[HZ];
39865-extern atomic_t cachefiles_create_histogram[HZ];
39866+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
39867+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
39868+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
39869
39870 extern int __init cachefiles_proc_init(void);
39871 extern void cachefiles_proc_cleanup(void);
39872 static inline
39873-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
39874+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
39875 {
39876 unsigned long jif = jiffies - start_jif;
39877 if (jif >= HZ)
39878 jif = HZ - 1;
39879- atomic_inc(&histogram[jif]);
39880+ atomic_inc_unchecked(&histogram[jif]);
39881 }
39882
39883 #else
39884diff -urNp linux-2.6.32.45/fs/cachefiles/namei.c linux-2.6.32.45/fs/cachefiles/namei.c
39885--- linux-2.6.32.45/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
39886+++ linux-2.6.32.45/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
39887@@ -250,7 +250,7 @@ try_again:
39888 /* first step is to make up a grave dentry in the graveyard */
39889 sprintf(nbuffer, "%08x%08x",
39890 (uint32_t) get_seconds(),
39891- (uint32_t) atomic_inc_return(&cache->gravecounter));
39892+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
39893
39894 /* do the multiway lock magic */
39895 trap = lock_rename(cache->graveyard, dir);
39896diff -urNp linux-2.6.32.45/fs/cachefiles/proc.c linux-2.6.32.45/fs/cachefiles/proc.c
39897--- linux-2.6.32.45/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
39898+++ linux-2.6.32.45/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
39899@@ -14,9 +14,9 @@
39900 #include <linux/seq_file.h>
39901 #include "internal.h"
39902
39903-atomic_t cachefiles_lookup_histogram[HZ];
39904-atomic_t cachefiles_mkdir_histogram[HZ];
39905-atomic_t cachefiles_create_histogram[HZ];
39906+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
39907+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
39908+atomic_unchecked_t cachefiles_create_histogram[HZ];
39909
39910 /*
39911 * display the latency histogram
39912@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
39913 return 0;
39914 default:
39915 index = (unsigned long) v - 3;
39916- x = atomic_read(&cachefiles_lookup_histogram[index]);
39917- y = atomic_read(&cachefiles_mkdir_histogram[index]);
39918- z = atomic_read(&cachefiles_create_histogram[index]);
39919+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
39920+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
39921+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
39922 if (x == 0 && y == 0 && z == 0)
39923 return 0;
39924
39925diff -urNp linux-2.6.32.45/fs/cachefiles/rdwr.c linux-2.6.32.45/fs/cachefiles/rdwr.c
39926--- linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
39927+++ linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
39928@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
39929 old_fs = get_fs();
39930 set_fs(KERNEL_DS);
39931 ret = file->f_op->write(
39932- file, (const void __user *) data, len, &pos);
39933+ file, (__force const void __user *) data, len, &pos);
39934 set_fs(old_fs);
39935 kunmap(page);
39936 if (ret != len)
39937diff -urNp linux-2.6.32.45/fs/cifs/cifs_debug.c linux-2.6.32.45/fs/cifs/cifs_debug.c
39938--- linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
39939+++ linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
39940@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
39941 tcon = list_entry(tmp3,
39942 struct cifsTconInfo,
39943 tcon_list);
39944- atomic_set(&tcon->num_smbs_sent, 0);
39945- atomic_set(&tcon->num_writes, 0);
39946- atomic_set(&tcon->num_reads, 0);
39947- atomic_set(&tcon->num_oplock_brks, 0);
39948- atomic_set(&tcon->num_opens, 0);
39949- atomic_set(&tcon->num_posixopens, 0);
39950- atomic_set(&tcon->num_posixmkdirs, 0);
39951- atomic_set(&tcon->num_closes, 0);
39952- atomic_set(&tcon->num_deletes, 0);
39953- atomic_set(&tcon->num_mkdirs, 0);
39954- atomic_set(&tcon->num_rmdirs, 0);
39955- atomic_set(&tcon->num_renames, 0);
39956- atomic_set(&tcon->num_t2renames, 0);
39957- atomic_set(&tcon->num_ffirst, 0);
39958- atomic_set(&tcon->num_fnext, 0);
39959- atomic_set(&tcon->num_fclose, 0);
39960- atomic_set(&tcon->num_hardlinks, 0);
39961- atomic_set(&tcon->num_symlinks, 0);
39962- atomic_set(&tcon->num_locks, 0);
39963+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
39964+ atomic_set_unchecked(&tcon->num_writes, 0);
39965+ atomic_set_unchecked(&tcon->num_reads, 0);
39966+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
39967+ atomic_set_unchecked(&tcon->num_opens, 0);
39968+ atomic_set_unchecked(&tcon->num_posixopens, 0);
39969+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
39970+ atomic_set_unchecked(&tcon->num_closes, 0);
39971+ atomic_set_unchecked(&tcon->num_deletes, 0);
39972+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
39973+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
39974+ atomic_set_unchecked(&tcon->num_renames, 0);
39975+ atomic_set_unchecked(&tcon->num_t2renames, 0);
39976+ atomic_set_unchecked(&tcon->num_ffirst, 0);
39977+ atomic_set_unchecked(&tcon->num_fnext, 0);
39978+ atomic_set_unchecked(&tcon->num_fclose, 0);
39979+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
39980+ atomic_set_unchecked(&tcon->num_symlinks, 0);
39981+ atomic_set_unchecked(&tcon->num_locks, 0);
39982 }
39983 }
39984 }
39985@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
39986 if (tcon->need_reconnect)
39987 seq_puts(m, "\tDISCONNECTED ");
39988 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
39989- atomic_read(&tcon->num_smbs_sent),
39990- atomic_read(&tcon->num_oplock_brks));
39991+ atomic_read_unchecked(&tcon->num_smbs_sent),
39992+ atomic_read_unchecked(&tcon->num_oplock_brks));
39993 seq_printf(m, "\nReads: %d Bytes: %lld",
39994- atomic_read(&tcon->num_reads),
39995+ atomic_read_unchecked(&tcon->num_reads),
39996 (long long)(tcon->bytes_read));
39997 seq_printf(m, "\nWrites: %d Bytes: %lld",
39998- atomic_read(&tcon->num_writes),
39999+ atomic_read_unchecked(&tcon->num_writes),
40000 (long long)(tcon->bytes_written));
40001 seq_printf(m, "\nFlushes: %d",
40002- atomic_read(&tcon->num_flushes));
40003+ atomic_read_unchecked(&tcon->num_flushes));
40004 seq_printf(m, "\nLocks: %d HardLinks: %d "
40005 "Symlinks: %d",
40006- atomic_read(&tcon->num_locks),
40007- atomic_read(&tcon->num_hardlinks),
40008- atomic_read(&tcon->num_symlinks));
40009+ atomic_read_unchecked(&tcon->num_locks),
40010+ atomic_read_unchecked(&tcon->num_hardlinks),
40011+ atomic_read_unchecked(&tcon->num_symlinks));
40012 seq_printf(m, "\nOpens: %d Closes: %d "
40013 "Deletes: %d",
40014- atomic_read(&tcon->num_opens),
40015- atomic_read(&tcon->num_closes),
40016- atomic_read(&tcon->num_deletes));
40017+ atomic_read_unchecked(&tcon->num_opens),
40018+ atomic_read_unchecked(&tcon->num_closes),
40019+ atomic_read_unchecked(&tcon->num_deletes));
40020 seq_printf(m, "\nPosix Opens: %d "
40021 "Posix Mkdirs: %d",
40022- atomic_read(&tcon->num_posixopens),
40023- atomic_read(&tcon->num_posixmkdirs));
40024+ atomic_read_unchecked(&tcon->num_posixopens),
40025+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40026 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40027- atomic_read(&tcon->num_mkdirs),
40028- atomic_read(&tcon->num_rmdirs));
40029+ atomic_read_unchecked(&tcon->num_mkdirs),
40030+ atomic_read_unchecked(&tcon->num_rmdirs));
40031 seq_printf(m, "\nRenames: %d T2 Renames %d",
40032- atomic_read(&tcon->num_renames),
40033- atomic_read(&tcon->num_t2renames));
40034+ atomic_read_unchecked(&tcon->num_renames),
40035+ atomic_read_unchecked(&tcon->num_t2renames));
40036 seq_printf(m, "\nFindFirst: %d FNext %d "
40037 "FClose %d",
40038- atomic_read(&tcon->num_ffirst),
40039- atomic_read(&tcon->num_fnext),
40040- atomic_read(&tcon->num_fclose));
40041+ atomic_read_unchecked(&tcon->num_ffirst),
40042+ atomic_read_unchecked(&tcon->num_fnext),
40043+ atomic_read_unchecked(&tcon->num_fclose));
40044 }
40045 }
40046 }
40047diff -urNp linux-2.6.32.45/fs/cifs/cifsfs.c linux-2.6.32.45/fs/cifs/cifsfs.c
40048--- linux-2.6.32.45/fs/cifs/cifsfs.c 2011-03-27 14:31:47.000000000 -0400
40049+++ linux-2.6.32.45/fs/cifs/cifsfs.c 2011-08-25 17:17:57.000000000 -0400
40050@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
40051 cifs_req_cachep = kmem_cache_create("cifs_request",
40052 CIFSMaxBufSize +
40053 MAX_CIFS_HDR_SIZE, 0,
40054- SLAB_HWCACHE_ALIGN, NULL);
40055+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40056 if (cifs_req_cachep == NULL)
40057 return -ENOMEM;
40058
40059@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
40060 efficient to alloc 1 per page off the slab compared to 17K (5page)
40061 alloc of large cifs buffers even when page debugging is on */
40062 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40063- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40064+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40065 NULL);
40066 if (cifs_sm_req_cachep == NULL) {
40067 mempool_destroy(cifs_req_poolp);
40068@@ -991,8 +991,8 @@ init_cifs(void)
40069 atomic_set(&bufAllocCount, 0);
40070 atomic_set(&smBufAllocCount, 0);
40071 #ifdef CONFIG_CIFS_STATS2
40072- atomic_set(&totBufAllocCount, 0);
40073- atomic_set(&totSmBufAllocCount, 0);
40074+ atomic_set_unchecked(&totBufAllocCount, 0);
40075+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40076 #endif /* CONFIG_CIFS_STATS2 */
40077
40078 atomic_set(&midCount, 0);
40079diff -urNp linux-2.6.32.45/fs/cifs/cifsglob.h linux-2.6.32.45/fs/cifs/cifsglob.h
40080--- linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:35:29.000000000 -0400
40081+++ linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-25 17:17:57.000000000 -0400
40082@@ -252,28 +252,28 @@ struct cifsTconInfo {
40083 __u16 Flags; /* optional support bits */
40084 enum statusEnum tidStatus;
40085 #ifdef CONFIG_CIFS_STATS
40086- atomic_t num_smbs_sent;
40087- atomic_t num_writes;
40088- atomic_t num_reads;
40089- atomic_t num_flushes;
40090- atomic_t num_oplock_brks;
40091- atomic_t num_opens;
40092- atomic_t num_closes;
40093- atomic_t num_deletes;
40094- atomic_t num_mkdirs;
40095- atomic_t num_posixopens;
40096- atomic_t num_posixmkdirs;
40097- atomic_t num_rmdirs;
40098- atomic_t num_renames;
40099- atomic_t num_t2renames;
40100- atomic_t num_ffirst;
40101- atomic_t num_fnext;
40102- atomic_t num_fclose;
40103- atomic_t num_hardlinks;
40104- atomic_t num_symlinks;
40105- atomic_t num_locks;
40106- atomic_t num_acl_get;
40107- atomic_t num_acl_set;
40108+ atomic_unchecked_t num_smbs_sent;
40109+ atomic_unchecked_t num_writes;
40110+ atomic_unchecked_t num_reads;
40111+ atomic_unchecked_t num_flushes;
40112+ atomic_unchecked_t num_oplock_brks;
40113+ atomic_unchecked_t num_opens;
40114+ atomic_unchecked_t num_closes;
40115+ atomic_unchecked_t num_deletes;
40116+ atomic_unchecked_t num_mkdirs;
40117+ atomic_unchecked_t num_posixopens;
40118+ atomic_unchecked_t num_posixmkdirs;
40119+ atomic_unchecked_t num_rmdirs;
40120+ atomic_unchecked_t num_renames;
40121+ atomic_unchecked_t num_t2renames;
40122+ atomic_unchecked_t num_ffirst;
40123+ atomic_unchecked_t num_fnext;
40124+ atomic_unchecked_t num_fclose;
40125+ atomic_unchecked_t num_hardlinks;
40126+ atomic_unchecked_t num_symlinks;
40127+ atomic_unchecked_t num_locks;
40128+ atomic_unchecked_t num_acl_get;
40129+ atomic_unchecked_t num_acl_set;
40130 #ifdef CONFIG_CIFS_STATS2
40131 unsigned long long time_writes;
40132 unsigned long long time_reads;
40133@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
40134 }
40135
40136 #ifdef CONFIG_CIFS_STATS
40137-#define cifs_stats_inc atomic_inc
40138+#define cifs_stats_inc atomic_inc_unchecked
40139
40140 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
40141 unsigned int bytes)
40142@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
40143 /* Various Debug counters */
40144 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40145 #ifdef CONFIG_CIFS_STATS2
40146-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
40147-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
40148+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
40149+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
40150 #endif
40151 GLOBAL_EXTERN atomic_t smBufAllocCount;
40152 GLOBAL_EXTERN atomic_t midCount;
40153diff -urNp linux-2.6.32.45/fs/cifs/link.c linux-2.6.32.45/fs/cifs/link.c
40154--- linux-2.6.32.45/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
40155+++ linux-2.6.32.45/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
40156@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
40157
40158 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40159 {
40160- char *p = nd_get_link(nd);
40161+ const char *p = nd_get_link(nd);
40162 if (!IS_ERR(p))
40163 kfree(p);
40164 }
40165diff -urNp linux-2.6.32.45/fs/cifs/misc.c linux-2.6.32.45/fs/cifs/misc.c
40166--- linux-2.6.32.45/fs/cifs/misc.c 2011-03-27 14:31:47.000000000 -0400
40167+++ linux-2.6.32.45/fs/cifs/misc.c 2011-08-25 17:17:57.000000000 -0400
40168@@ -155,7 +155,7 @@ cifs_buf_get(void)
40169 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
40170 atomic_inc(&bufAllocCount);
40171 #ifdef CONFIG_CIFS_STATS2
40172- atomic_inc(&totBufAllocCount);
40173+ atomic_inc_unchecked(&totBufAllocCount);
40174 #endif /* CONFIG_CIFS_STATS2 */
40175 }
40176
40177@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
40178 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
40179 atomic_inc(&smBufAllocCount);
40180 #ifdef CONFIG_CIFS_STATS2
40181- atomic_inc(&totSmBufAllocCount);
40182+ atomic_inc_unchecked(&totSmBufAllocCount);
40183 #endif /* CONFIG_CIFS_STATS2 */
40184
40185 }
40186diff -urNp linux-2.6.32.45/fs/coda/cache.c linux-2.6.32.45/fs/coda/cache.c
40187--- linux-2.6.32.45/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
40188+++ linux-2.6.32.45/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
40189@@ -24,14 +24,14 @@
40190 #include <linux/coda_fs_i.h>
40191 #include <linux/coda_cache.h>
40192
40193-static atomic_t permission_epoch = ATOMIC_INIT(0);
40194+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40195
40196 /* replace or extend an acl cache hit */
40197 void coda_cache_enter(struct inode *inode, int mask)
40198 {
40199 struct coda_inode_info *cii = ITOC(inode);
40200
40201- cii->c_cached_epoch = atomic_read(&permission_epoch);
40202+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40203 if (cii->c_uid != current_fsuid()) {
40204 cii->c_uid = current_fsuid();
40205 cii->c_cached_perm = mask;
40206@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
40207 void coda_cache_clear_inode(struct inode *inode)
40208 {
40209 struct coda_inode_info *cii = ITOC(inode);
40210- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40211+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40212 }
40213
40214 /* remove all acl caches */
40215 void coda_cache_clear_all(struct super_block *sb)
40216 {
40217- atomic_inc(&permission_epoch);
40218+ atomic_inc_unchecked(&permission_epoch);
40219 }
40220
40221
40222@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
40223
40224 hit = (mask & cii->c_cached_perm) == mask &&
40225 cii->c_uid == current_fsuid() &&
40226- cii->c_cached_epoch == atomic_read(&permission_epoch);
40227+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40228
40229 return hit;
40230 }
40231diff -urNp linux-2.6.32.45/fs/compat_binfmt_elf.c linux-2.6.32.45/fs/compat_binfmt_elf.c
40232--- linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
40233+++ linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
40234@@ -29,10 +29,12 @@
40235 #undef elfhdr
40236 #undef elf_phdr
40237 #undef elf_note
40238+#undef elf_dyn
40239 #undef elf_addr_t
40240 #define elfhdr elf32_hdr
40241 #define elf_phdr elf32_phdr
40242 #define elf_note elf32_note
40243+#define elf_dyn Elf32_Dyn
40244 #define elf_addr_t Elf32_Addr
40245
40246 /*
40247diff -urNp linux-2.6.32.45/fs/compat.c linux-2.6.32.45/fs/compat.c
40248--- linux-2.6.32.45/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
40249+++ linux-2.6.32.45/fs/compat.c 2011-08-11 19:56:56.000000000 -0400
40250@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
40251
40252 struct compat_readdir_callback {
40253 struct compat_old_linux_dirent __user *dirent;
40254+ struct file * file;
40255 int result;
40256 };
40257
40258@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
40259 buf->result = -EOVERFLOW;
40260 return -EOVERFLOW;
40261 }
40262+
40263+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40264+ return 0;
40265+
40266 buf->result++;
40267 dirent = buf->dirent;
40268 if (!access_ok(VERIFY_WRITE, dirent,
40269@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
40270
40271 buf.result = 0;
40272 buf.dirent = dirent;
40273+ buf.file = file;
40274
40275 error = vfs_readdir(file, compat_fillonedir, &buf);
40276 if (buf.result)
40277@@ -899,6 +905,7 @@ struct compat_linux_dirent {
40278 struct compat_getdents_callback {
40279 struct compat_linux_dirent __user *current_dir;
40280 struct compat_linux_dirent __user *previous;
40281+ struct file * file;
40282 int count;
40283 int error;
40284 };
40285@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
40286 buf->error = -EOVERFLOW;
40287 return -EOVERFLOW;
40288 }
40289+
40290+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40291+ return 0;
40292+
40293 dirent = buf->previous;
40294 if (dirent) {
40295 if (__put_user(offset, &dirent->d_off))
40296@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
40297 buf.previous = NULL;
40298 buf.count = count;
40299 buf.error = 0;
40300+ buf.file = file;
40301
40302 error = vfs_readdir(file, compat_filldir, &buf);
40303 if (error >= 0)
40304@@ -987,6 +999,7 @@ out:
40305 struct compat_getdents_callback64 {
40306 struct linux_dirent64 __user *current_dir;
40307 struct linux_dirent64 __user *previous;
40308+ struct file * file;
40309 int count;
40310 int error;
40311 };
40312@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
40313 buf->error = -EINVAL; /* only used if we fail.. */
40314 if (reclen > buf->count)
40315 return -EINVAL;
40316+
40317+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40318+ return 0;
40319+
40320 dirent = buf->previous;
40321
40322 if (dirent) {
40323@@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
40324 buf.previous = NULL;
40325 buf.count = count;
40326 buf.error = 0;
40327+ buf.file = file;
40328
40329 error = vfs_readdir(file, compat_filldir64, &buf);
40330 if (error >= 0)
40331@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
40332 * verify all the pointers
40333 */
40334 ret = -EINVAL;
40335- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
40336+ if (nr_segs > UIO_MAXIOV)
40337 goto out;
40338 if (!file->f_op)
40339 goto out;
40340@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
40341 compat_uptr_t __user *envp,
40342 struct pt_regs * regs)
40343 {
40344+#ifdef CONFIG_GRKERNSEC
40345+ struct file *old_exec_file;
40346+ struct acl_subject_label *old_acl;
40347+ struct rlimit old_rlim[RLIM_NLIMITS];
40348+#endif
40349 struct linux_binprm *bprm;
40350 struct file *file;
40351 struct files_struct *displaced;
40352 bool clear_in_exec;
40353 int retval;
40354+ const struct cred *cred = current_cred();
40355+
40356+ /*
40357+ * We move the actual failure in case of RLIMIT_NPROC excess from
40358+ * set*uid() to execve() because too many poorly written programs
40359+ * don't check setuid() return code. Here we additionally recheck
40360+ * whether NPROC limit is still exceeded.
40361+ */
40362+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40363+
40364+ if ((current->flags & PF_NPROC_EXCEEDED) &&
40365+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40366+ retval = -EAGAIN;
40367+ goto out_ret;
40368+ }
40369+
40370+ /* We're below the limit (still or again), so we don't want to make
40371+ * further execve() calls fail. */
40372+ current->flags &= ~PF_NPROC_EXCEEDED;
40373
40374 retval = unshare_files(&displaced);
40375 if (retval)
40376@@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
40377 bprm->filename = filename;
40378 bprm->interp = filename;
40379
40380+ if (gr_process_user_ban()) {
40381+ retval = -EPERM;
40382+ goto out_file;
40383+ }
40384+
40385+ retval = -EACCES;
40386+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
40387+ goto out_file;
40388+
40389 retval = bprm_mm_init(bprm);
40390 if (retval)
40391 goto out_file;
40392@@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
40393 if (retval < 0)
40394 goto out;
40395
40396+ if (!gr_tpe_allow(file)) {
40397+ retval = -EACCES;
40398+ goto out;
40399+ }
40400+
40401+ if (gr_check_crash_exec(file)) {
40402+ retval = -EACCES;
40403+ goto out;
40404+ }
40405+
40406+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40407+
40408+ gr_handle_exec_args_compat(bprm, argv);
40409+
40410+#ifdef CONFIG_GRKERNSEC
40411+ old_acl = current->acl;
40412+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40413+ old_exec_file = current->exec_file;
40414+ get_file(file);
40415+ current->exec_file = file;
40416+#endif
40417+
40418+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40419+ bprm->unsafe & LSM_UNSAFE_SHARE);
40420+ if (retval < 0)
40421+ goto out_fail;
40422+
40423 retval = search_binary_handler(bprm, regs);
40424 if (retval < 0)
40425- goto out;
40426+ goto out_fail;
40427+#ifdef CONFIG_GRKERNSEC
40428+ if (old_exec_file)
40429+ fput(old_exec_file);
40430+#endif
40431
40432 /* execve succeeded */
40433 current->fs->in_exec = 0;
40434@@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
40435 put_files_struct(displaced);
40436 return retval;
40437
40438+out_fail:
40439+#ifdef CONFIG_GRKERNSEC
40440+ current->acl = old_acl;
40441+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40442+ fput(current->exec_file);
40443+ current->exec_file = old_exec_file;
40444+#endif
40445+
40446 out:
40447 if (bprm->mm) {
40448 acct_arg_size(bprm, 0);
40449@@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat
40450 struct fdtable *fdt;
40451 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40452
40453+ pax_track_stack();
40454+
40455 if (n < 0)
40456 goto out_nofds;
40457
40458diff -urNp linux-2.6.32.45/fs/compat_ioctl.c linux-2.6.32.45/fs/compat_ioctl.c
40459--- linux-2.6.32.45/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
40460+++ linux-2.6.32.45/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
40461@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
40462 up = (struct compat_video_spu_palette __user *) arg;
40463 err = get_user(palp, &up->palette);
40464 err |= get_user(length, &up->length);
40465+ if (err)
40466+ return -EFAULT;
40467
40468 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40469 err = put_user(compat_ptr(palp), &up_native->palette);
40470diff -urNp linux-2.6.32.45/fs/configfs/dir.c linux-2.6.32.45/fs/configfs/dir.c
40471--- linux-2.6.32.45/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40472+++ linux-2.6.32.45/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
40473@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
40474 }
40475 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40476 struct configfs_dirent *next;
40477- const char * name;
40478+ const unsigned char * name;
40479+ char d_name[sizeof(next->s_dentry->d_iname)];
40480 int len;
40481
40482 next = list_entry(p, struct configfs_dirent,
40483@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
40484 continue;
40485
40486 name = configfs_get_name(next);
40487- len = strlen(name);
40488+ if (next->s_dentry && name == next->s_dentry->d_iname) {
40489+ len = next->s_dentry->d_name.len;
40490+ memcpy(d_name, name, len);
40491+ name = d_name;
40492+ } else
40493+ len = strlen(name);
40494 if (next->s_dentry)
40495 ino = next->s_dentry->d_inode->i_ino;
40496 else
40497diff -urNp linux-2.6.32.45/fs/dcache.c linux-2.6.32.45/fs/dcache.c
40498--- linux-2.6.32.45/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
40499+++ linux-2.6.32.45/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
40500@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
40501
40502 static struct kmem_cache *dentry_cache __read_mostly;
40503
40504-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
40505-
40506 /*
40507 * This is the single most critical data structure when it comes
40508 * to the dcache: the hashtable for lookups. Somebody should try
40509@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
40510 mempages -= reserve;
40511
40512 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40513- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40514+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40515
40516 dcache_init();
40517 inode_init();
40518diff -urNp linux-2.6.32.45/fs/dlm/lockspace.c linux-2.6.32.45/fs/dlm/lockspace.c
40519--- linux-2.6.32.45/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
40520+++ linux-2.6.32.45/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
40521@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
40522 kfree(ls);
40523 }
40524
40525-static struct sysfs_ops dlm_attr_ops = {
40526+static const struct sysfs_ops dlm_attr_ops = {
40527 .show = dlm_attr_show,
40528 .store = dlm_attr_store,
40529 };
40530diff -urNp linux-2.6.32.45/fs/ecryptfs/inode.c linux-2.6.32.45/fs/ecryptfs/inode.c
40531--- linux-2.6.32.45/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40532+++ linux-2.6.32.45/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40533@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
40534 old_fs = get_fs();
40535 set_fs(get_ds());
40536 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40537- (char __user *)lower_buf,
40538+ (__force char __user *)lower_buf,
40539 lower_bufsiz);
40540 set_fs(old_fs);
40541 if (rc < 0)
40542@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
40543 }
40544 old_fs = get_fs();
40545 set_fs(get_ds());
40546- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40547+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
40548 set_fs(old_fs);
40549 if (rc < 0)
40550 goto out_free;
40551diff -urNp linux-2.6.32.45/fs/exec.c linux-2.6.32.45/fs/exec.c
40552--- linux-2.6.32.45/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
40553+++ linux-2.6.32.45/fs/exec.c 2011-08-11 19:56:19.000000000 -0400
40554@@ -56,12 +56,24 @@
40555 #include <linux/fsnotify.h>
40556 #include <linux/fs_struct.h>
40557 #include <linux/pipe_fs_i.h>
40558+#include <linux/random.h>
40559+#include <linux/seq_file.h>
40560+
40561+#ifdef CONFIG_PAX_REFCOUNT
40562+#include <linux/kallsyms.h>
40563+#include <linux/kdebug.h>
40564+#endif
40565
40566 #include <asm/uaccess.h>
40567 #include <asm/mmu_context.h>
40568 #include <asm/tlb.h>
40569 #include "internal.h"
40570
40571+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
40572+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
40573+EXPORT_SYMBOL(pax_set_initial_flags_func);
40574+#endif
40575+
40576 int core_uses_pid;
40577 char core_pattern[CORENAME_MAX_SIZE] = "core";
40578 unsigned int core_pipe_limit;
40579@@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
40580 goto out;
40581
40582 file = do_filp_open(AT_FDCWD, tmp,
40583- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40584+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40585 MAY_READ | MAY_EXEC | MAY_OPEN);
40586 putname(tmp);
40587 error = PTR_ERR(file);
40588@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
40589 int write)
40590 {
40591 struct page *page;
40592- int ret;
40593
40594-#ifdef CONFIG_STACK_GROWSUP
40595- if (write) {
40596- ret = expand_stack_downwards(bprm->vma, pos);
40597- if (ret < 0)
40598- return NULL;
40599- }
40600-#endif
40601- ret = get_user_pages(current, bprm->mm, pos,
40602- 1, write, 1, &page, NULL);
40603- if (ret <= 0)
40604+ if (0 > expand_stack_downwards(bprm->vma, pos))
40605+ return NULL;
40606+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
40607 return NULL;
40608
40609 if (write) {
40610@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
40611 vma->vm_end = STACK_TOP_MAX;
40612 vma->vm_start = vma->vm_end - PAGE_SIZE;
40613 vma->vm_flags = VM_STACK_FLAGS;
40614+
40615+#ifdef CONFIG_PAX_SEGMEXEC
40616+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
40617+#endif
40618+
40619 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
40620
40621 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
40622@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
40623 mm->stack_vm = mm->total_vm = 1;
40624 up_write(&mm->mmap_sem);
40625 bprm->p = vma->vm_end - sizeof(void *);
40626+
40627+#ifdef CONFIG_PAX_RANDUSTACK
40628+ if (randomize_va_space)
40629+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
40630+#endif
40631+
40632 return 0;
40633 err:
40634 up_write(&mm->mmap_sem);
40635@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
40636 int r;
40637 mm_segment_t oldfs = get_fs();
40638 set_fs(KERNEL_DS);
40639- r = copy_strings(argc, (char __user * __user *)argv, bprm);
40640+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
40641 set_fs(oldfs);
40642 return r;
40643 }
40644@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
40645 unsigned long new_end = old_end - shift;
40646 struct mmu_gather *tlb;
40647
40648- BUG_ON(new_start > new_end);
40649+ if (new_start >= new_end || new_start < mmap_min_addr)
40650+ return -ENOMEM;
40651
40652 /*
40653 * ensure there are no vmas between where we want to go
40654@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
40655 if (vma != find_vma(mm, new_start))
40656 return -EFAULT;
40657
40658+#ifdef CONFIG_PAX_SEGMEXEC
40659+ BUG_ON(pax_find_mirror_vma(vma));
40660+#endif
40661+
40662 /*
40663 * cover the whole range: [new_start, old_end)
40664 */
40665@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
40666 stack_top = arch_align_stack(stack_top);
40667 stack_top = PAGE_ALIGN(stack_top);
40668
40669- if (unlikely(stack_top < mmap_min_addr) ||
40670- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
40671- return -ENOMEM;
40672-
40673 stack_shift = vma->vm_end - stack_top;
40674
40675 bprm->p -= stack_shift;
40676@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
40677 bprm->exec -= stack_shift;
40678
40679 down_write(&mm->mmap_sem);
40680+
40681+ /* Move stack pages down in memory. */
40682+ if (stack_shift) {
40683+ ret = shift_arg_pages(vma, stack_shift);
40684+ if (ret)
40685+ goto out_unlock;
40686+ }
40687+
40688 vm_flags = VM_STACK_FLAGS;
40689
40690 /*
40691@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
40692 vm_flags &= ~VM_EXEC;
40693 vm_flags |= mm->def_flags;
40694
40695+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40696+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40697+ vm_flags &= ~VM_EXEC;
40698+
40699+#ifdef CONFIG_PAX_MPROTECT
40700+ if (mm->pax_flags & MF_PAX_MPROTECT)
40701+ vm_flags &= ~VM_MAYEXEC;
40702+#endif
40703+
40704+ }
40705+#endif
40706+
40707 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
40708 vm_flags);
40709 if (ret)
40710 goto out_unlock;
40711 BUG_ON(prev != vma);
40712
40713- /* Move stack pages down in memory. */
40714- if (stack_shift) {
40715- ret = shift_arg_pages(vma, stack_shift);
40716- if (ret)
40717- goto out_unlock;
40718- }
40719-
40720 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
40721 stack_size = vma->vm_end - vma->vm_start;
40722 /*
40723@@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
40724 int err;
40725
40726 file = do_filp_open(AT_FDCWD, name,
40727- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40728+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40729 MAY_EXEC | MAY_OPEN);
40730 if (IS_ERR(file))
40731 goto out;
40732@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
40733 old_fs = get_fs();
40734 set_fs(get_ds());
40735 /* The cast to a user pointer is valid due to the set_fs() */
40736- result = vfs_read(file, (void __user *)addr, count, &pos);
40737+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
40738 set_fs(old_fs);
40739 return result;
40740 }
40741@@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
40742 }
40743 rcu_read_unlock();
40744
40745- if (p->fs->users > n_fs) {
40746+ if (atomic_read(&p->fs->users) > n_fs) {
40747 bprm->unsafe |= LSM_UNSAFE_SHARE;
40748 } else {
40749 res = -EAGAIN;
40750@@ -1347,11 +1376,35 @@ int do_execve(char * filename,
40751 char __user *__user *envp,
40752 struct pt_regs * regs)
40753 {
40754+#ifdef CONFIG_GRKERNSEC
40755+ struct file *old_exec_file;
40756+ struct acl_subject_label *old_acl;
40757+ struct rlimit old_rlim[RLIM_NLIMITS];
40758+#endif
40759 struct linux_binprm *bprm;
40760 struct file *file;
40761 struct files_struct *displaced;
40762 bool clear_in_exec;
40763 int retval;
40764+ const struct cred *cred = current_cred();
40765+
40766+ /*
40767+ * We move the actual failure in case of RLIMIT_NPROC excess from
40768+ * set*uid() to execve() because too many poorly written programs
40769+ * don't check setuid() return code. Here we additionally recheck
40770+ * whether NPROC limit is still exceeded.
40771+ */
40772+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40773+
40774+ if ((current->flags & PF_NPROC_EXCEEDED) &&
40775+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40776+ retval = -EAGAIN;
40777+ goto out_ret;
40778+ }
40779+
40780+ /* We're below the limit (still or again), so we don't want to make
40781+ * further execve() calls fail. */
40782+ current->flags &= ~PF_NPROC_EXCEEDED;
40783
40784 retval = unshare_files(&displaced);
40785 if (retval)
40786@@ -1383,6 +1436,16 @@ int do_execve(char * filename,
40787 bprm->filename = filename;
40788 bprm->interp = filename;
40789
40790+ if (gr_process_user_ban()) {
40791+ retval = -EPERM;
40792+ goto out_file;
40793+ }
40794+
40795+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
40796+ retval = -EACCES;
40797+ goto out_file;
40798+ }
40799+
40800 retval = bprm_mm_init(bprm);
40801 if (retval)
40802 goto out_file;
40803@@ -1412,10 +1475,41 @@ int do_execve(char * filename,
40804 if (retval < 0)
40805 goto out;
40806
40807+ if (!gr_tpe_allow(file)) {
40808+ retval = -EACCES;
40809+ goto out;
40810+ }
40811+
40812+ if (gr_check_crash_exec(file)) {
40813+ retval = -EACCES;
40814+ goto out;
40815+ }
40816+
40817+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40818+
40819+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
40820+
40821+#ifdef CONFIG_GRKERNSEC
40822+ old_acl = current->acl;
40823+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40824+ old_exec_file = current->exec_file;
40825+ get_file(file);
40826+ current->exec_file = file;
40827+#endif
40828+
40829+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40830+ bprm->unsafe & LSM_UNSAFE_SHARE);
40831+ if (retval < 0)
40832+ goto out_fail;
40833+
40834 current->flags &= ~PF_KTHREAD;
40835 retval = search_binary_handler(bprm,regs);
40836 if (retval < 0)
40837- goto out;
40838+ goto out_fail;
40839+#ifdef CONFIG_GRKERNSEC
40840+ if (old_exec_file)
40841+ fput(old_exec_file);
40842+#endif
40843
40844 /* execve succeeded */
40845 current->fs->in_exec = 0;
40846@@ -1426,6 +1520,14 @@ int do_execve(char * filename,
40847 put_files_struct(displaced);
40848 return retval;
40849
40850+out_fail:
40851+#ifdef CONFIG_GRKERNSEC
40852+ current->acl = old_acl;
40853+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40854+ fput(current->exec_file);
40855+ current->exec_file = old_exec_file;
40856+#endif
40857+
40858 out:
40859 if (bprm->mm) {
40860 acct_arg_size(bprm, 0);
40861@@ -1591,6 +1693,220 @@ out:
40862 return ispipe;
40863 }
40864
40865+int pax_check_flags(unsigned long *flags)
40866+{
40867+ int retval = 0;
40868+
40869+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
40870+ if (*flags & MF_PAX_SEGMEXEC)
40871+ {
40872+ *flags &= ~MF_PAX_SEGMEXEC;
40873+ retval = -EINVAL;
40874+ }
40875+#endif
40876+
40877+ if ((*flags & MF_PAX_PAGEEXEC)
40878+
40879+#ifdef CONFIG_PAX_PAGEEXEC
40880+ && (*flags & MF_PAX_SEGMEXEC)
40881+#endif
40882+
40883+ )
40884+ {
40885+ *flags &= ~MF_PAX_PAGEEXEC;
40886+ retval = -EINVAL;
40887+ }
40888+
40889+ if ((*flags & MF_PAX_MPROTECT)
40890+
40891+#ifdef CONFIG_PAX_MPROTECT
40892+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
40893+#endif
40894+
40895+ )
40896+ {
40897+ *flags &= ~MF_PAX_MPROTECT;
40898+ retval = -EINVAL;
40899+ }
40900+
40901+ if ((*flags & MF_PAX_EMUTRAMP)
40902+
40903+#ifdef CONFIG_PAX_EMUTRAMP
40904+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
40905+#endif
40906+
40907+ )
40908+ {
40909+ *flags &= ~MF_PAX_EMUTRAMP;
40910+ retval = -EINVAL;
40911+ }
40912+
40913+ return retval;
40914+}
40915+
40916+EXPORT_SYMBOL(pax_check_flags);
40917+
40918+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40919+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
40920+{
40921+ struct task_struct *tsk = current;
40922+ struct mm_struct *mm = current->mm;
40923+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
40924+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
40925+ char *path_exec = NULL;
40926+ char *path_fault = NULL;
40927+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
40928+
40929+ if (buffer_exec && buffer_fault) {
40930+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
40931+
40932+ down_read(&mm->mmap_sem);
40933+ vma = mm->mmap;
40934+ while (vma && (!vma_exec || !vma_fault)) {
40935+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
40936+ vma_exec = vma;
40937+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
40938+ vma_fault = vma;
40939+ vma = vma->vm_next;
40940+ }
40941+ if (vma_exec) {
40942+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
40943+ if (IS_ERR(path_exec))
40944+ path_exec = "<path too long>";
40945+ else {
40946+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
40947+ if (path_exec) {
40948+ *path_exec = 0;
40949+ path_exec = buffer_exec;
40950+ } else
40951+ path_exec = "<path too long>";
40952+ }
40953+ }
40954+ if (vma_fault) {
40955+ start = vma_fault->vm_start;
40956+ end = vma_fault->vm_end;
40957+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
40958+ if (vma_fault->vm_file) {
40959+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
40960+ if (IS_ERR(path_fault))
40961+ path_fault = "<path too long>";
40962+ else {
40963+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
40964+ if (path_fault) {
40965+ *path_fault = 0;
40966+ path_fault = buffer_fault;
40967+ } else
40968+ path_fault = "<path too long>";
40969+ }
40970+ } else
40971+ path_fault = "<anonymous mapping>";
40972+ }
40973+ up_read(&mm->mmap_sem);
40974+ }
40975+ if (tsk->signal->curr_ip)
40976+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
40977+ else
40978+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
40979+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
40980+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
40981+ task_uid(tsk), task_euid(tsk), pc, sp);
40982+ free_page((unsigned long)buffer_exec);
40983+ free_page((unsigned long)buffer_fault);
40984+ pax_report_insns(pc, sp);
40985+ do_coredump(SIGKILL, SIGKILL, regs);
40986+}
40987+#endif
40988+
40989+#ifdef CONFIG_PAX_REFCOUNT
40990+void pax_report_refcount_overflow(struct pt_regs *regs)
40991+{
40992+ if (current->signal->curr_ip)
40993+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
40994+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
40995+ else
40996+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
40997+ current->comm, task_pid_nr(current), current_uid(), current_euid());
40998+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
40999+ show_regs(regs);
41000+ force_sig_specific(SIGKILL, current);
41001+}
41002+#endif
41003+
41004+#ifdef CONFIG_PAX_USERCOPY
41005+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41006+int object_is_on_stack(const void *obj, unsigned long len)
41007+{
41008+ const void * const stack = task_stack_page(current);
41009+ const void * const stackend = stack + THREAD_SIZE;
41010+
41011+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41012+ const void *frame = NULL;
41013+ const void *oldframe;
41014+#endif
41015+
41016+ if (obj + len < obj)
41017+ return -1;
41018+
41019+ if (obj + len <= stack || stackend <= obj)
41020+ return 0;
41021+
41022+ if (obj < stack || stackend < obj + len)
41023+ return -1;
41024+
41025+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41026+ oldframe = __builtin_frame_address(1);
41027+ if (oldframe)
41028+ frame = __builtin_frame_address(2);
41029+ /*
41030+ low ----------------------------------------------> high
41031+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41032+ ^----------------^
41033+ allow copies only within here
41034+ */
41035+ while (stack <= frame && frame < stackend) {
41036+ /* if obj + len extends past the last frame, this
41037+ check won't pass and the next frame will be 0,
41038+ causing us to bail out and correctly report
41039+ the copy as invalid
41040+ */
41041+ if (obj + len <= frame)
41042+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41043+ oldframe = frame;
41044+ frame = *(const void * const *)frame;
41045+ }
41046+ return -1;
41047+#else
41048+ return 1;
41049+#endif
41050+}
41051+
41052+
41053+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41054+{
41055+ if (current->signal->curr_ip)
41056+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41057+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41058+ else
41059+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41060+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41061+
41062+ dump_stack();
41063+ gr_handle_kernel_exploit();
41064+ do_group_exit(SIGKILL);
41065+}
41066+#endif
41067+
41068+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41069+void pax_track_stack(void)
41070+{
41071+ unsigned long sp = (unsigned long)&sp;
41072+ if (sp < current_thread_info()->lowest_stack &&
41073+ sp > (unsigned long)task_stack_page(current))
41074+ current_thread_info()->lowest_stack = sp;
41075+}
41076+EXPORT_SYMBOL(pax_track_stack);
41077+#endif
41078+
41079 static int zap_process(struct task_struct *start)
41080 {
41081 struct task_struct *t;
41082@@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct
41083 pipe = file->f_path.dentry->d_inode->i_pipe;
41084
41085 pipe_lock(pipe);
41086- pipe->readers++;
41087- pipe->writers--;
41088+ atomic_inc(&pipe->readers);
41089+ atomic_dec(&pipe->writers);
41090
41091- while ((pipe->readers > 1) && (!signal_pending(current))) {
41092+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41093 wake_up_interruptible_sync(&pipe->wait);
41094 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41095 pipe_wait(pipe);
41096 }
41097
41098- pipe->readers--;
41099- pipe->writers++;
41100+ atomic_dec(&pipe->readers);
41101+ atomic_inc(&pipe->writers);
41102 pipe_unlock(pipe);
41103
41104 }
41105@@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_co
41106 char **helper_argv = NULL;
41107 int helper_argc = 0;
41108 int dump_count = 0;
41109- static atomic_t core_dump_count = ATOMIC_INIT(0);
41110+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41111
41112 audit_core_dumps(signr);
41113
41114+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41115+ gr_handle_brute_attach(current, mm->flags);
41116+
41117 binfmt = mm->binfmt;
41118 if (!binfmt || !binfmt->core_dump)
41119 goto fail;
41120@@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_co
41121 */
41122 clear_thread_flag(TIF_SIGPENDING);
41123
41124+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41125+
41126 /*
41127 * lock_kernel() because format_corename() is controlled by sysctl, which
41128 * uses lock_kernel()
41129@@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_co
41130 goto fail_unlock;
41131 }
41132
41133- dump_count = atomic_inc_return(&core_dump_count);
41134+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
41135 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41136 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41137 task_tgid_vnr(current), current->comm);
41138@@ -1972,7 +2293,7 @@ close_fail:
41139 filp_close(file, NULL);
41140 fail_dropcount:
41141 if (dump_count)
41142- atomic_dec(&core_dump_count);
41143+ atomic_dec_unchecked(&core_dump_count);
41144 fail_unlock:
41145 if (helper_argv)
41146 argv_free(helper_argv);
41147diff -urNp linux-2.6.32.45/fs/ext2/balloc.c linux-2.6.32.45/fs/ext2/balloc.c
41148--- linux-2.6.32.45/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
41149+++ linux-2.6.32.45/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
41150@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41151
41152 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41153 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41154- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41155+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41156 sbi->s_resuid != current_fsuid() &&
41157 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41158 return 0;
41159diff -urNp linux-2.6.32.45/fs/ext3/balloc.c linux-2.6.32.45/fs/ext3/balloc.c
41160--- linux-2.6.32.45/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
41161+++ linux-2.6.32.45/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
41162@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
41163
41164 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41165 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41166- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41167+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41168 sbi->s_resuid != current_fsuid() &&
41169 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41170 return 0;
41171diff -urNp linux-2.6.32.45/fs/ext4/balloc.c linux-2.6.32.45/fs/ext4/balloc.c
41172--- linux-2.6.32.45/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
41173+++ linux-2.6.32.45/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
41174@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
41175 /* Hm, nope. Are (enough) root reserved blocks available? */
41176 if (sbi->s_resuid == current_fsuid() ||
41177 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41178- capable(CAP_SYS_RESOURCE)) {
41179+ capable_nolog(CAP_SYS_RESOURCE)) {
41180 if (free_blocks >= (nblocks + dirty_blocks))
41181 return 1;
41182 }
41183diff -urNp linux-2.6.32.45/fs/ext4/ext4.h linux-2.6.32.45/fs/ext4/ext4.h
41184--- linux-2.6.32.45/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
41185+++ linux-2.6.32.45/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
41186@@ -1078,19 +1078,19 @@ struct ext4_sb_info {
41187
41188 /* stats for buddy allocator */
41189 spinlock_t s_mb_pa_lock;
41190- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41191- atomic_t s_bal_success; /* we found long enough chunks */
41192- atomic_t s_bal_allocated; /* in blocks */
41193- atomic_t s_bal_ex_scanned; /* total extents scanned */
41194- atomic_t s_bal_goals; /* goal hits */
41195- atomic_t s_bal_breaks; /* too long searches */
41196- atomic_t s_bal_2orders; /* 2^order hits */
41197+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41198+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41199+ atomic_unchecked_t s_bal_allocated; /* in blocks */
41200+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41201+ atomic_unchecked_t s_bal_goals; /* goal hits */
41202+ atomic_unchecked_t s_bal_breaks; /* too long searches */
41203+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41204 spinlock_t s_bal_lock;
41205 unsigned long s_mb_buddies_generated;
41206 unsigned long long s_mb_generation_time;
41207- atomic_t s_mb_lost_chunks;
41208- atomic_t s_mb_preallocated;
41209- atomic_t s_mb_discarded;
41210+ atomic_unchecked_t s_mb_lost_chunks;
41211+ atomic_unchecked_t s_mb_preallocated;
41212+ atomic_unchecked_t s_mb_discarded;
41213 atomic_t s_lock_busy;
41214
41215 /* locality groups */
41216diff -urNp linux-2.6.32.45/fs/ext4/mballoc.c linux-2.6.32.45/fs/ext4/mballoc.c
41217--- linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
41218+++ linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
41219@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
41220 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41221
41222 if (EXT4_SB(sb)->s_mb_stats)
41223- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41224+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41225
41226 break;
41227 }
41228@@ -2131,7 +2131,7 @@ repeat:
41229 ac->ac_status = AC_STATUS_CONTINUE;
41230 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41231 cr = 3;
41232- atomic_inc(&sbi->s_mb_lost_chunks);
41233+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41234 goto repeat;
41235 }
41236 }
41237@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
41238 ext4_grpblk_t counters[16];
41239 } sg;
41240
41241+ pax_track_stack();
41242+
41243 group--;
41244 if (group == 0)
41245 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41246@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
41247 if (sbi->s_mb_stats) {
41248 printk(KERN_INFO
41249 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41250- atomic_read(&sbi->s_bal_allocated),
41251- atomic_read(&sbi->s_bal_reqs),
41252- atomic_read(&sbi->s_bal_success));
41253+ atomic_read_unchecked(&sbi->s_bal_allocated),
41254+ atomic_read_unchecked(&sbi->s_bal_reqs),
41255+ atomic_read_unchecked(&sbi->s_bal_success));
41256 printk(KERN_INFO
41257 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41258 "%u 2^N hits, %u breaks, %u lost\n",
41259- atomic_read(&sbi->s_bal_ex_scanned),
41260- atomic_read(&sbi->s_bal_goals),
41261- atomic_read(&sbi->s_bal_2orders),
41262- atomic_read(&sbi->s_bal_breaks),
41263- atomic_read(&sbi->s_mb_lost_chunks));
41264+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41265+ atomic_read_unchecked(&sbi->s_bal_goals),
41266+ atomic_read_unchecked(&sbi->s_bal_2orders),
41267+ atomic_read_unchecked(&sbi->s_bal_breaks),
41268+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41269 printk(KERN_INFO
41270 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41271 sbi->s_mb_buddies_generated++,
41272 sbi->s_mb_generation_time);
41273 printk(KERN_INFO
41274 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41275- atomic_read(&sbi->s_mb_preallocated),
41276- atomic_read(&sbi->s_mb_discarded));
41277+ atomic_read_unchecked(&sbi->s_mb_preallocated),
41278+ atomic_read_unchecked(&sbi->s_mb_discarded));
41279 }
41280
41281 free_percpu(sbi->s_locality_groups);
41282@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
41283 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41284
41285 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41286- atomic_inc(&sbi->s_bal_reqs);
41287- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41288+ atomic_inc_unchecked(&sbi->s_bal_reqs);
41289+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41290 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
41291- atomic_inc(&sbi->s_bal_success);
41292- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41293+ atomic_inc_unchecked(&sbi->s_bal_success);
41294+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41295 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41296 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41297- atomic_inc(&sbi->s_bal_goals);
41298+ atomic_inc_unchecked(&sbi->s_bal_goals);
41299 if (ac->ac_found > sbi->s_mb_max_to_scan)
41300- atomic_inc(&sbi->s_bal_breaks);
41301+ atomic_inc_unchecked(&sbi->s_bal_breaks);
41302 }
41303
41304 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41305@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41306 trace_ext4_mb_new_inode_pa(ac, pa);
41307
41308 ext4_mb_use_inode_pa(ac, pa);
41309- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41310+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41311
41312 ei = EXT4_I(ac->ac_inode);
41313 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41314@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41315 trace_ext4_mb_new_group_pa(ac, pa);
41316
41317 ext4_mb_use_group_pa(ac, pa);
41318- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41319+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41320
41321 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41322 lg = ac->ac_lg;
41323@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41324 * from the bitmap and continue.
41325 */
41326 }
41327- atomic_add(free, &sbi->s_mb_discarded);
41328+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
41329
41330 return err;
41331 }
41332@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41333 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41334 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41335 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41336- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41337+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41338
41339 if (ac) {
41340 ac->ac_sb = sb;
41341diff -urNp linux-2.6.32.45/fs/ext4/super.c linux-2.6.32.45/fs/ext4/super.c
41342--- linux-2.6.32.45/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
41343+++ linux-2.6.32.45/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
41344@@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
41345 }
41346
41347
41348-static struct sysfs_ops ext4_attr_ops = {
41349+static const struct sysfs_ops ext4_attr_ops = {
41350 .show = ext4_attr_show,
41351 .store = ext4_attr_store,
41352 };
41353diff -urNp linux-2.6.32.45/fs/fcntl.c linux-2.6.32.45/fs/fcntl.c
41354--- linux-2.6.32.45/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
41355+++ linux-2.6.32.45/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
41356@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
41357 if (err)
41358 return err;
41359
41360+ if (gr_handle_chroot_fowner(pid, type))
41361+ return -ENOENT;
41362+ if (gr_check_protected_task_fowner(pid, type))
41363+ return -EACCES;
41364+
41365 f_modown(filp, pid, type, force);
41366 return 0;
41367 }
41368@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
41369 switch (cmd) {
41370 case F_DUPFD:
41371 case F_DUPFD_CLOEXEC:
41372+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41373 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41374 break;
41375 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41376diff -urNp linux-2.6.32.45/fs/fifo.c linux-2.6.32.45/fs/fifo.c
41377--- linux-2.6.32.45/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
41378+++ linux-2.6.32.45/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
41379@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
41380 */
41381 filp->f_op = &read_pipefifo_fops;
41382 pipe->r_counter++;
41383- if (pipe->readers++ == 0)
41384+ if (atomic_inc_return(&pipe->readers) == 1)
41385 wake_up_partner(inode);
41386
41387- if (!pipe->writers) {
41388+ if (!atomic_read(&pipe->writers)) {
41389 if ((filp->f_flags & O_NONBLOCK)) {
41390 /* suppress POLLHUP until we have
41391 * seen a writer */
41392@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
41393 * errno=ENXIO when there is no process reading the FIFO.
41394 */
41395 ret = -ENXIO;
41396- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
41397+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
41398 goto err;
41399
41400 filp->f_op = &write_pipefifo_fops;
41401 pipe->w_counter++;
41402- if (!pipe->writers++)
41403+ if (atomic_inc_return(&pipe->writers) == 1)
41404 wake_up_partner(inode);
41405
41406- if (!pipe->readers) {
41407+ if (!atomic_read(&pipe->readers)) {
41408 wait_for_partner(inode, &pipe->r_counter);
41409 if (signal_pending(current))
41410 goto err_wr;
41411@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
41412 */
41413 filp->f_op = &rdwr_pipefifo_fops;
41414
41415- pipe->readers++;
41416- pipe->writers++;
41417+ atomic_inc(&pipe->readers);
41418+ atomic_inc(&pipe->writers);
41419 pipe->r_counter++;
41420 pipe->w_counter++;
41421- if (pipe->readers == 1 || pipe->writers == 1)
41422+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
41423 wake_up_partner(inode);
41424 break;
41425
41426@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
41427 return 0;
41428
41429 err_rd:
41430- if (!--pipe->readers)
41431+ if (atomic_dec_and_test(&pipe->readers))
41432 wake_up_interruptible(&pipe->wait);
41433 ret = -ERESTARTSYS;
41434 goto err;
41435
41436 err_wr:
41437- if (!--pipe->writers)
41438+ if (atomic_dec_and_test(&pipe->writers))
41439 wake_up_interruptible(&pipe->wait);
41440 ret = -ERESTARTSYS;
41441 goto err;
41442
41443 err:
41444- if (!pipe->readers && !pipe->writers)
41445+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
41446 free_pipe_info(inode);
41447
41448 err_nocleanup:
41449diff -urNp linux-2.6.32.45/fs/file.c linux-2.6.32.45/fs/file.c
41450--- linux-2.6.32.45/fs/file.c 2011-03-27 14:31:47.000000000 -0400
41451+++ linux-2.6.32.45/fs/file.c 2011-04-17 15:56:46.000000000 -0400
41452@@ -14,6 +14,7 @@
41453 #include <linux/slab.h>
41454 #include <linux/vmalloc.h>
41455 #include <linux/file.h>
41456+#include <linux/security.h>
41457 #include <linux/fdtable.h>
41458 #include <linux/bitops.h>
41459 #include <linux/interrupt.h>
41460@@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
41461 * N.B. For clone tasks sharing a files structure, this test
41462 * will limit the total number of files that can be opened.
41463 */
41464+
41465+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
41466 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41467 return -EMFILE;
41468
41469diff -urNp linux-2.6.32.45/fs/filesystems.c linux-2.6.32.45/fs/filesystems.c
41470--- linux-2.6.32.45/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
41471+++ linux-2.6.32.45/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
41472@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
41473 int len = dot ? dot - name : strlen(name);
41474
41475 fs = __get_fs_type(name, len);
41476+
41477+#ifdef CONFIG_GRKERNSEC_MODHARDEN
41478+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
41479+#else
41480 if (!fs && (request_module("%.*s", len, name) == 0))
41481+#endif
41482 fs = __get_fs_type(name, len);
41483
41484 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
41485diff -urNp linux-2.6.32.45/fs/fscache/cookie.c linux-2.6.32.45/fs/fscache/cookie.c
41486--- linux-2.6.32.45/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
41487+++ linux-2.6.32.45/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
41488@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
41489 parent ? (char *) parent->def->name : "<no-parent>",
41490 def->name, netfs_data);
41491
41492- fscache_stat(&fscache_n_acquires);
41493+ fscache_stat_unchecked(&fscache_n_acquires);
41494
41495 /* if there's no parent cookie, then we don't create one here either */
41496 if (!parent) {
41497- fscache_stat(&fscache_n_acquires_null);
41498+ fscache_stat_unchecked(&fscache_n_acquires_null);
41499 _leave(" [no parent]");
41500 return NULL;
41501 }
41502@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
41503 /* allocate and initialise a cookie */
41504 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
41505 if (!cookie) {
41506- fscache_stat(&fscache_n_acquires_oom);
41507+ fscache_stat_unchecked(&fscache_n_acquires_oom);
41508 _leave(" [ENOMEM]");
41509 return NULL;
41510 }
41511@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
41512
41513 switch (cookie->def->type) {
41514 case FSCACHE_COOKIE_TYPE_INDEX:
41515- fscache_stat(&fscache_n_cookie_index);
41516+ fscache_stat_unchecked(&fscache_n_cookie_index);
41517 break;
41518 case FSCACHE_COOKIE_TYPE_DATAFILE:
41519- fscache_stat(&fscache_n_cookie_data);
41520+ fscache_stat_unchecked(&fscache_n_cookie_data);
41521 break;
41522 default:
41523- fscache_stat(&fscache_n_cookie_special);
41524+ fscache_stat_unchecked(&fscache_n_cookie_special);
41525 break;
41526 }
41527
41528@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
41529 if (fscache_acquire_non_index_cookie(cookie) < 0) {
41530 atomic_dec(&parent->n_children);
41531 __fscache_cookie_put(cookie);
41532- fscache_stat(&fscache_n_acquires_nobufs);
41533+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
41534 _leave(" = NULL");
41535 return NULL;
41536 }
41537 }
41538
41539- fscache_stat(&fscache_n_acquires_ok);
41540+ fscache_stat_unchecked(&fscache_n_acquires_ok);
41541 _leave(" = %p", cookie);
41542 return cookie;
41543 }
41544@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
41545 cache = fscache_select_cache_for_object(cookie->parent);
41546 if (!cache) {
41547 up_read(&fscache_addremove_sem);
41548- fscache_stat(&fscache_n_acquires_no_cache);
41549+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
41550 _leave(" = -ENOMEDIUM [no cache]");
41551 return -ENOMEDIUM;
41552 }
41553@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
41554 object = cache->ops->alloc_object(cache, cookie);
41555 fscache_stat_d(&fscache_n_cop_alloc_object);
41556 if (IS_ERR(object)) {
41557- fscache_stat(&fscache_n_object_no_alloc);
41558+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
41559 ret = PTR_ERR(object);
41560 goto error;
41561 }
41562
41563- fscache_stat(&fscache_n_object_alloc);
41564+ fscache_stat_unchecked(&fscache_n_object_alloc);
41565
41566 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
41567
41568@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
41569 struct fscache_object *object;
41570 struct hlist_node *_p;
41571
41572- fscache_stat(&fscache_n_updates);
41573+ fscache_stat_unchecked(&fscache_n_updates);
41574
41575 if (!cookie) {
41576- fscache_stat(&fscache_n_updates_null);
41577+ fscache_stat_unchecked(&fscache_n_updates_null);
41578 _leave(" [no cookie]");
41579 return;
41580 }
41581@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
41582 struct fscache_object *object;
41583 unsigned long event;
41584
41585- fscache_stat(&fscache_n_relinquishes);
41586+ fscache_stat_unchecked(&fscache_n_relinquishes);
41587 if (retire)
41588- fscache_stat(&fscache_n_relinquishes_retire);
41589+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
41590
41591 if (!cookie) {
41592- fscache_stat(&fscache_n_relinquishes_null);
41593+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
41594 _leave(" [no cookie]");
41595 return;
41596 }
41597@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
41598
41599 /* wait for the cookie to finish being instantiated (or to fail) */
41600 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
41601- fscache_stat(&fscache_n_relinquishes_waitcrt);
41602+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
41603 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
41604 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
41605 }
41606diff -urNp linux-2.6.32.45/fs/fscache/internal.h linux-2.6.32.45/fs/fscache/internal.h
41607--- linux-2.6.32.45/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
41608+++ linux-2.6.32.45/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
41609@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
41610 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
41611 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
41612
41613-extern atomic_t fscache_n_op_pend;
41614-extern atomic_t fscache_n_op_run;
41615-extern atomic_t fscache_n_op_enqueue;
41616-extern atomic_t fscache_n_op_deferred_release;
41617-extern atomic_t fscache_n_op_release;
41618-extern atomic_t fscache_n_op_gc;
41619-extern atomic_t fscache_n_op_cancelled;
41620-extern atomic_t fscache_n_op_rejected;
41621-
41622-extern atomic_t fscache_n_attr_changed;
41623-extern atomic_t fscache_n_attr_changed_ok;
41624-extern atomic_t fscache_n_attr_changed_nobufs;
41625-extern atomic_t fscache_n_attr_changed_nomem;
41626-extern atomic_t fscache_n_attr_changed_calls;
41627-
41628-extern atomic_t fscache_n_allocs;
41629-extern atomic_t fscache_n_allocs_ok;
41630-extern atomic_t fscache_n_allocs_wait;
41631-extern atomic_t fscache_n_allocs_nobufs;
41632-extern atomic_t fscache_n_allocs_intr;
41633-extern atomic_t fscache_n_allocs_object_dead;
41634-extern atomic_t fscache_n_alloc_ops;
41635-extern atomic_t fscache_n_alloc_op_waits;
41636-
41637-extern atomic_t fscache_n_retrievals;
41638-extern atomic_t fscache_n_retrievals_ok;
41639-extern atomic_t fscache_n_retrievals_wait;
41640-extern atomic_t fscache_n_retrievals_nodata;
41641-extern atomic_t fscache_n_retrievals_nobufs;
41642-extern atomic_t fscache_n_retrievals_intr;
41643-extern atomic_t fscache_n_retrievals_nomem;
41644-extern atomic_t fscache_n_retrievals_object_dead;
41645-extern atomic_t fscache_n_retrieval_ops;
41646-extern atomic_t fscache_n_retrieval_op_waits;
41647-
41648-extern atomic_t fscache_n_stores;
41649-extern atomic_t fscache_n_stores_ok;
41650-extern atomic_t fscache_n_stores_again;
41651-extern atomic_t fscache_n_stores_nobufs;
41652-extern atomic_t fscache_n_stores_oom;
41653-extern atomic_t fscache_n_store_ops;
41654-extern atomic_t fscache_n_store_calls;
41655-extern atomic_t fscache_n_store_pages;
41656-extern atomic_t fscache_n_store_radix_deletes;
41657-extern atomic_t fscache_n_store_pages_over_limit;
41658-
41659-extern atomic_t fscache_n_store_vmscan_not_storing;
41660-extern atomic_t fscache_n_store_vmscan_gone;
41661-extern atomic_t fscache_n_store_vmscan_busy;
41662-extern atomic_t fscache_n_store_vmscan_cancelled;
41663-
41664-extern atomic_t fscache_n_marks;
41665-extern atomic_t fscache_n_uncaches;
41666-
41667-extern atomic_t fscache_n_acquires;
41668-extern atomic_t fscache_n_acquires_null;
41669-extern atomic_t fscache_n_acquires_no_cache;
41670-extern atomic_t fscache_n_acquires_ok;
41671-extern atomic_t fscache_n_acquires_nobufs;
41672-extern atomic_t fscache_n_acquires_oom;
41673-
41674-extern atomic_t fscache_n_updates;
41675-extern atomic_t fscache_n_updates_null;
41676-extern atomic_t fscache_n_updates_run;
41677-
41678-extern atomic_t fscache_n_relinquishes;
41679-extern atomic_t fscache_n_relinquishes_null;
41680-extern atomic_t fscache_n_relinquishes_waitcrt;
41681-extern atomic_t fscache_n_relinquishes_retire;
41682-
41683-extern atomic_t fscache_n_cookie_index;
41684-extern atomic_t fscache_n_cookie_data;
41685-extern atomic_t fscache_n_cookie_special;
41686-
41687-extern atomic_t fscache_n_object_alloc;
41688-extern atomic_t fscache_n_object_no_alloc;
41689-extern atomic_t fscache_n_object_lookups;
41690-extern atomic_t fscache_n_object_lookups_negative;
41691-extern atomic_t fscache_n_object_lookups_positive;
41692-extern atomic_t fscache_n_object_lookups_timed_out;
41693-extern atomic_t fscache_n_object_created;
41694-extern atomic_t fscache_n_object_avail;
41695-extern atomic_t fscache_n_object_dead;
41696-
41697-extern atomic_t fscache_n_checkaux_none;
41698-extern atomic_t fscache_n_checkaux_okay;
41699-extern atomic_t fscache_n_checkaux_update;
41700-extern atomic_t fscache_n_checkaux_obsolete;
41701+extern atomic_unchecked_t fscache_n_op_pend;
41702+extern atomic_unchecked_t fscache_n_op_run;
41703+extern atomic_unchecked_t fscache_n_op_enqueue;
41704+extern atomic_unchecked_t fscache_n_op_deferred_release;
41705+extern atomic_unchecked_t fscache_n_op_release;
41706+extern atomic_unchecked_t fscache_n_op_gc;
41707+extern atomic_unchecked_t fscache_n_op_cancelled;
41708+extern atomic_unchecked_t fscache_n_op_rejected;
41709+
41710+extern atomic_unchecked_t fscache_n_attr_changed;
41711+extern atomic_unchecked_t fscache_n_attr_changed_ok;
41712+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
41713+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
41714+extern atomic_unchecked_t fscache_n_attr_changed_calls;
41715+
41716+extern atomic_unchecked_t fscache_n_allocs;
41717+extern atomic_unchecked_t fscache_n_allocs_ok;
41718+extern atomic_unchecked_t fscache_n_allocs_wait;
41719+extern atomic_unchecked_t fscache_n_allocs_nobufs;
41720+extern atomic_unchecked_t fscache_n_allocs_intr;
41721+extern atomic_unchecked_t fscache_n_allocs_object_dead;
41722+extern atomic_unchecked_t fscache_n_alloc_ops;
41723+extern atomic_unchecked_t fscache_n_alloc_op_waits;
41724+
41725+extern atomic_unchecked_t fscache_n_retrievals;
41726+extern atomic_unchecked_t fscache_n_retrievals_ok;
41727+extern atomic_unchecked_t fscache_n_retrievals_wait;
41728+extern atomic_unchecked_t fscache_n_retrievals_nodata;
41729+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
41730+extern atomic_unchecked_t fscache_n_retrievals_intr;
41731+extern atomic_unchecked_t fscache_n_retrievals_nomem;
41732+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
41733+extern atomic_unchecked_t fscache_n_retrieval_ops;
41734+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
41735+
41736+extern atomic_unchecked_t fscache_n_stores;
41737+extern atomic_unchecked_t fscache_n_stores_ok;
41738+extern atomic_unchecked_t fscache_n_stores_again;
41739+extern atomic_unchecked_t fscache_n_stores_nobufs;
41740+extern atomic_unchecked_t fscache_n_stores_oom;
41741+extern atomic_unchecked_t fscache_n_store_ops;
41742+extern atomic_unchecked_t fscache_n_store_calls;
41743+extern atomic_unchecked_t fscache_n_store_pages;
41744+extern atomic_unchecked_t fscache_n_store_radix_deletes;
41745+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
41746+
41747+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
41748+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
41749+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
41750+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
41751+
41752+extern atomic_unchecked_t fscache_n_marks;
41753+extern atomic_unchecked_t fscache_n_uncaches;
41754+
41755+extern atomic_unchecked_t fscache_n_acquires;
41756+extern atomic_unchecked_t fscache_n_acquires_null;
41757+extern atomic_unchecked_t fscache_n_acquires_no_cache;
41758+extern atomic_unchecked_t fscache_n_acquires_ok;
41759+extern atomic_unchecked_t fscache_n_acquires_nobufs;
41760+extern atomic_unchecked_t fscache_n_acquires_oom;
41761+
41762+extern atomic_unchecked_t fscache_n_updates;
41763+extern atomic_unchecked_t fscache_n_updates_null;
41764+extern atomic_unchecked_t fscache_n_updates_run;
41765+
41766+extern atomic_unchecked_t fscache_n_relinquishes;
41767+extern atomic_unchecked_t fscache_n_relinquishes_null;
41768+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
41769+extern atomic_unchecked_t fscache_n_relinquishes_retire;
41770+
41771+extern atomic_unchecked_t fscache_n_cookie_index;
41772+extern atomic_unchecked_t fscache_n_cookie_data;
41773+extern atomic_unchecked_t fscache_n_cookie_special;
41774+
41775+extern atomic_unchecked_t fscache_n_object_alloc;
41776+extern atomic_unchecked_t fscache_n_object_no_alloc;
41777+extern atomic_unchecked_t fscache_n_object_lookups;
41778+extern atomic_unchecked_t fscache_n_object_lookups_negative;
41779+extern atomic_unchecked_t fscache_n_object_lookups_positive;
41780+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
41781+extern atomic_unchecked_t fscache_n_object_created;
41782+extern atomic_unchecked_t fscache_n_object_avail;
41783+extern atomic_unchecked_t fscache_n_object_dead;
41784+
41785+extern atomic_unchecked_t fscache_n_checkaux_none;
41786+extern atomic_unchecked_t fscache_n_checkaux_okay;
41787+extern atomic_unchecked_t fscache_n_checkaux_update;
41788+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
41789
41790 extern atomic_t fscache_n_cop_alloc_object;
41791 extern atomic_t fscache_n_cop_lookup_object;
41792@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
41793 atomic_inc(stat);
41794 }
41795
41796+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
41797+{
41798+ atomic_inc_unchecked(stat);
41799+}
41800+
41801 static inline void fscache_stat_d(atomic_t *stat)
41802 {
41803 atomic_dec(stat);
41804@@ -259,6 +264,7 @@ extern const struct file_operations fsca
41805
41806 #define __fscache_stat(stat) (NULL)
41807 #define fscache_stat(stat) do {} while (0)
41808+#define fscache_stat_unchecked(stat) do {} while (0)
41809 #define fscache_stat_d(stat) do {} while (0)
41810 #endif
41811
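
The internal.h hunk above converts every purely informational FS-Cache statistics counter from atomic_t to atomic_unchecked_t and pairs fscache_stat() with a new fscache_stat_unchecked() helper. As far as the surrounding patch suggests, the motivation is that PaX's reference-counter hardening adds overflow detection to the ordinary atomic_t operations; statistics counters may legitimately wrap, so they are moved to the unchecked variant to avoid tripping that check. The snippet below is only an illustrative approximation of that split, written with GCC/Clang __atomic builtins; it is not the PaX definition of atomic_unchecked_t, and every name carrying a _sketch suffix is invented.

/* sketch.c - illustrative only */
typedef struct { int counter; } atomic_sketch_t;   /* stand-in for atomic_unchecked_t */

static inline void atomic_inc_unchecked_sketch(atomic_sketch_t *v)
{
	/* relaxed add with no overflow check: wrap-around is harmless for stats */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked_sketch(const atomic_sketch_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static inline void fscache_stat_unchecked_sketch(atomic_sketch_t *stat)
{
	atomic_inc_unchecked_sketch(stat);   /* mirrors the new fscache_stat_unchecked() */
}
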
41812diff -urNp linux-2.6.32.45/fs/fscache/object.c linux-2.6.32.45/fs/fscache/object.c
41813--- linux-2.6.32.45/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
41814+++ linux-2.6.32.45/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
41815@@ -144,7 +144,7 @@ static void fscache_object_state_machine
41816 /* update the object metadata on disk */
41817 case FSCACHE_OBJECT_UPDATING:
41818 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
41819- fscache_stat(&fscache_n_updates_run);
41820+ fscache_stat_unchecked(&fscache_n_updates_run);
41821 fscache_stat(&fscache_n_cop_update_object);
41822 object->cache->ops->update_object(object);
41823 fscache_stat_d(&fscache_n_cop_update_object);
41824@@ -233,7 +233,7 @@ static void fscache_object_state_machine
41825 spin_lock(&object->lock);
41826 object->state = FSCACHE_OBJECT_DEAD;
41827 spin_unlock(&object->lock);
41828- fscache_stat(&fscache_n_object_dead);
41829+ fscache_stat_unchecked(&fscache_n_object_dead);
41830 goto terminal_transit;
41831
41832 /* handle the parent cache of this object being withdrawn from
41833@@ -248,7 +248,7 @@ static void fscache_object_state_machine
41834 spin_lock(&object->lock);
41835 object->state = FSCACHE_OBJECT_DEAD;
41836 spin_unlock(&object->lock);
41837- fscache_stat(&fscache_n_object_dead);
41838+ fscache_stat_unchecked(&fscache_n_object_dead);
41839 goto terminal_transit;
41840
41841 /* complain about the object being woken up once it is
41842@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
41843 parent->cookie->def->name, cookie->def->name,
41844 object->cache->tag->name);
41845
41846- fscache_stat(&fscache_n_object_lookups);
41847+ fscache_stat_unchecked(&fscache_n_object_lookups);
41848 fscache_stat(&fscache_n_cop_lookup_object);
41849 ret = object->cache->ops->lookup_object(object);
41850 fscache_stat_d(&fscache_n_cop_lookup_object);
41851@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
41852 if (ret == -ETIMEDOUT) {
41853 /* probably stuck behind another object, so move this one to
41854 * the back of the queue */
41855- fscache_stat(&fscache_n_object_lookups_timed_out);
41856+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
41857 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
41858 }
41859
41860@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
41861
41862 spin_lock(&object->lock);
41863 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
41864- fscache_stat(&fscache_n_object_lookups_negative);
41865+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
41866
41867 /* transit here to allow write requests to begin stacking up
41868 * and read requests to begin returning ENODATA */
41869@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
41870 * result, in which case there may be data available */
41871 spin_lock(&object->lock);
41872 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
41873- fscache_stat(&fscache_n_object_lookups_positive);
41874+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
41875
41876 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
41877
41878@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
41879 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
41880 } else {
41881 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
41882- fscache_stat(&fscache_n_object_created);
41883+ fscache_stat_unchecked(&fscache_n_object_created);
41884
41885 object->state = FSCACHE_OBJECT_AVAILABLE;
41886 spin_unlock(&object->lock);
41887@@ -633,7 +633,7 @@ static void fscache_object_available(str
41888 fscache_enqueue_dependents(object);
41889
41890 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
41891- fscache_stat(&fscache_n_object_avail);
41892+ fscache_stat_unchecked(&fscache_n_object_avail);
41893
41894 _leave("");
41895 }
41896@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
41897 enum fscache_checkaux result;
41898
41899 if (!object->cookie->def->check_aux) {
41900- fscache_stat(&fscache_n_checkaux_none);
41901+ fscache_stat_unchecked(&fscache_n_checkaux_none);
41902 return FSCACHE_CHECKAUX_OKAY;
41903 }
41904
41905@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
41906 switch (result) {
41907 /* entry okay as is */
41908 case FSCACHE_CHECKAUX_OKAY:
41909- fscache_stat(&fscache_n_checkaux_okay);
41910+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
41911 break;
41912
41913 /* entry requires update */
41914 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
41915- fscache_stat(&fscache_n_checkaux_update);
41916+ fscache_stat_unchecked(&fscache_n_checkaux_update);
41917 break;
41918
41919 /* entry requires deletion */
41920 case FSCACHE_CHECKAUX_OBSOLETE:
41921- fscache_stat(&fscache_n_checkaux_obsolete);
41922+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
41923 break;
41924
41925 default:
41926diff -urNp linux-2.6.32.45/fs/fscache/operation.c linux-2.6.32.45/fs/fscache/operation.c
41927--- linux-2.6.32.45/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
41928+++ linux-2.6.32.45/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
41929@@ -16,7 +16,7 @@
41930 #include <linux/seq_file.h>
41931 #include "internal.h"
41932
41933-atomic_t fscache_op_debug_id;
41934+atomic_unchecked_t fscache_op_debug_id;
41935 EXPORT_SYMBOL(fscache_op_debug_id);
41936
41937 /**
41938@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
41939 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
41940 ASSERTCMP(atomic_read(&op->usage), >, 0);
41941
41942- fscache_stat(&fscache_n_op_enqueue);
41943+ fscache_stat_unchecked(&fscache_n_op_enqueue);
41944 switch (op->flags & FSCACHE_OP_TYPE) {
41945 case FSCACHE_OP_FAST:
41946 _debug("queue fast");
41947@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
41948 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
41949 if (op->processor)
41950 fscache_enqueue_operation(op);
41951- fscache_stat(&fscache_n_op_run);
41952+ fscache_stat_unchecked(&fscache_n_op_run);
41953 }
41954
41955 /*
41956@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
41957 if (object->n_ops > 0) {
41958 atomic_inc(&op->usage);
41959 list_add_tail(&op->pend_link, &object->pending_ops);
41960- fscache_stat(&fscache_n_op_pend);
41961+ fscache_stat_unchecked(&fscache_n_op_pend);
41962 } else if (!list_empty(&object->pending_ops)) {
41963 atomic_inc(&op->usage);
41964 list_add_tail(&op->pend_link, &object->pending_ops);
41965- fscache_stat(&fscache_n_op_pend);
41966+ fscache_stat_unchecked(&fscache_n_op_pend);
41967 fscache_start_operations(object);
41968 } else {
41969 ASSERTCMP(object->n_in_progress, ==, 0);
41970@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
41971 object->n_exclusive++; /* reads and writes must wait */
41972 atomic_inc(&op->usage);
41973 list_add_tail(&op->pend_link, &object->pending_ops);
41974- fscache_stat(&fscache_n_op_pend);
41975+ fscache_stat_unchecked(&fscache_n_op_pend);
41976 ret = 0;
41977 } else {
41978 /* not allowed to submit ops in any other state */
41979@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
41980 if (object->n_exclusive > 0) {
41981 atomic_inc(&op->usage);
41982 list_add_tail(&op->pend_link, &object->pending_ops);
41983- fscache_stat(&fscache_n_op_pend);
41984+ fscache_stat_unchecked(&fscache_n_op_pend);
41985 } else if (!list_empty(&object->pending_ops)) {
41986 atomic_inc(&op->usage);
41987 list_add_tail(&op->pend_link, &object->pending_ops);
41988- fscache_stat(&fscache_n_op_pend);
41989+ fscache_stat_unchecked(&fscache_n_op_pend);
41990 fscache_start_operations(object);
41991 } else {
41992 ASSERTCMP(object->n_exclusive, ==, 0);
41993@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
41994 object->n_ops++;
41995 atomic_inc(&op->usage);
41996 list_add_tail(&op->pend_link, &object->pending_ops);
41997- fscache_stat(&fscache_n_op_pend);
41998+ fscache_stat_unchecked(&fscache_n_op_pend);
41999 ret = 0;
42000 } else if (object->state == FSCACHE_OBJECT_DYING ||
42001 object->state == FSCACHE_OBJECT_LC_DYING ||
42002 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42003- fscache_stat(&fscache_n_op_rejected);
42004+ fscache_stat_unchecked(&fscache_n_op_rejected);
42005 ret = -ENOBUFS;
42006 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42007 fscache_report_unexpected_submission(object, op, ostate);
42008@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
42009
42010 ret = -EBUSY;
42011 if (!list_empty(&op->pend_link)) {
42012- fscache_stat(&fscache_n_op_cancelled);
42013+ fscache_stat_unchecked(&fscache_n_op_cancelled);
42014 list_del_init(&op->pend_link);
42015 object->n_ops--;
42016 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42017@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
42018 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42019 BUG();
42020
42021- fscache_stat(&fscache_n_op_release);
42022+ fscache_stat_unchecked(&fscache_n_op_release);
42023
42024 if (op->release) {
42025 op->release(op);
42026@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
42027 * lock, and defer it otherwise */
42028 if (!spin_trylock(&object->lock)) {
42029 _debug("defer put");
42030- fscache_stat(&fscache_n_op_deferred_release);
42031+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
42032
42033 cache = object->cache;
42034 spin_lock(&cache->op_gc_list_lock);
42035@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
42036
42037 _debug("GC DEFERRED REL OBJ%x OP%x",
42038 object->debug_id, op->debug_id);
42039- fscache_stat(&fscache_n_op_gc);
42040+ fscache_stat_unchecked(&fscache_n_op_gc);
42041
42042 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42043
42044diff -urNp linux-2.6.32.45/fs/fscache/page.c linux-2.6.32.45/fs/fscache/page.c
42045--- linux-2.6.32.45/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
42046+++ linux-2.6.32.45/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
42047@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
42048 val = radix_tree_lookup(&cookie->stores, page->index);
42049 if (!val) {
42050 rcu_read_unlock();
42051- fscache_stat(&fscache_n_store_vmscan_not_storing);
42052+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42053 __fscache_uncache_page(cookie, page);
42054 return true;
42055 }
42056@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
42057 spin_unlock(&cookie->stores_lock);
42058
42059 if (xpage) {
42060- fscache_stat(&fscache_n_store_vmscan_cancelled);
42061- fscache_stat(&fscache_n_store_radix_deletes);
42062+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42063+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42064 ASSERTCMP(xpage, ==, page);
42065 } else {
42066- fscache_stat(&fscache_n_store_vmscan_gone);
42067+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42068 }
42069
42070 wake_up_bit(&cookie->flags, 0);
42071@@ -106,7 +106,7 @@ page_busy:
42072 /* we might want to wait here, but that could deadlock the allocator as
42073 * the slow-work threads writing to the cache may all end up sleeping
42074 * on memory allocation */
42075- fscache_stat(&fscache_n_store_vmscan_busy);
42076+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42077 return false;
42078 }
42079 EXPORT_SYMBOL(__fscache_maybe_release_page);
42080@@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
42081 FSCACHE_COOKIE_STORING_TAG);
42082 if (!radix_tree_tag_get(&cookie->stores, page->index,
42083 FSCACHE_COOKIE_PENDING_TAG)) {
42084- fscache_stat(&fscache_n_store_radix_deletes);
42085+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42086 xpage = radix_tree_delete(&cookie->stores, page->index);
42087 }
42088 spin_unlock(&cookie->stores_lock);
42089@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
42090
42091 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42092
42093- fscache_stat(&fscache_n_attr_changed_calls);
42094+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42095
42096 if (fscache_object_is_active(object)) {
42097 fscache_set_op_state(op, "CallFS");
42098@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
42099
42100 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42101
42102- fscache_stat(&fscache_n_attr_changed);
42103+ fscache_stat_unchecked(&fscache_n_attr_changed);
42104
42105 op = kzalloc(sizeof(*op), GFP_KERNEL);
42106 if (!op) {
42107- fscache_stat(&fscache_n_attr_changed_nomem);
42108+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42109 _leave(" = -ENOMEM");
42110 return -ENOMEM;
42111 }
42112@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
42113 if (fscache_submit_exclusive_op(object, op) < 0)
42114 goto nobufs;
42115 spin_unlock(&cookie->lock);
42116- fscache_stat(&fscache_n_attr_changed_ok);
42117+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42118 fscache_put_operation(op);
42119 _leave(" = 0");
42120 return 0;
42121@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
42122 nobufs:
42123 spin_unlock(&cookie->lock);
42124 kfree(op);
42125- fscache_stat(&fscache_n_attr_changed_nobufs);
42126+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42127 _leave(" = %d", -ENOBUFS);
42128 return -ENOBUFS;
42129 }
42130@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
42131 /* allocate a retrieval operation and attempt to submit it */
42132 op = kzalloc(sizeof(*op), GFP_NOIO);
42133 if (!op) {
42134- fscache_stat(&fscache_n_retrievals_nomem);
42135+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42136 return NULL;
42137 }
42138
42139@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
42140 return 0;
42141 }
42142
42143- fscache_stat(&fscache_n_retrievals_wait);
42144+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
42145
42146 jif = jiffies;
42147 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42148 fscache_wait_bit_interruptible,
42149 TASK_INTERRUPTIBLE) != 0) {
42150- fscache_stat(&fscache_n_retrievals_intr);
42151+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42152 _leave(" = -ERESTARTSYS");
42153 return -ERESTARTSYS;
42154 }
42155@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
42156 */
42157 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42158 struct fscache_retrieval *op,
42159- atomic_t *stat_op_waits,
42160- atomic_t *stat_object_dead)
42161+ atomic_unchecked_t *stat_op_waits,
42162+ atomic_unchecked_t *stat_object_dead)
42163 {
42164 int ret;
42165
42166@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
42167 goto check_if_dead;
42168
42169 _debug(">>> WT");
42170- fscache_stat(stat_op_waits);
42171+ fscache_stat_unchecked(stat_op_waits);
42172 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42173 fscache_wait_bit_interruptible,
42174 TASK_INTERRUPTIBLE) < 0) {
42175@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
42176
42177 check_if_dead:
42178 if (unlikely(fscache_object_is_dead(object))) {
42179- fscache_stat(stat_object_dead);
42180+ fscache_stat_unchecked(stat_object_dead);
42181 return -ENOBUFS;
42182 }
42183 return 0;
42184@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
42185
42186 _enter("%p,%p,,,", cookie, page);
42187
42188- fscache_stat(&fscache_n_retrievals);
42189+ fscache_stat_unchecked(&fscache_n_retrievals);
42190
42191 if (hlist_empty(&cookie->backing_objects))
42192 goto nobufs;
42193@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
42194 goto nobufs_unlock;
42195 spin_unlock(&cookie->lock);
42196
42197- fscache_stat(&fscache_n_retrieval_ops);
42198+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42199
42200 /* pin the netfs read context in case we need to do the actual netfs
42201 * read because we've encountered a cache read failure */
42202@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
42203
42204 error:
42205 if (ret == -ENOMEM)
42206- fscache_stat(&fscache_n_retrievals_nomem);
42207+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42208 else if (ret == -ERESTARTSYS)
42209- fscache_stat(&fscache_n_retrievals_intr);
42210+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42211 else if (ret == -ENODATA)
42212- fscache_stat(&fscache_n_retrievals_nodata);
42213+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42214 else if (ret < 0)
42215- fscache_stat(&fscache_n_retrievals_nobufs);
42216+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42217 else
42218- fscache_stat(&fscache_n_retrievals_ok);
42219+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42220
42221 fscache_put_retrieval(op);
42222 _leave(" = %d", ret);
42223@@ -453,7 +453,7 @@ nobufs_unlock:
42224 spin_unlock(&cookie->lock);
42225 kfree(op);
42226 nobufs:
42227- fscache_stat(&fscache_n_retrievals_nobufs);
42228+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42229 _leave(" = -ENOBUFS");
42230 return -ENOBUFS;
42231 }
42232@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
42233
42234 _enter("%p,,%d,,,", cookie, *nr_pages);
42235
42236- fscache_stat(&fscache_n_retrievals);
42237+ fscache_stat_unchecked(&fscache_n_retrievals);
42238
42239 if (hlist_empty(&cookie->backing_objects))
42240 goto nobufs;
42241@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
42242 goto nobufs_unlock;
42243 spin_unlock(&cookie->lock);
42244
42245- fscache_stat(&fscache_n_retrieval_ops);
42246+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42247
42248 /* pin the netfs read context in case we need to do the actual netfs
42249 * read because we've encountered a cache read failure */
42250@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
42251
42252 error:
42253 if (ret == -ENOMEM)
42254- fscache_stat(&fscache_n_retrievals_nomem);
42255+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42256 else if (ret == -ERESTARTSYS)
42257- fscache_stat(&fscache_n_retrievals_intr);
42258+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42259 else if (ret == -ENODATA)
42260- fscache_stat(&fscache_n_retrievals_nodata);
42261+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42262 else if (ret < 0)
42263- fscache_stat(&fscache_n_retrievals_nobufs);
42264+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42265 else
42266- fscache_stat(&fscache_n_retrievals_ok);
42267+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42268
42269 fscache_put_retrieval(op);
42270 _leave(" = %d", ret);
42271@@ -570,7 +570,7 @@ nobufs_unlock:
42272 spin_unlock(&cookie->lock);
42273 kfree(op);
42274 nobufs:
42275- fscache_stat(&fscache_n_retrievals_nobufs);
42276+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42277 _leave(" = -ENOBUFS");
42278 return -ENOBUFS;
42279 }
42280@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
42281
42282 _enter("%p,%p,,,", cookie, page);
42283
42284- fscache_stat(&fscache_n_allocs);
42285+ fscache_stat_unchecked(&fscache_n_allocs);
42286
42287 if (hlist_empty(&cookie->backing_objects))
42288 goto nobufs;
42289@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
42290 goto nobufs_unlock;
42291 spin_unlock(&cookie->lock);
42292
42293- fscache_stat(&fscache_n_alloc_ops);
42294+ fscache_stat_unchecked(&fscache_n_alloc_ops);
42295
42296 ret = fscache_wait_for_retrieval_activation(
42297 object, op,
42298@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
42299
42300 error:
42301 if (ret == -ERESTARTSYS)
42302- fscache_stat(&fscache_n_allocs_intr);
42303+ fscache_stat_unchecked(&fscache_n_allocs_intr);
42304 else if (ret < 0)
42305- fscache_stat(&fscache_n_allocs_nobufs);
42306+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42307 else
42308- fscache_stat(&fscache_n_allocs_ok);
42309+ fscache_stat_unchecked(&fscache_n_allocs_ok);
42310
42311 fscache_put_retrieval(op);
42312 _leave(" = %d", ret);
42313@@ -651,7 +651,7 @@ nobufs_unlock:
42314 spin_unlock(&cookie->lock);
42315 kfree(op);
42316 nobufs:
42317- fscache_stat(&fscache_n_allocs_nobufs);
42318+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42319 _leave(" = -ENOBUFS");
42320 return -ENOBUFS;
42321 }
42322@@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
42323
42324 spin_lock(&cookie->stores_lock);
42325
42326- fscache_stat(&fscache_n_store_calls);
42327+ fscache_stat_unchecked(&fscache_n_store_calls);
42328
42329 /* find a page to store */
42330 page = NULL;
42331@@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
42332 page = results[0];
42333 _debug("gang %d [%lx]", n, page->index);
42334 if (page->index > op->store_limit) {
42335- fscache_stat(&fscache_n_store_pages_over_limit);
42336+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
42337 goto superseded;
42338 }
42339
42340@@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
42341
42342 if (page) {
42343 fscache_set_op_state(&op->op, "Store");
42344- fscache_stat(&fscache_n_store_pages);
42345+ fscache_stat_unchecked(&fscache_n_store_pages);
42346 fscache_stat(&fscache_n_cop_write_page);
42347 ret = object->cache->ops->write_page(op, page);
42348 fscache_stat_d(&fscache_n_cop_write_page);
42349@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
42350 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42351 ASSERT(PageFsCache(page));
42352
42353- fscache_stat(&fscache_n_stores);
42354+ fscache_stat_unchecked(&fscache_n_stores);
42355
42356 op = kzalloc(sizeof(*op), GFP_NOIO);
42357 if (!op)
42358@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
42359 spin_unlock(&cookie->stores_lock);
42360 spin_unlock(&object->lock);
42361
42362- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
42363+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
42364 op->store_limit = object->store_limit;
42365
42366 if (fscache_submit_op(object, &op->op) < 0)
42367@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
42368
42369 spin_unlock(&cookie->lock);
42370 radix_tree_preload_end();
42371- fscache_stat(&fscache_n_store_ops);
42372- fscache_stat(&fscache_n_stores_ok);
42373+ fscache_stat_unchecked(&fscache_n_store_ops);
42374+ fscache_stat_unchecked(&fscache_n_stores_ok);
42375
42376 /* the slow work queue now carries its own ref on the object */
42377 fscache_put_operation(&op->op);
42378@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
42379 return 0;
42380
42381 already_queued:
42382- fscache_stat(&fscache_n_stores_again);
42383+ fscache_stat_unchecked(&fscache_n_stores_again);
42384 already_pending:
42385 spin_unlock(&cookie->stores_lock);
42386 spin_unlock(&object->lock);
42387 spin_unlock(&cookie->lock);
42388 radix_tree_preload_end();
42389 kfree(op);
42390- fscache_stat(&fscache_n_stores_ok);
42391+ fscache_stat_unchecked(&fscache_n_stores_ok);
42392 _leave(" = 0");
42393 return 0;
42394
42395@@ -886,14 +886,14 @@ nobufs:
42396 spin_unlock(&cookie->lock);
42397 radix_tree_preload_end();
42398 kfree(op);
42399- fscache_stat(&fscache_n_stores_nobufs);
42400+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
42401 _leave(" = -ENOBUFS");
42402 return -ENOBUFS;
42403
42404 nomem_free:
42405 kfree(op);
42406 nomem:
42407- fscache_stat(&fscache_n_stores_oom);
42408+ fscache_stat_unchecked(&fscache_n_stores_oom);
42409 _leave(" = -ENOMEM");
42410 return -ENOMEM;
42411 }
42412@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
42413 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42414 ASSERTCMP(page, !=, NULL);
42415
42416- fscache_stat(&fscache_n_uncaches);
42417+ fscache_stat_unchecked(&fscache_n_uncaches);
42418
42419 /* cache withdrawal may beat us to it */
42420 if (!PageFsCache(page))
42421@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
42422 unsigned long loop;
42423
42424 #ifdef CONFIG_FSCACHE_STATS
42425- atomic_add(pagevec->nr, &fscache_n_marks);
42426+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
42427 #endif
42428
42429 for (loop = 0; loop < pagevec->nr; loop++) {
42430diff -urNp linux-2.6.32.45/fs/fscache/stats.c linux-2.6.32.45/fs/fscache/stats.c
42431--- linux-2.6.32.45/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
42432+++ linux-2.6.32.45/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
42433@@ -18,95 +18,95 @@
42434 /*
42435 * operation counters
42436 */
42437-atomic_t fscache_n_op_pend;
42438-atomic_t fscache_n_op_run;
42439-atomic_t fscache_n_op_enqueue;
42440-atomic_t fscache_n_op_requeue;
42441-atomic_t fscache_n_op_deferred_release;
42442-atomic_t fscache_n_op_release;
42443-atomic_t fscache_n_op_gc;
42444-atomic_t fscache_n_op_cancelled;
42445-atomic_t fscache_n_op_rejected;
42446-
42447-atomic_t fscache_n_attr_changed;
42448-atomic_t fscache_n_attr_changed_ok;
42449-atomic_t fscache_n_attr_changed_nobufs;
42450-atomic_t fscache_n_attr_changed_nomem;
42451-atomic_t fscache_n_attr_changed_calls;
42452-
42453-atomic_t fscache_n_allocs;
42454-atomic_t fscache_n_allocs_ok;
42455-atomic_t fscache_n_allocs_wait;
42456-atomic_t fscache_n_allocs_nobufs;
42457-atomic_t fscache_n_allocs_intr;
42458-atomic_t fscache_n_allocs_object_dead;
42459-atomic_t fscache_n_alloc_ops;
42460-atomic_t fscache_n_alloc_op_waits;
42461-
42462-atomic_t fscache_n_retrievals;
42463-atomic_t fscache_n_retrievals_ok;
42464-atomic_t fscache_n_retrievals_wait;
42465-atomic_t fscache_n_retrievals_nodata;
42466-atomic_t fscache_n_retrievals_nobufs;
42467-atomic_t fscache_n_retrievals_intr;
42468-atomic_t fscache_n_retrievals_nomem;
42469-atomic_t fscache_n_retrievals_object_dead;
42470-atomic_t fscache_n_retrieval_ops;
42471-atomic_t fscache_n_retrieval_op_waits;
42472-
42473-atomic_t fscache_n_stores;
42474-atomic_t fscache_n_stores_ok;
42475-atomic_t fscache_n_stores_again;
42476-atomic_t fscache_n_stores_nobufs;
42477-atomic_t fscache_n_stores_oom;
42478-atomic_t fscache_n_store_ops;
42479-atomic_t fscache_n_store_calls;
42480-atomic_t fscache_n_store_pages;
42481-atomic_t fscache_n_store_radix_deletes;
42482-atomic_t fscache_n_store_pages_over_limit;
42483-
42484-atomic_t fscache_n_store_vmscan_not_storing;
42485-atomic_t fscache_n_store_vmscan_gone;
42486-atomic_t fscache_n_store_vmscan_busy;
42487-atomic_t fscache_n_store_vmscan_cancelled;
42488-
42489-atomic_t fscache_n_marks;
42490-atomic_t fscache_n_uncaches;
42491-
42492-atomic_t fscache_n_acquires;
42493-atomic_t fscache_n_acquires_null;
42494-atomic_t fscache_n_acquires_no_cache;
42495-atomic_t fscache_n_acquires_ok;
42496-atomic_t fscache_n_acquires_nobufs;
42497-atomic_t fscache_n_acquires_oom;
42498-
42499-atomic_t fscache_n_updates;
42500-atomic_t fscache_n_updates_null;
42501-atomic_t fscache_n_updates_run;
42502-
42503-atomic_t fscache_n_relinquishes;
42504-atomic_t fscache_n_relinquishes_null;
42505-atomic_t fscache_n_relinquishes_waitcrt;
42506-atomic_t fscache_n_relinquishes_retire;
42507-
42508-atomic_t fscache_n_cookie_index;
42509-atomic_t fscache_n_cookie_data;
42510-atomic_t fscache_n_cookie_special;
42511-
42512-atomic_t fscache_n_object_alloc;
42513-atomic_t fscache_n_object_no_alloc;
42514-atomic_t fscache_n_object_lookups;
42515-atomic_t fscache_n_object_lookups_negative;
42516-atomic_t fscache_n_object_lookups_positive;
42517-atomic_t fscache_n_object_lookups_timed_out;
42518-atomic_t fscache_n_object_created;
42519-atomic_t fscache_n_object_avail;
42520-atomic_t fscache_n_object_dead;
42521-
42522-atomic_t fscache_n_checkaux_none;
42523-atomic_t fscache_n_checkaux_okay;
42524-atomic_t fscache_n_checkaux_update;
42525-atomic_t fscache_n_checkaux_obsolete;
42526+atomic_unchecked_t fscache_n_op_pend;
42527+atomic_unchecked_t fscache_n_op_run;
42528+atomic_unchecked_t fscache_n_op_enqueue;
42529+atomic_unchecked_t fscache_n_op_requeue;
42530+atomic_unchecked_t fscache_n_op_deferred_release;
42531+atomic_unchecked_t fscache_n_op_release;
42532+atomic_unchecked_t fscache_n_op_gc;
42533+atomic_unchecked_t fscache_n_op_cancelled;
42534+atomic_unchecked_t fscache_n_op_rejected;
42535+
42536+atomic_unchecked_t fscache_n_attr_changed;
42537+atomic_unchecked_t fscache_n_attr_changed_ok;
42538+atomic_unchecked_t fscache_n_attr_changed_nobufs;
42539+atomic_unchecked_t fscache_n_attr_changed_nomem;
42540+atomic_unchecked_t fscache_n_attr_changed_calls;
42541+
42542+atomic_unchecked_t fscache_n_allocs;
42543+atomic_unchecked_t fscache_n_allocs_ok;
42544+atomic_unchecked_t fscache_n_allocs_wait;
42545+atomic_unchecked_t fscache_n_allocs_nobufs;
42546+atomic_unchecked_t fscache_n_allocs_intr;
42547+atomic_unchecked_t fscache_n_allocs_object_dead;
42548+atomic_unchecked_t fscache_n_alloc_ops;
42549+atomic_unchecked_t fscache_n_alloc_op_waits;
42550+
42551+atomic_unchecked_t fscache_n_retrievals;
42552+atomic_unchecked_t fscache_n_retrievals_ok;
42553+atomic_unchecked_t fscache_n_retrievals_wait;
42554+atomic_unchecked_t fscache_n_retrievals_nodata;
42555+atomic_unchecked_t fscache_n_retrievals_nobufs;
42556+atomic_unchecked_t fscache_n_retrievals_intr;
42557+atomic_unchecked_t fscache_n_retrievals_nomem;
42558+atomic_unchecked_t fscache_n_retrievals_object_dead;
42559+atomic_unchecked_t fscache_n_retrieval_ops;
42560+atomic_unchecked_t fscache_n_retrieval_op_waits;
42561+
42562+atomic_unchecked_t fscache_n_stores;
42563+atomic_unchecked_t fscache_n_stores_ok;
42564+atomic_unchecked_t fscache_n_stores_again;
42565+atomic_unchecked_t fscache_n_stores_nobufs;
42566+atomic_unchecked_t fscache_n_stores_oom;
42567+atomic_unchecked_t fscache_n_store_ops;
42568+atomic_unchecked_t fscache_n_store_calls;
42569+atomic_unchecked_t fscache_n_store_pages;
42570+atomic_unchecked_t fscache_n_store_radix_deletes;
42571+atomic_unchecked_t fscache_n_store_pages_over_limit;
42572+
42573+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42574+atomic_unchecked_t fscache_n_store_vmscan_gone;
42575+atomic_unchecked_t fscache_n_store_vmscan_busy;
42576+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42577+
42578+atomic_unchecked_t fscache_n_marks;
42579+atomic_unchecked_t fscache_n_uncaches;
42580+
42581+atomic_unchecked_t fscache_n_acquires;
42582+atomic_unchecked_t fscache_n_acquires_null;
42583+atomic_unchecked_t fscache_n_acquires_no_cache;
42584+atomic_unchecked_t fscache_n_acquires_ok;
42585+atomic_unchecked_t fscache_n_acquires_nobufs;
42586+atomic_unchecked_t fscache_n_acquires_oom;
42587+
42588+atomic_unchecked_t fscache_n_updates;
42589+atomic_unchecked_t fscache_n_updates_null;
42590+atomic_unchecked_t fscache_n_updates_run;
42591+
42592+atomic_unchecked_t fscache_n_relinquishes;
42593+atomic_unchecked_t fscache_n_relinquishes_null;
42594+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42595+atomic_unchecked_t fscache_n_relinquishes_retire;
42596+
42597+atomic_unchecked_t fscache_n_cookie_index;
42598+atomic_unchecked_t fscache_n_cookie_data;
42599+atomic_unchecked_t fscache_n_cookie_special;
42600+
42601+atomic_unchecked_t fscache_n_object_alloc;
42602+atomic_unchecked_t fscache_n_object_no_alloc;
42603+atomic_unchecked_t fscache_n_object_lookups;
42604+atomic_unchecked_t fscache_n_object_lookups_negative;
42605+atomic_unchecked_t fscache_n_object_lookups_positive;
42606+atomic_unchecked_t fscache_n_object_lookups_timed_out;
42607+atomic_unchecked_t fscache_n_object_created;
42608+atomic_unchecked_t fscache_n_object_avail;
42609+atomic_unchecked_t fscache_n_object_dead;
42610+
42611+atomic_unchecked_t fscache_n_checkaux_none;
42612+atomic_unchecked_t fscache_n_checkaux_okay;
42613+atomic_unchecked_t fscache_n_checkaux_update;
42614+atomic_unchecked_t fscache_n_checkaux_obsolete;
42615
42616 atomic_t fscache_n_cop_alloc_object;
42617 atomic_t fscache_n_cop_lookup_object;
42618@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
42619 seq_puts(m, "FS-Cache statistics\n");
42620
42621 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
42622- atomic_read(&fscache_n_cookie_index),
42623- atomic_read(&fscache_n_cookie_data),
42624- atomic_read(&fscache_n_cookie_special));
42625+ atomic_read_unchecked(&fscache_n_cookie_index),
42626+ atomic_read_unchecked(&fscache_n_cookie_data),
42627+ atomic_read_unchecked(&fscache_n_cookie_special));
42628
42629 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
42630- atomic_read(&fscache_n_object_alloc),
42631- atomic_read(&fscache_n_object_no_alloc),
42632- atomic_read(&fscache_n_object_avail),
42633- atomic_read(&fscache_n_object_dead));
42634+ atomic_read_unchecked(&fscache_n_object_alloc),
42635+ atomic_read_unchecked(&fscache_n_object_no_alloc),
42636+ atomic_read_unchecked(&fscache_n_object_avail),
42637+ atomic_read_unchecked(&fscache_n_object_dead));
42638 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
42639- atomic_read(&fscache_n_checkaux_none),
42640- atomic_read(&fscache_n_checkaux_okay),
42641- atomic_read(&fscache_n_checkaux_update),
42642- atomic_read(&fscache_n_checkaux_obsolete));
42643+ atomic_read_unchecked(&fscache_n_checkaux_none),
42644+ atomic_read_unchecked(&fscache_n_checkaux_okay),
42645+ atomic_read_unchecked(&fscache_n_checkaux_update),
42646+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
42647
42648 seq_printf(m, "Pages : mrk=%u unc=%u\n",
42649- atomic_read(&fscache_n_marks),
42650- atomic_read(&fscache_n_uncaches));
42651+ atomic_read_unchecked(&fscache_n_marks),
42652+ atomic_read_unchecked(&fscache_n_uncaches));
42653
42654 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
42655 " oom=%u\n",
42656- atomic_read(&fscache_n_acquires),
42657- atomic_read(&fscache_n_acquires_null),
42658- atomic_read(&fscache_n_acquires_no_cache),
42659- atomic_read(&fscache_n_acquires_ok),
42660- atomic_read(&fscache_n_acquires_nobufs),
42661- atomic_read(&fscache_n_acquires_oom));
42662+ atomic_read_unchecked(&fscache_n_acquires),
42663+ atomic_read_unchecked(&fscache_n_acquires_null),
42664+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
42665+ atomic_read_unchecked(&fscache_n_acquires_ok),
42666+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
42667+ atomic_read_unchecked(&fscache_n_acquires_oom));
42668
42669 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
42670- atomic_read(&fscache_n_object_lookups),
42671- atomic_read(&fscache_n_object_lookups_negative),
42672- atomic_read(&fscache_n_object_lookups_positive),
42673- atomic_read(&fscache_n_object_lookups_timed_out),
42674- atomic_read(&fscache_n_object_created));
42675+ atomic_read_unchecked(&fscache_n_object_lookups),
42676+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
42677+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
42678+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
42679+ atomic_read_unchecked(&fscache_n_object_created));
42680
42681 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
42682- atomic_read(&fscache_n_updates),
42683- atomic_read(&fscache_n_updates_null),
42684- atomic_read(&fscache_n_updates_run));
42685+ atomic_read_unchecked(&fscache_n_updates),
42686+ atomic_read_unchecked(&fscache_n_updates_null),
42687+ atomic_read_unchecked(&fscache_n_updates_run));
42688
42689 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
42690- atomic_read(&fscache_n_relinquishes),
42691- atomic_read(&fscache_n_relinquishes_null),
42692- atomic_read(&fscache_n_relinquishes_waitcrt),
42693- atomic_read(&fscache_n_relinquishes_retire));
42694+ atomic_read_unchecked(&fscache_n_relinquishes),
42695+ atomic_read_unchecked(&fscache_n_relinquishes_null),
42696+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
42697+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
42698
42699 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
42700- atomic_read(&fscache_n_attr_changed),
42701- atomic_read(&fscache_n_attr_changed_ok),
42702- atomic_read(&fscache_n_attr_changed_nobufs),
42703- atomic_read(&fscache_n_attr_changed_nomem),
42704- atomic_read(&fscache_n_attr_changed_calls));
42705+ atomic_read_unchecked(&fscache_n_attr_changed),
42706+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
42707+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
42708+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
42709+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
42710
42711 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
42712- atomic_read(&fscache_n_allocs),
42713- atomic_read(&fscache_n_allocs_ok),
42714- atomic_read(&fscache_n_allocs_wait),
42715- atomic_read(&fscache_n_allocs_nobufs),
42716- atomic_read(&fscache_n_allocs_intr));
42717+ atomic_read_unchecked(&fscache_n_allocs),
42718+ atomic_read_unchecked(&fscache_n_allocs_ok),
42719+ atomic_read_unchecked(&fscache_n_allocs_wait),
42720+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
42721+ atomic_read_unchecked(&fscache_n_allocs_intr));
42722 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
42723- atomic_read(&fscache_n_alloc_ops),
42724- atomic_read(&fscache_n_alloc_op_waits),
42725- atomic_read(&fscache_n_allocs_object_dead));
42726+ atomic_read_unchecked(&fscache_n_alloc_ops),
42727+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
42728+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
42729
42730 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
42731 " int=%u oom=%u\n",
42732- atomic_read(&fscache_n_retrievals),
42733- atomic_read(&fscache_n_retrievals_ok),
42734- atomic_read(&fscache_n_retrievals_wait),
42735- atomic_read(&fscache_n_retrievals_nodata),
42736- atomic_read(&fscache_n_retrievals_nobufs),
42737- atomic_read(&fscache_n_retrievals_intr),
42738- atomic_read(&fscache_n_retrievals_nomem));
42739+ atomic_read_unchecked(&fscache_n_retrievals),
42740+ atomic_read_unchecked(&fscache_n_retrievals_ok),
42741+ atomic_read_unchecked(&fscache_n_retrievals_wait),
42742+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
42743+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
42744+ atomic_read_unchecked(&fscache_n_retrievals_intr),
42745+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
42746 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
42747- atomic_read(&fscache_n_retrieval_ops),
42748- atomic_read(&fscache_n_retrieval_op_waits),
42749- atomic_read(&fscache_n_retrievals_object_dead));
42750+ atomic_read_unchecked(&fscache_n_retrieval_ops),
42751+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
42752+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
42753
42754 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
42755- atomic_read(&fscache_n_stores),
42756- atomic_read(&fscache_n_stores_ok),
42757- atomic_read(&fscache_n_stores_again),
42758- atomic_read(&fscache_n_stores_nobufs),
42759- atomic_read(&fscache_n_stores_oom));
42760+ atomic_read_unchecked(&fscache_n_stores),
42761+ atomic_read_unchecked(&fscache_n_stores_ok),
42762+ atomic_read_unchecked(&fscache_n_stores_again),
42763+ atomic_read_unchecked(&fscache_n_stores_nobufs),
42764+ atomic_read_unchecked(&fscache_n_stores_oom));
42765 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
42766- atomic_read(&fscache_n_store_ops),
42767- atomic_read(&fscache_n_store_calls),
42768- atomic_read(&fscache_n_store_pages),
42769- atomic_read(&fscache_n_store_radix_deletes),
42770- atomic_read(&fscache_n_store_pages_over_limit));
42771+ atomic_read_unchecked(&fscache_n_store_ops),
42772+ atomic_read_unchecked(&fscache_n_store_calls),
42773+ atomic_read_unchecked(&fscache_n_store_pages),
42774+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
42775+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
42776
42777 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
42778- atomic_read(&fscache_n_store_vmscan_not_storing),
42779- atomic_read(&fscache_n_store_vmscan_gone),
42780- atomic_read(&fscache_n_store_vmscan_busy),
42781- atomic_read(&fscache_n_store_vmscan_cancelled));
42782+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
42783+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
42784+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
42785+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
42786
42787 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
42788- atomic_read(&fscache_n_op_pend),
42789- atomic_read(&fscache_n_op_run),
42790- atomic_read(&fscache_n_op_enqueue),
42791- atomic_read(&fscache_n_op_cancelled),
42792- atomic_read(&fscache_n_op_rejected));
42793+ atomic_read_unchecked(&fscache_n_op_pend),
42794+ atomic_read_unchecked(&fscache_n_op_run),
42795+ atomic_read_unchecked(&fscache_n_op_enqueue),
42796+ atomic_read_unchecked(&fscache_n_op_cancelled),
42797+ atomic_read_unchecked(&fscache_n_op_rejected));
42798 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
42799- atomic_read(&fscache_n_op_deferred_release),
42800- atomic_read(&fscache_n_op_release),
42801- atomic_read(&fscache_n_op_gc));
42802+ atomic_read_unchecked(&fscache_n_op_deferred_release),
42803+ atomic_read_unchecked(&fscache_n_op_release),
42804+ atomic_read_unchecked(&fscache_n_op_gc));
42805
42806 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
42807 atomic_read(&fscache_n_cop_alloc_object),
42808diff -urNp linux-2.6.32.45/fs/fs_struct.c linux-2.6.32.45/fs/fs_struct.c
42809--- linux-2.6.32.45/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
42810+++ linux-2.6.32.45/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
42811@@ -4,6 +4,7 @@
42812 #include <linux/path.h>
42813 #include <linux/slab.h>
42814 #include <linux/fs_struct.h>
42815+#include <linux/grsecurity.h>
42816
42817 /*
42818 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
42819@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
42820 old_root = fs->root;
42821 fs->root = *path;
42822 path_get(path);
42823+ gr_set_chroot_entries(current, path);
42824 write_unlock(&fs->lock);
42825 if (old_root.dentry)
42826 path_put(&old_root);
42827@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
42828 && fs->root.mnt == old_root->mnt) {
42829 path_get(new_root);
42830 fs->root = *new_root;
42831+ gr_set_chroot_entries(p, new_root);
42832 count++;
42833 }
42834 if (fs->pwd.dentry == old_root->dentry
42835@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
42836 task_lock(tsk);
42837 write_lock(&fs->lock);
42838 tsk->fs = NULL;
42839- kill = !--fs->users;
42840+ gr_clear_chroot_entries(tsk);
42841+ kill = !atomic_dec_return(&fs->users);
42842 write_unlock(&fs->lock);
42843 task_unlock(tsk);
42844 if (kill)
42845@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
42846 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42847 /* We don't need to lock fs - think why ;-) */
42848 if (fs) {
42849- fs->users = 1;
42850+ atomic_set(&fs->users, 1);
42851 fs->in_exec = 0;
42852 rwlock_init(&fs->lock);
42853 fs->umask = old->umask;
42854@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
42855
42856 task_lock(current);
42857 write_lock(&fs->lock);
42858- kill = !--fs->users;
42859+ kill = !atomic_dec_return(&fs->users);
42860 current->fs = new_fs;
42861+ gr_set_chroot_entries(current, &new_fs->root);
42862 write_unlock(&fs->lock);
42863 task_unlock(current);
42864
42865@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
42866
42867 /* to be mentioned only in INIT_TASK */
42868 struct fs_struct init_fs = {
42869- .users = 1,
42870+ .users = ATOMIC_INIT(1),
42871 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
42872 .umask = 0022,
42873 };
42874@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
42875 task_lock(current);
42876
42877 write_lock(&init_fs.lock);
42878- init_fs.users++;
42879+ atomic_inc(&init_fs.users);
42880 write_unlock(&init_fs.lock);
42881
42882 write_lock(&fs->lock);
42883 current->fs = &init_fs;
42884- kill = !--fs->users;
42885+ gr_set_chroot_entries(current, &current->fs->root);
42886+ kill = !atomic_dec_return(&fs->users);
42887 write_unlock(&fs->lock);
42888
42889 task_unlock(current);
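
The fs_struct.c hunk changes fs->users from a plain integer manipulated under fs->lock into an atomic counter (note the matching .users = ATOMIC_INIT(1) for init_fs) while the grsecurity chroot bookkeeping hooks are added; the "free on last put" logic stays the same, only expressed with atomic_dec_return(). A user-space analogue of that reference-drop pattern, using C11 stdatomic instead of the kernel's atomic_t - the struct and function names below are invented for illustration:

#include <stdatomic.h>
#include <stdlib.h>

struct fs_ref_sketch {
	atomic_int users;   /* plays the role of fs_struct.users after the patch */
};

static void fs_ref_put_sketch(struct fs_ref_sketch *fs)
{
	/* atomic_fetch_sub returns the previous value, so old == 1 means this
	 * caller dropped the last reference - same test as !atomic_dec_return() */
	if (atomic_fetch_sub(&fs->users, 1) == 1)
		free(fs);
}
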
42890diff -urNp linux-2.6.32.45/fs/fuse/cuse.c linux-2.6.32.45/fs/fuse/cuse.c
42891--- linux-2.6.32.45/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
42892+++ linux-2.6.32.45/fs/fuse/cuse.c 2011-08-05 20:33:55.000000000 -0400
42893@@ -576,10 +576,12 @@ static int __init cuse_init(void)
42894 INIT_LIST_HEAD(&cuse_conntbl[i]);
42895
42896 /* inherit and extend fuse_dev_operations */
42897- cuse_channel_fops = fuse_dev_operations;
42898- cuse_channel_fops.owner = THIS_MODULE;
42899- cuse_channel_fops.open = cuse_channel_open;
42900- cuse_channel_fops.release = cuse_channel_release;
42901+ pax_open_kernel();
42902+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
42903+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
42904+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
42905+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
42906+ pax_close_kernel();
42907
42908 cuse_class = class_create(THIS_MODULE, "cuse");
42909 if (IS_ERR(cuse_class))
42910diff -urNp linux-2.6.32.45/fs/fuse/dev.c linux-2.6.32.45/fs/fuse/dev.c
42911--- linux-2.6.32.45/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
42912+++ linux-2.6.32.45/fs/fuse/dev.c 2011-08-05 20:33:55.000000000 -0400
42913@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
42914 {
42915 struct fuse_notify_inval_entry_out outarg;
42916 int err = -EINVAL;
42917- char buf[FUSE_NAME_MAX+1];
42918+ char *buf = NULL;
42919 struct qstr name;
42920
42921 if (size < sizeof(outarg))
42922@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
42923 if (outarg.namelen > FUSE_NAME_MAX)
42924 goto err;
42925
42926+ err = -ENOMEM;
42927+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
42928+ if (!buf)
42929+ goto err;
42930+
42931 name.name = buf;
42932 name.len = outarg.namelen;
42933 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
42934@@ -910,17 +915,15 @@ static int fuse_notify_inval_entry(struc
42935
42936 down_read(&fc->killsb);
42937 err = -ENOENT;
42938- if (!fc->sb)
42939- goto err_unlock;
42940-
42941- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
42942-
42943-err_unlock:
42944+ if (fc->sb)
42945+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
42946 up_read(&fc->killsb);
42947+ kfree(buf);
42948 return err;
42949
42950 err:
42951 fuse_copy_finish(cs);
42952+ kfree(buf);
42953 return err;
42954 }
42955
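
The fuse/dev.c hunk above replaces a FUSE_NAME_MAX+1 byte name buffer declared on the kernel stack with a kmalloc'd buffer that is freed on every exit path; roughly a kilobyte of stack per notification is a lot in kernel context. Below is a condensed sketch of the same move in plain C, with malloc/free standing in for kmalloc/kfree; the function name and the constant are invented for illustration:

#include <stdlib.h>
#include <string.h>

#define NAME_MAX_SKETCH 1024   /* plays the role of FUSE_NAME_MAX */

static int copy_name_sketch(const char *src, size_t len, char **out)
{
	char *buf;

	if (len > NAME_MAX_SKETCH)
		return -1;

	/* was: char buf[NAME_MAX_SKETCH + 1]; on the stack */
	buf = malloc(NAME_MAX_SKETCH + 1);
	if (!buf)
		return -1;

	memcpy(buf, src, len);
	buf[len] = '\0';
	*out = buf;   /* the caller must free() on every path, as the patched code does */
	return 0;
}
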
42956diff -urNp linux-2.6.32.45/fs/fuse/dir.c linux-2.6.32.45/fs/fuse/dir.c
42957--- linux-2.6.32.45/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
42958+++ linux-2.6.32.45/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
42959@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
42960 return link;
42961 }
42962
42963-static void free_link(char *link)
42964+static void free_link(const char *link)
42965 {
42966 if (!IS_ERR(link))
42967 free_page((unsigned long) link);
42968diff -urNp linux-2.6.32.45/fs/gfs2/ops_inode.c linux-2.6.32.45/fs/gfs2/ops_inode.c
42969--- linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
42970+++ linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
42971@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
42972 unsigned int x;
42973 int error;
42974
42975+ pax_track_stack();
42976+
42977 if (ndentry->d_inode) {
42978 nip = GFS2_I(ndentry->d_inode);
42979 if (ip == nip)
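
From this point on, the patch inserts pax_track_stack() at the top of filesystem functions that declare large local objects (the gfs2 rename path, the hfsplus hfs_find_data users, jbd checkpointing, the jffs2 rtime compressor), apparently so PaX's stack usage tracking sees frames with big locals; the real pax_track_stack() is provided elsewhere by the patch. The sketch below only shows the shape of a function that would receive such an annotation, with a no-op stub so it compiles on its own; the struct and function are invented for illustration:

#ifndef pax_track_stack
#define pax_track_stack() do { } while (0)   /* stub; the real hook comes from the PaX side */
#endif

struct big_find_data_sketch {
	char search_key[512];
	char record[512];
};

static int lookup_record_sketch(int id)
{
	struct big_find_data_sketch fd;   /* ~1 KiB frame - the kind of function being annotated */

	pax_track_stack();

	fd.search_key[0] = (char)id;      /* placeholder work so fd is actually used */
	fd.record[0] = fd.search_key[0];
	return fd.record[0] == (char)id ? 0 : -1;
}
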
42980diff -urNp linux-2.6.32.45/fs/gfs2/sys.c linux-2.6.32.45/fs/gfs2/sys.c
42981--- linux-2.6.32.45/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
42982+++ linux-2.6.32.45/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
42983@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
42984 return a->store ? a->store(sdp, buf, len) : len;
42985 }
42986
42987-static struct sysfs_ops gfs2_attr_ops = {
42988+static const struct sysfs_ops gfs2_attr_ops = {
42989 .show = gfs2_attr_show,
42990 .store = gfs2_attr_store,
42991 };
42992@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
42993 return 0;
42994 }
42995
42996-static struct kset_uevent_ops gfs2_uevent_ops = {
42997+static const struct kset_uevent_ops gfs2_uevent_ops = {
42998 .uevent = gfs2_uevent,
42999 };
43000
43001diff -urNp linux-2.6.32.45/fs/hfsplus/catalog.c linux-2.6.32.45/fs/hfsplus/catalog.c
43002--- linux-2.6.32.45/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
43003+++ linux-2.6.32.45/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
43004@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
43005 int err;
43006 u16 type;
43007
43008+ pax_track_stack();
43009+
43010 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43011 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43012 if (err)
43013@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
43014 int entry_size;
43015 int err;
43016
43017+ pax_track_stack();
43018+
43019 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
43020 sb = dir->i_sb;
43021 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
43022@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
43023 int entry_size, type;
43024 int err = 0;
43025
43026+ pax_track_stack();
43027+
43028 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
43029 dst_dir->i_ino, dst_name->name);
43030 sb = src_dir->i_sb;
43031diff -urNp linux-2.6.32.45/fs/hfsplus/dir.c linux-2.6.32.45/fs/hfsplus/dir.c
43032--- linux-2.6.32.45/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
43033+++ linux-2.6.32.45/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
43034@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
43035 struct hfsplus_readdir_data *rd;
43036 u16 type;
43037
43038+ pax_track_stack();
43039+
43040 if (filp->f_pos >= inode->i_size)
43041 return 0;
43042
43043diff -urNp linux-2.6.32.45/fs/hfsplus/inode.c linux-2.6.32.45/fs/hfsplus/inode.c
43044--- linux-2.6.32.45/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
43045+++ linux-2.6.32.45/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
43046@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
43047 int res = 0;
43048 u16 type;
43049
43050+ pax_track_stack();
43051+
43052 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43053
43054 HFSPLUS_I(inode).dev = 0;
43055@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
43056 struct hfs_find_data fd;
43057 hfsplus_cat_entry entry;
43058
43059+ pax_track_stack();
43060+
43061 if (HFSPLUS_IS_RSRC(inode))
43062 main_inode = HFSPLUS_I(inode).rsrc_inode;
43063
43064diff -urNp linux-2.6.32.45/fs/hfsplus/ioctl.c linux-2.6.32.45/fs/hfsplus/ioctl.c
43065--- linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43066+++ linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
43067@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
43068 struct hfsplus_cat_file *file;
43069 int res;
43070
43071+ pax_track_stack();
43072+
43073 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43074 return -EOPNOTSUPP;
43075
43076@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43077 struct hfsplus_cat_file *file;
43078 ssize_t res = 0;
43079
43080+ pax_track_stack();
43081+
43082 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43083 return -EOPNOTSUPP;
43084
43085diff -urNp linux-2.6.32.45/fs/hfsplus/super.c linux-2.6.32.45/fs/hfsplus/super.c
43086--- linux-2.6.32.45/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
43087+++ linux-2.6.32.45/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
43088@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
43089 struct nls_table *nls = NULL;
43090 int err = -EINVAL;
43091
43092+ pax_track_stack();
43093+
43094 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43095 if (!sbi)
43096 return -ENOMEM;
43097diff -urNp linux-2.6.32.45/fs/hugetlbfs/inode.c linux-2.6.32.45/fs/hugetlbfs/inode.c
43098--- linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43099+++ linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
43100@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
43101 .kill_sb = kill_litter_super,
43102 };
43103
43104-static struct vfsmount *hugetlbfs_vfsmount;
43105+struct vfsmount *hugetlbfs_vfsmount;
43106
43107 static int can_do_hugetlb_shm(void)
43108 {
43109diff -urNp linux-2.6.32.45/fs/ioctl.c linux-2.6.32.45/fs/ioctl.c
43110--- linux-2.6.32.45/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43111+++ linux-2.6.32.45/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
43112@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
43113 u64 phys, u64 len, u32 flags)
43114 {
43115 struct fiemap_extent extent;
43116- struct fiemap_extent *dest = fieinfo->fi_extents_start;
43117+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
43118
43119 /* only count the extents */
43120 if (fieinfo->fi_extents_max == 0) {
43121@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
43122
43123 fieinfo.fi_flags = fiemap.fm_flags;
43124 fieinfo.fi_extents_max = fiemap.fm_extent_count;
43125- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
43126+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
43127
43128 if (fiemap.fm_extent_count != 0 &&
43129 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
43130@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
43131 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
43132 fiemap.fm_flags = fieinfo.fi_flags;
43133 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
43134- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
43135+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
43136 error = -EFAULT;
43137
43138 return error;
43139diff -urNp linux-2.6.32.45/fs/jbd/checkpoint.c linux-2.6.32.45/fs/jbd/checkpoint.c
43140--- linux-2.6.32.45/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
43141+++ linux-2.6.32.45/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
43142@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
43143 tid_t this_tid;
43144 int result;
43145
43146+ pax_track_stack();
43147+
43148 jbd_debug(1, "Start checkpoint\n");
43149
43150 /*
43151diff -urNp linux-2.6.32.45/fs/jffs2/compr_rtime.c linux-2.6.32.45/fs/jffs2/compr_rtime.c
43152--- linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
43153+++ linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
43154@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43155 int outpos = 0;
43156 int pos=0;
43157
43158+ pax_track_stack();
43159+
43160 memset(positions,0,sizeof(positions));
43161
43162 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43163@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
43164 int outpos = 0;
43165 int pos=0;
43166
43167+ pax_track_stack();
43168+
43169 memset(positions,0,sizeof(positions));
43170
43171 while (outpos<destlen) {
43172diff -urNp linux-2.6.32.45/fs/jffs2/compr_rubin.c linux-2.6.32.45/fs/jffs2/compr_rubin.c
43173--- linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
43174+++ linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
43175@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43176 int ret;
43177 uint32_t mysrclen, mydstlen;
43178
43179+ pax_track_stack();
43180+
43181 mysrclen = *sourcelen;
43182 mydstlen = *dstlen - 8;
43183
43184diff -urNp linux-2.6.32.45/fs/jffs2/erase.c linux-2.6.32.45/fs/jffs2/erase.c
43185--- linux-2.6.32.45/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
43186+++ linux-2.6.32.45/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
43187@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
43188 struct jffs2_unknown_node marker = {
43189 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43190 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43191- .totlen = cpu_to_je32(c->cleanmarker_size)
43192+ .totlen = cpu_to_je32(c->cleanmarker_size),
43193+ .hdr_crc = cpu_to_je32(0)
43194 };
43195
43196 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43197diff -urNp linux-2.6.32.45/fs/jffs2/wbuf.c linux-2.6.32.45/fs/jffs2/wbuf.c
43198--- linux-2.6.32.45/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
43199+++ linux-2.6.32.45/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
43200@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43201 {
43202 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43203 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43204- .totlen = constant_cpu_to_je32(8)
43205+ .totlen = constant_cpu_to_je32(8),
43206+ .hdr_crc = constant_cpu_to_je32(0)
43207 };
43208
43209 /*
43210diff -urNp linux-2.6.32.45/fs/jffs2/xattr.c linux-2.6.32.45/fs/jffs2/xattr.c
43211--- linux-2.6.32.45/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
43212+++ linux-2.6.32.45/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
43213@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43214
43215 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43216
43217+ pax_track_stack();
43218+
43219 /* Phase.1 : Merge same xref */
43220 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43221 xref_tmphash[i] = NULL;
43222diff -urNp linux-2.6.32.45/fs/jfs/super.c linux-2.6.32.45/fs/jfs/super.c
43223--- linux-2.6.32.45/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
43224+++ linux-2.6.32.45/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
43225@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
43226
43227 jfs_inode_cachep =
43228 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43229- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43230+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43231 init_once);
43232 if (jfs_inode_cachep == NULL)
43233 return -ENOMEM;
43234diff -urNp linux-2.6.32.45/fs/Kconfig.binfmt linux-2.6.32.45/fs/Kconfig.binfmt
43235--- linux-2.6.32.45/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
43236+++ linux-2.6.32.45/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
43237@@ -86,7 +86,7 @@ config HAVE_AOUT
43238
43239 config BINFMT_AOUT
43240 tristate "Kernel support for a.out and ECOFF binaries"
43241- depends on HAVE_AOUT
43242+ depends on HAVE_AOUT && BROKEN
43243 ---help---
43244 A.out (Assembler.OUTput) is a set of formats for libraries and
43245 executables used in the earliest versions of UNIX. Linux used
43246diff -urNp linux-2.6.32.45/fs/libfs.c linux-2.6.32.45/fs/libfs.c
43247--- linux-2.6.32.45/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
43248+++ linux-2.6.32.45/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
43249@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
43250
43251 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43252 struct dentry *next;
43253+ char d_name[sizeof(next->d_iname)];
43254+ const unsigned char *name;
43255+
43256 next = list_entry(p, struct dentry, d_u.d_child);
43257 if (d_unhashed(next) || !next->d_inode)
43258 continue;
43259
43260 spin_unlock(&dcache_lock);
43261- if (filldir(dirent, next->d_name.name,
43262+ name = next->d_name.name;
43263+ if (name == next->d_iname) {
43264+ memcpy(d_name, name, next->d_name.len);
43265+ name = d_name;
43266+ }
43267+ if (filldir(dirent, name,
43268 next->d_name.len, filp->f_pos,
43269 next->d_inode->i_ino,
43270 dt_type(next->d_inode)) < 0)
43271diff -urNp linux-2.6.32.45/fs/lockd/clntproc.c linux-2.6.32.45/fs/lockd/clntproc.c
43272--- linux-2.6.32.45/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
43273+++ linux-2.6.32.45/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
43274@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43275 /*
43276 * Cookie counter for NLM requests
43277 */
43278-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43279+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43280
43281 void nlmclnt_next_cookie(struct nlm_cookie *c)
43282 {
43283- u32 cookie = atomic_inc_return(&nlm_cookie);
43284+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43285
43286 memcpy(c->data, &cookie, 4);
43287 c->len=4;
43288@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43289 struct nlm_rqst reqst, *req;
43290 int status;
43291
43292+ pax_track_stack();
43293+
43294 req = &reqst;
43295 memset(req, 0, sizeof(*req));
43296 locks_init_lock(&req->a_args.lock.fl);
43297diff -urNp linux-2.6.32.45/fs/lockd/svc.c linux-2.6.32.45/fs/lockd/svc.c
43298--- linux-2.6.32.45/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
43299+++ linux-2.6.32.45/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
43300@@ -43,7 +43,7 @@
43301
43302 static struct svc_program nlmsvc_program;
43303
43304-struct nlmsvc_binding * nlmsvc_ops;
43305+const struct nlmsvc_binding * nlmsvc_ops;
43306 EXPORT_SYMBOL_GPL(nlmsvc_ops);
43307
43308 static DEFINE_MUTEX(nlmsvc_mutex);
43309diff -urNp linux-2.6.32.45/fs/locks.c linux-2.6.32.45/fs/locks.c
43310--- linux-2.6.32.45/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
43311+++ linux-2.6.32.45/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
43312@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
43313
43314 static struct kmem_cache *filelock_cache __read_mostly;
43315
43316+static void locks_init_lock_always(struct file_lock *fl)
43317+{
43318+ fl->fl_next = NULL;
43319+ fl->fl_fasync = NULL;
43320+ fl->fl_owner = NULL;
43321+ fl->fl_pid = 0;
43322+ fl->fl_nspid = NULL;
43323+ fl->fl_file = NULL;
43324+ fl->fl_flags = 0;
43325+ fl->fl_type = 0;
43326+ fl->fl_start = fl->fl_end = 0;
43327+}
43328+
43329 /* Allocate an empty lock structure. */
43330 static struct file_lock *locks_alloc_lock(void)
43331 {
43332- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43333+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43334+
43335+ if (fl)
43336+ locks_init_lock_always(fl);
43337+
43338+ return fl;
43339 }
43340
43341 void locks_release_private(struct file_lock *fl)
43342@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
43343 INIT_LIST_HEAD(&fl->fl_link);
43344 INIT_LIST_HEAD(&fl->fl_block);
43345 init_waitqueue_head(&fl->fl_wait);
43346- fl->fl_next = NULL;
43347- fl->fl_fasync = NULL;
43348- fl->fl_owner = NULL;
43349- fl->fl_pid = 0;
43350- fl->fl_nspid = NULL;
43351- fl->fl_file = NULL;
43352- fl->fl_flags = 0;
43353- fl->fl_type = 0;
43354- fl->fl_start = fl->fl_end = 0;
43355 fl->fl_ops = NULL;
43356 fl->fl_lmops = NULL;
43357+ locks_init_lock_always(fl);
43358 }
43359
43360 EXPORT_SYMBOL(locks_init_lock);
43361@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
43362 return;
43363
43364 if (filp->f_op && filp->f_op->flock) {
43365- struct file_lock fl = {
43366+ struct file_lock flock = {
43367 .fl_pid = current->tgid,
43368 .fl_file = filp,
43369 .fl_flags = FL_FLOCK,
43370 .fl_type = F_UNLCK,
43371 .fl_end = OFFSET_MAX,
43372 };
43373- filp->f_op->flock(filp, F_SETLKW, &fl);
43374- if (fl.fl_ops && fl.fl_ops->fl_release_private)
43375- fl.fl_ops->fl_release_private(&fl);
43376+ filp->f_op->flock(filp, F_SETLKW, &flock);
43377+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
43378+ flock.fl_ops->fl_release_private(&flock);
43379 }
43380
43381 lock_kernel();
43382diff -urNp linux-2.6.32.45/fs/mbcache.c linux-2.6.32.45/fs/mbcache.c
43383--- linux-2.6.32.45/fs/mbcache.c 2011-03-27 14:31:47.000000000 -0400
43384+++ linux-2.6.32.45/fs/mbcache.c 2011-08-05 20:33:55.000000000 -0400
43385@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct
43386 if (!cache)
43387 goto fail;
43388 cache->c_name = name;
43389- cache->c_op.free = NULL;
43390+ *(void **)&cache->c_op.free = NULL;
43391 if (cache_op)
43392- cache->c_op.free = cache_op->free;
43393+ *(void **)&cache->c_op.free = cache_op->free;
43394 atomic_set(&cache->c_entry_count, 0);
43395 cache->c_bucket_bits = bucket_bits;
43396 #ifdef MB_CACHE_INDEXES_COUNT
43397diff -urNp linux-2.6.32.45/fs/namei.c linux-2.6.32.45/fs/namei.c
43398--- linux-2.6.32.45/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
43399+++ linux-2.6.32.45/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
43400@@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
43401 return ret;
43402
43403 /*
43404- * Read/write DACs are always overridable.
43405- * Executable DACs are overridable if at least one exec bit is set.
43406- */
43407- if (!(mask & MAY_EXEC) || execute_ok(inode))
43408- if (capable(CAP_DAC_OVERRIDE))
43409- return 0;
43410-
43411- /*
43412 * Searching includes executable on directories, else just read.
43413 */
43414 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43415@@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
43416 if (capable(CAP_DAC_READ_SEARCH))
43417 return 0;
43418
43419+ /*
43420+ * Read/write DACs are always overridable.
43421+ * Executable DACs are overridable if at least one exec bit is set.
43422+ */
43423+ if (!(mask & MAY_EXEC) || execute_ok(inode))
43424+ if (capable(CAP_DAC_OVERRIDE))
43425+ return 0;
43426+
43427 return -EACCES;
43428 }
43429
43430@@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
43431 if (!ret)
43432 goto ok;
43433
43434- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
43435+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
43436+ capable(CAP_DAC_OVERRIDE))
43437 goto ok;
43438
43439 return ret;
43440@@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
43441 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
43442 error = PTR_ERR(cookie);
43443 if (!IS_ERR(cookie)) {
43444- char *s = nd_get_link(nd);
43445+ const char *s = nd_get_link(nd);
43446 error = 0;
43447 if (s)
43448 error = __vfs_follow_link(nd, s);
43449@@ -669,6 +670,13 @@ static inline int do_follow_link(struct
43450 err = security_inode_follow_link(path->dentry, nd);
43451 if (err)
43452 goto loop;
43453+
43454+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
43455+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
43456+ err = -EACCES;
43457+ goto loop;
43458+ }
43459+
43460 current->link_count++;
43461 current->total_link_count++;
43462 nd->depth++;
43463@@ -1016,11 +1024,18 @@ return_reval:
43464 break;
43465 }
43466 return_base:
43467+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43468+ path_put(&nd->path);
43469+ return -ENOENT;
43470+ }
43471 return 0;
43472 out_dput:
43473 path_put_conditional(&next, nd);
43474 break;
43475 }
43476+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
43477+ err = -ENOENT;
43478+
43479 path_put(&nd->path);
43480 return_err:
43481 return err;
43482@@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
43483 int retval = path_init(dfd, name, flags, nd);
43484 if (!retval)
43485 retval = path_walk(name, nd);
43486- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
43487- nd->path.dentry->d_inode))
43488- audit_inode(name, nd->path.dentry);
43489+
43490+ if (likely(!retval)) {
43491+ if (nd->path.dentry && nd->path.dentry->d_inode) {
43492+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43493+ retval = -ENOENT;
43494+ if (!audit_dummy_context())
43495+ audit_inode(name, nd->path.dentry);
43496+ }
43497+ }
43498 if (nd->root.mnt) {
43499 path_put(&nd->root);
43500 nd->root.mnt = NULL;
43501 }
43502+
43503 return retval;
43504 }
43505
43506@@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
43507 if (error)
43508 goto err_out;
43509
43510+
43511+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
43512+ error = -EPERM;
43513+ goto err_out;
43514+ }
43515+ if (gr_handle_rawio(inode)) {
43516+ error = -EPERM;
43517+ goto err_out;
43518+ }
43519+ if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
43520+ error = -EACCES;
43521+ goto err_out;
43522+ }
43523+
43524 if (flag & O_TRUNC) {
43525 error = get_write_access(inode);
43526 if (error)
43527@@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
43528 int error;
43529 struct dentry *dir = nd->path.dentry;
43530
43531+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
43532+ error = -EACCES;
43533+ goto out_unlock;
43534+ }
43535+
43536 if (!IS_POSIXACL(dir->d_inode))
43537 mode &= ~current_umask();
43538 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
43539 if (error)
43540 goto out_unlock;
43541 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
43542+ if (!error)
43543+ gr_handle_create(path->dentry, nd->path.mnt);
43544 out_unlock:
43545 mutex_unlock(&dir->d_inode->i_mutex);
43546 dput(nd->path.dentry);
43547@@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
43548 &nd, flag);
43549 if (error)
43550 return ERR_PTR(error);
43551+
43552+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
43553+ error = -EPERM;
43554+ goto exit;
43555+ }
43556+
43557+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
43558+ error = -EPERM;
43559+ goto exit;
43560+ }
43561+
43562+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
43563+ error = -EACCES;
43564+ goto exit;
43565+ }
43566+
43567 goto ok;
43568 }
43569
43570@@ -1795,6 +1854,14 @@ do_last:
43571 /*
43572 * It already exists.
43573 */
43574+
43575+ /* only check if O_CREAT is specified, all other checks need
43576+ to go into may_open */
43577+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
43578+ error = -EACCES;
43579+ goto exit_mutex_unlock;
43580+ }
43581+
43582 mutex_unlock(&dir->d_inode->i_mutex);
43583 audit_inode(pathname, path.dentry);
43584
43585@@ -1887,6 +1954,13 @@ do_link:
43586 error = security_inode_follow_link(path.dentry, &nd);
43587 if (error)
43588 goto exit_dput;
43589+
43590+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
43591+ path.dentry, nd.path.mnt)) {
43592+ error = -EACCES;
43593+ goto exit_dput;
43594+ }
43595+
43596 error = __do_follow_link(&path, &nd);
43597 if (error) {
43598 /* Does someone understand code flow here? Or it is only
43599@@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43600 error = may_mknod(mode);
43601 if (error)
43602 goto out_dput;
43603+
43604+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
43605+ error = -EPERM;
43606+ goto out_dput;
43607+ }
43608+
43609+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
43610+ error = -EACCES;
43611+ goto out_dput;
43612+ }
43613+
43614 error = mnt_want_write(nd.path.mnt);
43615 if (error)
43616 goto out_dput;
43617@@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43618 }
43619 out_drop_write:
43620 mnt_drop_write(nd.path.mnt);
43621+
43622+ if (!error)
43623+ gr_handle_create(dentry, nd.path.mnt);
43624 out_dput:
43625 dput(dentry);
43626 out_unlock:
43627@@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43628 if (IS_ERR(dentry))
43629 goto out_unlock;
43630
43631+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
43632+ error = -EACCES;
43633+ goto out_dput;
43634+ }
43635+
43636 if (!IS_POSIXACL(nd.path.dentry->d_inode))
43637 mode &= ~current_umask();
43638 error = mnt_want_write(nd.path.mnt);
43639@@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43640 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
43641 out_drop_write:
43642 mnt_drop_write(nd.path.mnt);
43643+
43644+ if (!error)
43645+ gr_handle_create(dentry, nd.path.mnt);
43646+
43647 out_dput:
43648 dput(dentry);
43649 out_unlock:
43650@@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
43651 char * name;
43652 struct dentry *dentry;
43653 struct nameidata nd;
43654+ ino_t saved_ino = 0;
43655+ dev_t saved_dev = 0;
43656
43657 error = user_path_parent(dfd, pathname, &nd, &name);
43658 if (error)
43659@@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
43660 error = PTR_ERR(dentry);
43661 if (IS_ERR(dentry))
43662 goto exit2;
43663+
43664+ if (dentry->d_inode != NULL) {
43665+ if (dentry->d_inode->i_nlink <= 1) {
43666+ saved_ino = dentry->d_inode->i_ino;
43667+ saved_dev = gr_get_dev_from_dentry(dentry);
43668+ }
43669+
43670+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
43671+ error = -EACCES;
43672+ goto exit3;
43673+ }
43674+ }
43675+
43676 error = mnt_want_write(nd.path.mnt);
43677 if (error)
43678 goto exit3;
43679@@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
43680 if (error)
43681 goto exit4;
43682 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
43683+ if (!error && (saved_dev || saved_ino))
43684+ gr_handle_delete(saved_ino, saved_dev);
43685 exit4:
43686 mnt_drop_write(nd.path.mnt);
43687 exit3:
43688@@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
43689 struct dentry *dentry;
43690 struct nameidata nd;
43691 struct inode *inode = NULL;
43692+ ino_t saved_ino = 0;
43693+ dev_t saved_dev = 0;
43694
43695 error = user_path_parent(dfd, pathname, &nd, &name);
43696 if (error)
43697@@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
43698 if (nd.last.name[nd.last.len])
43699 goto slashes;
43700 inode = dentry->d_inode;
43701- if (inode)
43702+ if (inode) {
43703+ if (inode->i_nlink <= 1) {
43704+ saved_ino = inode->i_ino;
43705+ saved_dev = gr_get_dev_from_dentry(dentry);
43706+ }
43707+
43708 atomic_inc(&inode->i_count);
43709+
43710+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
43711+ error = -EACCES;
43712+ goto exit2;
43713+ }
43714+ }
43715 error = mnt_want_write(nd.path.mnt);
43716 if (error)
43717 goto exit2;
43718@@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
43719 if (error)
43720 goto exit3;
43721 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
43722+ if (!error && (saved_ino || saved_dev))
43723+ gr_handle_delete(saved_ino, saved_dev);
43724 exit3:
43725 mnt_drop_write(nd.path.mnt);
43726 exit2:
43727@@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
43728 if (IS_ERR(dentry))
43729 goto out_unlock;
43730
43731+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
43732+ error = -EACCES;
43733+ goto out_dput;
43734+ }
43735+
43736 error = mnt_want_write(nd.path.mnt);
43737 if (error)
43738 goto out_dput;
43739@@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
43740 if (error)
43741 goto out_drop_write;
43742 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
43743+ if (!error)
43744+ gr_handle_create(dentry, nd.path.mnt);
43745 out_drop_write:
43746 mnt_drop_write(nd.path.mnt);
43747 out_dput:
43748@@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
43749 error = PTR_ERR(new_dentry);
43750 if (IS_ERR(new_dentry))
43751 goto out_unlock;
43752+
43753+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
43754+ old_path.dentry->d_inode,
43755+ old_path.dentry->d_inode->i_mode, to)) {
43756+ error = -EACCES;
43757+ goto out_dput;
43758+ }
43759+
43760+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
43761+ old_path.dentry, old_path.mnt, to)) {
43762+ error = -EACCES;
43763+ goto out_dput;
43764+ }
43765+
43766 error = mnt_want_write(nd.path.mnt);
43767 if (error)
43768 goto out_dput;
43769@@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
43770 if (error)
43771 goto out_drop_write;
43772 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
43773+ if (!error)
43774+ gr_handle_create(new_dentry, nd.path.mnt);
43775 out_drop_write:
43776 mnt_drop_write(nd.path.mnt);
43777 out_dput:
43778@@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43779 char *to;
43780 int error;
43781
43782+ pax_track_stack();
43783+
43784 error = user_path_parent(olddfd, oldname, &oldnd, &from);
43785 if (error)
43786 goto exit;
43787@@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43788 if (new_dentry == trap)
43789 goto exit5;
43790
43791+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
43792+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
43793+ to);
43794+ if (error)
43795+ goto exit5;
43796+
43797 error = mnt_want_write(oldnd.path.mnt);
43798 if (error)
43799 goto exit5;
43800@@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43801 goto exit6;
43802 error = vfs_rename(old_dir->d_inode, old_dentry,
43803 new_dir->d_inode, new_dentry);
43804+ if (!error)
43805+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
43806+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
43807 exit6:
43808 mnt_drop_write(oldnd.path.mnt);
43809 exit5:
43810@@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
43811
43812 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
43813 {
43814+ char tmpbuf[64];
43815+ const char *newlink;
43816 int len;
43817
43818 len = PTR_ERR(link);
43819@@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
43820 len = strlen(link);
43821 if (len > (unsigned) buflen)
43822 len = buflen;
43823- if (copy_to_user(buffer, link, len))
43824+
43825+ if (len < sizeof(tmpbuf)) {
43826+ memcpy(tmpbuf, link, len);
43827+ newlink = tmpbuf;
43828+ } else
43829+ newlink = link;
43830+
43831+ if (copy_to_user(buffer, newlink, len))
43832 len = -EFAULT;
43833 out:
43834 return len;
43835diff -urNp linux-2.6.32.45/fs/namespace.c linux-2.6.32.45/fs/namespace.c
43836--- linux-2.6.32.45/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
43837+++ linux-2.6.32.45/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
43838@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
43839 if (!(sb->s_flags & MS_RDONLY))
43840 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
43841 up_write(&sb->s_umount);
43842+
43843+ gr_log_remount(mnt->mnt_devname, retval);
43844+
43845 return retval;
43846 }
43847
43848@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
43849 security_sb_umount_busy(mnt);
43850 up_write(&namespace_sem);
43851 release_mounts(&umount_list);
43852+
43853+ gr_log_unmount(mnt->mnt_devname, retval);
43854+
43855 return retval;
43856 }
43857
43858@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
43859 if (retval)
43860 goto dput_out;
43861
43862+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
43863+ retval = -EPERM;
43864+ goto dput_out;
43865+ }
43866+
43867+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
43868+ retval = -EPERM;
43869+ goto dput_out;
43870+ }
43871+
43872 if (flags & MS_REMOUNT)
43873 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
43874 data_page);
43875@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
43876 dev_name, data_page);
43877 dput_out:
43878 path_put(&path);
43879+
43880+ gr_log_mount(dev_name, dir_name, retval);
43881+
43882 return retval;
43883 }
43884
43885@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
43886 goto out1;
43887 }
43888
43889+ if (gr_handle_chroot_pivot()) {
43890+ error = -EPERM;
43891+ path_put(&old);
43892+ goto out1;
43893+ }
43894+
43895 read_lock(&current->fs->lock);
43896 root = current->fs->root;
43897 path_get(&current->fs->root);
43898diff -urNp linux-2.6.32.45/fs/ncpfs/dir.c linux-2.6.32.45/fs/ncpfs/dir.c
43899--- linux-2.6.32.45/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
43900+++ linux-2.6.32.45/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
43901@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
43902 int res, val = 0, len;
43903 __u8 __name[NCP_MAXPATHLEN + 1];
43904
43905+ pax_track_stack();
43906+
43907 parent = dget_parent(dentry);
43908 dir = parent->d_inode;
43909
43910@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
43911 int error, res, len;
43912 __u8 __name[NCP_MAXPATHLEN + 1];
43913
43914+ pax_track_stack();
43915+
43916 lock_kernel();
43917 error = -EIO;
43918 if (!ncp_conn_valid(server))
43919@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
43920 int error, result, len;
43921 int opmode;
43922 __u8 __name[NCP_MAXPATHLEN + 1];
43923-
43924+
43925 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
43926 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
43927
43928+ pax_track_stack();
43929+
43930 error = -EIO;
43931 lock_kernel();
43932 if (!ncp_conn_valid(server))
43933@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
43934 int error, len;
43935 __u8 __name[NCP_MAXPATHLEN + 1];
43936
43937+ pax_track_stack();
43938+
43939 DPRINTK("ncp_mkdir: making %s/%s\n",
43940 dentry->d_parent->d_name.name, dentry->d_name.name);
43941
43942@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
43943 if (!ncp_conn_valid(server))
43944 goto out;
43945
43946+ pax_track_stack();
43947+
43948 ncp_age_dentry(server, dentry);
43949 len = sizeof(__name);
43950 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
43951@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
43952 int old_len, new_len;
43953 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
43954
43955+ pax_track_stack();
43956+
43957 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
43958 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
43959 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
43960diff -urNp linux-2.6.32.45/fs/ncpfs/inode.c linux-2.6.32.45/fs/ncpfs/inode.c
43961--- linux-2.6.32.45/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43962+++ linux-2.6.32.45/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
43963@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
43964 #endif
43965 struct ncp_entry_info finfo;
43966
43967+ pax_track_stack();
43968+
43969 data.wdog_pid = NULL;
43970 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
43971 if (!server)
43972diff -urNp linux-2.6.32.45/fs/nfs/inode.c linux-2.6.32.45/fs/nfs/inode.c
43973--- linux-2.6.32.45/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
43974+++ linux-2.6.32.45/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
43975@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
43976 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
43977 nfsi->attrtimeo_timestamp = jiffies;
43978
43979- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
43980+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
43981 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
43982 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
43983 else
43984@@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
43985 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
43986 }
43987
43988-static atomic_long_t nfs_attr_generation_counter;
43989+static atomic_long_unchecked_t nfs_attr_generation_counter;
43990
43991 static unsigned long nfs_read_attr_generation_counter(void)
43992 {
43993- return atomic_long_read(&nfs_attr_generation_counter);
43994+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
43995 }
43996
43997 unsigned long nfs_inc_attr_generation_counter(void)
43998 {
43999- return atomic_long_inc_return(&nfs_attr_generation_counter);
44000+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44001 }
44002
44003 void nfs_fattr_init(struct nfs_fattr *fattr)
44004diff -urNp linux-2.6.32.45/fs/nfsd/lockd.c linux-2.6.32.45/fs/nfsd/lockd.c
44005--- linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
44006+++ linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
44007@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
44008 fput(filp);
44009 }
44010
44011-static struct nlmsvc_binding nfsd_nlm_ops = {
44012+static const struct nlmsvc_binding nfsd_nlm_ops = {
44013 .fopen = nlm_fopen, /* open file for locking */
44014 .fclose = nlm_fclose, /* close file */
44015 };
44016diff -urNp linux-2.6.32.45/fs/nfsd/nfs4state.c linux-2.6.32.45/fs/nfsd/nfs4state.c
44017--- linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
44018+++ linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
44019@@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44020 unsigned int cmd;
44021 int err;
44022
44023+ pax_track_stack();
44024+
44025 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44026 (long long) lock->lk_offset,
44027 (long long) lock->lk_length);
44028diff -urNp linux-2.6.32.45/fs/nfsd/nfs4xdr.c linux-2.6.32.45/fs/nfsd/nfs4xdr.c
44029--- linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
44030+++ linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
44031@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44032 struct nfsd4_compoundres *resp = rqstp->rq_resp;
44033 u32 minorversion = resp->cstate.minorversion;
44034
44035+ pax_track_stack();
44036+
44037 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44038 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44039 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44040diff -urNp linux-2.6.32.45/fs/nfsd/vfs.c linux-2.6.32.45/fs/nfsd/vfs.c
44041--- linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
44042+++ linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
44043@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44044 } else {
44045 oldfs = get_fs();
44046 set_fs(KERNEL_DS);
44047- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44048+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
44049 set_fs(oldfs);
44050 }
44051
44052@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44053
44054 /* Write the data. */
44055 oldfs = get_fs(); set_fs(KERNEL_DS);
44056- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44057+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
44058 set_fs(oldfs);
44059 if (host_err < 0)
44060 goto out_nfserr;
44061@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44062 */
44063
44064 oldfs = get_fs(); set_fs(KERNEL_DS);
44065- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44066+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
44067 set_fs(oldfs);
44068
44069 if (host_err < 0)
44070diff -urNp linux-2.6.32.45/fs/nilfs2/ioctl.c linux-2.6.32.45/fs/nilfs2/ioctl.c
44071--- linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
44072+++ linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
44073@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
44074 unsigned int cmd, void __user *argp)
44075 {
44076 struct nilfs_argv argv[5];
44077- const static size_t argsz[5] = {
44078+ static const size_t argsz[5] = {
44079 sizeof(struct nilfs_vdesc),
44080 sizeof(struct nilfs_period),
44081 sizeof(__u64),
44082diff -urNp linux-2.6.32.45/fs/notify/dnotify/dnotify.c linux-2.6.32.45/fs/notify/dnotify/dnotify.c
44083--- linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
44084+++ linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
44085@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
44086 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
44087 }
44088
44089-static struct fsnotify_ops dnotify_fsnotify_ops = {
44090+static const struct fsnotify_ops dnotify_fsnotify_ops = {
44091 .handle_event = dnotify_handle_event,
44092 .should_send_event = dnotify_should_send_event,
44093 .free_group_priv = NULL,
44094diff -urNp linux-2.6.32.45/fs/notify/notification.c linux-2.6.32.45/fs/notify/notification.c
44095--- linux-2.6.32.45/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
44096+++ linux-2.6.32.45/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
44097@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44098 * get set to 0 so it will never get 'freed'
44099 */
44100 static struct fsnotify_event q_overflow_event;
44101-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44102+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44103
44104 /**
44105 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44106@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44107 */
44108 u32 fsnotify_get_cookie(void)
44109 {
44110- return atomic_inc_return(&fsnotify_sync_cookie);
44111+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44112 }
44113 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44114
44115diff -urNp linux-2.6.32.45/fs/ntfs/dir.c linux-2.6.32.45/fs/ntfs/dir.c
44116--- linux-2.6.32.45/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44117+++ linux-2.6.32.45/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
44118@@ -1328,7 +1328,7 @@ find_next_index_buffer:
44119 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44120 ~(s64)(ndir->itype.index.block_size - 1)));
44121 /* Bounds checks. */
44122- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44123+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44124 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44125 "inode 0x%lx or driver bug.", vdir->i_ino);
44126 goto err_out;
44127diff -urNp linux-2.6.32.45/fs/ntfs/file.c linux-2.6.32.45/fs/ntfs/file.c
44128--- linux-2.6.32.45/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
44129+++ linux-2.6.32.45/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
44130@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
44131 #endif /* NTFS_RW */
44132 };
44133
44134-const struct file_operations ntfs_empty_file_ops = {};
44135+const struct file_operations ntfs_empty_file_ops __read_only;
44136
44137-const struct inode_operations ntfs_empty_inode_ops = {};
44138+const struct inode_operations ntfs_empty_inode_ops __read_only;
44139diff -urNp linux-2.6.32.45/fs/ocfs2/cluster/masklog.c linux-2.6.32.45/fs/ocfs2/cluster/masklog.c
44140--- linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
44141+++ linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
44142@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
44143 return mlog_mask_store(mlog_attr->mask, buf, count);
44144 }
44145
44146-static struct sysfs_ops mlog_attr_ops = {
44147+static const struct sysfs_ops mlog_attr_ops = {
44148 .show = mlog_show,
44149 .store = mlog_store,
44150 };
44151diff -urNp linux-2.6.32.45/fs/ocfs2/localalloc.c linux-2.6.32.45/fs/ocfs2/localalloc.c
44152--- linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
44153+++ linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
44154@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
44155 goto bail;
44156 }
44157
44158- atomic_inc(&osb->alloc_stats.moves);
44159+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44160
44161 status = 0;
44162 bail:
44163diff -urNp linux-2.6.32.45/fs/ocfs2/namei.c linux-2.6.32.45/fs/ocfs2/namei.c
44164--- linux-2.6.32.45/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
44165+++ linux-2.6.32.45/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
44166@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
44167 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44168 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44169
44170+ pax_track_stack();
44171+
44172 /* At some point it might be nice to break this function up a
44173 * bit. */
44174
44175diff -urNp linux-2.6.32.45/fs/ocfs2/ocfs2.h linux-2.6.32.45/fs/ocfs2/ocfs2.h
44176--- linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
44177+++ linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
44178@@ -217,11 +217,11 @@ enum ocfs2_vol_state
44179
44180 struct ocfs2_alloc_stats
44181 {
44182- atomic_t moves;
44183- atomic_t local_data;
44184- atomic_t bitmap_data;
44185- atomic_t bg_allocs;
44186- atomic_t bg_extends;
44187+ atomic_unchecked_t moves;
44188+ atomic_unchecked_t local_data;
44189+ atomic_unchecked_t bitmap_data;
44190+ atomic_unchecked_t bg_allocs;
44191+ atomic_unchecked_t bg_extends;
44192 };
44193
44194 enum ocfs2_local_alloc_state
44195diff -urNp linux-2.6.32.45/fs/ocfs2/suballoc.c linux-2.6.32.45/fs/ocfs2/suballoc.c
44196--- linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
44197+++ linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
44198@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
44199 mlog_errno(status);
44200 goto bail;
44201 }
44202- atomic_inc(&osb->alloc_stats.bg_extends);
44203+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44204
44205 /* You should never ask for this much metadata */
44206 BUG_ON(bits_wanted >
44207@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
44208 mlog_errno(status);
44209 goto bail;
44210 }
44211- atomic_inc(&osb->alloc_stats.bg_allocs);
44212+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44213
44214 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
44215 ac->ac_bits_given += (*num_bits);
44216@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
44217 mlog_errno(status);
44218 goto bail;
44219 }
44220- atomic_inc(&osb->alloc_stats.bg_allocs);
44221+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44222
44223 BUG_ON(num_bits != 1);
44224
44225@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44226 cluster_start,
44227 num_clusters);
44228 if (!status)
44229- atomic_inc(&osb->alloc_stats.local_data);
44230+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44231 } else {
44232 if (min_clusters > (osb->bitmap_cpg - 1)) {
44233 /* The only paths asking for contiguousness
44234@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44235 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44236 bg_blkno,
44237 bg_bit_off);
44238- atomic_inc(&osb->alloc_stats.bitmap_data);
44239+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44240 }
44241 }
44242 if (status < 0) {
44243diff -urNp linux-2.6.32.45/fs/ocfs2/super.c linux-2.6.32.45/fs/ocfs2/super.c
44244--- linux-2.6.32.45/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
44245+++ linux-2.6.32.45/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
44246@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44247 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44248 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44249 "Stats",
44250- atomic_read(&osb->alloc_stats.bitmap_data),
44251- atomic_read(&osb->alloc_stats.local_data),
44252- atomic_read(&osb->alloc_stats.bg_allocs),
44253- atomic_read(&osb->alloc_stats.moves),
44254- atomic_read(&osb->alloc_stats.bg_extends));
44255+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44256+ atomic_read_unchecked(&osb->alloc_stats.local_data),
44257+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44258+ atomic_read_unchecked(&osb->alloc_stats.moves),
44259+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44260
44261 out += snprintf(buf + out, len - out,
44262 "%10s => State: %u Descriptor: %llu Size: %u bits "
44263@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
44264 spin_lock_init(&osb->osb_xattr_lock);
44265 ocfs2_init_inode_steal_slot(osb);
44266
44267- atomic_set(&osb->alloc_stats.moves, 0);
44268- atomic_set(&osb->alloc_stats.local_data, 0);
44269- atomic_set(&osb->alloc_stats.bitmap_data, 0);
44270- atomic_set(&osb->alloc_stats.bg_allocs, 0);
44271- atomic_set(&osb->alloc_stats.bg_extends, 0);
44272+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44273+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44274+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44275+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44276+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44277
44278 /* Copy the blockcheck stats from the superblock probe */
44279 osb->osb_ecc_stats = *stats;
44280diff -urNp linux-2.6.32.45/fs/open.c linux-2.6.32.45/fs/open.c
44281--- linux-2.6.32.45/fs/open.c 2011-03-27 14:31:47.000000000 -0400
44282+++ linux-2.6.32.45/fs/open.c 2011-04-17 15:56:46.000000000 -0400
44283@@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
44284 error = locks_verify_truncate(inode, NULL, length);
44285 if (!error)
44286 error = security_path_truncate(&path, length, 0);
44287+
44288+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44289+ error = -EACCES;
44290+
44291 if (!error) {
44292 vfs_dq_init(inode);
44293 error = do_truncate(path.dentry, length, 0, NULL);
44294@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44295 if (__mnt_is_readonly(path.mnt))
44296 res = -EROFS;
44297
44298+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44299+ res = -EACCES;
44300+
44301 out_path_release:
44302 path_put(&path);
44303 out:
44304@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44305 if (error)
44306 goto dput_and_out;
44307
44308+ gr_log_chdir(path.dentry, path.mnt);
44309+
44310 set_fs_pwd(current->fs, &path);
44311
44312 dput_and_out:
44313@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44314 goto out_putf;
44315
44316 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
44317+
44318+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44319+ error = -EPERM;
44320+
44321+ if (!error)
44322+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44323+
44324 if (!error)
44325 set_fs_pwd(current->fs, &file->f_path);
44326 out_putf:
44327@@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
44328 if (!capable(CAP_SYS_CHROOT))
44329 goto dput_and_out;
44330
44331+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44332+ goto dput_and_out;
44333+
44334+ if (gr_handle_chroot_caps(&path)) {
44335+ error = -ENOMEM;
44336+ goto dput_and_out;
44337+ }
44338+
44339 set_fs_root(current->fs, &path);
44340+
44341+ gr_handle_chroot_chdir(&path);
44342+
44343 error = 0;
44344 dput_and_out:
44345 path_put(&path);
44346@@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44347 err = mnt_want_write_file(file);
44348 if (err)
44349 goto out_putf;
44350+
44351 mutex_lock(&inode->i_mutex);
44352+
44353+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
44354+ err = -EACCES;
44355+ goto out_unlock;
44356+ }
44357+
44358 if (mode == (mode_t) -1)
44359 mode = inode->i_mode;
44360+
44361+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
44362+ err = -EPERM;
44363+ goto out_unlock;
44364+ }
44365+
44366 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44367 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44368 err = notify_change(dentry, &newattrs);
44369+
44370+out_unlock:
44371 mutex_unlock(&inode->i_mutex);
44372 mnt_drop_write(file->f_path.mnt);
44373 out_putf:
44374@@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44375 error = mnt_want_write(path.mnt);
44376 if (error)
44377 goto dput_and_out;
44378+
44379 mutex_lock(&inode->i_mutex);
44380+
44381+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44382+ error = -EACCES;
44383+ goto out_unlock;
44384+ }
44385+
44386 if (mode == (mode_t) -1)
44387 mode = inode->i_mode;
44388+
44389+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44390+ error = -EACCES;
44391+ goto out_unlock;
44392+ }
44393+
44394 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44395 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44396 error = notify_change(path.dentry, &newattrs);
44397+
44398+out_unlock:
44399 mutex_unlock(&inode->i_mutex);
44400 mnt_drop_write(path.mnt);
44401 dput_and_out:
44402@@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
44403 return sys_fchmodat(AT_FDCWD, filename, mode);
44404 }
44405
44406-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
44407+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
44408 {
44409 struct inode *inode = dentry->d_inode;
44410 int error;
44411 struct iattr newattrs;
44412
44413+ if (!gr_acl_handle_chown(dentry, mnt))
44414+ return -EACCES;
44415+
44416 newattrs.ia_valid = ATTR_CTIME;
44417 if (user != (uid_t) -1) {
44418 newattrs.ia_valid |= ATTR_UID;
44419@@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
44420 error = mnt_want_write(path.mnt);
44421 if (error)
44422 goto out_release;
44423- error = chown_common(path.dentry, user, group);
44424+ error = chown_common(path.dentry, user, group, path.mnt);
44425 mnt_drop_write(path.mnt);
44426 out_release:
44427 path_put(&path);
44428@@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
44429 error = mnt_want_write(path.mnt);
44430 if (error)
44431 goto out_release;
44432- error = chown_common(path.dentry, user, group);
44433+ error = chown_common(path.dentry, user, group, path.mnt);
44434 mnt_drop_write(path.mnt);
44435 out_release:
44436 path_put(&path);
44437@@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
44438 error = mnt_want_write(path.mnt);
44439 if (error)
44440 goto out_release;
44441- error = chown_common(path.dentry, user, group);
44442+ error = chown_common(path.dentry, user, group, path.mnt);
44443 mnt_drop_write(path.mnt);
44444 out_release:
44445 path_put(&path);
44446@@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
44447 goto out_fput;
44448 dentry = file->f_path.dentry;
44449 audit_inode(NULL, dentry);
44450- error = chown_common(dentry, user, group);
44451+ error = chown_common(dentry, user, group, file->f_path.mnt);
44452 mnt_drop_write(file->f_path.mnt);
44453 out_fput:
44454 fput(file);
44455@@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
44456 if (!IS_ERR(tmp)) {
44457 fd = get_unused_fd_flags(flags);
44458 if (fd >= 0) {
44459- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
44460+ struct file *f;
44461+ /* don't allow to be set by userland */
44462+ flags &= ~FMODE_GREXEC;
44463+ f = do_filp_open(dfd, tmp, flags, mode, 0);
44464 if (IS_ERR(f)) {
44465 put_unused_fd(fd);
44466 fd = PTR_ERR(f);
44467diff -urNp linux-2.6.32.45/fs/partitions/ldm.c linux-2.6.32.45/fs/partitions/ldm.c
44468--- linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
44469+++ linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
44470@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44471 ldm_error ("A VBLK claims to have %d parts.", num);
44472 return false;
44473 }
44474+
44475 if (rec >= num) {
44476 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44477 return false;
44478@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44479 goto found;
44480 }
44481
44482- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44483+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44484 if (!f) {
44485 ldm_crit ("Out of memory.");
44486 return false;
44487diff -urNp linux-2.6.32.45/fs/partitions/mac.c linux-2.6.32.45/fs/partitions/mac.c
44488--- linux-2.6.32.45/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
44489+++ linux-2.6.32.45/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
44490@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
44491 return 0; /* not a MacOS disk */
44492 }
44493 blocks_in_map = be32_to_cpu(part->map_count);
44494+ printk(" [mac]");
44495 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
44496 put_dev_sector(sect);
44497 return 0;
44498 }
44499- printk(" [mac]");
44500 for (slot = 1; slot <= blocks_in_map; ++slot) {
44501 int pos = slot * secsize;
44502 put_dev_sector(sect);
44503diff -urNp linux-2.6.32.45/fs/pipe.c linux-2.6.32.45/fs/pipe.c
44504--- linux-2.6.32.45/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
44505+++ linux-2.6.32.45/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
44506@@ -401,9 +401,9 @@ redo:
44507 }
44508 if (bufs) /* More to do? */
44509 continue;
44510- if (!pipe->writers)
44511+ if (!atomic_read(&pipe->writers))
44512 break;
44513- if (!pipe->waiting_writers) {
44514+ if (!atomic_read(&pipe->waiting_writers)) {
44515 /* syscall merging: Usually we must not sleep
44516 * if O_NONBLOCK is set, or if we got some data.
44517 * But if a writer sleeps in kernel space, then
44518@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
44519 mutex_lock(&inode->i_mutex);
44520 pipe = inode->i_pipe;
44521
44522- if (!pipe->readers) {
44523+ if (!atomic_read(&pipe->readers)) {
44524 send_sig(SIGPIPE, current, 0);
44525 ret = -EPIPE;
44526 goto out;
44527@@ -511,7 +511,7 @@ redo1:
44528 for (;;) {
44529 int bufs;
44530
44531- if (!pipe->readers) {
44532+ if (!atomic_read(&pipe->readers)) {
44533 send_sig(SIGPIPE, current, 0);
44534 if (!ret)
44535 ret = -EPIPE;
44536@@ -597,9 +597,9 @@ redo2:
44537 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44538 do_wakeup = 0;
44539 }
44540- pipe->waiting_writers++;
44541+ atomic_inc(&pipe->waiting_writers);
44542 pipe_wait(pipe);
44543- pipe->waiting_writers--;
44544+ atomic_dec(&pipe->waiting_writers);
44545 }
44546 out:
44547 mutex_unlock(&inode->i_mutex);
44548@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
44549 mask = 0;
44550 if (filp->f_mode & FMODE_READ) {
44551 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44552- if (!pipe->writers && filp->f_version != pipe->w_counter)
44553+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44554 mask |= POLLHUP;
44555 }
44556
44557@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
44558 * Most Unices do not set POLLERR for FIFOs but on Linux they
44559 * behave exactly like pipes for poll().
44560 */
44561- if (!pipe->readers)
44562+ if (!atomic_read(&pipe->readers))
44563 mask |= POLLERR;
44564 }
44565
44566@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
44567
44568 mutex_lock(&inode->i_mutex);
44569 pipe = inode->i_pipe;
44570- pipe->readers -= decr;
44571- pipe->writers -= decw;
44572+ atomic_sub(decr, &pipe->readers);
44573+ atomic_sub(decw, &pipe->writers);
44574
44575- if (!pipe->readers && !pipe->writers) {
44576+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
44577 free_pipe_info(inode);
44578 } else {
44579 wake_up_interruptible_sync(&pipe->wait);
44580@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
44581
44582 if (inode->i_pipe) {
44583 ret = 0;
44584- inode->i_pipe->readers++;
44585+ atomic_inc(&inode->i_pipe->readers);
44586 }
44587
44588 mutex_unlock(&inode->i_mutex);
44589@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
44590
44591 if (inode->i_pipe) {
44592 ret = 0;
44593- inode->i_pipe->writers++;
44594+ atomic_inc(&inode->i_pipe->writers);
44595 }
44596
44597 mutex_unlock(&inode->i_mutex);
44598@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
44599 if (inode->i_pipe) {
44600 ret = 0;
44601 if (filp->f_mode & FMODE_READ)
44602- inode->i_pipe->readers++;
44603+ atomic_inc(&inode->i_pipe->readers);
44604 if (filp->f_mode & FMODE_WRITE)
44605- inode->i_pipe->writers++;
44606+ atomic_inc(&inode->i_pipe->writers);
44607 }
44608
44609 mutex_unlock(&inode->i_mutex);
44610@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
44611 inode->i_pipe = NULL;
44612 }
44613
44614-static struct vfsmount *pipe_mnt __read_mostly;
44615+struct vfsmount *pipe_mnt __read_mostly;
44616 static int pipefs_delete_dentry(struct dentry *dentry)
44617 {
44618 /*
44619@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
44620 goto fail_iput;
44621 inode->i_pipe = pipe;
44622
44623- pipe->readers = pipe->writers = 1;
44624+ atomic_set(&pipe->readers, 1);
44625+ atomic_set(&pipe->writers, 1);
44626 inode->i_fop = &rdwr_pipefifo_fops;
44627
44628 /*
44629diff -urNp linux-2.6.32.45/fs/proc/array.c linux-2.6.32.45/fs/proc/array.c
44630--- linux-2.6.32.45/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
44631+++ linux-2.6.32.45/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
44632@@ -60,6 +60,7 @@
44633 #include <linux/tty.h>
44634 #include <linux/string.h>
44635 #include <linux/mman.h>
44636+#include <linux/grsecurity.h>
44637 #include <linux/proc_fs.h>
44638 #include <linux/ioport.h>
44639 #include <linux/uaccess.h>
44640@@ -321,6 +322,21 @@ static inline void task_context_switch_c
44641 p->nivcsw);
44642 }
44643
44644+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44645+static inline void task_pax(struct seq_file *m, struct task_struct *p)
44646+{
44647+ if (p->mm)
44648+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
44649+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
44650+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
44651+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
44652+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
44653+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
44654+ else
44655+ seq_printf(m, "PaX:\t-----\n");
44656+}
44657+#endif
44658+
44659 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
44660 struct pid *pid, struct task_struct *task)
44661 {
44662@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
44663 task_cap(m, task);
44664 cpuset_task_status_allowed(m, task);
44665 task_context_switch_counts(m, task);
44666+
44667+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44668+ task_pax(m, task);
44669+#endif
44670+
44671+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
44672+ task_grsec_rbac(m, task);
44673+#endif
44674+
44675 return 0;
44676 }
44677
44678+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44679+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44680+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
44681+ _mm->pax_flags & MF_PAX_SEGMEXEC))
44682+#endif
44683+
44684 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
44685 struct pid *pid, struct task_struct *task, int whole)
44686 {
44687@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
44688 cputime_t cutime, cstime, utime, stime;
44689 cputime_t cgtime, gtime;
44690 unsigned long rsslim = 0;
44691- char tcomm[sizeof(task->comm)];
44692+ char tcomm[sizeof(task->comm)] = { 0 };
44693 unsigned long flags;
44694
44695+ pax_track_stack();
44696+
44697 state = *get_task_state(task);
44698 vsize = eip = esp = 0;
44699 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
44700@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
44701 gtime = task_gtime(task);
44702 }
44703
44704+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44705+ if (PAX_RAND_FLAGS(mm)) {
44706+ eip = 0;
44707+ esp = 0;
44708+ wchan = 0;
44709+ }
44710+#endif
44711+#ifdef CONFIG_GRKERNSEC_HIDESYM
44712+ wchan = 0;
44713+	eip = 0;
44714+	esp = 0;
44715+#endif
44716+
44717 /* scale priority and nice values from timeslices to -20..20 */
44718 /* to make it look like a "normal" Unix priority/nice value */
44719 priority = task_prio(task);
44720@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
44721 vsize,
44722 mm ? get_mm_rss(mm) : 0,
44723 rsslim,
44724+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44725+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
44726+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
44727+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
44728+#else
44729 mm ? (permitted ? mm->start_code : 1) : 0,
44730 mm ? (permitted ? mm->end_code : 1) : 0,
44731 (permitted && mm) ? mm->start_stack : 0,
44732+#endif
44733 esp,
44734 eip,
44735 /* The signal information here is obsolete.
44736@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
44737
44738 return 0;
44739 }
44740+
44741+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
44742+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
44743+{
44744+ u32 curr_ip = 0;
44745+ unsigned long flags;
44746+
44747+ if (lock_task_sighand(task, &flags)) {
44748+ curr_ip = task->signal->curr_ip;
44749+ unlock_task_sighand(task, &flags);
44750+ }
44751+
44752+ return sprintf(buffer, "%pI4\n", &curr_ip);
44753+}
44754+#endif
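
/*
 * Editor's illustrative sketch (not part of the patch): task_pax() above
 * renders each PaX flag of a task's mm as an uppercase letter when set and
 * lowercase when clear, producing the "PaX:\t..." line in /proc/<pid>/status.
 * The standalone version below uses hypothetical flag values; the real
 * MF_PAX_* constants are defined elsewhere in this patch.
 */
#include <stdio.h>

#define MF_PAX_PAGEEXEC	0x01	/* assumed values, for illustration only */
#define MF_PAX_EMUTRAMP	0x02
#define MF_PAX_MPROTECT	0x04
#define MF_PAX_RANDMMAP	0x08
#define MF_PAX_SEGMEXEC	0x10

static void show_pax_flags(unsigned long pax_flags)
{
	printf("PaX:\t%c%c%c%c%c\n",
	       pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
	       pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
	       pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
	       pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
	       pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
}

int main(void)
{
	show_pax_flags(MF_PAX_PAGEEXEC | MF_PAX_MPROTECT | MF_PAX_RANDMMAP);
	show_pax_flags(0);	/* all flags clear prints "pemrs" */
	return 0;
}
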
44755diff -urNp linux-2.6.32.45/fs/proc/base.c linux-2.6.32.45/fs/proc/base.c
44756--- linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:35:30.000000000 -0400
44757+++ linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:34:33.000000000 -0400
44758@@ -102,6 +102,22 @@ struct pid_entry {
44759 union proc_op op;
44760 };
44761
44762+struct getdents_callback {
44763+ struct linux_dirent __user * current_dir;
44764+ struct linux_dirent __user * previous;
44765+ struct file * file;
44766+ int count;
44767+ int error;
44768+};
44769+
44770+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
44771+ loff_t offset, u64 ino, unsigned int d_type)
44772+{
44773+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
44774+ buf->error = -EINVAL;
44775+ return 0;
44776+}
44777+
44778 #define NOD(NAME, MODE, IOP, FOP, OP) { \
44779 .name = (NAME), \
44780 .len = sizeof(NAME) - 1, \
44781@@ -213,6 +229,9 @@ static int check_mem_permission(struct t
44782 if (task == current)
44783 return 0;
44784
44785+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
44786+ return -EPERM;
44787+
44788 /*
44789 * If current is actively ptrace'ing, and would also be
44790 * permitted to freshly attach with ptrace now, permit it.
44791@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
44792 if (!mm->arg_end)
44793 goto out_mm; /* Shh! No looking before we're done */
44794
44795+ if (gr_acl_handle_procpidmem(task))
44796+ goto out_mm;
44797+
44798 len = mm->arg_end - mm->arg_start;
44799
44800 if (len > PAGE_SIZE)
44801@@ -287,12 +309,28 @@ out:
44802 return res;
44803 }
44804
44805+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44806+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44807+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
44808+ _mm->pax_flags & MF_PAX_SEGMEXEC))
44809+#endif
44810+
44811 static int proc_pid_auxv(struct task_struct *task, char *buffer)
44812 {
44813 int res = 0;
44814 struct mm_struct *mm = get_task_mm(task);
44815 if (mm) {
44816 unsigned int nwords = 0;
44817+
44818+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44819+ /* allow if we're currently ptracing this task */
44820+ if (PAX_RAND_FLAGS(mm) &&
44821+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
44822+ mmput(mm);
44823+ return res;
44824+ }
44825+#endif
44826+
44827 do {
44828 nwords += 2;
44829 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
44830@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
44831 }
44832
44833
44834-#ifdef CONFIG_KALLSYMS
44835+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44836 /*
44837 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
44838 * Returns the resolved symbol. If that fails, simply return the address.
44839@@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
44840 }
44841 #endif /* CONFIG_KALLSYMS */
44842
44843-#ifdef CONFIG_STACKTRACE
44844+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44845
44846 #define MAX_STACK_TRACE_DEPTH 64
44847
44848@@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
44849 return count;
44850 }
44851
44852-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
44853+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44854 static int proc_pid_syscall(struct task_struct *task, char *buffer)
44855 {
44856 long nr;
44857@@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
44858 /************************************************************************/
44859
44860 /* permission checks */
44861-static int proc_fd_access_allowed(struct inode *inode)
44862+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
44863 {
44864 struct task_struct *task;
44865 int allowed = 0;
44866@@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
44867 */
44868 task = get_proc_task(inode);
44869 if (task) {
44870- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
44871+ if (log)
44872+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
44873+ else
44874+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
44875 put_task_struct(task);
44876 }
44877 return allowed;
44878@@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
44879 if (!task)
44880 goto out_no_task;
44881
44882+ if (gr_acl_handle_procpidmem(task))
44883+ goto out;
44884+
44885 if (!ptrace_may_access(task, PTRACE_MODE_READ))
44886 goto out;
44887
44888@@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
44889 path_put(&nd->path);
44890
44891 /* Are we allowed to snoop on the tasks file descriptors? */
44892- if (!proc_fd_access_allowed(inode))
44893+ if (!proc_fd_access_allowed(inode,0))
44894 goto out;
44895
44896 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
44897@@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
44898 struct path path;
44899
44900 /* Are we allowed to snoop on the tasks file descriptors? */
44901- if (!proc_fd_access_allowed(inode))
44902- goto out;
44903+	/* logging this is needed for learning on chromium to work properly,
44904+	   but we don't want to flood the logs from 'ps', which does a readlink
44905+	   on /proc/fd/2 of every task in the listing, nor do we want 'ps' to
44906+	   learn CAP_SYS_PTRACE, as it is not necessary for its basic functionality
44907+	 */
44908+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
44909+ if (!proc_fd_access_allowed(inode,0))
44910+ goto out;
44911+ } else {
44912+ if (!proc_fd_access_allowed(inode,1))
44913+ goto out;
44914+ }
44915
44916 error = PROC_I(inode)->op.proc_get_link(inode, &path);
44917 if (error)
44918@@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
44919 rcu_read_lock();
44920 cred = __task_cred(task);
44921 inode->i_uid = cred->euid;
44922+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44923+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44924+#else
44925 inode->i_gid = cred->egid;
44926+#endif
44927 rcu_read_unlock();
44928 }
44929 security_task_to_inode(task, inode);
44930@@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
44931 struct inode *inode = dentry->d_inode;
44932 struct task_struct *task;
44933 const struct cred *cred;
44934+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44935+ const struct cred *tmpcred = current_cred();
44936+#endif
44937
44938 generic_fillattr(inode, stat);
44939
44940@@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
44941 stat->uid = 0;
44942 stat->gid = 0;
44943 task = pid_task(proc_pid(inode), PIDTYPE_PID);
44944+
44945+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
44946+ rcu_read_unlock();
44947+ return -ENOENT;
44948+ }
44949+
44950 if (task) {
44951+ cred = __task_cred(task);
44952+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44953+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
44954+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44955+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
44956+#endif
44957+ ) {
44958+#endif
44959 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
44960+#ifdef CONFIG_GRKERNSEC_PROC_USER
44961+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
44962+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44963+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
44964+#endif
44965 task_dumpable(task)) {
44966- cred = __task_cred(task);
44967 stat->uid = cred->euid;
44968+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44969+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
44970+#else
44971 stat->gid = cred->egid;
44972+#endif
44973 }
44974+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44975+ } else {
44976+ rcu_read_unlock();
44977+ return -ENOENT;
44978+ }
44979+#endif
44980 }
44981 rcu_read_unlock();
44982 return 0;
44983@@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
44984
44985 if (task) {
44986 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
44987+#ifdef CONFIG_GRKERNSEC_PROC_USER
44988+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
44989+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44990+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
44991+#endif
44992 task_dumpable(task)) {
44993 rcu_read_lock();
44994 cred = __task_cred(task);
44995 inode->i_uid = cred->euid;
44996+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44997+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44998+#else
44999 inode->i_gid = cred->egid;
45000+#endif
45001 rcu_read_unlock();
45002 } else {
45003 inode->i_uid = 0;
45004@@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
45005 int fd = proc_fd(inode);
45006
45007 if (task) {
45008- files = get_files_struct(task);
45009+ if (!gr_acl_handle_procpidmem(task))
45010+ files = get_files_struct(task);
45011 put_task_struct(task);
45012 }
45013 if (files) {
45014@@ -1895,12 +1994,22 @@ static const struct file_operations proc
45015 static int proc_fd_permission(struct inode *inode, int mask)
45016 {
45017 int rv;
45018+ struct task_struct *task;
45019
45020 rv = generic_permission(inode, mask, NULL);
45021- if (rv == 0)
45022- return 0;
45023+
45024 if (task_pid(current) == proc_pid(inode))
45025 rv = 0;
45026+
45027+ task = get_proc_task(inode);
45028+ if (task == NULL)
45029+ return rv;
45030+
45031+ if (gr_acl_handle_procpidmem(task))
45032+ rv = -EACCES;
45033+
45034+ put_task_struct(task);
45035+
45036 return rv;
45037 }
45038
45039@@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
45040 if (!task)
45041 goto out_no_task;
45042
45043+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45044+ goto out;
45045+
45046 /*
45047 * Yes, it does not scale. And it should not. Don't add
45048 * new entries into /proc/<tgid>/ without very good reasons.
45049@@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
45050 if (!task)
45051 goto out_no_task;
45052
45053+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45054+ goto out;
45055+
45056 ret = 0;
45057 i = filp->f_pos;
45058 switch (i) {
45059@@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
45060 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45061 void *cookie)
45062 {
45063- char *s = nd_get_link(nd);
45064+ const char *s = nd_get_link(nd);
45065 if (!IS_ERR(s))
45066 __putname(s);
45067 }
45068@@ -2522,7 +2637,7 @@ static const struct pid_entry tgid_base_
45069 #ifdef CONFIG_SCHED_DEBUG
45070 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45071 #endif
45072-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45073+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45074 INF("syscall", S_IRUSR, proc_pid_syscall),
45075 #endif
45076 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45077@@ -2547,10 +2662,10 @@ static const struct pid_entry tgid_base_
45078 #ifdef CONFIG_SECURITY
45079 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45080 #endif
45081-#ifdef CONFIG_KALLSYMS
45082+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45083 INF("wchan", S_IRUGO, proc_pid_wchan),
45084 #endif
45085-#ifdef CONFIG_STACKTRACE
45086+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45087 ONE("stack", S_IRUSR, proc_pid_stack),
45088 #endif
45089 #ifdef CONFIG_SCHEDSTATS
45090@@ -2580,6 +2695,9 @@ static const struct pid_entry tgid_base_
45091 #ifdef CONFIG_TASK_IO_ACCOUNTING
45092 INF("io", S_IRUSR, proc_tgid_io_accounting),
45093 #endif
45094+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45095+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45096+#endif
45097 };
45098
45099 static int proc_tgid_base_readdir(struct file * filp,
45100@@ -2704,7 +2822,14 @@ static struct dentry *proc_pid_instantia
45101 if (!inode)
45102 goto out;
45103
45104+#ifdef CONFIG_GRKERNSEC_PROC_USER
45105+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45106+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45107+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45108+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45109+#else
45110 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45111+#endif
45112 inode->i_op = &proc_tgid_base_inode_operations;
45113 inode->i_fop = &proc_tgid_base_operations;
45114 inode->i_flags|=S_IMMUTABLE;
45115@@ -2746,7 +2871,11 @@ struct dentry *proc_pid_lookup(struct in
45116 if (!task)
45117 goto out;
45118
45119+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45120+ goto out_put_task;
45121+
45122 result = proc_pid_instantiate(dir, dentry, task, NULL);
45123+out_put_task:
45124 put_task_struct(task);
45125 out:
45126 return result;
45127@@ -2811,6 +2940,11 @@ int proc_pid_readdir(struct file * filp,
45128 {
45129 unsigned int nr;
45130 struct task_struct *reaper;
45131+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45132+ const struct cred *tmpcred = current_cred();
45133+ const struct cred *itercred;
45134+#endif
45135+ filldir_t __filldir = filldir;
45136 struct tgid_iter iter;
45137 struct pid_namespace *ns;
45138
45139@@ -2834,8 +2968,27 @@ int proc_pid_readdir(struct file * filp,
45140 for (iter = next_tgid(ns, iter);
45141 iter.task;
45142 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45143+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45144+ rcu_read_lock();
45145+ itercred = __task_cred(iter.task);
45146+#endif
45147+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45148+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45149+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45150+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45151+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45152+#endif
45153+ )
45154+#endif
45155+ )
45156+ __filldir = &gr_fake_filldir;
45157+ else
45158+ __filldir = filldir;
45159+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45160+ rcu_read_unlock();
45161+#endif
45162 filp->f_pos = iter.tgid + TGID_OFFSET;
45163- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45164+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45165 put_task_struct(iter.task);
45166 goto out;
45167 }
45168@@ -2861,7 +3014,7 @@ static const struct pid_entry tid_base_s
45169 #ifdef CONFIG_SCHED_DEBUG
45170 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45171 #endif
45172-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45173+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45174 INF("syscall", S_IRUSR, proc_pid_syscall),
45175 #endif
45176 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45177@@ -2885,10 +3038,10 @@ static const struct pid_entry tid_base_s
45178 #ifdef CONFIG_SECURITY
45179 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45180 #endif
45181-#ifdef CONFIG_KALLSYMS
45182+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45183 INF("wchan", S_IRUGO, proc_pid_wchan),
45184 #endif
45185-#ifdef CONFIG_STACKTRACE
45186+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45187 ONE("stack", S_IRUSR, proc_pid_stack),
45188 #endif
45189 #ifdef CONFIG_SCHEDSTATS
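
/*
 * Editor's illustrative sketch (not part of the patch): the proc_pid_readdir
 * hunk above picks, per task, between the real filldir callback and the no-op
 * gr_fake_filldir, so hidden or foreign tasks never produce a directory entry
 * in /proc.  The standalone analogue below filters a fixed list of "pids"
 * through a swappable callback; all names and the hiding rule are invented.
 */
#include <stdio.h>

typedef int (*filldir_t)(void *buf, const char *name);

static int real_filldir(void *buf, const char *name)
{
	(void)buf;
	printf("%s\n", name);
	return 0;
}

static int fake_filldir(void *buf, const char *name)
{
	(void)buf;		/* swallow the entry: the caller sees nothing */
	(void)name;
	return 0;
}

static int task_is_hidden(int pid)
{
	return pid == 1;	/* pretend pid 1 must stay invisible */
}

static void list_pids(const int *pids, int n, filldir_t filldir)
{
	char name[16];
	int i;

	for (i = 0; i < n; i++) {
		/* pick the callback per entry, as the readdir hunk does */
		filldir_t cb = task_is_hidden(pids[i]) ? fake_filldir : filldir;

		snprintf(name, sizeof(name), "%d", pids[i]);
		cb(NULL, name);
	}
}

int main(void)
{
	static const int pids[] = { 1, 42, 1337 };

	list_pids(pids, 3, real_filldir);	/* prints 42 and 1337 only */
	return 0;
}
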
45190diff -urNp linux-2.6.32.45/fs/proc/cmdline.c linux-2.6.32.45/fs/proc/cmdline.c
45191--- linux-2.6.32.45/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
45192+++ linux-2.6.32.45/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
45193@@ -23,7 +23,11 @@ static const struct file_operations cmdl
45194
45195 static int __init proc_cmdline_init(void)
45196 {
45197+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45198+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45199+#else
45200 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45201+#endif
45202 return 0;
45203 }
45204 module_init(proc_cmdline_init);
45205diff -urNp linux-2.6.32.45/fs/proc/devices.c linux-2.6.32.45/fs/proc/devices.c
45206--- linux-2.6.32.45/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
45207+++ linux-2.6.32.45/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
45208@@ -64,7 +64,11 @@ static const struct file_operations proc
45209
45210 static int __init proc_devices_init(void)
45211 {
45212+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45213+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45214+#else
45215 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45216+#endif
45217 return 0;
45218 }
45219 module_init(proc_devices_init);
45220diff -urNp linux-2.6.32.45/fs/proc/inode.c linux-2.6.32.45/fs/proc/inode.c
45221--- linux-2.6.32.45/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
45222+++ linux-2.6.32.45/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
45223@@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
45224 if (de->mode) {
45225 inode->i_mode = de->mode;
45226 inode->i_uid = de->uid;
45227+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45228+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45229+#else
45230 inode->i_gid = de->gid;
45231+#endif
45232 }
45233 if (de->size)
45234 inode->i_size = de->size;
45235diff -urNp linux-2.6.32.45/fs/proc/internal.h linux-2.6.32.45/fs/proc/internal.h
45236--- linux-2.6.32.45/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
45237+++ linux-2.6.32.45/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
45238@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45239 struct pid *pid, struct task_struct *task);
45240 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45241 struct pid *pid, struct task_struct *task);
45242+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45243+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45244+#endif
45245 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45246
45247 extern const struct file_operations proc_maps_operations;
45248diff -urNp linux-2.6.32.45/fs/proc/Kconfig linux-2.6.32.45/fs/proc/Kconfig
45249--- linux-2.6.32.45/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
45250+++ linux-2.6.32.45/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
45251@@ -30,12 +30,12 @@ config PROC_FS
45252
45253 config PROC_KCORE
45254 bool "/proc/kcore support" if !ARM
45255- depends on PROC_FS && MMU
45256+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45257
45258 config PROC_VMCORE
45259 bool "/proc/vmcore support (EXPERIMENTAL)"
45260- depends on PROC_FS && CRASH_DUMP
45261- default y
45262+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45263+ default n
45264 help
45265 Exports the dump image of crashed kernel in ELF format.
45266
45267@@ -59,8 +59,8 @@ config PROC_SYSCTL
45268 limited in memory.
45269
45270 config PROC_PAGE_MONITOR
45271- default y
45272- depends on PROC_FS && MMU
45273+ default n
45274+ depends on PROC_FS && MMU && !GRKERNSEC
45275 bool "Enable /proc page monitoring" if EMBEDDED
45276 help
45277 Various /proc files exist to monitor process memory utilization:
45278diff -urNp linux-2.6.32.45/fs/proc/kcore.c linux-2.6.32.45/fs/proc/kcore.c
45279--- linux-2.6.32.45/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
45280+++ linux-2.6.32.45/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
45281@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
45282 off_t offset = 0;
45283 struct kcore_list *m;
45284
45285+ pax_track_stack();
45286+
45287 /* setup ELF header */
45288 elf = (struct elfhdr *) bufp;
45289 bufp += sizeof(struct elfhdr);
45290@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
45291 * the addresses in the elf_phdr on our list.
45292 */
45293 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45294- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45295+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45296+ if (tsz > buflen)
45297 tsz = buflen;
45298-
45299+
45300 while (buflen) {
45301 struct kcore_list *m;
45302
45303@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
45304 kfree(elf_buf);
45305 } else {
45306 if (kern_addr_valid(start)) {
45307- unsigned long n;
45308+ char *elf_buf;
45309+ mm_segment_t oldfs;
45310
45311- n = copy_to_user(buffer, (char *)start, tsz);
45312- /*
45313- * We cannot distingush between fault on source
45314- * and fault on destination. When this happens
45315- * we clear too and hope it will trigger the
45316- * EFAULT again.
45317- */
45318- if (n) {
45319- if (clear_user(buffer + tsz - n,
45320- n))
45321+ elf_buf = kmalloc(tsz, GFP_KERNEL);
45322+ if (!elf_buf)
45323+ return -ENOMEM;
45324+ oldfs = get_fs();
45325+ set_fs(KERNEL_DS);
45326+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45327+ set_fs(oldfs);
45328+ if (copy_to_user(buffer, elf_buf, tsz)) {
45329+ kfree(elf_buf);
45330 return -EFAULT;
45331+ }
45332 }
45333+ set_fs(oldfs);
45334+ kfree(elf_buf);
45335 } else {
45336 if (clear_user(buffer, tsz))
45337 return -EFAULT;
45338@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
45339
45340 static int open_kcore(struct inode *inode, struct file *filp)
45341 {
45342+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45343+ return -EPERM;
45344+#endif
45345 if (!capable(CAP_SYS_RAWIO))
45346 return -EPERM;
45347 if (kcore_need_update)
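
/*
 * Editor's illustrative sketch (not part of the patch): the read_kcore hunk
 * above stops copying straight from a kernel virtual address to userspace and
 * instead stages the data through a freshly allocated bounce buffer, so a
 * fault while reading the source can be told apart from a fault while writing
 * the user destination (the removed comment notes the old code could not
 * distinguish the two).  Below, copy_in()/copy_out() are invented stand-ins
 * for __copy_from_user()/copy_to_user().
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int copy_in(void *dst, const void *src, size_t len)
{
	if (!src)
		return -EFAULT;	/* simulated fault on the source */
	memcpy(dst, src, len);
	return 0;
}

static int copy_out(void *dst, const void *src, size_t len)
{
	if (!dst)
		return -EFAULT;	/* simulated fault on the destination */
	memcpy(dst, src, len);
	return 0;
}

static int bounce_copy(void *user_dst, const void *kernel_src, size_t len)
{
	void *bounce = malloc(len);
	int ret;

	if (!bounce)
		return -ENOMEM;

	ret = copy_in(bounce, kernel_src, len);		/* stage 1: source side */
	if (!ret)
		ret = copy_out(user_dst, bounce, len);	/* stage 2: destination side */

	free(bounce);
	return ret;
}

int main(void)
{
	char src[] = "kcore payload";
	char dst[sizeof(src)];

	printf("ok copy:    %d\n", bounce_copy(dst, src, sizeof(src)));
	printf("bad source: %d\n", bounce_copy(dst, NULL, sizeof(src)));
	return 0;
}
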
45348diff -urNp linux-2.6.32.45/fs/proc/meminfo.c linux-2.6.32.45/fs/proc/meminfo.c
45349--- linux-2.6.32.45/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
45350+++ linux-2.6.32.45/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
45351@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45352 unsigned long pages[NR_LRU_LISTS];
45353 int lru;
45354
45355+ pax_track_stack();
45356+
45357 /*
45358 * display in kilobytes.
45359 */
45360@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
45361 vmi.used >> 10,
45362 vmi.largest_chunk >> 10
45363 #ifdef CONFIG_MEMORY_FAILURE
45364- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45365+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45366 #endif
45367 );
45368
45369diff -urNp linux-2.6.32.45/fs/proc/nommu.c linux-2.6.32.45/fs/proc/nommu.c
45370--- linux-2.6.32.45/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
45371+++ linux-2.6.32.45/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
45372@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
45373 if (len < 1)
45374 len = 1;
45375 seq_printf(m, "%*c", len, ' ');
45376- seq_path(m, &file->f_path, "");
45377+ seq_path(m, &file->f_path, "\n\\");
45378 }
45379
45380 seq_putc(m, '\n');
45381diff -urNp linux-2.6.32.45/fs/proc/proc_net.c linux-2.6.32.45/fs/proc/proc_net.c
45382--- linux-2.6.32.45/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
45383+++ linux-2.6.32.45/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
45384@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
45385 struct task_struct *task;
45386 struct nsproxy *ns;
45387 struct net *net = NULL;
45388+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45389+ const struct cred *cred = current_cred();
45390+#endif
45391+
45392+#ifdef CONFIG_GRKERNSEC_PROC_USER
45393+ if (cred->fsuid)
45394+ return net;
45395+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45396+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45397+ return net;
45398+#endif
45399
45400 rcu_read_lock();
45401 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45402diff -urNp linux-2.6.32.45/fs/proc/proc_sysctl.c linux-2.6.32.45/fs/proc/proc_sysctl.c
45403--- linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
45404+++ linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
45405@@ -7,6 +7,8 @@
45406 #include <linux/security.h>
45407 #include "internal.h"
45408
45409+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45410+
45411 static const struct dentry_operations proc_sys_dentry_operations;
45412 static const struct file_operations proc_sys_file_operations;
45413 static const struct inode_operations proc_sys_inode_operations;
45414@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
45415 if (!p)
45416 goto out;
45417
45418+ if (gr_handle_sysctl(p, MAY_EXEC))
45419+ goto out;
45420+
45421 err = ERR_PTR(-ENOMEM);
45422 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
45423 if (h)
45424@@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
45425 if (*pos < file->f_pos)
45426 continue;
45427
45428+ if (gr_handle_sysctl(table, 0))
45429+ continue;
45430+
45431 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45432 if (res)
45433 return res;
45434@@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
45435 if (IS_ERR(head))
45436 return PTR_ERR(head);
45437
45438+ if (table && gr_handle_sysctl(table, MAY_EXEC))
45439+ return -ENOENT;
45440+
45441 generic_fillattr(inode, stat);
45442 if (table)
45443 stat->mode = (stat->mode & S_IFMT) | table->mode;
45444diff -urNp linux-2.6.32.45/fs/proc/root.c linux-2.6.32.45/fs/proc/root.c
45445--- linux-2.6.32.45/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
45446+++ linux-2.6.32.45/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
45447@@ -134,7 +134,15 @@ void __init proc_root_init(void)
45448 #ifdef CONFIG_PROC_DEVICETREE
45449 proc_device_tree_init();
45450 #endif
45451+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45452+#ifdef CONFIG_GRKERNSEC_PROC_USER
45453+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45454+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45455+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45456+#endif
45457+#else
45458 proc_mkdir("bus", NULL);
45459+#endif
45460 proc_sys_init();
45461 }
45462
45463diff -urNp linux-2.6.32.45/fs/proc/task_mmu.c linux-2.6.32.45/fs/proc/task_mmu.c
45464--- linux-2.6.32.45/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
45465+++ linux-2.6.32.45/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
45466@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
45467 "VmStk:\t%8lu kB\n"
45468 "VmExe:\t%8lu kB\n"
45469 "VmLib:\t%8lu kB\n"
45470- "VmPTE:\t%8lu kB\n",
45471- hiwater_vm << (PAGE_SHIFT-10),
45472+ "VmPTE:\t%8lu kB\n"
45473+
45474+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45475+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45476+#endif
45477+
45478+ ,hiwater_vm << (PAGE_SHIFT-10),
45479 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45480 mm->locked_vm << (PAGE_SHIFT-10),
45481 hiwater_rss << (PAGE_SHIFT-10),
45482 total_rss << (PAGE_SHIFT-10),
45483 data << (PAGE_SHIFT-10),
45484 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
45485- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
45486+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
45487+
45488+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45489+ , mm->context.user_cs_base, mm->context.user_cs_limit
45490+#endif
45491+
45492+ );
45493 }
45494
45495 unsigned long task_vsize(struct mm_struct *mm)
45496@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
45497 struct proc_maps_private *priv = m->private;
45498 struct vm_area_struct *vma = v;
45499
45500- vma_stop(priv, vma);
45501+ if (!IS_ERR(vma))
45502+ vma_stop(priv, vma);
45503 if (priv->task)
45504 put_task_struct(priv->task);
45505 }
45506@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
45507 return ret;
45508 }
45509
45510+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45511+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45512+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45513+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45514+#endif
45515+
45516 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
45517 {
45518 struct mm_struct *mm = vma->vm_mm;
45519@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
45520 int flags = vma->vm_flags;
45521 unsigned long ino = 0;
45522 unsigned long long pgoff = 0;
45523- unsigned long start;
45524 dev_t dev = 0;
45525 int len;
45526
45527@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
45528 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
45529 }
45530
45531- /* We don't show the stack guard page in /proc/maps */
45532- start = vma->vm_start;
45533- if (vma->vm_flags & VM_GROWSDOWN)
45534- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
45535- start += PAGE_SIZE;
45536-
45537 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
45538- start,
45539+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45540+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
45541+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
45542+#else
45543+ vma->vm_start,
45544 vma->vm_end,
45545+#endif
45546 flags & VM_READ ? 'r' : '-',
45547 flags & VM_WRITE ? 'w' : '-',
45548 flags & VM_EXEC ? 'x' : '-',
45549 flags & VM_MAYSHARE ? 's' : 'p',
45550+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45551+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
45552+#else
45553 pgoff,
45554+#endif
45555 MAJOR(dev), MINOR(dev), ino, &len);
45556
45557 /*
45558@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
45559 */
45560 if (file) {
45561 pad_len_spaces(m, len);
45562- seq_path(m, &file->f_path, "\n");
45563+ seq_path(m, &file->f_path, "\n\\");
45564 } else {
45565 const char *name = arch_vma_name(vma);
45566 if (!name) {
45567@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
45568 if (vma->vm_start <= mm->brk &&
45569 vma->vm_end >= mm->start_brk) {
45570 name = "[heap]";
45571- } else if (vma->vm_start <= mm->start_stack &&
45572- vma->vm_end >= mm->start_stack) {
45573+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
45574+ (vma->vm_start <= mm->start_stack &&
45575+ vma->vm_end >= mm->start_stack)) {
45576 name = "[stack]";
45577 }
45578 } else {
45579@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
45580 };
45581
45582 memset(&mss, 0, sizeof mss);
45583- mss.vma = vma;
45584- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45585- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45586+
45587+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45588+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
45589+#endif
45590+ mss.vma = vma;
45591+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45592+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45593+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45594+ }
45595+#endif
45596
45597 show_map_vma(m, vma);
45598
45599@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
45600 "Swap: %8lu kB\n"
45601 "KernelPageSize: %8lu kB\n"
45602 "MMUPageSize: %8lu kB\n",
45603+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45604+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
45605+#else
45606 (vma->vm_end - vma->vm_start) >> 10,
45607+#endif
45608 mss.resident >> 10,
45609 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
45610 mss.shared_clean >> 10,
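
/*
 * Editor's illustrative sketch (not part of the patch): the seq_path()
 * callers above now pass "\n\\" as the escape set, so a file name containing
 * a newline or backslash cannot forge extra lines in the line-oriented
 * /proc/<pid>/maps output.  The helper below mimics that idea by emitting
 * \ooo octal escapes for characters in the escape set; it is a simplified
 * analogue, not the kernel's mangle_path().
 */
#include <stdio.h>
#include <string.h>

static void print_escaped(const char *name, const char *esc)
{
	const unsigned char *p = (const unsigned char *)name;

	for (; *p; p++) {
		if (strchr(esc, *p))
			printf("\\%03o", *p);	/* escaped: one entry stays one line */
		else
			putchar(*p);
	}
	putchar('\n');
}

int main(void)
{
	/* a malicious file name trying to fake a second mapping line */
	print_escaped("/tmp/evil\n08048000-08050000 r-xp ... /bin/true", "\n\\");
	return 0;
}
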
45611diff -urNp linux-2.6.32.45/fs/proc/task_nommu.c linux-2.6.32.45/fs/proc/task_nommu.c
45612--- linux-2.6.32.45/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
45613+++ linux-2.6.32.45/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
45614@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
45615 else
45616 bytes += kobjsize(mm);
45617
45618- if (current->fs && current->fs->users > 1)
45619+ if (current->fs && atomic_read(&current->fs->users) > 1)
45620 sbytes += kobjsize(current->fs);
45621 else
45622 bytes += kobjsize(current->fs);
45623@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
45624 if (len < 1)
45625 len = 1;
45626 seq_printf(m, "%*c", len, ' ');
45627- seq_path(m, &file->f_path, "");
45628+ seq_path(m, &file->f_path, "\n\\");
45629 }
45630
45631 seq_putc(m, '\n');
45632diff -urNp linux-2.6.32.45/fs/readdir.c linux-2.6.32.45/fs/readdir.c
45633--- linux-2.6.32.45/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
45634+++ linux-2.6.32.45/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
45635@@ -16,6 +16,7 @@
45636 #include <linux/security.h>
45637 #include <linux/syscalls.h>
45638 #include <linux/unistd.h>
45639+#include <linux/namei.h>
45640
45641 #include <asm/uaccess.h>
45642
45643@@ -67,6 +68,7 @@ struct old_linux_dirent {
45644
45645 struct readdir_callback {
45646 struct old_linux_dirent __user * dirent;
45647+ struct file * file;
45648 int result;
45649 };
45650
45651@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
45652 buf->result = -EOVERFLOW;
45653 return -EOVERFLOW;
45654 }
45655+
45656+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45657+ return 0;
45658+
45659 buf->result++;
45660 dirent = buf->dirent;
45661 if (!access_ok(VERIFY_WRITE, dirent,
45662@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
45663
45664 buf.result = 0;
45665 buf.dirent = dirent;
45666+ buf.file = file;
45667
45668 error = vfs_readdir(file, fillonedir, &buf);
45669 if (buf.result)
45670@@ -142,6 +149,7 @@ struct linux_dirent {
45671 struct getdents_callback {
45672 struct linux_dirent __user * current_dir;
45673 struct linux_dirent __user * previous;
45674+ struct file * file;
45675 int count;
45676 int error;
45677 };
45678@@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
45679 buf->error = -EOVERFLOW;
45680 return -EOVERFLOW;
45681 }
45682+
45683+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45684+ return 0;
45685+
45686 dirent = buf->previous;
45687 if (dirent) {
45688 if (__put_user(offset, &dirent->d_off))
45689@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
45690 buf.previous = NULL;
45691 buf.count = count;
45692 buf.error = 0;
45693+ buf.file = file;
45694
45695 error = vfs_readdir(file, filldir, &buf);
45696 if (error >= 0)
45697@@ -228,6 +241,7 @@ out:
45698 struct getdents_callback64 {
45699 struct linux_dirent64 __user * current_dir;
45700 struct linux_dirent64 __user * previous;
45701+ struct file *file;
45702 int count;
45703 int error;
45704 };
45705@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
45706 buf->error = -EINVAL; /* only used if we fail.. */
45707 if (reclen > buf->count)
45708 return -EINVAL;
45709+
45710+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45711+ return 0;
45712+
45713 dirent = buf->previous;
45714 if (dirent) {
45715 if (__put_user(offset, &dirent->d_off))
45716@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
45717
45718 buf.current_dir = dirent;
45719 buf.previous = NULL;
45720+ buf.file = file;
45721 buf.count = count;
45722 buf.error = 0;
45723
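
/*
 * Editor's illustrative sketch (not part of the patch): the readdir hunks
 * above add a struct file pointer to each callback context so that
 * gr_acl_handle_filldir() can veto individual entries from inside the
 * filldir callbacks (returning 0 silently skips the entry).  The standalone
 * analogue below carries a context pointer in the callback buffer and drops
 * entries its policy rejects; the policy and names are invented.
 */
#include <stdio.h>
#include <string.h>

struct dir_ctx {
	const char *origin;	/* stands in for buf->file */
	int emitted;
};

/* pretend ACL: hide anything starting with ".grsec" */
static int policy_allows(const struct dir_ctx *ctx, const char *name)
{
	(void)ctx;
	return strncmp(name, ".grsec", 6) != 0;
}

static int filldir(void *__buf, const char *name)
{
	struct dir_ctx *ctx = __buf;

	if (!policy_allows(ctx, name))
		return 0;	/* silently skip, like the patched filldir */

	printf("%s\n", name);
	ctx->emitted++;
	return 0;
}

int main(void)
{
	struct dir_ctx ctx = { "fd:3", 0 };
	const char *entries[] = { ".", "..", ".grsec_hidden", "notes.txt" };
	size_t i;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		filldir(&ctx, entries[i]);

	printf("%d entries shown\n", ctx.emitted);
	return 0;
}
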
45724diff -urNp linux-2.6.32.45/fs/reiserfs/dir.c linux-2.6.32.45/fs/reiserfs/dir.c
45725--- linux-2.6.32.45/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
45726+++ linux-2.6.32.45/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
45727@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
45728 struct reiserfs_dir_entry de;
45729 int ret = 0;
45730
45731+ pax_track_stack();
45732+
45733 reiserfs_write_lock(inode->i_sb);
45734
45735 reiserfs_check_lock_depth(inode->i_sb, "readdir");
45736diff -urNp linux-2.6.32.45/fs/reiserfs/do_balan.c linux-2.6.32.45/fs/reiserfs/do_balan.c
45737--- linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
45738+++ linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
45739@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
45740 return;
45741 }
45742
45743- atomic_inc(&(fs_generation(tb->tb_sb)));
45744+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
45745 do_balance_starts(tb);
45746
45747 /* balance leaf returns 0 except if combining L R and S into
45748diff -urNp linux-2.6.32.45/fs/reiserfs/item_ops.c linux-2.6.32.45/fs/reiserfs/item_ops.c
45749--- linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
45750+++ linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
45751@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
45752 vi->vi_index, vi->vi_type, vi->vi_ih);
45753 }
45754
45755-static struct item_operations stat_data_ops = {
45756+static const struct item_operations stat_data_ops = {
45757 .bytes_number = sd_bytes_number,
45758 .decrement_key = sd_decrement_key,
45759 .is_left_mergeable = sd_is_left_mergeable,
45760@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
45761 vi->vi_index, vi->vi_type, vi->vi_ih);
45762 }
45763
45764-static struct item_operations direct_ops = {
45765+static const struct item_operations direct_ops = {
45766 .bytes_number = direct_bytes_number,
45767 .decrement_key = direct_decrement_key,
45768 .is_left_mergeable = direct_is_left_mergeable,
45769@@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
45770 vi->vi_index, vi->vi_type, vi->vi_ih);
45771 }
45772
45773-static struct item_operations indirect_ops = {
45774+static const struct item_operations indirect_ops = {
45775 .bytes_number = indirect_bytes_number,
45776 .decrement_key = indirect_decrement_key,
45777 .is_left_mergeable = indirect_is_left_mergeable,
45778@@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
45779 printk("\n");
45780 }
45781
45782-static struct item_operations direntry_ops = {
45783+static const struct item_operations direntry_ops = {
45784 .bytes_number = direntry_bytes_number,
45785 .decrement_key = direntry_decrement_key,
45786 .is_left_mergeable = direntry_is_left_mergeable,
45787@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
45788 "Invalid item type observed, run fsck ASAP");
45789 }
45790
45791-static struct item_operations errcatch_ops = {
45792+static const struct item_operations errcatch_ops = {
45793 errcatch_bytes_number,
45794 errcatch_decrement_key,
45795 errcatch_is_left_mergeable,
45796@@ -746,7 +746,7 @@ static struct item_operations errcatch_o
45797 #error Item types must use disk-format assigned values.
45798 #endif
45799
45800-struct item_operations *item_ops[TYPE_ANY + 1] = {
45801+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
45802 &stat_data_ops,
45803 &indirect_ops,
45804 &direct_ops,
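
/*
 * Editor's illustrative sketch (not part of the patch): the item_ops.c hunks
 * above turn every item_operations table, and the item_ops[] array of
 * pointers to them, into const objects so the function pointers can live in
 * read-only data.  A minimal standalone version of the same constification:
 */
#include <stdio.h>

struct item_operations {
	int (*bytes_number)(int size);
};

static int sd_bytes_number(int size)  { return size; }
static int dir_bytes_number(int size) { return size * 2; }

static const struct item_operations stat_data_ops = { sd_bytes_number };
static const struct item_operations direntry_ops  = { dir_bytes_number };

/* const pointers to const tables: neither the array nor the ops it points to
 * can be rewritten at run time, which is the point of the hunks above. */
static const struct item_operations *const item_ops[] = {
	&stat_data_ops,
	&direntry_ops,
};

int main(void)
{
	printf("%d\n", item_ops[1]->bytes_number(21));	/* prints 42 */
	return 0;
}
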
45805diff -urNp linux-2.6.32.45/fs/reiserfs/journal.c linux-2.6.32.45/fs/reiserfs/journal.c
45806--- linux-2.6.32.45/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
45807+++ linux-2.6.32.45/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
45808@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
45809 struct buffer_head *bh;
45810 int i, j;
45811
45812+ pax_track_stack();
45813+
45814 bh = __getblk(dev, block, bufsize);
45815 if (buffer_uptodate(bh))
45816 return (bh);
45817diff -urNp linux-2.6.32.45/fs/reiserfs/namei.c linux-2.6.32.45/fs/reiserfs/namei.c
45818--- linux-2.6.32.45/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
45819+++ linux-2.6.32.45/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
45820@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
45821 unsigned long savelink = 1;
45822 struct timespec ctime;
45823
45824+ pax_track_stack();
45825+
45826 /* three balancings: (1) old name removal, (2) new name insertion
45827 and (3) maybe "save" link insertion
45828 stat data updates: (1) old directory,
45829diff -urNp linux-2.6.32.45/fs/reiserfs/procfs.c linux-2.6.32.45/fs/reiserfs/procfs.c
45830--- linux-2.6.32.45/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
45831+++ linux-2.6.32.45/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
45832@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
45833 "SMALL_TAILS " : "NO_TAILS ",
45834 replay_only(sb) ? "REPLAY_ONLY " : "",
45835 convert_reiserfs(sb) ? "CONV " : "",
45836- atomic_read(&r->s_generation_counter),
45837+ atomic_read_unchecked(&r->s_generation_counter),
45838 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
45839 SF(s_do_balance), SF(s_unneeded_left_neighbor),
45840 SF(s_good_search_by_key_reada), SF(s_bmaps),
45841@@ -309,6 +309,8 @@ static int show_journal(struct seq_file
45842 struct journal_params *jp = &rs->s_v1.s_journal;
45843 char b[BDEVNAME_SIZE];
45844
45845+ pax_track_stack();
45846+
45847 seq_printf(m, /* on-disk fields */
45848 "jp_journal_1st_block: \t%i\n"
45849 "jp_journal_dev: \t%s[%x]\n"
45850diff -urNp linux-2.6.32.45/fs/reiserfs/stree.c linux-2.6.32.45/fs/reiserfs/stree.c
45851--- linux-2.6.32.45/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
45852+++ linux-2.6.32.45/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
45853@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
45854 int iter = 0;
45855 #endif
45856
45857+ pax_track_stack();
45858+
45859 BUG_ON(!th->t_trans_id);
45860
45861 init_tb_struct(th, &s_del_balance, sb, path,
45862@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
45863 int retval;
45864 int quota_cut_bytes = 0;
45865
45866+ pax_track_stack();
45867+
45868 BUG_ON(!th->t_trans_id);
45869
45870 le_key2cpu_key(&cpu_key, key);
45871@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
45872 int quota_cut_bytes;
45873 loff_t tail_pos = 0;
45874
45875+ pax_track_stack();
45876+
45877 BUG_ON(!th->t_trans_id);
45878
45879 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
45880@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
45881 int retval;
45882 int fs_gen;
45883
45884+ pax_track_stack();
45885+
45886 BUG_ON(!th->t_trans_id);
45887
45888 fs_gen = get_generation(inode->i_sb);
45889@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
45890 int fs_gen = 0;
45891 int quota_bytes = 0;
45892
45893+ pax_track_stack();
45894+
45895 BUG_ON(!th->t_trans_id);
45896
45897 if (inode) { /* Do we count quotas for item? */
45898diff -urNp linux-2.6.32.45/fs/reiserfs/super.c linux-2.6.32.45/fs/reiserfs/super.c
45899--- linux-2.6.32.45/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
45900+++ linux-2.6.32.45/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
45901@@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
45902 {.option_name = NULL}
45903 };
45904
45905+ pax_track_stack();
45906+
45907 *blocks = 0;
45908 if (!options || !*options)
45909 /* use default configuration: create tails, journaling on, no
45910diff -urNp linux-2.6.32.45/fs/select.c linux-2.6.32.45/fs/select.c
45911--- linux-2.6.32.45/fs/select.c 2011-03-27 14:31:47.000000000 -0400
45912+++ linux-2.6.32.45/fs/select.c 2011-05-16 21:46:57.000000000 -0400
45913@@ -20,6 +20,7 @@
45914 #include <linux/module.h>
45915 #include <linux/slab.h>
45916 #include <linux/poll.h>
45917+#include <linux/security.h>
45918 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
45919 #include <linux/file.h>
45920 #include <linux/fdtable.h>
45921@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
45922 int retval, i, timed_out = 0;
45923 unsigned long slack = 0;
45924
45925+ pax_track_stack();
45926+
45927 rcu_read_lock();
45928 retval = max_select_fd(n, fds);
45929 rcu_read_unlock();
45930@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
45931 /* Allocate small arguments on the stack to save memory and be faster */
45932 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
45933
45934+ pax_track_stack();
45935+
45936 ret = -EINVAL;
45937 if (n < 0)
45938 goto out_nofds;
45939@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
45940 struct poll_list *walk = head;
45941 unsigned long todo = nfds;
45942
45943+ pax_track_stack();
45944+
45945+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
45946 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
45947 return -EINVAL;
45948
45949diff -urNp linux-2.6.32.45/fs/seq_file.c linux-2.6.32.45/fs/seq_file.c
45950--- linux-2.6.32.45/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
45951+++ linux-2.6.32.45/fs/seq_file.c 2011-08-23 21:22:32.000000000 -0400
45952@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
45953 return 0;
45954 }
45955 if (!m->buf) {
45956- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
45957+ m->size = PAGE_SIZE;
45958+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
45959 if (!m->buf)
45960 return -ENOMEM;
45961 }
45962@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
45963 Eoverflow:
45964 m->op->stop(m, p);
45965 kfree(m->buf);
45966- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
45967+ m->size <<= 1;
45968+ m->buf = kmalloc(m->size, GFP_KERNEL);
45969 return !m->buf ? -ENOMEM : -EAGAIN;
45970 }
45971
45972@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
45973 m->version = file->f_version;
45974 /* grab buffer if we didn't have one */
45975 if (!m->buf) {
45976- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
45977+ m->size = PAGE_SIZE;
45978+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
45979 if (!m->buf)
45980 goto Enomem;
45981 }
45982@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
45983 goto Fill;
45984 m->op->stop(m, p);
45985 kfree(m->buf);
45986- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
45987+ m->size <<= 1;
45988+ m->buf = kmalloc(m->size, GFP_KERNEL);
45989 if (!m->buf)
45990 goto Enomem;
45991 m->count = 0;
45992@@ -551,7 +555,7 @@ static void single_stop(struct seq_file
45993 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
45994 void *data)
45995 {
45996- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
45997+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
45998 int res = -ENOMEM;
45999
46000 if (op) {
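
/*
 * Editor's illustrative sketch (not part of the patch): the seq_file hunks
 * above unfold "m->buf = kmalloc(m->size <<= 1, ...)" into a separate size
 * update followed by the allocation.  The behaviour is unchanged; the side
 * effect simply stops hiding inside an argument list.  grow_buf() below shows
 * a slightly more defensive variant in which the size is committed only after
 * the allocation succeeds; that extra step is the editor's, not the patch's.
 */
#include <stdio.h>
#include <stdlib.h>

struct seq_buf {
	char *buf;
	size_t size;
};

static int grow_buf(struct seq_buf *m)
{
	size_t new_size = m->size ? m->size << 1 : 4096;
	char *new_buf = malloc(new_size);

	if (!new_buf)
		return -1;		/* old buf and size left untouched */

	free(m->buf);
	m->buf = new_buf;
	m->size = new_size;
	return 0;
}

int main(void)
{
	struct seq_buf m = { NULL, 0 };

	grow_buf(&m);
	grow_buf(&m);
	printf("buffer is now %zu bytes\n", m.size);	/* 8192 */
	free(m.buf);
	return 0;
}
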
46001diff -urNp linux-2.6.32.45/fs/smbfs/proc.c linux-2.6.32.45/fs/smbfs/proc.c
46002--- linux-2.6.32.45/fs/smbfs/proc.c 2011-03-27 14:31:47.000000000 -0400
46003+++ linux-2.6.32.45/fs/smbfs/proc.c 2011-08-05 20:33:55.000000000 -0400
46004@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *
46005
46006 out:
46007 if (server->local_nls != NULL && server->remote_nls != NULL)
46008- server->ops->convert = convert_cp;
46009+ *(void **)&server->ops->convert = convert_cp;
46010 else
46011- server->ops->convert = convert_memcpy;
46012+ *(void **)&server->ops->convert = convert_memcpy;
46013
46014 smb_unlock_server(server);
46015 return n;
46016@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server,
46017
46018 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
46019 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
46020- server->ops->getattr = smb_proc_getattr_core;
46021+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
46022 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
46023- server->ops->getattr = smb_proc_getattr_ff;
46024+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
46025 }
46026
46027 /* Decode server capabilities */
46028@@ -3439,7 +3439,7 @@ out:
46029 static void
46030 install_ops(struct smb_ops *dst, struct smb_ops *src)
46031 {
46032- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46033+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46034 }
46035
46036 /* < LANMAN2 */
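
/*
 * Editor's illustrative sketch (not part of the patch): the smbfs hunks above
 * assign runtime-selected handlers through a "*(void **)&..." cast, evidently
 * because the ops pointer is const-qualified elsewhere in this patch.  The
 * standalone analogue below keeps the underlying object writable and only the
 * access path const, so the store itself stays to writable memory; the
 * function-pointer/void-pointer conversion mirrors the patch's own idiom and
 * assumes the two share a representation, as on the platforms it targets.
 */
#include <stdio.h>

struct smb_ops {
	int (*getattr)(int fd);
};

static int getattr_core(int fd) { return fd; }
static int getattr_ff(int fd)   { return fd + 100; }

static struct smb_ops real_ops = { getattr_core };	/* the writable object */

int main(void)
{
	const struct smb_ops *ops = &real_ops;		/* const access path */

	/* runtime fixup in the style of the hunks above */
	*(void **)&ops->getattr = (void *)getattr_ff;

	printf("%d\n", ops->getattr(1));		/* prints 101 */
	return 0;
}
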
46037diff -urNp linux-2.6.32.45/fs/smbfs/symlink.c linux-2.6.32.45/fs/smbfs/symlink.c
46038--- linux-2.6.32.45/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46039+++ linux-2.6.32.45/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46040@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
46041
46042 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46043 {
46044- char *s = nd_get_link(nd);
46045+ const char *s = nd_get_link(nd);
46046 if (!IS_ERR(s))
46047 __putname(s);
46048 }
46049diff -urNp linux-2.6.32.45/fs/splice.c linux-2.6.32.45/fs/splice.c
46050--- linux-2.6.32.45/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
46051+++ linux-2.6.32.45/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
46052@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46053 pipe_lock(pipe);
46054
46055 for (;;) {
46056- if (!pipe->readers) {
46057+ if (!atomic_read(&pipe->readers)) {
46058 send_sig(SIGPIPE, current, 0);
46059 if (!ret)
46060 ret = -EPIPE;
46061@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46062 do_wakeup = 0;
46063 }
46064
46065- pipe->waiting_writers++;
46066+ atomic_inc(&pipe->waiting_writers);
46067 pipe_wait(pipe);
46068- pipe->waiting_writers--;
46069+ atomic_dec(&pipe->waiting_writers);
46070 }
46071
46072 pipe_unlock(pipe);
46073@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
46074 .spd_release = spd_release_page,
46075 };
46076
46077+ pax_track_stack();
46078+
46079 index = *ppos >> PAGE_CACHE_SHIFT;
46080 loff = *ppos & ~PAGE_CACHE_MASK;
46081 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46082@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
46083 old_fs = get_fs();
46084 set_fs(get_ds());
46085 /* The cast to a user pointer is valid due to the set_fs() */
46086- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46087+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
46088 set_fs(old_fs);
46089
46090 return res;
46091@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
46092 old_fs = get_fs();
46093 set_fs(get_ds());
46094 /* The cast to a user pointer is valid due to the set_fs() */
46095- res = vfs_write(file, (const char __user *)buf, count, &pos);
46096+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
46097 set_fs(old_fs);
46098
46099 return res;
46100@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
46101 .spd_release = spd_release_page,
46102 };
46103
46104+ pax_track_stack();
46105+
46106 index = *ppos >> PAGE_CACHE_SHIFT;
46107 offset = *ppos & ~PAGE_CACHE_MASK;
46108 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46109@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
46110 goto err;
46111
46112 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46113- vec[i].iov_base = (void __user *) page_address(page);
46114+ vec[i].iov_base = (__force void __user *) page_address(page);
46115 vec[i].iov_len = this_len;
46116 pages[i] = page;
46117 spd.nr_pages++;
46118@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46119 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46120 {
46121 while (!pipe->nrbufs) {
46122- if (!pipe->writers)
46123+ if (!atomic_read(&pipe->writers))
46124 return 0;
46125
46126- if (!pipe->waiting_writers && sd->num_spliced)
46127+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46128 return 0;
46129
46130 if (sd->flags & SPLICE_F_NONBLOCK)
46131@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
46132 * out of the pipe right after the splice_to_pipe(). So set
46133 * PIPE_READERS appropriately.
46134 */
46135- pipe->readers = 1;
46136+ atomic_set(&pipe->readers, 1);
46137
46138 current->splice_pipe = pipe;
46139 }
46140@@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
46141 .spd_release = spd_release_page,
46142 };
46143
46144+ pax_track_stack();
46145+
46146 pipe = pipe_info(file->f_path.dentry->d_inode);
46147 if (!pipe)
46148 return -EBADF;
46149@@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
46150 ret = -ERESTARTSYS;
46151 break;
46152 }
46153- if (!pipe->writers)
46154+ if (!atomic_read(&pipe->writers))
46155 break;
46156- if (!pipe->waiting_writers) {
46157+ if (!atomic_read(&pipe->waiting_writers)) {
46158 if (flags & SPLICE_F_NONBLOCK) {
46159 ret = -EAGAIN;
46160 break;
46161@@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
46162 pipe_lock(pipe);
46163
46164 while (pipe->nrbufs >= PIPE_BUFFERS) {
46165- if (!pipe->readers) {
46166+ if (!atomic_read(&pipe->readers)) {
46167 send_sig(SIGPIPE, current, 0);
46168 ret = -EPIPE;
46169 break;
46170@@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
46171 ret = -ERESTARTSYS;
46172 break;
46173 }
46174- pipe->waiting_writers++;
46175+ atomic_inc(&pipe->waiting_writers);
46176 pipe_wait(pipe);
46177- pipe->waiting_writers--;
46178+ atomic_dec(&pipe->waiting_writers);
46179 }
46180
46181 pipe_unlock(pipe);
46182@@ -1785,14 +1791,14 @@ retry:
46183 pipe_double_lock(ipipe, opipe);
46184
46185 do {
46186- if (!opipe->readers) {
46187+ if (!atomic_read(&opipe->readers)) {
46188 send_sig(SIGPIPE, current, 0);
46189 if (!ret)
46190 ret = -EPIPE;
46191 break;
46192 }
46193
46194- if (!ipipe->nrbufs && !ipipe->writers)
46195+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46196 break;
46197
46198 /*
46199@@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
46200 pipe_double_lock(ipipe, opipe);
46201
46202 do {
46203- if (!opipe->readers) {
46204+ if (!atomic_read(&opipe->readers)) {
46205 send_sig(SIGPIPE, current, 0);
46206 if (!ret)
46207 ret = -EPIPE;
46208@@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
46209 * return EAGAIN if we have the potential of some data in the
46210 * future, otherwise just return 0
46211 */
46212- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46213+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46214 ret = -EAGAIN;
46215
46216 pipe_unlock(ipipe);
46217diff -urNp linux-2.6.32.45/fs/sysfs/file.c linux-2.6.32.45/fs/sysfs/file.c
46218--- linux-2.6.32.45/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
46219+++ linux-2.6.32.45/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
46220@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46221
46222 struct sysfs_open_dirent {
46223 atomic_t refcnt;
46224- atomic_t event;
46225+ atomic_unchecked_t event;
46226 wait_queue_head_t poll;
46227 struct list_head buffers; /* goes through sysfs_buffer.list */
46228 };
46229@@ -53,7 +53,7 @@ struct sysfs_buffer {
46230 size_t count;
46231 loff_t pos;
46232 char * page;
46233- struct sysfs_ops * ops;
46234+ const struct sysfs_ops * ops;
46235 struct mutex mutex;
46236 int needs_read_fill;
46237 int event;
46238@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
46239 {
46240 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46241 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46242- struct sysfs_ops * ops = buffer->ops;
46243+ const struct sysfs_ops * ops = buffer->ops;
46244 int ret = 0;
46245 ssize_t count;
46246
46247@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
46248 if (!sysfs_get_active_two(attr_sd))
46249 return -ENODEV;
46250
46251- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46252+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46253 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46254
46255 sysfs_put_active_two(attr_sd);
46256@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
46257 {
46258 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46259 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46260- struct sysfs_ops * ops = buffer->ops;
46261+ const struct sysfs_ops * ops = buffer->ops;
46262 int rc;
46263
46264 /* need attr_sd for attr and ops, its parent for kobj */
46265@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
46266 return -ENOMEM;
46267
46268 atomic_set(&new_od->refcnt, 0);
46269- atomic_set(&new_od->event, 1);
46270+ atomic_set_unchecked(&new_od->event, 1);
46271 init_waitqueue_head(&new_od->poll);
46272 INIT_LIST_HEAD(&new_od->buffers);
46273 goto retry;
46274@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
46275 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
46276 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46277 struct sysfs_buffer *buffer;
46278- struct sysfs_ops *ops;
46279+ const struct sysfs_ops *ops;
46280 int error = -EACCES;
46281 char *p;
46282
46283@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
46284
46285 sysfs_put_active_two(attr_sd);
46286
46287- if (buffer->event != atomic_read(&od->event))
46288+ if (buffer->event != atomic_read_unchecked(&od->event))
46289 goto trigger;
46290
46291 return DEFAULT_POLLMASK;
46292@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
46293
46294 od = sd->s_attr.open;
46295 if (od) {
46296- atomic_inc(&od->event);
46297+ atomic_inc_unchecked(&od->event);
46298 wake_up_interruptible(&od->poll);
46299 }
46300
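
The sysfs_open_dirent event field changed above is a poll sequence number, not a reference count, so wrapping it is harmless; the hunks therefore switch it to the unchecked atomic type, which is excluded from the PAX_REFCOUNT overflow detection. A rough, self-contained sketch of that type split, assuming the usual PaX convention (these are stand-in definitions for illustration, not copied from the patch):

/* stand-in for the kernel's atomic_t, for illustration only */
typedef struct { int counter; } atomic_t;

#ifdef CONFIG_PAX_REFCOUNT
/* distinct type: its inc/dec helpers carry no overflow checks,
 * so a wrapping sequence counter does not trip the detector */
typedef struct { int counter; } atomic_unchecked_t;
#else
/* without the feature, the unchecked type collapses back to atomic_t */
typedef atomic_t atomic_unchecked_t;
#endif
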
46301diff -urNp linux-2.6.32.45/fs/sysfs/mount.c linux-2.6.32.45/fs/sysfs/mount.c
46302--- linux-2.6.32.45/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
46303+++ linux-2.6.32.45/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
46304@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46305 .s_name = "",
46306 .s_count = ATOMIC_INIT(1),
46307 .s_flags = SYSFS_DIR,
46308+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46309+ .s_mode = S_IFDIR | S_IRWXU,
46310+#else
46311 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46312+#endif
46313 .s_ino = 1,
46314 };
46315
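
CONFIG_GRKERNSEC_SYSFS_RESTRICT above drops the group/other read and search bits from the sysfs root directory. A tiny standalone program showing the two resulting modes (S_IRUGO/S_IXUGO are kernel-only convenience macros, spelled out here for the sketch):

#include <stdio.h>
#include <sys/stat.h>

#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
#define S_IXUGO (S_IXUSR | S_IXGRP | S_IXOTH)

int main(void)
{
	unsigned int restricted = S_IRWXU;                     /* grsec: 0700 */
	unsigned int standard   = S_IRWXU | S_IRUGO | S_IXUGO; /* stock: 0755 */
	printf("restricted=%o standard=%o\n", restricted, standard);
	return 0;
}
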
46316diff -urNp linux-2.6.32.45/fs/sysfs/symlink.c linux-2.6.32.45/fs/sysfs/symlink.c
46317--- linux-2.6.32.45/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46318+++ linux-2.6.32.45/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46319@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
46320
46321 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46322 {
46323- char *page = nd_get_link(nd);
46324+ const char *page = nd_get_link(nd);
46325 if (!IS_ERR(page))
46326 free_page((unsigned long)page);
46327 }
46328diff -urNp linux-2.6.32.45/fs/udf/balloc.c linux-2.6.32.45/fs/udf/balloc.c
46329--- linux-2.6.32.45/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
46330+++ linux-2.6.32.45/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
46331@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
46332
46333 mutex_lock(&sbi->s_alloc_mutex);
46334 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46335- if (bloc->logicalBlockNum < 0 ||
46336- (bloc->logicalBlockNum + count) >
46337- partmap->s_partition_len) {
46338+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46339 udf_debug("%d < %d || %d + %d > %d\n",
46340 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
46341 count, partmap->s_partition_len);
46342@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
46343
46344 mutex_lock(&sbi->s_alloc_mutex);
46345 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46346- if (bloc->logicalBlockNum < 0 ||
46347- (bloc->logicalBlockNum + count) >
46348- partmap->s_partition_len) {
46349+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46350 udf_debug("%d < %d || %d + %d > %d\n",
46351 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
46352 partmap->s_partition_len);
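
Both udf/balloc.c hunks drop the "bloc->logicalBlockNum < 0" half of the range test; the field appears to be an unsigned 32-bit block number, so that comparison can never be true and only the upper-bound check carries any weight. A two-line illustration of why such a test is dead code:

#include <stdio.h>

int main(void)
{
	unsigned int block = 0;
	/* always false for an unsigned type (gcc -Wtype-limits flags it),
	 * which is why the hunks above delete the "< 0" half of the check */
	if (block < 0)
		printf("unreachable\n");
	else
		printf("an unsigned block number is never negative\n");
	return 0;
}
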
46353diff -urNp linux-2.6.32.45/fs/udf/inode.c linux-2.6.32.45/fs/udf/inode.c
46354--- linux-2.6.32.45/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
46355+++ linux-2.6.32.45/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
46356@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
46357 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46358 int lastblock = 0;
46359
46360+ pax_track_stack();
46361+
46362 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46363 prev_epos.block = iinfo->i_location;
46364 prev_epos.bh = NULL;
46365diff -urNp linux-2.6.32.45/fs/udf/misc.c linux-2.6.32.45/fs/udf/misc.c
46366--- linux-2.6.32.45/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
46367+++ linux-2.6.32.45/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
46368@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46369
46370 u8 udf_tag_checksum(const struct tag *t)
46371 {
46372- u8 *data = (u8 *)t;
46373+ const u8 *data = (const u8 *)t;
46374 u8 checksum = 0;
46375 int i;
46376 for (i = 0; i < sizeof(struct tag); ++i)
46377diff -urNp linux-2.6.32.45/fs/utimes.c linux-2.6.32.45/fs/utimes.c
46378--- linux-2.6.32.45/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
46379+++ linux-2.6.32.45/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
46380@@ -1,6 +1,7 @@
46381 #include <linux/compiler.h>
46382 #include <linux/file.h>
46383 #include <linux/fs.h>
46384+#include <linux/security.h>
46385 #include <linux/linkage.h>
46386 #include <linux/mount.h>
46387 #include <linux/namei.h>
46388@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46389 goto mnt_drop_write_and_out;
46390 }
46391 }
46392+
46393+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46394+ error = -EACCES;
46395+ goto mnt_drop_write_and_out;
46396+ }
46397+
46398 mutex_lock(&inode->i_mutex);
46399 error = notify_change(path->dentry, &newattrs);
46400 mutex_unlock(&inode->i_mutex);
46401diff -urNp linux-2.6.32.45/fs/xattr_acl.c linux-2.6.32.45/fs/xattr_acl.c
46402--- linux-2.6.32.45/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
46403+++ linux-2.6.32.45/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
46404@@ -17,8 +17,8 @@
46405 struct posix_acl *
46406 posix_acl_from_xattr(const void *value, size_t size)
46407 {
46408- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46409- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46410+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46411+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46412 int count;
46413 struct posix_acl *acl;
46414 struct posix_acl_entry *acl_e;
46415diff -urNp linux-2.6.32.45/fs/xattr.c linux-2.6.32.45/fs/xattr.c
46416--- linux-2.6.32.45/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
46417+++ linux-2.6.32.45/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
46418@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46419 * Extended attribute SET operations
46420 */
46421 static long
46422-setxattr(struct dentry *d, const char __user *name, const void __user *value,
46423+setxattr(struct path *path, const char __user *name, const void __user *value,
46424 size_t size, int flags)
46425 {
46426 int error;
46427@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
46428 return PTR_ERR(kvalue);
46429 }
46430
46431- error = vfs_setxattr(d, kname, kvalue, size, flags);
46432+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46433+ error = -EACCES;
46434+ goto out;
46435+ }
46436+
46437+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46438+out:
46439 kfree(kvalue);
46440 return error;
46441 }
46442@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46443 return error;
46444 error = mnt_want_write(path.mnt);
46445 if (!error) {
46446- error = setxattr(path.dentry, name, value, size, flags);
46447+ error = setxattr(&path, name, value, size, flags);
46448 mnt_drop_write(path.mnt);
46449 }
46450 path_put(&path);
46451@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46452 return error;
46453 error = mnt_want_write(path.mnt);
46454 if (!error) {
46455- error = setxattr(path.dentry, name, value, size, flags);
46456+ error = setxattr(&path, name, value, size, flags);
46457 mnt_drop_write(path.mnt);
46458 }
46459 path_put(&path);
46460@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46461 const void __user *,value, size_t, size, int, flags)
46462 {
46463 struct file *f;
46464- struct dentry *dentry;
46465 int error = -EBADF;
46466
46467 f = fget(fd);
46468 if (!f)
46469 return error;
46470- dentry = f->f_path.dentry;
46471- audit_inode(NULL, dentry);
46472+ audit_inode(NULL, f->f_path.dentry);
46473 error = mnt_want_write_file(f);
46474 if (!error) {
46475- error = setxattr(dentry, name, value, size, flags);
46476+ error = setxattr(&f->f_path, name, value, size, flags);
46477 mnt_drop_write(f->f_path.mnt);
46478 }
46479 fput(f);
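
The fs/xattr.c rework above changes setxattr() to take the whole struct path rather than a bare dentry, because the gr_acl_handle_setxattr() check wants both the dentry and the vfsmount. A standalone sketch of that plumbing with simplified, hypothetical stand-in types (not the kernel structures):

#include <stdio.h>

struct dentry   { const char *name; };
struct vfsmount { const char *mnt; };
struct path     { struct dentry *dentry; struct vfsmount *mnt; };

/* policy check needs both pieces; a bare dentry would not be enough */
static int gr_check(const struct path *p)
{
	return p->dentry && p->mnt;
}

static int do_setxattr(const struct path *p)
{
	if (!gr_check(p))
		return -13;  /* -EACCES in the patched kernel code */
	printf("setxattr on %s (mount %s)\n", p->dentry->name, p->mnt->mnt);
	return 0;
}

int main(void)
{
	struct dentry d = { "somefile" };
	struct vfsmount m = { "/mnt/data" };
	struct path pth = { &d, &m };
	printf("setxattr -> %d\n", do_setxattr(&pth));
	return 0;
}
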
46480diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c
46481--- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
46482+++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
46483@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
46484 xfs_fsop_geom_t fsgeo;
46485 int error;
46486
46487+ memset(&fsgeo, 0, sizeof(fsgeo));
46488 error = xfs_fs_geometry(mp, &fsgeo, 3);
46489 if (error)
46490 return -error;
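
The memset() added above zeroes the on-stack geometry structure before it is filled and later copied out to the 32-bit caller, so padding and any fields the fill routine skips cannot leak stale kernel stack bytes. The same pattern in a standalone sketch (struct geom and fill_geom are hypothetical):

#include <string.h>
#include <stdio.h>

struct geom { int blocksize; char pad_or_unused[12]; };

static void fill_geom(struct geom *g)
{
	g->blocksize = 4096;  /* may not touch every byte of the struct */
}

int main(void)
{
	struct geom g;
	memset(&g, 0, sizeof(g));  /* same idea as the memset added above:
	                              no stale stack bytes reach userspace */
	fill_geom(&g);
	/* in the kernel this would now be copy_to_user(arg, &g, sizeof(g)) */
	printf("blocksize=%d\n", g.blocksize);
	return 0;
}
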
46491diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c
46492--- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
46493+++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
46494@@ -134,7 +134,7 @@ xfs_find_handle(
46495 }
46496
46497 error = -EFAULT;
46498- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46499+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46500 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46501 goto out_put;
46502
46503@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
46504 if (IS_ERR(dentry))
46505 return PTR_ERR(dentry);
46506
46507- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
46508+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
46509 if (!kbuf)
46510 goto out_dput;
46511
46512@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
46513 xfs_mount_t *mp,
46514 void __user *arg)
46515 {
46516- xfs_fsop_geom_t fsgeo;
46517+ xfs_fsop_geom_t fsgeo;
46518 int error;
46519
46520 error = xfs_fs_geometry(mp, &fsgeo, 3);
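
Two related hardening changes in xfs_ioctl.c above: xfs_find_handle() now refuses to copy_to_user() more bytes than the on-stack handle actually holds, and xfs_attrlist_by_handle() allocates its user-visible buffer with kzalloc() so any unwritten bytes go out as zeroes. A standalone sketch of the size guard (struct handle below is a hypothetical stand-in, not the XFS layout):

#include <stdio.h>
#include <string.h>

struct handle { unsigned long fsid[2]; unsigned char fid[24]; };

/* never copy more than the kernel object holds, even if the
 * caller-derived size (hsize) says otherwise */
static int copy_handle_out(void *dst, const struct handle *h, size_t hsize)
{
	if (hsize > sizeof(*h))
		return -1;         /* -EFAULT in the patched kernel code */
	memcpy(dst, h, hsize);    /* copy_to_user() in the kernel */
	return 0;
}

int main(void)
{
	struct handle h = { { 1, 2 }, "fid" };
	unsigned char out[sizeof(h)];
	printf("ok=%d\n", copy_handle_out(out, &h, sizeof(h)));
	printf("rejected=%d\n", copy_handle_out(out, &h, sizeof(h) + 8));
	return 0;
}
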
46521diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c
46522--- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
46523+++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
46524@@ -468,7 +468,7 @@ xfs_vn_put_link(
46525 struct nameidata *nd,
46526 void *p)
46527 {
46528- char *s = nd_get_link(nd);
46529+ const char *s = nd_get_link(nd);
46530
46531 if (!IS_ERR(s))
46532 kfree(s);
46533diff -urNp linux-2.6.32.45/fs/xfs/xfs_bmap.c linux-2.6.32.45/fs/xfs/xfs_bmap.c
46534--- linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
46535+++ linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
46536@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
46537 int nmap,
46538 int ret_nmap);
46539 #else
46540-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46541+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46542 #endif /* DEBUG */
46543
46544 #if defined(XFS_RW_TRACE)
46545diff -urNp linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c
46546--- linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
46547+++ linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
46548@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
46549 }
46550
46551 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46552- if (filldir(dirent, sfep->name, sfep->namelen,
46553+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46554+ char name[sfep->namelen];
46555+ memcpy(name, sfep->name, sfep->namelen);
46556+ if (filldir(dirent, name, sfep->namelen,
46557+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
46558+ *offset = off & 0x7fffffff;
46559+ return 0;
46560+ }
46561+ } else if (filldir(dirent, sfep->name, sfep->namelen,
46562 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46563 *offset = off & 0x7fffffff;
46564 return 0;
46565diff -urNp linux-2.6.32.45/grsecurity/gracl_alloc.c linux-2.6.32.45/grsecurity/gracl_alloc.c
46566--- linux-2.6.32.45/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
46567+++ linux-2.6.32.45/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
46568@@ -0,0 +1,105 @@
46569+#include <linux/kernel.h>
46570+#include <linux/mm.h>
46571+#include <linux/slab.h>
46572+#include <linux/vmalloc.h>
46573+#include <linux/gracl.h>
46574+#include <linux/grsecurity.h>
46575+
46576+static unsigned long alloc_stack_next = 1;
46577+static unsigned long alloc_stack_size = 1;
46578+static void **alloc_stack;
46579+
46580+static __inline__ int
46581+alloc_pop(void)
46582+{
46583+ if (alloc_stack_next == 1)
46584+ return 0;
46585+
46586+ kfree(alloc_stack[alloc_stack_next - 2]);
46587+
46588+ alloc_stack_next--;
46589+
46590+ return 1;
46591+}
46592+
46593+static __inline__ int
46594+alloc_push(void *buf)
46595+{
46596+ if (alloc_stack_next >= alloc_stack_size)
46597+ return 1;
46598+
46599+ alloc_stack[alloc_stack_next - 1] = buf;
46600+
46601+ alloc_stack_next++;
46602+
46603+ return 0;
46604+}
46605+
46606+void *
46607+acl_alloc(unsigned long len)
46608+{
46609+ void *ret = NULL;
46610+
46611+ if (!len || len > PAGE_SIZE)
46612+ goto out;
46613+
46614+ ret = kmalloc(len, GFP_KERNEL);
46615+
46616+ if (ret) {
46617+ if (alloc_push(ret)) {
46618+ kfree(ret);
46619+ ret = NULL;
46620+ }
46621+ }
46622+
46623+out:
46624+ return ret;
46625+}
46626+
46627+void *
46628+acl_alloc_num(unsigned long num, unsigned long len)
46629+{
46630+ if (!len || (num > (PAGE_SIZE / len)))
46631+ return NULL;
46632+
46633+ return acl_alloc(num * len);
46634+}
46635+
46636+void
46637+acl_free_all(void)
46638+{
46639+ if (gr_acl_is_enabled() || !alloc_stack)
46640+ return;
46641+
46642+ while (alloc_pop()) ;
46643+
46644+ if (alloc_stack) {
46645+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
46646+ kfree(alloc_stack);
46647+ else
46648+ vfree(alloc_stack);
46649+ }
46650+
46651+ alloc_stack = NULL;
46652+ alloc_stack_size = 1;
46653+ alloc_stack_next = 1;
46654+
46655+ return;
46656+}
46657+
46658+int
46659+acl_alloc_stack_init(unsigned long size)
46660+{
46661+ if ((size * sizeof (void *)) <= PAGE_SIZE)
46662+ alloc_stack =
46663+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
46664+ else
46665+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
46666+
46667+ alloc_stack_size = size;
46668+
46669+ if (!alloc_stack)
46670+ return 0;
46671+ else
46672+ return 1;
46673+}
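
gracl_alloc.c above keeps every RBAC policy allocation on a pointer stack, sized once from the policy's object count, so the whole policy can be torn down in a single pass; gracl.c below sizes the stack in init_variables() and drains it from free_variables() via acl_free_all(). A compressed userspace analogue of that push-then-free-all pattern (not the kernel code; malloc/free stand in for kmalloc/vmalloc):

#include <stdlib.h>
#include <stdio.h>

static void **stack;
static unsigned long next = 1, size = 1;

static int stack_init(unsigned long n)   /* cf. acl_alloc_stack_init() */
{
	stack = calloc(n, sizeof(void *));
	size = n;
	return stack != NULL;
}

static void *tracked_alloc(size_t len)   /* cf. acl_alloc() */
{
	void *p = malloc(len);
	if (p && next <= size) {
		stack[next - 1] = p;     /* remember it for teardown */
		next++;
		return p;
	}
	free(p);                         /* stack full: refuse, as acl_alloc does */
	return NULL;
}

static void free_all(void)               /* cf. acl_free_all() */
{
	while (next > 1)
		free(stack[--next - 1]);
	free(stack);
	stack = NULL;
	next = size = 1;
}

int main(void)
{
	if (!stack_init(8))
		return 1;
	void *a = tracked_alloc(32), *b = tracked_alloc(64);
	printf("tracked %p and %p\n", a, b);
	free_all();
	return 0;
}
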
46674diff -urNp linux-2.6.32.45/grsecurity/gracl.c linux-2.6.32.45/grsecurity/gracl.c
46675--- linux-2.6.32.45/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
46676+++ linux-2.6.32.45/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
46677@@ -0,0 +1,4082 @@
46678+#include <linux/kernel.h>
46679+#include <linux/module.h>
46680+#include <linux/sched.h>
46681+#include <linux/mm.h>
46682+#include <linux/file.h>
46683+#include <linux/fs.h>
46684+#include <linux/namei.h>
46685+#include <linux/mount.h>
46686+#include <linux/tty.h>
46687+#include <linux/proc_fs.h>
46688+#include <linux/smp_lock.h>
46689+#include <linux/slab.h>
46690+#include <linux/vmalloc.h>
46691+#include <linux/types.h>
46692+#include <linux/sysctl.h>
46693+#include <linux/netdevice.h>
46694+#include <linux/ptrace.h>
46695+#include <linux/gracl.h>
46696+#include <linux/gralloc.h>
46697+#include <linux/grsecurity.h>
46698+#include <linux/grinternal.h>
46699+#include <linux/pid_namespace.h>
46700+#include <linux/fdtable.h>
46701+#include <linux/percpu.h>
46702+
46703+#include <asm/uaccess.h>
46704+#include <asm/errno.h>
46705+#include <asm/mman.h>
46706+
46707+static struct acl_role_db acl_role_set;
46708+static struct name_db name_set;
46709+static struct inodev_db inodev_set;
46710+
46711+/* for keeping track of userspace pointers used for subjects, so we
46712+ can share references in the kernel as well
46713+*/
46714+
46715+static struct dentry *real_root;
46716+static struct vfsmount *real_root_mnt;
46717+
46718+static struct acl_subj_map_db subj_map_set;
46719+
46720+static struct acl_role_label *default_role;
46721+
46722+static struct acl_role_label *role_list;
46723+
46724+static u16 acl_sp_role_value;
46725+
46726+extern char *gr_shared_page[4];
46727+static DEFINE_MUTEX(gr_dev_mutex);
46728+DEFINE_RWLOCK(gr_inode_lock);
46729+
46730+struct gr_arg *gr_usermode;
46731+
46732+static unsigned int gr_status __read_only = GR_STATUS_INIT;
46733+
46734+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
46735+extern void gr_clear_learn_entries(void);
46736+
46737+#ifdef CONFIG_GRKERNSEC_RESLOG
46738+extern void gr_log_resource(const struct task_struct *task,
46739+ const int res, const unsigned long wanted, const int gt);
46740+#endif
46741+
46742+unsigned char *gr_system_salt;
46743+unsigned char *gr_system_sum;
46744+
46745+static struct sprole_pw **acl_special_roles = NULL;
46746+static __u16 num_sprole_pws = 0;
46747+
46748+static struct acl_role_label *kernel_role = NULL;
46749+
46750+static unsigned int gr_auth_attempts = 0;
46751+static unsigned long gr_auth_expires = 0UL;
46752+
46753+#ifdef CONFIG_NET
46754+extern struct vfsmount *sock_mnt;
46755+#endif
46756+extern struct vfsmount *pipe_mnt;
46757+extern struct vfsmount *shm_mnt;
46758+#ifdef CONFIG_HUGETLBFS
46759+extern struct vfsmount *hugetlbfs_vfsmount;
46760+#endif
46761+
46762+static struct acl_object_label *fakefs_obj_rw;
46763+static struct acl_object_label *fakefs_obj_rwx;
46764+
46765+extern int gr_init_uidset(void);
46766+extern void gr_free_uidset(void);
46767+extern void gr_remove_uid(uid_t uid);
46768+extern int gr_find_uid(uid_t uid);
46769+
46770+__inline__ int
46771+gr_acl_is_enabled(void)
46772+{
46773+ return (gr_status & GR_READY);
46774+}
46775+
46776+#ifdef CONFIG_BTRFS_FS
46777+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46778+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46779+#endif
46780+
46781+static inline dev_t __get_dev(const struct dentry *dentry)
46782+{
46783+#ifdef CONFIG_BTRFS_FS
46784+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46785+ return get_btrfs_dev_from_inode(dentry->d_inode);
46786+ else
46787+#endif
46788+ return dentry->d_inode->i_sb->s_dev;
46789+}
46790+
46791+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
46792+{
46793+ return __get_dev(dentry);
46794+}
46795+
46796+static char gr_task_roletype_to_char(struct task_struct *task)
46797+{
46798+ switch (task->role->roletype &
46799+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
46800+ GR_ROLE_SPECIAL)) {
46801+ case GR_ROLE_DEFAULT:
46802+ return 'D';
46803+ case GR_ROLE_USER:
46804+ return 'U';
46805+ case GR_ROLE_GROUP:
46806+ return 'G';
46807+ case GR_ROLE_SPECIAL:
46808+ return 'S';
46809+ }
46810+
46811+ return 'X';
46812+}
46813+
46814+char gr_roletype_to_char(void)
46815+{
46816+ return gr_task_roletype_to_char(current);
46817+}
46818+
46819+__inline__ int
46820+gr_acl_tpe_check(void)
46821+{
46822+ if (unlikely(!(gr_status & GR_READY)))
46823+ return 0;
46824+ if (current->role->roletype & GR_ROLE_TPE)
46825+ return 1;
46826+ else
46827+ return 0;
46828+}
46829+
46830+int
46831+gr_handle_rawio(const struct inode *inode)
46832+{
46833+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
46834+ if (inode && S_ISBLK(inode->i_mode) &&
46835+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
46836+ !capable(CAP_SYS_RAWIO))
46837+ return 1;
46838+#endif
46839+ return 0;
46840+}
46841+
46842+static int
46843+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
46844+{
46845+ if (likely(lena != lenb))
46846+ return 0;
46847+
46848+ return !memcmp(a, b, lena);
46849+}
46850+
46851+/* this must be called with vfsmount_lock and dcache_lock held */
46852+
46853+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
46854+ struct dentry *root, struct vfsmount *rootmnt,
46855+ char *buffer, int buflen)
46856+{
46857+ char * end = buffer+buflen;
46858+ char * retval;
46859+ int namelen;
46860+
46861+ *--end = '\0';
46862+ buflen--;
46863+
46864+ if (buflen < 1)
46865+ goto Elong;
46866+ /* Get '/' right */
46867+ retval = end-1;
46868+ *retval = '/';
46869+
46870+ for (;;) {
46871+ struct dentry * parent;
46872+
46873+ if (dentry == root && vfsmnt == rootmnt)
46874+ break;
46875+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
46876+ /* Global root? */
46877+ if (vfsmnt->mnt_parent == vfsmnt)
46878+ goto global_root;
46879+ dentry = vfsmnt->mnt_mountpoint;
46880+ vfsmnt = vfsmnt->mnt_parent;
46881+ continue;
46882+ }
46883+ parent = dentry->d_parent;
46884+ prefetch(parent);
46885+ namelen = dentry->d_name.len;
46886+ buflen -= namelen + 1;
46887+ if (buflen < 0)
46888+ goto Elong;
46889+ end -= namelen;
46890+ memcpy(end, dentry->d_name.name, namelen);
46891+ *--end = '/';
46892+ retval = end;
46893+ dentry = parent;
46894+ }
46895+
46896+out:
46897+ return retval;
46898+
46899+global_root:
46900+ namelen = dentry->d_name.len;
46901+ buflen -= namelen;
46902+ if (buflen < 0)
46903+ goto Elong;
46904+ retval -= namelen-1; /* hit the slash */
46905+ memcpy(retval, dentry->d_name.name, namelen);
46906+ goto out;
46907+Elong:
46908+ retval = ERR_PTR(-ENAMETOOLONG);
46909+ goto out;
46910+}
46911+
46912+static char *
46913+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
46914+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
46915+{
46916+ char *retval;
46917+
46918+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
46919+ if (unlikely(IS_ERR(retval)))
46920+ retval = strcpy(buf, "<path too long>");
46921+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
46922+ retval[1] = '\0';
46923+
46924+ return retval;
46925+}
46926+
46927+static char *
46928+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
46929+ char *buf, int buflen)
46930+{
46931+ char *res;
46932+
46933+ /* we can use real_root, real_root_mnt, because this is only called
46934+ by the RBAC system */
46935+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
46936+
46937+ return res;
46938+}
46939+
46940+static char *
46941+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
46942+ char *buf, int buflen)
46943+{
46944+ char *res;
46945+ struct dentry *root;
46946+ struct vfsmount *rootmnt;
46947+ struct task_struct *reaper = &init_task;
46948+
46949+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
46950+ read_lock(&reaper->fs->lock);
46951+ root = dget(reaper->fs->root.dentry);
46952+ rootmnt = mntget(reaper->fs->root.mnt);
46953+ read_unlock(&reaper->fs->lock);
46954+
46955+ spin_lock(&dcache_lock);
46956+ spin_lock(&vfsmount_lock);
46957+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
46958+ spin_unlock(&vfsmount_lock);
46959+ spin_unlock(&dcache_lock);
46960+
46961+ dput(root);
46962+ mntput(rootmnt);
46963+ return res;
46964+}
46965+
46966+static char *
46967+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
46968+{
46969+ char *ret;
46970+ spin_lock(&dcache_lock);
46971+ spin_lock(&vfsmount_lock);
46972+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
46973+ PAGE_SIZE);
46974+ spin_unlock(&vfsmount_lock);
46975+ spin_unlock(&dcache_lock);
46976+ return ret;
46977+}
46978+
46979+char *
46980+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
46981+{
46982+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
46983+ PAGE_SIZE);
46984+}
46985+
46986+char *
46987+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
46988+{
46989+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
46990+ PAGE_SIZE);
46991+}
46992+
46993+char *
46994+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
46995+{
46996+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
46997+ PAGE_SIZE);
46998+}
46999+
47000+char *
47001+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47002+{
47003+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47004+ PAGE_SIZE);
47005+}
47006+
47007+char *
47008+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47009+{
47010+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47011+ PAGE_SIZE);
47012+}
47013+
47014+__inline__ __u32
47015+to_gr_audit(const __u32 reqmode)
47016+{
47017+ /* masks off auditable permission flags, then shifts them to create
47018+ auditing flags, and adds the special case of append auditing if
47019+ we're requesting write */
47020+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47021+}
47022+
47023+struct acl_subject_label *
47024+lookup_subject_map(const struct acl_subject_label *userp)
47025+{
47026+ unsigned int index = shash(userp, subj_map_set.s_size);
47027+ struct subject_map *match;
47028+
47029+ match = subj_map_set.s_hash[index];
47030+
47031+ while (match && match->user != userp)
47032+ match = match->next;
47033+
47034+ if (match != NULL)
47035+ return match->kernel;
47036+ else
47037+ return NULL;
47038+}
47039+
47040+static void
47041+insert_subj_map_entry(struct subject_map *subjmap)
47042+{
47043+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47044+ struct subject_map **curr;
47045+
47046+ subjmap->prev = NULL;
47047+
47048+ curr = &subj_map_set.s_hash[index];
47049+ if (*curr != NULL)
47050+ (*curr)->prev = subjmap;
47051+
47052+ subjmap->next = *curr;
47053+ *curr = subjmap;
47054+
47055+ return;
47056+}
47057+
47058+static struct acl_role_label *
47059+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47060+ const gid_t gid)
47061+{
47062+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47063+ struct acl_role_label *match;
47064+ struct role_allowed_ip *ipp;
47065+ unsigned int x;
47066+ u32 curr_ip = task->signal->curr_ip;
47067+
47068+ task->signal->saved_ip = curr_ip;
47069+
47070+ match = acl_role_set.r_hash[index];
47071+
47072+ while (match) {
47073+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47074+ for (x = 0; x < match->domain_child_num; x++) {
47075+ if (match->domain_children[x] == uid)
47076+ goto found;
47077+ }
47078+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47079+ break;
47080+ match = match->next;
47081+ }
47082+found:
47083+ if (match == NULL) {
47084+ try_group:
47085+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47086+ match = acl_role_set.r_hash[index];
47087+
47088+ while (match) {
47089+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47090+ for (x = 0; x < match->domain_child_num; x++) {
47091+ if (match->domain_children[x] == gid)
47092+ goto found2;
47093+ }
47094+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47095+ break;
47096+ match = match->next;
47097+ }
47098+found2:
47099+ if (match == NULL)
47100+ match = default_role;
47101+ if (match->allowed_ips == NULL)
47102+ return match;
47103+ else {
47104+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47105+ if (likely
47106+ ((ntohl(curr_ip) & ipp->netmask) ==
47107+ (ntohl(ipp->addr) & ipp->netmask)))
47108+ return match;
47109+ }
47110+ match = default_role;
47111+ }
47112+ } else if (match->allowed_ips == NULL) {
47113+ return match;
47114+ } else {
47115+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47116+ if (likely
47117+ ((ntohl(curr_ip) & ipp->netmask) ==
47118+ (ntohl(ipp->addr) & ipp->netmask)))
47119+ return match;
47120+ }
47121+ goto try_group;
47122+ }
47123+
47124+ return match;
47125+}
47126+
47127+struct acl_subject_label *
47128+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47129+ const struct acl_role_label *role)
47130+{
47131+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47132+ struct acl_subject_label *match;
47133+
47134+ match = role->subj_hash[index];
47135+
47136+ while (match && (match->inode != ino || match->device != dev ||
47137+ (match->mode & GR_DELETED))) {
47138+ match = match->next;
47139+ }
47140+
47141+ if (match && !(match->mode & GR_DELETED))
47142+ return match;
47143+ else
47144+ return NULL;
47145+}
47146+
47147+struct acl_subject_label *
47148+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47149+ const struct acl_role_label *role)
47150+{
47151+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47152+ struct acl_subject_label *match;
47153+
47154+ match = role->subj_hash[index];
47155+
47156+ while (match && (match->inode != ino || match->device != dev ||
47157+ !(match->mode & GR_DELETED))) {
47158+ match = match->next;
47159+ }
47160+
47161+ if (match && (match->mode & GR_DELETED))
47162+ return match;
47163+ else
47164+ return NULL;
47165+}
47166+
47167+static struct acl_object_label *
47168+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47169+ const struct acl_subject_label *subj)
47170+{
47171+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47172+ struct acl_object_label *match;
47173+
47174+ match = subj->obj_hash[index];
47175+
47176+ while (match && (match->inode != ino || match->device != dev ||
47177+ (match->mode & GR_DELETED))) {
47178+ match = match->next;
47179+ }
47180+
47181+ if (match && !(match->mode & GR_DELETED))
47182+ return match;
47183+ else
47184+ return NULL;
47185+}
47186+
47187+static struct acl_object_label *
47188+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47189+ const struct acl_subject_label *subj)
47190+{
47191+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47192+ struct acl_object_label *match;
47193+
47194+ match = subj->obj_hash[index];
47195+
47196+ while (match && (match->inode != ino || match->device != dev ||
47197+ !(match->mode & GR_DELETED))) {
47198+ match = match->next;
47199+ }
47200+
47201+ if (match && (match->mode & GR_DELETED))
47202+ return match;
47203+
47204+ match = subj->obj_hash[index];
47205+
47206+ while (match && (match->inode != ino || match->device != dev ||
47207+ (match->mode & GR_DELETED))) {
47208+ match = match->next;
47209+ }
47210+
47211+ if (match && !(match->mode & GR_DELETED))
47212+ return match;
47213+ else
47214+ return NULL;
47215+}
47216+
47217+static struct name_entry *
47218+lookup_name_entry(const char *name)
47219+{
47220+ unsigned int len = strlen(name);
47221+ unsigned int key = full_name_hash(name, len);
47222+ unsigned int index = key % name_set.n_size;
47223+ struct name_entry *match;
47224+
47225+ match = name_set.n_hash[index];
47226+
47227+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47228+ match = match->next;
47229+
47230+ return match;
47231+}
47232+
47233+static struct name_entry *
47234+lookup_name_entry_create(const char *name)
47235+{
47236+ unsigned int len = strlen(name);
47237+ unsigned int key = full_name_hash(name, len);
47238+ unsigned int index = key % name_set.n_size;
47239+ struct name_entry *match;
47240+
47241+ match = name_set.n_hash[index];
47242+
47243+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47244+ !match->deleted))
47245+ match = match->next;
47246+
47247+ if (match && match->deleted)
47248+ return match;
47249+
47250+ match = name_set.n_hash[index];
47251+
47252+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47253+ match->deleted))
47254+ match = match->next;
47255+
47256+ if (match && !match->deleted)
47257+ return match;
47258+ else
47259+ return NULL;
47260+}
47261+
47262+static struct inodev_entry *
47263+lookup_inodev_entry(const ino_t ino, const dev_t dev)
47264+{
47265+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
47266+ struct inodev_entry *match;
47267+
47268+ match = inodev_set.i_hash[index];
47269+
47270+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47271+ match = match->next;
47272+
47273+ return match;
47274+}
47275+
47276+static void
47277+insert_inodev_entry(struct inodev_entry *entry)
47278+{
47279+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47280+ inodev_set.i_size);
47281+ struct inodev_entry **curr;
47282+
47283+ entry->prev = NULL;
47284+
47285+ curr = &inodev_set.i_hash[index];
47286+ if (*curr != NULL)
47287+ (*curr)->prev = entry;
47288+
47289+ entry->next = *curr;
47290+ *curr = entry;
47291+
47292+ return;
47293+}
47294+
47295+static void
47296+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47297+{
47298+ unsigned int index =
47299+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47300+ struct acl_role_label **curr;
47301+ struct acl_role_label *tmp;
47302+
47303+ curr = &acl_role_set.r_hash[index];
47304+
47305+ /* if role was already inserted due to domains and already has
47306+ a role in the same bucket as it attached, then we need to
47307+ combine these two buckets
47308+ */
47309+ if (role->next) {
47310+ tmp = role->next;
47311+ while (tmp->next)
47312+ tmp = tmp->next;
47313+ tmp->next = *curr;
47314+ } else
47315+ role->next = *curr;
47316+ *curr = role;
47317+
47318+ return;
47319+}
47320+
47321+static void
47322+insert_acl_role_label(struct acl_role_label *role)
47323+{
47324+ int i;
47325+
47326+ if (role_list == NULL) {
47327+ role_list = role;
47328+ role->prev = NULL;
47329+ } else {
47330+ role->prev = role_list;
47331+ role_list = role;
47332+ }
47333+
47334+ /* used for hash chains */
47335+ role->next = NULL;
47336+
47337+ if (role->roletype & GR_ROLE_DOMAIN) {
47338+ for (i = 0; i < role->domain_child_num; i++)
47339+ __insert_acl_role_label(role, role->domain_children[i]);
47340+ } else
47341+ __insert_acl_role_label(role, role->uidgid);
47342+}
47343+
47344+static int
47345+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47346+{
47347+ struct name_entry **curr, *nentry;
47348+ struct inodev_entry *ientry;
47349+ unsigned int len = strlen(name);
47350+ unsigned int key = full_name_hash(name, len);
47351+ unsigned int index = key % name_set.n_size;
47352+
47353+ curr = &name_set.n_hash[index];
47354+
47355+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47356+ curr = &((*curr)->next);
47357+
47358+ if (*curr != NULL)
47359+ return 1;
47360+
47361+ nentry = acl_alloc(sizeof (struct name_entry));
47362+ if (nentry == NULL)
47363+ return 0;
47364+ ientry = acl_alloc(sizeof (struct inodev_entry));
47365+ if (ientry == NULL)
47366+ return 0;
47367+ ientry->nentry = nentry;
47368+
47369+ nentry->key = key;
47370+ nentry->name = name;
47371+ nentry->inode = inode;
47372+ nentry->device = device;
47373+ nentry->len = len;
47374+ nentry->deleted = deleted;
47375+
47376+ nentry->prev = NULL;
47377+ curr = &name_set.n_hash[index];
47378+ if (*curr != NULL)
47379+ (*curr)->prev = nentry;
47380+ nentry->next = *curr;
47381+ *curr = nentry;
47382+
47383+ /* insert us into the table searchable by inode/dev */
47384+ insert_inodev_entry(ientry);
47385+
47386+ return 1;
47387+}
47388+
47389+static void
47390+insert_acl_obj_label(struct acl_object_label *obj,
47391+ struct acl_subject_label *subj)
47392+{
47393+ unsigned int index =
47394+ fhash(obj->inode, obj->device, subj->obj_hash_size);
47395+ struct acl_object_label **curr;
47396+
47397+
47398+ obj->prev = NULL;
47399+
47400+ curr = &subj->obj_hash[index];
47401+ if (*curr != NULL)
47402+ (*curr)->prev = obj;
47403+
47404+ obj->next = *curr;
47405+ *curr = obj;
47406+
47407+ return;
47408+}
47409+
47410+static void
47411+insert_acl_subj_label(struct acl_subject_label *obj,
47412+ struct acl_role_label *role)
47413+{
47414+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47415+ struct acl_subject_label **curr;
47416+
47417+ obj->prev = NULL;
47418+
47419+ curr = &role->subj_hash[index];
47420+ if (*curr != NULL)
47421+ (*curr)->prev = obj;
47422+
47423+ obj->next = *curr;
47424+ *curr = obj;
47425+
47426+ return;
47427+}
47428+
47429+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
47430+
47431+static void *
47432+create_table(__u32 * len, int elementsize)
47433+{
47434+ unsigned int table_sizes[] = {
47435+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
47436+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
47437+ 4194301, 8388593, 16777213, 33554393, 67108859
47438+ };
47439+ void *newtable = NULL;
47440+ unsigned int pwr = 0;
47441+
47442+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
47443+ table_sizes[pwr] <= *len)
47444+ pwr++;
47445+
47446+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
47447+ return newtable;
47448+
47449+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
47450+ newtable =
47451+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
47452+ else
47453+ newtable = vmalloc(table_sizes[pwr] * elementsize);
47454+
47455+ *len = table_sizes[pwr];
47456+
47457+ return newtable;
47458+}
47459+
47460+static int
47461+init_variables(const struct gr_arg *arg)
47462+{
47463+ struct task_struct *reaper = &init_task;
47464+ unsigned int stacksize;
47465+
47466+ subj_map_set.s_size = arg->role_db.num_subjects;
47467+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
47468+ name_set.n_size = arg->role_db.num_objects;
47469+ inodev_set.i_size = arg->role_db.num_objects;
47470+
47471+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
47472+ !name_set.n_size || !inodev_set.i_size)
47473+ return 1;
47474+
47475+ if (!gr_init_uidset())
47476+ return 1;
47477+
47478+ /* set up the stack that holds allocation info */
47479+
47480+ stacksize = arg->role_db.num_pointers + 5;
47481+
47482+ if (!acl_alloc_stack_init(stacksize))
47483+ return 1;
47484+
47485+ /* grab reference for the real root dentry and vfsmount */
47486+ read_lock(&reaper->fs->lock);
47487+ real_root = dget(reaper->fs->root.dentry);
47488+ real_root_mnt = mntget(reaper->fs->root.mnt);
47489+ read_unlock(&reaper->fs->lock);
47490+
47491+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47492+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
47493+#endif
47494+
47495+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
47496+ if (fakefs_obj_rw == NULL)
47497+ return 1;
47498+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
47499+
47500+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
47501+ if (fakefs_obj_rwx == NULL)
47502+ return 1;
47503+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
47504+
47505+ subj_map_set.s_hash =
47506+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
47507+ acl_role_set.r_hash =
47508+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
47509+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
47510+ inodev_set.i_hash =
47511+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
47512+
47513+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
47514+ !name_set.n_hash || !inodev_set.i_hash)
47515+ return 1;
47516+
47517+ memset(subj_map_set.s_hash, 0,
47518+ sizeof(struct subject_map *) * subj_map_set.s_size);
47519+ memset(acl_role_set.r_hash, 0,
47520+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
47521+ memset(name_set.n_hash, 0,
47522+ sizeof (struct name_entry *) * name_set.n_size);
47523+ memset(inodev_set.i_hash, 0,
47524+ sizeof (struct inodev_entry *) * inodev_set.i_size);
47525+
47526+ return 0;
47527+}
47528+
47529+/* free information not needed after startup
47530+ currently contains user->kernel pointer mappings for subjects
47531+*/
47532+
47533+static void
47534+free_init_variables(void)
47535+{
47536+ __u32 i;
47537+
47538+ if (subj_map_set.s_hash) {
47539+ for (i = 0; i < subj_map_set.s_size; i++) {
47540+ if (subj_map_set.s_hash[i]) {
47541+ kfree(subj_map_set.s_hash[i]);
47542+ subj_map_set.s_hash[i] = NULL;
47543+ }
47544+ }
47545+
47546+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
47547+ PAGE_SIZE)
47548+ kfree(subj_map_set.s_hash);
47549+ else
47550+ vfree(subj_map_set.s_hash);
47551+ }
47552+
47553+ return;
47554+}
47555+
47556+static void
47557+free_variables(void)
47558+{
47559+ struct acl_subject_label *s;
47560+ struct acl_role_label *r;
47561+ struct task_struct *task, *task2;
47562+ unsigned int x;
47563+
47564+ gr_clear_learn_entries();
47565+
47566+ read_lock(&tasklist_lock);
47567+ do_each_thread(task2, task) {
47568+ task->acl_sp_role = 0;
47569+ task->acl_role_id = 0;
47570+ task->acl = NULL;
47571+ task->role = NULL;
47572+ } while_each_thread(task2, task);
47573+ read_unlock(&tasklist_lock);
47574+
47575+ /* release the reference to the real root dentry and vfsmount */
47576+ if (real_root)
47577+ dput(real_root);
47578+ real_root = NULL;
47579+ if (real_root_mnt)
47580+ mntput(real_root_mnt);
47581+ real_root_mnt = NULL;
47582+
47583+ /* free all object hash tables */
47584+
47585+ FOR_EACH_ROLE_START(r)
47586+ if (r->subj_hash == NULL)
47587+ goto next_role;
47588+ FOR_EACH_SUBJECT_START(r, s, x)
47589+ if (s->obj_hash == NULL)
47590+ break;
47591+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47592+ kfree(s->obj_hash);
47593+ else
47594+ vfree(s->obj_hash);
47595+ FOR_EACH_SUBJECT_END(s, x)
47596+ FOR_EACH_NESTED_SUBJECT_START(r, s)
47597+ if (s->obj_hash == NULL)
47598+ break;
47599+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47600+ kfree(s->obj_hash);
47601+ else
47602+ vfree(s->obj_hash);
47603+ FOR_EACH_NESTED_SUBJECT_END(s)
47604+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
47605+ kfree(r->subj_hash);
47606+ else
47607+ vfree(r->subj_hash);
47608+ r->subj_hash = NULL;
47609+next_role:
47610+ FOR_EACH_ROLE_END(r)
47611+
47612+ acl_free_all();
47613+
47614+ if (acl_role_set.r_hash) {
47615+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
47616+ PAGE_SIZE)
47617+ kfree(acl_role_set.r_hash);
47618+ else
47619+ vfree(acl_role_set.r_hash);
47620+ }
47621+ if (name_set.n_hash) {
47622+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
47623+ PAGE_SIZE)
47624+ kfree(name_set.n_hash);
47625+ else
47626+ vfree(name_set.n_hash);
47627+ }
47628+
47629+ if (inodev_set.i_hash) {
47630+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
47631+ PAGE_SIZE)
47632+ kfree(inodev_set.i_hash);
47633+ else
47634+ vfree(inodev_set.i_hash);
47635+ }
47636+
47637+ gr_free_uidset();
47638+
47639+ memset(&name_set, 0, sizeof (struct name_db));
47640+ memset(&inodev_set, 0, sizeof (struct inodev_db));
47641+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
47642+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
47643+
47644+ default_role = NULL;
47645+ role_list = NULL;
47646+
47647+ return;
47648+}
47649+
47650+static __u32
47651+count_user_objs(struct acl_object_label *userp)
47652+{
47653+ struct acl_object_label o_tmp;
47654+ __u32 num = 0;
47655+
47656+ while (userp) {
47657+ if (copy_from_user(&o_tmp, userp,
47658+ sizeof (struct acl_object_label)))
47659+ break;
47660+
47661+ userp = o_tmp.prev;
47662+ num++;
47663+ }
47664+
47665+ return num;
47666+}
47667+
47668+static struct acl_subject_label *
47669+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
47670+
47671+static int
47672+copy_user_glob(struct acl_object_label *obj)
47673+{
47674+ struct acl_object_label *g_tmp, **guser;
47675+ unsigned int len;
47676+ char *tmp;
47677+
47678+ if (obj->globbed == NULL)
47679+ return 0;
47680+
47681+ guser = &obj->globbed;
47682+ while (*guser) {
47683+ g_tmp = (struct acl_object_label *)
47684+ acl_alloc(sizeof (struct acl_object_label));
47685+ if (g_tmp == NULL)
47686+ return -ENOMEM;
47687+
47688+ if (copy_from_user(g_tmp, *guser,
47689+ sizeof (struct acl_object_label)))
47690+ return -EFAULT;
47691+
47692+ len = strnlen_user(g_tmp->filename, PATH_MAX);
47693+
47694+ if (!len || len >= PATH_MAX)
47695+ return -EINVAL;
47696+
47697+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47698+ return -ENOMEM;
47699+
47700+ if (copy_from_user(tmp, g_tmp->filename, len))
47701+ return -EFAULT;
47702+ tmp[len-1] = '\0';
47703+ g_tmp->filename = tmp;
47704+
47705+ *guser = g_tmp;
47706+ guser = &(g_tmp->next);
47707+ }
47708+
47709+ return 0;
47710+}
47711+
47712+static int
47713+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
47714+ struct acl_role_label *role)
47715+{
47716+ struct acl_object_label *o_tmp;
47717+ unsigned int len;
47718+ int ret;
47719+ char *tmp;
47720+
47721+ while (userp) {
47722+ if ((o_tmp = (struct acl_object_label *)
47723+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
47724+ return -ENOMEM;
47725+
47726+ if (copy_from_user(o_tmp, userp,
47727+ sizeof (struct acl_object_label)))
47728+ return -EFAULT;
47729+
47730+ userp = o_tmp->prev;
47731+
47732+ len = strnlen_user(o_tmp->filename, PATH_MAX);
47733+
47734+ if (!len || len >= PATH_MAX)
47735+ return -EINVAL;
47736+
47737+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47738+ return -ENOMEM;
47739+
47740+ if (copy_from_user(tmp, o_tmp->filename, len))
47741+ return -EFAULT;
47742+ tmp[len-1] = '\0';
47743+ o_tmp->filename = tmp;
47744+
47745+ insert_acl_obj_label(o_tmp, subj);
47746+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
47747+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
47748+ return -ENOMEM;
47749+
47750+ ret = copy_user_glob(o_tmp);
47751+ if (ret)
47752+ return ret;
47753+
47754+ if (o_tmp->nested) {
47755+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
47756+ if (IS_ERR(o_tmp->nested))
47757+ return PTR_ERR(o_tmp->nested);
47758+
47759+ /* insert into nested subject list */
47760+ o_tmp->nested->next = role->hash->first;
47761+ role->hash->first = o_tmp->nested;
47762+ }
47763+ }
47764+
47765+ return 0;
47766+}
47767+
47768+static __u32
47769+count_user_subjs(struct acl_subject_label *userp)
47770+{
47771+ struct acl_subject_label s_tmp;
47772+ __u32 num = 0;
47773+
47774+ while (userp) {
47775+ if (copy_from_user(&s_tmp, userp,
47776+ sizeof (struct acl_subject_label)))
47777+ break;
47778+
47779+ userp = s_tmp.prev;
47780+ /* do not count nested subjects against this count, since
47781+ they are not included in the hash table, but are
47782+ attached to objects. We have already counted
47783+ the subjects in userspace for the allocation
47784+ stack
47785+ */
47786+ if (!(s_tmp.mode & GR_NESTED))
47787+ num++;
47788+ }
47789+
47790+ return num;
47791+}
47792+
47793+static int
47794+copy_user_allowedips(struct acl_role_label *rolep)
47795+{
47796+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
47797+
47798+ ruserip = rolep->allowed_ips;
47799+
47800+ while (ruserip) {
47801+ rlast = rtmp;
47802+
47803+ if ((rtmp = (struct role_allowed_ip *)
47804+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
47805+ return -ENOMEM;
47806+
47807+ if (copy_from_user(rtmp, ruserip,
47808+ sizeof (struct role_allowed_ip)))
47809+ return -EFAULT;
47810+
47811+ ruserip = rtmp->prev;
47812+
47813+ if (!rlast) {
47814+ rtmp->prev = NULL;
47815+ rolep->allowed_ips = rtmp;
47816+ } else {
47817+ rlast->next = rtmp;
47818+ rtmp->prev = rlast;
47819+ }
47820+
47821+ if (!ruserip)
47822+ rtmp->next = NULL;
47823+ }
47824+
47825+ return 0;
47826+}
47827+
47828+static int
47829+copy_user_transitions(struct acl_role_label *rolep)
47830+{
47831+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
47832+
47833+ unsigned int len;
47834+ char *tmp;
47835+
47836+ rusertp = rolep->transitions;
47837+
47838+ while (rusertp) {
47839+ rlast = rtmp;
47840+
47841+ if ((rtmp = (struct role_transition *)
47842+ acl_alloc(sizeof (struct role_transition))) == NULL)
47843+ return -ENOMEM;
47844+
47845+ if (copy_from_user(rtmp, rusertp,
47846+ sizeof (struct role_transition)))
47847+ return -EFAULT;
47848+
47849+ rusertp = rtmp->prev;
47850+
47851+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
47852+
47853+ if (!len || len >= GR_SPROLE_LEN)
47854+ return -EINVAL;
47855+
47856+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47857+ return -ENOMEM;
47858+
47859+ if (copy_from_user(tmp, rtmp->rolename, len))
47860+ return -EFAULT;
47861+ tmp[len-1] = '\0';
47862+ rtmp->rolename = tmp;
47863+
47864+ if (!rlast) {
47865+ rtmp->prev = NULL;
47866+ rolep->transitions = rtmp;
47867+ } else {
47868+ rlast->next = rtmp;
47869+ rtmp->prev = rlast;
47870+ }
47871+
47872+ if (!rusertp)
47873+ rtmp->next = NULL;
47874+ }
47875+
47876+ return 0;
47877+}
47878+
47879+static struct acl_subject_label *
47880+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
47881+{
47882+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
47883+ unsigned int len;
47884+ char *tmp;
47885+ __u32 num_objs;
47886+ struct acl_ip_label **i_tmp, *i_utmp2;
47887+ struct gr_hash_struct ghash;
47888+ struct subject_map *subjmap;
47889+ unsigned int i_num;
47890+ int err;
47891+
47892+ s_tmp = lookup_subject_map(userp);
47893+
47894+ /* we've already copied this subject into the kernel, just return
47895+ the reference to it, and don't copy it over again
47896+ */
47897+ if (s_tmp)
47898+ return(s_tmp);
47899+
47900+ if ((s_tmp = (struct acl_subject_label *)
47901+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
47902+ return ERR_PTR(-ENOMEM);
47903+
47904+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
47905+ if (subjmap == NULL)
47906+ return ERR_PTR(-ENOMEM);
47907+
47908+ subjmap->user = userp;
47909+ subjmap->kernel = s_tmp;
47910+ insert_subj_map_entry(subjmap);
47911+
47912+ if (copy_from_user(s_tmp, userp,
47913+ sizeof (struct acl_subject_label)))
47914+ return ERR_PTR(-EFAULT);
47915+
47916+ len = strnlen_user(s_tmp->filename, PATH_MAX);
47917+
47918+ if (!len || len >= PATH_MAX)
47919+ return ERR_PTR(-EINVAL);
47920+
47921+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47922+ return ERR_PTR(-ENOMEM);
47923+
47924+ if (copy_from_user(tmp, s_tmp->filename, len))
47925+ return ERR_PTR(-EFAULT);
47926+ tmp[len-1] = '\0';
47927+ s_tmp->filename = tmp;
47928+
47929+ if (!strcmp(s_tmp->filename, "/"))
47930+ role->root_label = s_tmp;
47931+
47932+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
47933+ return ERR_PTR(-EFAULT);
47934+
47935+ /* copy user and group transition tables */
47936+
47937+ if (s_tmp->user_trans_num) {
47938+ uid_t *uidlist;
47939+
47940+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
47941+ if (uidlist == NULL)
47942+ return ERR_PTR(-ENOMEM);
47943+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
47944+ return ERR_PTR(-EFAULT);
47945+
47946+ s_tmp->user_transitions = uidlist;
47947+ }
47948+
47949+ if (s_tmp->group_trans_num) {
47950+ gid_t *gidlist;
47951+
47952+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
47953+ if (gidlist == NULL)
47954+ return ERR_PTR(-ENOMEM);
47955+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
47956+ return ERR_PTR(-EFAULT);
47957+
47958+ s_tmp->group_transitions = gidlist;
47959+ }
47960+
47961+ /* set up object hash table */
47962+ num_objs = count_user_objs(ghash.first);
47963+
47964+ s_tmp->obj_hash_size = num_objs;
47965+ s_tmp->obj_hash =
47966+ (struct acl_object_label **)
47967+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
47968+
47969+ if (!s_tmp->obj_hash)
47970+ return ERR_PTR(-ENOMEM);
47971+
47972+ memset(s_tmp->obj_hash, 0,
47973+ s_tmp->obj_hash_size *
47974+ sizeof (struct acl_object_label *));
47975+
47976+ /* add in objects */
47977+ err = copy_user_objs(ghash.first, s_tmp, role);
47978+
47979+ if (err)
47980+ return ERR_PTR(err);
47981+
47982+ /* set pointer for parent subject */
47983+ if (s_tmp->parent_subject) {
47984+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
47985+
47986+ if (IS_ERR(s_tmp2))
47987+ return s_tmp2;
47988+
47989+ s_tmp->parent_subject = s_tmp2;
47990+ }
47991+
47992+ /* add in ip acls */
47993+
47994+ if (!s_tmp->ip_num) {
47995+ s_tmp->ips = NULL;
47996+ goto insert;
47997+ }
47998+
47999+ i_tmp =
48000+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48001+ sizeof (struct acl_ip_label *));
48002+
48003+ if (!i_tmp)
48004+ return ERR_PTR(-ENOMEM);
48005+
48006+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48007+ *(i_tmp + i_num) =
48008+ (struct acl_ip_label *)
48009+ acl_alloc(sizeof (struct acl_ip_label));
48010+ if (!*(i_tmp + i_num))
48011+ return ERR_PTR(-ENOMEM);
48012+
48013+ if (copy_from_user
48014+ (&i_utmp2, s_tmp->ips + i_num,
48015+ sizeof (struct acl_ip_label *)))
48016+ return ERR_PTR(-EFAULT);
48017+
48018+ if (copy_from_user
48019+ (*(i_tmp + i_num), i_utmp2,
48020+ sizeof (struct acl_ip_label)))
48021+ return ERR_PTR(-EFAULT);
48022+
48023+ if ((*(i_tmp + i_num))->iface == NULL)
48024+ continue;
48025+
48026+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48027+ if (!len || len >= IFNAMSIZ)
48028+ return ERR_PTR(-EINVAL);
48029+ tmp = acl_alloc(len);
48030+ if (tmp == NULL)
48031+ return ERR_PTR(-ENOMEM);
48032+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48033+ return ERR_PTR(-EFAULT);
48034+ (*(i_tmp + i_num))->iface = tmp;
48035+ }
48036+
48037+ s_tmp->ips = i_tmp;
48038+
48039+insert:
48040+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48041+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48042+ return ERR_PTR(-ENOMEM);
48043+
48044+ return s_tmp;
48045+}
48046+
48047+static int
48048+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48049+{
48050+ struct acl_subject_label s_pre;
48051+ struct acl_subject_label * ret;
48052+ int err;
48053+
48054+ while (userp) {
48055+ if (copy_from_user(&s_pre, userp,
48056+ sizeof (struct acl_subject_label)))
48057+ return -EFAULT;
48058+
48059+ /* do not add nested subjects here, add
48060+ while parsing objects
48061+ */
48062+
48063+ if (s_pre.mode & GR_NESTED) {
48064+ userp = s_pre.prev;
48065+ continue;
48066+ }
48067+
48068+ ret = do_copy_user_subj(userp, role);
48069+
48070+ err = PTR_ERR(ret);
48071+ if (IS_ERR(ret))
48072+ return err;
48073+
48074+ insert_acl_subj_label(ret, role);
48075+
48076+ userp = s_pre.prev;
48077+ }
48078+
48079+ return 0;
48080+}
48081+
48082+static int
48083+copy_user_acl(struct gr_arg *arg)
48084+{
48085+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48086+ struct sprole_pw *sptmp;
48087+ struct gr_hash_struct *ghash;
48088+ uid_t *domainlist;
48089+ unsigned int r_num;
48090+ unsigned int len;
48091+ char *tmp;
48092+ int err = 0;
48093+ __u16 i;
48094+ __u32 num_subjs;
48095+
48096+ /* we need a default and kernel role */
48097+ if (arg->role_db.num_roles < 2)
48098+ return -EINVAL;
48099+
48100+ /* copy special role authentication info from userspace */
48101+
48102+ num_sprole_pws = arg->num_sprole_pws;
48103+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48104+
48105+ if (!acl_special_roles) {
48106+ err = -ENOMEM;
48107+ goto cleanup;
48108+ }
48109+
48110+ for (i = 0; i < num_sprole_pws; i++) {
48111+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48112+ if (!sptmp) {
48113+ err = -ENOMEM;
48114+ goto cleanup;
48115+ }
48116+ if (copy_from_user(sptmp, arg->sprole_pws + i,
48117+ sizeof (struct sprole_pw))) {
48118+ err = -EFAULT;
48119+ goto cleanup;
48120+ }
48121+
48122+ len =
48123+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48124+
48125+ if (!len || len >= GR_SPROLE_LEN) {
48126+ err = -EINVAL;
48127+ goto cleanup;
48128+ }
48129+
48130+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48131+ err = -ENOMEM;
48132+ goto cleanup;
48133+ }
48134+
48135+ if (copy_from_user(tmp, sptmp->rolename, len)) {
48136+ err = -EFAULT;
48137+ goto cleanup;
48138+ }
48139+ tmp[len-1] = '\0';
48140+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48141+ printk(KERN_ALERT "Copying special role %s\n", tmp);
48142+#endif
48143+ sptmp->rolename = tmp;
48144+ acl_special_roles[i] = sptmp;
48145+ }
48146+
48147+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48148+
48149+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48150+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
48151+
48152+ if (!r_tmp) {
48153+ err = -ENOMEM;
48154+ goto cleanup;
48155+ }
48156+
48157+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
48158+ sizeof (struct acl_role_label *))) {
48159+ err = -EFAULT;
48160+ goto cleanup;
48161+ }
48162+
48163+ if (copy_from_user(r_tmp, r_utmp2,
48164+ sizeof (struct acl_role_label))) {
48165+ err = -EFAULT;
48166+ goto cleanup;
48167+ }
48168+
48169+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48170+
48171+		if (!len || len >= GR_SPROLE_LEN) {
48172+ err = -EINVAL;
48173+ goto cleanup;
48174+ }
48175+
48176+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48177+ err = -ENOMEM;
48178+ goto cleanup;
48179+ }
48180+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
48181+ err = -EFAULT;
48182+ goto cleanup;
48183+ }
48184+ tmp[len-1] = '\0';
48185+ r_tmp->rolename = tmp;
48186+
48187+ if (!strcmp(r_tmp->rolename, "default")
48188+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48189+ default_role = r_tmp;
48190+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48191+ kernel_role = r_tmp;
48192+ }
48193+
48194+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48195+ err = -ENOMEM;
48196+ goto cleanup;
48197+ }
48198+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48199+ err = -EFAULT;
48200+ goto cleanup;
48201+ }
48202+
48203+ r_tmp->hash = ghash;
48204+
48205+ num_subjs = count_user_subjs(r_tmp->hash->first);
48206+
48207+ r_tmp->subj_hash_size = num_subjs;
48208+ r_tmp->subj_hash =
48209+ (struct acl_subject_label **)
48210+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48211+
48212+ if (!r_tmp->subj_hash) {
48213+ err = -ENOMEM;
48214+ goto cleanup;
48215+ }
48216+
48217+ err = copy_user_allowedips(r_tmp);
48218+ if (err)
48219+ goto cleanup;
48220+
48221+ /* copy domain info */
48222+ if (r_tmp->domain_children != NULL) {
48223+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48224+ if (domainlist == NULL) {
48225+ err = -ENOMEM;
48226+ goto cleanup;
48227+ }
48228+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48229+ err = -EFAULT;
48230+ goto cleanup;
48231+ }
48232+ r_tmp->domain_children = domainlist;
48233+ }
48234+
48235+ err = copy_user_transitions(r_tmp);
48236+ if (err)
48237+ goto cleanup;
48238+
48239+ memset(r_tmp->subj_hash, 0,
48240+ r_tmp->subj_hash_size *
48241+ sizeof (struct acl_subject_label *));
48242+
48243+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48244+
48245+ if (err)
48246+ goto cleanup;
48247+
48248+ /* set nested subject list to null */
48249+ r_tmp->hash->first = NULL;
48250+
48251+ insert_acl_role_label(r_tmp);
48252+ }
48253+
48254+ goto return_err;
48255+ cleanup:
48256+ free_variables();
48257+ return_err:
48258+ return err;
48259+
48260+}
48261+
48262+static int
48263+gracl_init(struct gr_arg *args)
48264+{
48265+ int error = 0;
48266+
48267+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48268+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48269+
48270+ if (init_variables(args)) {
48271+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48272+ error = -ENOMEM;
48273+ free_variables();
48274+ goto out;
48275+ }
48276+
48277+ error = copy_user_acl(args);
48278+ free_init_variables();
48279+ if (error) {
48280+ free_variables();
48281+ goto out;
48282+ }
48283+
48284+ if ((error = gr_set_acls(0))) {
48285+ free_variables();
48286+ goto out;
48287+ }
48288+
48289+ pax_open_kernel();
48290+ gr_status |= GR_READY;
48291+ pax_close_kernel();
48292+
48293+ out:
48294+ return error;
48295+}
48296+
48297+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
48298+
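+/* path-aware variant used for RBAC object globbing: '?' does not match '/',
+   and '[...]' classes support '!'/'^' negation and character ranges */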
48299+static int
48300+glob_match(const char *p, const char *n)
48301+{
48302+ char c;
48303+
48304+ while ((c = *p++) != '\0') {
48305+ switch (c) {
48306+ case '?':
48307+ if (*n == '\0')
48308+ return 1;
48309+ else if (*n == '/')
48310+ return 1;
48311+ break;
48312+ case '\\':
48313+ if (*n != c)
48314+ return 1;
48315+ break;
48316+ case '*':
48317+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
48318+ if (*n == '/')
48319+ return 1;
48320+ else if (c == '?') {
48321+ if (*n == '\0')
48322+ return 1;
48323+ else
48324+ ++n;
48325+ }
48326+ }
48327+ if (c == '\0') {
48328+ return 0;
48329+ } else {
48330+ const char *endp;
48331+
48332+ if ((endp = strchr(n, '/')) == NULL)
48333+ endp = n + strlen(n);
48334+
48335+ if (c == '[') {
48336+ for (--p; n < endp; ++n)
48337+ if (!glob_match(p, n))
48338+ return 0;
48339+ } else if (c == '/') {
48340+ while (*n != '\0' && *n != '/')
48341+ ++n;
48342+ if (*n == '/' && !glob_match(p, n + 1))
48343+ return 0;
48344+ } else {
48345+ for (--p; n < endp; ++n)
48346+ if (*n == c && !glob_match(p, n))
48347+ return 0;
48348+ }
48349+
48350+ return 1;
48351+ }
48352+ case '[':
48353+ {
48354+ int not;
48355+ char cold;
48356+
48357+ if (*n == '\0' || *n == '/')
48358+ return 1;
48359+
48360+ not = (*p == '!' || *p == '^');
48361+ if (not)
48362+ ++p;
48363+
48364+ c = *p++;
48365+ for (;;) {
48366+ unsigned char fn = (unsigned char)*n;
48367+
48368+ if (c == '\0')
48369+ return 1;
48370+ else {
48371+ if (c == fn)
48372+ goto matched;
48373+ cold = c;
48374+ c = *p++;
48375+
48376+ if (c == '-' && *p != ']') {
48377+ unsigned char cend = *p++;
48378+
48379+ if (cend == '\0')
48380+ return 1;
48381+
48382+ if (cold <= fn && fn <= cend)
48383+ goto matched;
48384+
48385+ c = *p++;
48386+ }
48387+ }
48388+
48389+ if (c == ']')
48390+ break;
48391+ }
48392+ if (!not)
48393+ return 1;
48394+ break;
48395+ matched:
48396+ while (c != ']') {
48397+ if (c == '\0')
48398+ return 1;
48399+
48400+ c = *p++;
48401+ }
48402+ if (not)
48403+ return 1;
48404+ }
48405+ break;
48406+ default:
48407+ if (c != *n)
48408+ return 1;
48409+ }
48410+
48411+ ++n;
48412+ }
48413+
48414+ if (*n == '\0')
48415+ return 0;
48416+
48417+ if (*n == '/')
48418+ return 0;
48419+
48420+ return 1;
48421+}
48422+
48423+static struct acl_object_label *
48424+chk_glob_label(struct acl_object_label *globbed,
48425+ struct dentry *dentry, struct vfsmount *mnt, char **path)
48426+{
48427+ struct acl_object_label *tmp;
48428+
48429+ if (*path == NULL)
48430+ *path = gr_to_filename_nolock(dentry, mnt);
48431+
48432+ tmp = globbed;
48433+
48434+ while (tmp) {
48435+ if (!glob_match(tmp->filename, *path))
48436+ return tmp;
48437+ tmp = tmp->next;
48438+ }
48439+
48440+ return NULL;
48441+}
48442+
48443+static struct acl_object_label *
48444+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48445+ const ino_t curr_ino, const dev_t curr_dev,
48446+ const struct acl_subject_label *subj, char **path, const int checkglob)
48447+{
48448+ struct acl_subject_label *tmpsubj;
48449+ struct acl_object_label *retval;
48450+ struct acl_object_label *retval2;
48451+
48452+ tmpsubj = (struct acl_subject_label *) subj;
48453+ read_lock(&gr_inode_lock);
48454+ do {
48455+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
48456+ if (retval) {
48457+ if (checkglob && retval->globbed) {
48458+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
48459+ (struct vfsmount *)orig_mnt, path);
48460+ if (retval2)
48461+ retval = retval2;
48462+ }
48463+ break;
48464+ }
48465+ } while ((tmpsubj = tmpsubj->parent_subject));
48466+ read_unlock(&gr_inode_lock);
48467+
48468+ return retval;
48469+}
48470+
48471+static __inline__ struct acl_object_label *
48472+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48473+ const struct dentry *curr_dentry,
48474+ const struct acl_subject_label *subj, char **path, const int checkglob)
48475+{
48476+ int newglob = checkglob;
48477+
48478+	/* if we aren't checking a subdirectory of the original path yet, skip glob
48479+	   checking, so that a / * rule does not match instead of the / object.
48480+	   create lookups that call this function are the exception: they look up
48481+	   on the parent and therefore need glob checks on all paths
48482+	*/
48483+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
48484+ newglob = GR_NO_GLOB;
48485+
48486+ return __full_lookup(orig_dentry, orig_mnt,
48487+ curr_dentry->d_inode->i_ino,
48488+ __get_dev(curr_dentry), subj, path, newglob);
48489+}
48490+
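+/* walk from the supplied dentry/mnt upward, crossing mountpoints, toward
+   real_root and return the first object label the subject (or one of its
+   parent subjects) defines for a component; shared memory, pipes, sockets
+   and other private inodes are answered with a fake object instead */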
48491+static struct acl_object_label *
48492+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48493+ const struct acl_subject_label *subj, char *path, const int checkglob)
48494+{
48495+ struct dentry *dentry = (struct dentry *) l_dentry;
48496+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48497+ struct acl_object_label *retval;
48498+
48499+ spin_lock(&dcache_lock);
48500+ spin_lock(&vfsmount_lock);
48501+
48502+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
48503+#ifdef CONFIG_NET
48504+ mnt == sock_mnt ||
48505+#endif
48506+#ifdef CONFIG_HUGETLBFS
48507+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
48508+#endif
48509+ /* ignore Eric Biederman */
48510+ IS_PRIVATE(l_dentry->d_inode))) {
48511+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
48512+ goto out;
48513+ }
48514+
48515+ for (;;) {
48516+ if (dentry == real_root && mnt == real_root_mnt)
48517+ break;
48518+
48519+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48520+ if (mnt->mnt_parent == mnt)
48521+ break;
48522+
48523+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48524+ if (retval != NULL)
48525+ goto out;
48526+
48527+ dentry = mnt->mnt_mountpoint;
48528+ mnt = mnt->mnt_parent;
48529+ continue;
48530+ }
48531+
48532+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48533+ if (retval != NULL)
48534+ goto out;
48535+
48536+ dentry = dentry->d_parent;
48537+ }
48538+
48539+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48540+
48541+ if (retval == NULL)
48542+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
48543+out:
48544+ spin_unlock(&vfsmount_lock);
48545+ spin_unlock(&dcache_lock);
48546+
48547+ BUG_ON(retval == NULL);
48548+
48549+ return retval;
48550+}
48551+
48552+static __inline__ struct acl_object_label *
48553+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48554+ const struct acl_subject_label *subj)
48555+{
48556+ char *path = NULL;
48557+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
48558+}
48559+
48560+static __inline__ struct acl_object_label *
48561+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48562+ const struct acl_subject_label *subj)
48563+{
48564+ char *path = NULL;
48565+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
48566+}
48567+
48568+static __inline__ struct acl_object_label *
48569+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48570+ const struct acl_subject_label *subj, char *path)
48571+{
48572+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
48573+}
48574+
48575+static struct acl_subject_label *
48576+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48577+ const struct acl_role_label *role)
48578+{
48579+ struct dentry *dentry = (struct dentry *) l_dentry;
48580+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48581+ struct acl_subject_label *retval;
48582+
48583+ spin_lock(&dcache_lock);
48584+ spin_lock(&vfsmount_lock);
48585+
48586+ for (;;) {
48587+ if (dentry == real_root && mnt == real_root_mnt)
48588+ break;
48589+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48590+ if (mnt->mnt_parent == mnt)
48591+ break;
48592+
48593+ read_lock(&gr_inode_lock);
48594+ retval =
48595+ lookup_acl_subj_label(dentry->d_inode->i_ino,
48596+ __get_dev(dentry), role);
48597+ read_unlock(&gr_inode_lock);
48598+ if (retval != NULL)
48599+ goto out;
48600+
48601+ dentry = mnt->mnt_mountpoint;
48602+ mnt = mnt->mnt_parent;
48603+ continue;
48604+ }
48605+
48606+ read_lock(&gr_inode_lock);
48607+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48608+ __get_dev(dentry), role);
48609+ read_unlock(&gr_inode_lock);
48610+ if (retval != NULL)
48611+ goto out;
48612+
48613+ dentry = dentry->d_parent;
48614+ }
48615+
48616+ read_lock(&gr_inode_lock);
48617+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48618+ __get_dev(dentry), role);
48619+ read_unlock(&gr_inode_lock);
48620+
48621+ if (unlikely(retval == NULL)) {
48622+ read_lock(&gr_inode_lock);
48623+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
48624+ __get_dev(real_root), role);
48625+ read_unlock(&gr_inode_lock);
48626+ }
48627+out:
48628+ spin_unlock(&vfsmount_lock);
48629+ spin_unlock(&dcache_lock);
48630+
48631+ BUG_ON(retval == NULL);
48632+
48633+ return retval;
48634+}
48635+
48636+static void
48637+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
48638+{
48639+ struct task_struct *task = current;
48640+ const struct cred *cred = current_cred();
48641+
48642+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48643+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48644+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48645+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
48646+
48647+ return;
48648+}
48649+
48650+static void
48651+gr_log_learn_sysctl(const char *path, const __u32 mode)
48652+{
48653+ struct task_struct *task = current;
48654+ const struct cred *cred = current_cred();
48655+
48656+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48657+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48658+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48659+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
48660+
48661+ return;
48662+}
48663+
48664+static void
48665+gr_log_learn_id_change(const char type, const unsigned int real,
48666+ const unsigned int effective, const unsigned int fs)
48667+{
48668+ struct task_struct *task = current;
48669+ const struct cred *cred = current_cred();
48670+
48671+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
48672+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48673+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48674+ type, real, effective, fs, &task->signal->saved_ip);
48675+
48676+ return;
48677+}
48678+
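+/* hardlink policy: the subject needs create rights on the new name, and the
+   modes the new name would grant must already be held on the linked-to
+   object (plus setid rights for suid/sgid targets), so a hardlink cannot
+   widen access */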
48679+__u32
48680+gr_check_link(const struct dentry * new_dentry,
48681+ const struct dentry * parent_dentry,
48682+ const struct vfsmount * parent_mnt,
48683+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
48684+{
48685+ struct acl_object_label *obj;
48686+ __u32 oldmode, newmode;
48687+ __u32 needmode;
48688+
48689+ if (unlikely(!(gr_status & GR_READY)))
48690+ return (GR_CREATE | GR_LINK);
48691+
48692+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
48693+ oldmode = obj->mode;
48694+
48695+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48696+ oldmode |= (GR_CREATE | GR_LINK);
48697+
48698+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
48699+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
48700+ needmode |= GR_SETID | GR_AUDIT_SETID;
48701+
48702+ newmode =
48703+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
48704+ oldmode | needmode);
48705+
48706+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
48707+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
48708+ GR_INHERIT | GR_AUDIT_INHERIT);
48709+
48710+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
48711+ goto bad;
48712+
48713+ if ((oldmode & needmode) != needmode)
48714+ goto bad;
48715+
48716+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
48717+ if ((newmode & needmode) != needmode)
48718+ goto bad;
48719+
48720+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
48721+ return newmode;
48722+bad:
48723+ needmode = oldmode;
48724+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
48725+ needmode |= GR_SETID;
48726+
48727+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
48728+ gr_log_learn(old_dentry, old_mnt, needmode);
48729+ return (GR_CREATE | GR_LINK);
48730+ } else if (newmode & GR_SUPPRESS)
48731+ return GR_SUPPRESS;
48732+ else
48733+ return 0;
48734+}
48735+
48736+__u32
48737+gr_search_file(const struct dentry * dentry, const __u32 mode,
48738+ const struct vfsmount * mnt)
48739+{
48740+ __u32 retval = mode;
48741+ struct acl_subject_label *curracl;
48742+ struct acl_object_label *currobj;
48743+
48744+ if (unlikely(!(gr_status & GR_READY)))
48745+ return (mode & ~GR_AUDITS);
48746+
48747+ curracl = current->acl;
48748+
48749+ currobj = chk_obj_label(dentry, mnt, curracl);
48750+ retval = currobj->mode & mode;
48751+
48752+ /* if we're opening a specified transfer file for writing
48753+ (e.g. /dev/initctl), then transfer our role to init
48754+ */
48755+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
48756+ current->role->roletype & GR_ROLE_PERSIST)) {
48757+ struct task_struct *task = init_pid_ns.child_reaper;
48758+
48759+ if (task->role != current->role) {
48760+ task->acl_sp_role = 0;
48761+ task->acl_role_id = current->acl_role_id;
48762+ task->role = current->role;
48763+ rcu_read_lock();
48764+ read_lock(&grsec_exec_file_lock);
48765+ gr_apply_subject_to_task(task);
48766+ read_unlock(&grsec_exec_file_lock);
48767+ rcu_read_unlock();
48768+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
48769+ }
48770+ }
48771+
48772+ if (unlikely
48773+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
48774+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
48775+ __u32 new_mode = mode;
48776+
48777+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48778+
48779+ retval = new_mode;
48780+
48781+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
48782+ new_mode |= GR_INHERIT;
48783+
48784+ if (!(mode & GR_NOLEARN))
48785+ gr_log_learn(dentry, mnt, new_mode);
48786+ }
48787+
48788+ return retval;
48789+}
48790+
48791+__u32
48792+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
48793+ const struct vfsmount * mnt, const __u32 mode)
48794+{
48795+ struct name_entry *match;
48796+ struct acl_object_label *matchpo;
48797+ struct acl_subject_label *curracl;
48798+ char *path;
48799+ __u32 retval;
48800+
48801+ if (unlikely(!(gr_status & GR_READY)))
48802+ return (mode & ~GR_AUDITS);
48803+
48804+ preempt_disable();
48805+ path = gr_to_filename_rbac(new_dentry, mnt);
48806+ match = lookup_name_entry_create(path);
48807+
48808+ if (!match)
48809+ goto check_parent;
48810+
48811+ curracl = current->acl;
48812+
48813+ read_lock(&gr_inode_lock);
48814+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
48815+ read_unlock(&gr_inode_lock);
48816+
48817+ if (matchpo) {
48818+ if ((matchpo->mode & mode) !=
48819+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
48820+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
48821+ __u32 new_mode = mode;
48822+
48823+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48824+
48825+ gr_log_learn(new_dentry, mnt, new_mode);
48826+
48827+ preempt_enable();
48828+ return new_mode;
48829+ }
48830+ preempt_enable();
48831+ return (matchpo->mode & mode);
48832+ }
48833+
48834+ check_parent:
48835+ curracl = current->acl;
48836+
48837+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
48838+ retval = matchpo->mode & mode;
48839+
48840+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
48841+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
48842+ __u32 new_mode = mode;
48843+
48844+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48845+
48846+ gr_log_learn(new_dentry, mnt, new_mode);
48847+ preempt_enable();
48848+ return new_mode;
48849+ }
48850+
48851+ preempt_enable();
48852+ return retval;
48853+}
48854+
48855+int
48856+gr_check_hidden_task(const struct task_struct *task)
48857+{
48858+ if (unlikely(!(gr_status & GR_READY)))
48859+ return 0;
48860+
48861+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
48862+ return 1;
48863+
48864+ return 0;
48865+}
48866+
48867+int
48868+gr_check_protected_task(const struct task_struct *task)
48869+{
48870+ if (unlikely(!(gr_status & GR_READY) || !task))
48871+ return 0;
48872+
48873+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
48874+ task->acl != current->acl)
48875+ return 1;
48876+
48877+ return 0;
48878+}
48879+
48880+int
48881+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
48882+{
48883+ struct task_struct *p;
48884+ int ret = 0;
48885+
48886+ if (unlikely(!(gr_status & GR_READY) || !pid))
48887+ return ret;
48888+
48889+ read_lock(&tasklist_lock);
48890+ do_each_pid_task(pid, type, p) {
48891+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
48892+ p->acl != current->acl) {
48893+ ret = 1;
48894+ goto out;
48895+ }
48896+ } while_each_pid_task(pid, type, p);
48897+out:
48898+ read_unlock(&tasklist_lock);
48899+
48900+ return ret;
48901+}
48902+
48903+void
48904+gr_copy_label(struct task_struct *tsk)
48905+{
48906+ tsk->signal->used_accept = 0;
48907+ tsk->acl_sp_role = 0;
48908+ tsk->acl_role_id = current->acl_role_id;
48909+ tsk->acl = current->acl;
48910+ tsk->role = current->role;
48911+ tsk->signal->curr_ip = current->signal->curr_ip;
48912+ tsk->signal->saved_ip = current->signal->saved_ip;
48913+ if (current->exec_file)
48914+ get_file(current->exec_file);
48915+ tsk->exec_file = current->exec_file;
48916+ tsk->is_writable = current->is_writable;
48917+ if (unlikely(current->signal->used_accept)) {
48918+ current->signal->curr_ip = 0;
48919+ current->signal->saved_ip = 0;
48920+ }
48921+
48922+ return;
48923+}
48924+
48925+static void
48926+gr_set_proc_res(struct task_struct *task)
48927+{
48928+ struct acl_subject_label *proc;
48929+ unsigned short i;
48930+
48931+ proc = task->acl;
48932+
48933+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
48934+ return;
48935+
48936+ for (i = 0; i < RLIM_NLIMITS; i++) {
48937+ if (!(proc->resmask & (1 << i)))
48938+ continue;
48939+
48940+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
48941+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
48942+ }
48943+
48944+ return;
48945+}
48946+
48947+extern int __gr_process_user_ban(struct user_struct *user);
48948+
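+/* uid-change policy: honour kernel-lockout/brute-force bans first, then
+   enforce the subject's uid transition list -- GR_ID_ALLOW permits only
+   listed uids, GR_ID_DENY rejects any listed uid; returns 1 to deny */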
48949+int
48950+gr_check_user_change(int real, int effective, int fs)
48951+{
48952+ unsigned int i;
48953+ __u16 num;
48954+ uid_t *uidlist;
48955+ int curuid;
48956+ int realok = 0;
48957+ int effectiveok = 0;
48958+ int fsok = 0;
48959+
48960+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
48961+ struct user_struct *user;
48962+
48963+ if (real == -1)
48964+ goto skipit;
48965+
48966+ user = find_user(real);
48967+ if (user == NULL)
48968+ goto skipit;
48969+
48970+ if (__gr_process_user_ban(user)) {
48971+ /* for find_user */
48972+ free_uid(user);
48973+ return 1;
48974+ }
48975+
48976+ /* for find_user */
48977+ free_uid(user);
48978+
48979+skipit:
48980+#endif
48981+
48982+ if (unlikely(!(gr_status & GR_READY)))
48983+ return 0;
48984+
48985+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48986+ gr_log_learn_id_change('u', real, effective, fs);
48987+
48988+ num = current->acl->user_trans_num;
48989+ uidlist = current->acl->user_transitions;
48990+
48991+ if (uidlist == NULL)
48992+ return 0;
48993+
48994+ if (real == -1)
48995+ realok = 1;
48996+ if (effective == -1)
48997+ effectiveok = 1;
48998+ if (fs == -1)
48999+ fsok = 1;
49000+
49001+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
49002+ for (i = 0; i < num; i++) {
49003+ curuid = (int)uidlist[i];
49004+ if (real == curuid)
49005+ realok = 1;
49006+ if (effective == curuid)
49007+ effectiveok = 1;
49008+ if (fs == curuid)
49009+ fsok = 1;
49010+ }
49011+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
49012+ for (i = 0; i < num; i++) {
49013+ curuid = (int)uidlist[i];
49014+ if (real == curuid)
49015+ break;
49016+ if (effective == curuid)
49017+ break;
49018+ if (fs == curuid)
49019+ break;
49020+ }
49021+ /* not in deny list */
49022+ if (i == num) {
49023+ realok = 1;
49024+ effectiveok = 1;
49025+ fsok = 1;
49026+ }
49027+ }
49028+
49029+ if (realok && effectiveok && fsok)
49030+ return 0;
49031+ else {
49032+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49033+ return 1;
49034+ }
49035+}
49036+
49037+int
49038+gr_check_group_change(int real, int effective, int fs)
49039+{
49040+ unsigned int i;
49041+ __u16 num;
49042+ gid_t *gidlist;
49043+ int curgid;
49044+ int realok = 0;
49045+ int effectiveok = 0;
49046+ int fsok = 0;
49047+
49048+ if (unlikely(!(gr_status & GR_READY)))
49049+ return 0;
49050+
49051+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49052+ gr_log_learn_id_change('g', real, effective, fs);
49053+
49054+ num = current->acl->group_trans_num;
49055+ gidlist = current->acl->group_transitions;
49056+
49057+ if (gidlist == NULL)
49058+ return 0;
49059+
49060+ if (real == -1)
49061+ realok = 1;
49062+ if (effective == -1)
49063+ effectiveok = 1;
49064+ if (fs == -1)
49065+ fsok = 1;
49066+
49067+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
49068+ for (i = 0; i < num; i++) {
49069+ curgid = (int)gidlist[i];
49070+ if (real == curgid)
49071+ realok = 1;
49072+ if (effective == curgid)
49073+ effectiveok = 1;
49074+ if (fs == curgid)
49075+ fsok = 1;
49076+ }
49077+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
49078+ for (i = 0; i < num; i++) {
49079+ curgid = (int)gidlist[i];
49080+ if (real == curgid)
49081+ break;
49082+ if (effective == curgid)
49083+ break;
49084+ if (fs == curgid)
49085+ break;
49086+ }
49087+ /* not in deny list */
49088+ if (i == num) {
49089+ realok = 1;
49090+ effectiveok = 1;
49091+ fsok = 1;
49092+ }
49093+ }
49094+
49095+ if (realok && effectiveok && fsok)
49096+ return 0;
49097+ else {
49098+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49099+ return 1;
49100+ }
49101+}
49102+
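+/* called on uid/gid changes: re-resolve the task's role (kernel processes
+   keep kernel_role) and subject from its exec_file, preserving an inherited
+   subject where inheritance is in use, then refresh writability and
+   per-subject resource limits */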
49103+void
49104+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49105+{
49106+ struct acl_role_label *role = task->role;
49107+ struct acl_subject_label *subj = NULL;
49108+ struct acl_object_label *obj;
49109+ struct file *filp;
49110+
49111+ if (unlikely(!(gr_status & GR_READY)))
49112+ return;
49113+
49114+ filp = task->exec_file;
49115+
49116+ /* kernel process, we'll give them the kernel role */
49117+ if (unlikely(!filp)) {
49118+ task->role = kernel_role;
49119+ task->acl = kernel_role->root_label;
49120+ return;
49121+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49122+ role = lookup_acl_role_label(task, uid, gid);
49123+
49124+ /* perform subject lookup in possibly new role
49125+ we can use this result below in the case where role == task->role
49126+ */
49127+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49128+
49129+	/* if we changed uid/gid but ended up in the same role
49130+	   and are using inheritance, don't lose the inherited subject:
49131+	   if the current subject differs from what a normal lookup
49132+	   would produce, we arrived here via inheritance, so keep
49133+	   that subject
49134+	*/
49135+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49136+ (subj == task->acl)))
49137+ task->acl = subj;
49138+
49139+ task->role = role;
49140+
49141+ task->is_writable = 0;
49142+
49143+ /* ignore additional mmap checks for processes that are writable
49144+ by the default ACL */
49145+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49146+ if (unlikely(obj->mode & GR_WRITE))
49147+ task->is_writable = 1;
49148+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49149+ if (unlikely(obj->mode & GR_WRITE))
49150+ task->is_writable = 1;
49151+
49152+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49153+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49154+#endif
49155+
49156+ gr_set_proc_res(task);
49157+
49158+ return;
49159+}
49160+
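+/* exec-time subject switch: refuse the exec if a ptraced (or unsafely
+   shared) task would change subject without the necessary override/ptrace
+   rights, otherwise move to the new (or nested/inherited) subject for the
+   binary and refresh writability and resource limits */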
49161+int
49162+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49163+ const int unsafe_share)
49164+{
49165+ struct task_struct *task = current;
49166+ struct acl_subject_label *newacl;
49167+ struct acl_object_label *obj;
49168+ __u32 retmode;
49169+
49170+ if (unlikely(!(gr_status & GR_READY)))
49171+ return 0;
49172+
49173+ newacl = chk_subj_label(dentry, mnt, task->role);
49174+
49175+ task_lock(task);
49176+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49177+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49178+ !(task->role->roletype & GR_ROLE_GOD) &&
49179+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49180+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49181+ task_unlock(task);
49182+ if (unsafe_share)
49183+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49184+ else
49185+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49186+ return -EACCES;
49187+ }
49188+ task_unlock(task);
49189+
49190+ obj = chk_obj_label(dentry, mnt, task->acl);
49191+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49192+
49193+ if (!(task->acl->mode & GR_INHERITLEARN) &&
49194+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49195+ if (obj->nested)
49196+ task->acl = obj->nested;
49197+ else
49198+ task->acl = newacl;
49199+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49200+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49201+
49202+ task->is_writable = 0;
49203+
49204+ /* ignore additional mmap checks for processes that are writable
49205+ by the default ACL */
49206+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
49207+ if (unlikely(obj->mode & GR_WRITE))
49208+ task->is_writable = 1;
49209+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
49210+ if (unlikely(obj->mode & GR_WRITE))
49211+ task->is_writable = 1;
49212+
49213+ gr_set_proc_res(task);
49214+
49215+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49216+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49217+#endif
49218+ return 0;
49219+}
49220+
49221+/* always called with valid inodev ptr */
49222+static void
49223+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49224+{
49225+ struct acl_object_label *matchpo;
49226+ struct acl_subject_label *matchps;
49227+ struct acl_subject_label *subj;
49228+ struct acl_role_label *role;
49229+ unsigned int x;
49230+
49231+ FOR_EACH_ROLE_START(role)
49232+ FOR_EACH_SUBJECT_START(role, subj, x)
49233+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49234+ matchpo->mode |= GR_DELETED;
49235+ FOR_EACH_SUBJECT_END(subj,x)
49236+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49237+ if (subj->inode == ino && subj->device == dev)
49238+ subj->mode |= GR_DELETED;
49239+ FOR_EACH_NESTED_SUBJECT_END(subj)
49240+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49241+ matchps->mode |= GR_DELETED;
49242+ FOR_EACH_ROLE_END(role)
49243+
49244+ inodev->nentry->deleted = 1;
49245+
49246+ return;
49247+}
49248+
49249+void
49250+gr_handle_delete(const ino_t ino, const dev_t dev)
49251+{
49252+ struct inodev_entry *inodev;
49253+
49254+ if (unlikely(!(gr_status & GR_READY)))
49255+ return;
49256+
49257+ write_lock(&gr_inode_lock);
49258+ inodev = lookup_inodev_entry(ino, dev);
49259+ if (inodev != NULL)
49260+ do_handle_delete(inodev, ino, dev);
49261+ write_unlock(&gr_inode_lock);
49262+
49263+ return;
49264+}
49265+
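+/* the next three helpers re-bind a hash entry that was marked deleted back
+   to a live inode/device pair when a configured path reappears, then
+   re-insert it into its hash table */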
49266+static void
49267+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49268+ const ino_t newinode, const dev_t newdevice,
49269+ struct acl_subject_label *subj)
49270+{
49271+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49272+ struct acl_object_label *match;
49273+
49274+ match = subj->obj_hash[index];
49275+
49276+ while (match && (match->inode != oldinode ||
49277+ match->device != olddevice ||
49278+ !(match->mode & GR_DELETED)))
49279+ match = match->next;
49280+
49281+ if (match && (match->inode == oldinode)
49282+ && (match->device == olddevice)
49283+ && (match->mode & GR_DELETED)) {
49284+ if (match->prev == NULL) {
49285+ subj->obj_hash[index] = match->next;
49286+ if (match->next != NULL)
49287+ match->next->prev = NULL;
49288+ } else {
49289+ match->prev->next = match->next;
49290+ if (match->next != NULL)
49291+ match->next->prev = match->prev;
49292+ }
49293+ match->prev = NULL;
49294+ match->next = NULL;
49295+ match->inode = newinode;
49296+ match->device = newdevice;
49297+ match->mode &= ~GR_DELETED;
49298+
49299+ insert_acl_obj_label(match, subj);
49300+ }
49301+
49302+ return;
49303+}
49304+
49305+static void
49306+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49307+ const ino_t newinode, const dev_t newdevice,
49308+ struct acl_role_label *role)
49309+{
49310+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49311+ struct acl_subject_label *match;
49312+
49313+ match = role->subj_hash[index];
49314+
49315+ while (match && (match->inode != oldinode ||
49316+ match->device != olddevice ||
49317+ !(match->mode & GR_DELETED)))
49318+ match = match->next;
49319+
49320+ if (match && (match->inode == oldinode)
49321+ && (match->device == olddevice)
49322+ && (match->mode & GR_DELETED)) {
49323+ if (match->prev == NULL) {
49324+ role->subj_hash[index] = match->next;
49325+ if (match->next != NULL)
49326+ match->next->prev = NULL;
49327+ } else {
49328+ match->prev->next = match->next;
49329+ if (match->next != NULL)
49330+ match->next->prev = match->prev;
49331+ }
49332+ match->prev = NULL;
49333+ match->next = NULL;
49334+ match->inode = newinode;
49335+ match->device = newdevice;
49336+ match->mode &= ~GR_DELETED;
49337+
49338+ insert_acl_subj_label(match, role);
49339+ }
49340+
49341+ return;
49342+}
49343+
49344+static void
49345+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49346+ const ino_t newinode, const dev_t newdevice)
49347+{
49348+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49349+ struct inodev_entry *match;
49350+
49351+ match = inodev_set.i_hash[index];
49352+
49353+ while (match && (match->nentry->inode != oldinode ||
49354+ match->nentry->device != olddevice || !match->nentry->deleted))
49355+ match = match->next;
49356+
49357+ if (match && (match->nentry->inode == oldinode)
49358+ && (match->nentry->device == olddevice) &&
49359+ match->nentry->deleted) {
49360+ if (match->prev == NULL) {
49361+ inodev_set.i_hash[index] = match->next;
49362+ if (match->next != NULL)
49363+ match->next->prev = NULL;
49364+ } else {
49365+ match->prev->next = match->next;
49366+ if (match->next != NULL)
49367+ match->next->prev = match->prev;
49368+ }
49369+ match->prev = NULL;
49370+ match->next = NULL;
49371+ match->nentry->inode = newinode;
49372+ match->nentry->device = newdevice;
49373+ match->nentry->deleted = 0;
49374+
49375+ insert_inodev_entry(match);
49376+ }
49377+
49378+ return;
49379+}
49380+
49381+static void
49382+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49383+ const struct vfsmount *mnt)
49384+{
49385+ struct acl_subject_label *subj;
49386+ struct acl_role_label *role;
49387+ unsigned int x;
49388+ ino_t inode = dentry->d_inode->i_ino;
49389+ dev_t dev = __get_dev(dentry);
49390+
49391+ FOR_EACH_ROLE_START(role)
49392+ update_acl_subj_label(matchn->inode, matchn->device,
49393+ inode, dev, role);
49394+
49395+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49396+		if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
49397+			subj->inode = inode;
49398+			subj->device = dev;
49399+		}
49400+ FOR_EACH_NESTED_SUBJECT_END(subj)
49401+ FOR_EACH_SUBJECT_START(role, subj, x)
49402+ update_acl_obj_label(matchn->inode, matchn->device,
49403+ inode, dev, subj);
49404+ FOR_EACH_SUBJECT_END(subj,x)
49405+ FOR_EACH_ROLE_END(role)
49406+
49407+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
49408+
49409+ return;
49410+}
49411+
49412+void
49413+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49414+{
49415+ struct name_entry *matchn;
49416+
49417+ if (unlikely(!(gr_status & GR_READY)))
49418+ return;
49419+
49420+ preempt_disable();
49421+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
49422+
49423+ if (unlikely((unsigned long)matchn)) {
49424+ write_lock(&gr_inode_lock);
49425+ do_handle_create(matchn, dentry, mnt);
49426+ write_unlock(&gr_inode_lock);
49427+ }
49428+ preempt_enable();
49429+
49430+ return;
49431+}
49432+
49433+void
49434+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49435+ struct dentry *old_dentry,
49436+ struct dentry *new_dentry,
49437+ struct vfsmount *mnt, const __u8 replace)
49438+{
49439+ struct name_entry *matchn;
49440+ struct inodev_entry *inodev;
49441+ ino_t oldinode = old_dentry->d_inode->i_ino;
49442+ dev_t olddev = __get_dev(old_dentry);
49443+
49444+	/* vfs_rename swaps the name and parent link for old_dentry and
49445+	   new_dentry.
49446+	   at this point, old_dentry has the new name, parent link, and inode
49447+	   for the renamed file.
49448+	   if a file is being replaced by the rename, new_dentry has the inode
49449+	   and name for the replaced file
49450+	*/
49451+
49452+ if (unlikely(!(gr_status & GR_READY)))
49453+ return;
49454+
49455+ preempt_disable();
49456+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
49457+
49458+ /* we wouldn't have to check d_inode if it weren't for
49459+ NFS silly-renaming
49460+ */
49461+
49462+ write_lock(&gr_inode_lock);
49463+ if (unlikely(replace && new_dentry->d_inode)) {
49464+ ino_t newinode = new_dentry->d_inode->i_ino;
49465+ dev_t newdev = __get_dev(new_dentry);
49466+ inodev = lookup_inodev_entry(newinode, newdev);
49467+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
49468+ do_handle_delete(inodev, newinode, newdev);
49469+ }
49470+
49471+ inodev = lookup_inodev_entry(oldinode, olddev);
49472+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
49473+ do_handle_delete(inodev, oldinode, olddev);
49474+
49475+ if (unlikely((unsigned long)matchn))
49476+ do_handle_create(matchn, old_dentry, mnt);
49477+
49478+ write_unlock(&gr_inode_lock);
49479+ preempt_enable();
49480+
49481+ return;
49482+}
49483+
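+/* special-role authentication: the requested role must appear in the current
+   role's transition table and satisfy any allowed-ip restriction; nopw and
+   PAM roles authenticate without a stored salt/sum, otherwise the special
+   role's salt and checksum are returned for password verification */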
49484+static int
49485+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
49486+ unsigned char **sum)
49487+{
49488+ struct acl_role_label *r;
49489+ struct role_allowed_ip *ipp;
49490+ struct role_transition *trans;
49491+ unsigned int i;
49492+ int found = 0;
49493+ u32 curr_ip = current->signal->curr_ip;
49494+
49495+ current->signal->saved_ip = curr_ip;
49496+
49497+ /* check transition table */
49498+
49499+ for (trans = current->role->transitions; trans; trans = trans->next) {
49500+ if (!strcmp(rolename, trans->rolename)) {
49501+ found = 1;
49502+ break;
49503+ }
49504+ }
49505+
49506+ if (!found)
49507+ return 0;
49508+
49509+ /* handle special roles that do not require authentication
49510+ and check ip */
49511+
49512+ FOR_EACH_ROLE_START(r)
49513+ if (!strcmp(rolename, r->rolename) &&
49514+ (r->roletype & GR_ROLE_SPECIAL)) {
49515+ found = 0;
49516+ if (r->allowed_ips != NULL) {
49517+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
49518+ if ((ntohl(curr_ip) & ipp->netmask) ==
49519+ (ntohl(ipp->addr) & ipp->netmask))
49520+ found = 1;
49521+ }
49522+ } else
49523+ found = 2;
49524+ if (!found)
49525+ return 0;
49526+
49527+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
49528+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
49529+ *salt = NULL;
49530+ *sum = NULL;
49531+ return 1;
49532+ }
49533+ }
49534+ FOR_EACH_ROLE_END(r)
49535+
49536+ for (i = 0; i < num_sprole_pws; i++) {
49537+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
49538+ *salt = acl_special_roles[i]->salt;
49539+ *sum = acl_special_roles[i]->sum;
49540+ return 1;
49541+ }
49542+ }
49543+
49544+ return 0;
49545+}
49546+
49547+static void
49548+assign_special_role(char *rolename)
49549+{
49550+ struct acl_object_label *obj;
49551+ struct acl_role_label *r;
49552+ struct acl_role_label *assigned = NULL;
49553+ struct task_struct *tsk;
49554+ struct file *filp;
49555+
49556+ FOR_EACH_ROLE_START(r)
49557+ if (!strcmp(rolename, r->rolename) &&
49558+ (r->roletype & GR_ROLE_SPECIAL)) {
49559+ assigned = r;
49560+ break;
49561+ }
49562+ FOR_EACH_ROLE_END(r)
49563+
49564+ if (!assigned)
49565+ return;
49566+
49567+ read_lock(&tasklist_lock);
49568+ read_lock(&grsec_exec_file_lock);
49569+
49570+ tsk = current->real_parent;
49571+ if (tsk == NULL)
49572+ goto out_unlock;
49573+
49574+ filp = tsk->exec_file;
49575+ if (filp == NULL)
49576+ goto out_unlock;
49577+
49578+ tsk->is_writable = 0;
49579+
49580+ tsk->acl_sp_role = 1;
49581+ tsk->acl_role_id = ++acl_sp_role_value;
49582+ tsk->role = assigned;
49583+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
49584+
49585+ /* ignore additional mmap checks for processes that are writable
49586+ by the default ACL */
49587+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49588+ if (unlikely(obj->mode & GR_WRITE))
49589+ tsk->is_writable = 1;
49590+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
49591+ if (unlikely(obj->mode & GR_WRITE))
49592+ tsk->is_writable = 1;
49593+
49594+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49595+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
49596+#endif
49597+
49598+out_unlock:
49599+ read_unlock(&grsec_exec_file_lock);
49600+ read_unlock(&tasklist_lock);
49601+ return;
49602+}
49603+
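+/* used by GR_STATUS: look for another process, outside the caller's ancestry
+   and not sharing its controlling tty, that holds the same tty character
+   device open; such a potential sniffer is logged, may be killed, and makes
+   the terminal count as insecure (return 0) */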
49604+int gr_check_secure_terminal(struct task_struct *task)
49605+{
49606+ struct task_struct *p, *p2, *p3;
49607+ struct files_struct *files;
49608+ struct fdtable *fdt;
49609+ struct file *our_file = NULL, *file;
49610+ int i;
49611+
49612+ if (task->signal->tty == NULL)
49613+ return 1;
49614+
49615+ files = get_files_struct(task);
49616+ if (files != NULL) {
49617+ rcu_read_lock();
49618+ fdt = files_fdtable(files);
49619+ for (i=0; i < fdt->max_fds; i++) {
49620+ file = fcheck_files(files, i);
49621+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
49622+ get_file(file);
49623+ our_file = file;
49624+ }
49625+ }
49626+ rcu_read_unlock();
49627+ put_files_struct(files);
49628+ }
49629+
49630+ if (our_file == NULL)
49631+ return 1;
49632+
49633+ read_lock(&tasklist_lock);
49634+ do_each_thread(p2, p) {
49635+ files = get_files_struct(p);
49636+ if (files == NULL ||
49637+ (p->signal && p->signal->tty == task->signal->tty)) {
49638+ if (files != NULL)
49639+ put_files_struct(files);
49640+ continue;
49641+ }
49642+ rcu_read_lock();
49643+ fdt = files_fdtable(files);
49644+ for (i=0; i < fdt->max_fds; i++) {
49645+ file = fcheck_files(files, i);
49646+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
49647+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
49648+ p3 = task;
49649+ while (p3->pid > 0) {
49650+ if (p3 == p)
49651+ break;
49652+ p3 = p3->real_parent;
49653+ }
49654+ if (p3 == p)
49655+ break;
49656+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
49657+ gr_handle_alertkill(p);
49658+ rcu_read_unlock();
49659+ put_files_struct(files);
49660+ read_unlock(&tasklist_lock);
49661+ fput(our_file);
49662+ return 0;
49663+ }
49664+ }
49665+ rcu_read_unlock();
49666+ put_files_struct(files);
49667+ } while_each_thread(p2, p);
49668+ read_unlock(&tasklist_lock);
49669+
49670+ fput(our_file);
49671+ return 1;
49672+}
49673+
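+/* control-device write handler (driven by the RBAC admin tool, presumably
+   gradm): a struct gr_arg_wrapper is copied in, checked for size and version,
+   and dispatched on gr_usermode->mode (status, enable, reload, shutdown,
+   segvmod, special-role (un)auth); failed passwords are rate-limited via
+   gr_auth_attempts/gr_auth_expires */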
49674+ssize_t
49675+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
49676+{
49677+ struct gr_arg_wrapper uwrap;
49678+ unsigned char *sprole_salt = NULL;
49679+ unsigned char *sprole_sum = NULL;
49680+ int error = sizeof (struct gr_arg_wrapper);
49681+ int error2 = 0;
49682+
49683+ mutex_lock(&gr_dev_mutex);
49684+
49685+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
49686+ error = -EPERM;
49687+ goto out;
49688+ }
49689+
49690+ if (count != sizeof (struct gr_arg_wrapper)) {
49691+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
49692+ error = -EINVAL;
49693+ goto out;
49694+ }
49695+
49696+
49697+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
49698+ gr_auth_expires = 0;
49699+ gr_auth_attempts = 0;
49700+ }
49701+
49702+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
49703+ error = -EFAULT;
49704+ goto out;
49705+ }
49706+
49707+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
49708+ error = -EINVAL;
49709+ goto out;
49710+ }
49711+
49712+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
49713+ error = -EFAULT;
49714+ goto out;
49715+ }
49716+
49717+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49718+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49719+ time_after(gr_auth_expires, get_seconds())) {
49720+ error = -EBUSY;
49721+ goto out;
49722+ }
49723+
49724+	/* if a non-root user is trying to do anything other than use a special role,
49725+	   do not attempt authentication and do not count the attempt towards
49726+	   authentication lockout
49727+	*/
49728+
49729+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
49730+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49731+ current_uid()) {
49732+ error = -EPERM;
49733+ goto out;
49734+ }
49735+
49736+ /* ensure pw and special role name are null terminated */
49737+
49738+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
49739+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
49740+
49741+	/* Okay.
49742+	 * We have enough of the argument structure (we have yet
49743+	 * to copy_from_user the tables themselves). Copy the tables
49744+	 * only if we need them, i.e. for loading operations. */
49745+
49746+ switch (gr_usermode->mode) {
49747+ case GR_STATUS:
49748+ if (gr_status & GR_READY) {
49749+ error = 1;
49750+ if (!gr_check_secure_terminal(current))
49751+ error = 3;
49752+ } else
49753+ error = 2;
49754+ goto out;
49755+ case GR_SHUTDOWN:
49756+ if ((gr_status & GR_READY)
49757+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49758+ pax_open_kernel();
49759+ gr_status &= ~GR_READY;
49760+ pax_close_kernel();
49761+
49762+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
49763+ free_variables();
49764+ memset(gr_usermode, 0, sizeof (struct gr_arg));
49765+ memset(gr_system_salt, 0, GR_SALT_LEN);
49766+ memset(gr_system_sum, 0, GR_SHA_LEN);
49767+ } else if (gr_status & GR_READY) {
49768+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
49769+ error = -EPERM;
49770+ } else {
49771+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
49772+ error = -EAGAIN;
49773+ }
49774+ break;
49775+ case GR_ENABLE:
49776+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
49777+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
49778+ else {
49779+ if (gr_status & GR_READY)
49780+ error = -EAGAIN;
49781+ else
49782+ error = error2;
49783+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
49784+ }
49785+ break;
49786+ case GR_RELOAD:
49787+ if (!(gr_status & GR_READY)) {
49788+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
49789+ error = -EAGAIN;
49790+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49791+ lock_kernel();
49792+
49793+ pax_open_kernel();
49794+ gr_status &= ~GR_READY;
49795+ pax_close_kernel();
49796+
49797+ free_variables();
49798+ if (!(error2 = gracl_init(gr_usermode))) {
49799+ unlock_kernel();
49800+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
49801+ } else {
49802+ unlock_kernel();
49803+ error = error2;
49804+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
49805+ }
49806+ } else {
49807+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
49808+ error = -EPERM;
49809+ }
49810+ break;
49811+ case GR_SEGVMOD:
49812+ if (unlikely(!(gr_status & GR_READY))) {
49813+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
49814+ error = -EAGAIN;
49815+ break;
49816+ }
49817+
49818+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49819+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
49820+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
49821+ struct acl_subject_label *segvacl;
49822+ segvacl =
49823+ lookup_acl_subj_label(gr_usermode->segv_inode,
49824+ gr_usermode->segv_device,
49825+ current->role);
49826+ if (segvacl) {
49827+ segvacl->crashes = 0;
49828+ segvacl->expires = 0;
49829+ }
49830+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
49831+ gr_remove_uid(gr_usermode->segv_uid);
49832+ }
49833+ } else {
49834+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
49835+ error = -EPERM;
49836+ }
49837+ break;
49838+ case GR_SPROLE:
49839+ case GR_SPROLEPAM:
49840+ if (unlikely(!(gr_status & GR_READY))) {
49841+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
49842+ error = -EAGAIN;
49843+ break;
49844+ }
49845+
49846+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
49847+ current->role->expires = 0;
49848+ current->role->auth_attempts = 0;
49849+ }
49850+
49851+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49852+ time_after(current->role->expires, get_seconds())) {
49853+ error = -EBUSY;
49854+ goto out;
49855+ }
49856+
49857+ if (lookup_special_role_auth
49858+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
49859+ && ((!sprole_salt && !sprole_sum)
49860+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
49861+ char *p = "";
49862+ assign_special_role(gr_usermode->sp_role);
49863+ read_lock(&tasklist_lock);
49864+ if (current->real_parent)
49865+ p = current->real_parent->role->rolename;
49866+ read_unlock(&tasklist_lock);
49867+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
49868+ p, acl_sp_role_value);
49869+ } else {
49870+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
49871+ error = -EPERM;
49872+ if(!(current->role->auth_attempts++))
49873+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
49874+
49875+ goto out;
49876+ }
49877+ break;
49878+ case GR_UNSPROLE:
49879+ if (unlikely(!(gr_status & GR_READY))) {
49880+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
49881+ error = -EAGAIN;
49882+ break;
49883+ }
49884+
49885+ if (current->role->roletype & GR_ROLE_SPECIAL) {
49886+ char *p = "";
49887+ int i = 0;
49888+
49889+ read_lock(&tasklist_lock);
49890+ if (current->real_parent) {
49891+ p = current->real_parent->role->rolename;
49892+ i = current->real_parent->acl_role_id;
49893+ }
49894+ read_unlock(&tasklist_lock);
49895+
49896+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
49897+ gr_set_acls(1);
49898+ } else {
49899+ error = -EPERM;
49900+ goto out;
49901+ }
49902+ break;
49903+ default:
49904+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
49905+ error = -EINVAL;
49906+ break;
49907+ }
49908+
49909+ if (error != -EPERM)
49910+ goto out;
49911+
49912+ if(!(gr_auth_attempts++))
49913+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
49914+
49915+ out:
49916+ mutex_unlock(&gr_dev_mutex);
49917+ return error;
49918+}
49919+
49920+/* must be called with
49921+ rcu_read_lock();
49922+ read_lock(&tasklist_lock);
49923+ read_lock(&grsec_exec_file_lock);
49924+*/
49925+int gr_apply_subject_to_task(struct task_struct *task)
49926+{
49927+ struct acl_object_label *obj;
49928+ char *tmpname;
49929+ struct acl_subject_label *tmpsubj;
49930+ struct file *filp;
49931+ struct name_entry *nmatch;
49932+
49933+ filp = task->exec_file;
49934+ if (filp == NULL)
49935+ return 0;
49936+
49937+	/* the following applies the correct subject to
49938+	   binaries that were already running when the RBAC
49939+	   system was enabled and that have been replaced or
49940+	   deleted since their execution
49941+	   -----
49942+	   when the RBAC system starts, the inode/dev
49943+	   from exec_file may be one the RBAC system
49944+	   is unaware of; it only knows the inode/dev
49945+	   of the file currently present on disk, or the
49946+	   absence of it.
49947+	*/
49948+ preempt_disable();
49949+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
49950+
49951+ nmatch = lookup_name_entry(tmpname);
49952+ preempt_enable();
49953+ tmpsubj = NULL;
49954+ if (nmatch) {
49955+ if (nmatch->deleted)
49956+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
49957+ else
49958+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
49959+ if (tmpsubj != NULL)
49960+ task->acl = tmpsubj;
49961+ }
49962+ if (tmpsubj == NULL)
49963+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
49964+ task->role);
49965+ if (task->acl) {
49966+ task->is_writable = 0;
49967+ /* ignore additional mmap checks for processes that are writable
49968+ by the default ACL */
49969+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49970+ if (unlikely(obj->mode & GR_WRITE))
49971+ task->is_writable = 1;
49972+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49973+ if (unlikely(obj->mode & GR_WRITE))
49974+ task->is_writable = 1;
49975+
49976+ gr_set_proc_res(task);
49977+
49978+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49979+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49980+#endif
49981+ } else {
49982+ return 1;
49983+ }
49984+
49985+ return 0;
49986+}
49987+
49988+int
49989+gr_set_acls(const int type)
49990+{
49991+ struct task_struct *task, *task2;
49992+ struct acl_role_label *role = current->role;
49993+ __u16 acl_role_id = current->acl_role_id;
49994+ const struct cred *cred;
49995+ int ret;
49996+
49997+ rcu_read_lock();
49998+ read_lock(&tasklist_lock);
49999+ read_lock(&grsec_exec_file_lock);
50000+ do_each_thread(task2, task) {
50001+		/* check to see if we're called from the exit handler;
50002+		   if so, only replace ACLs that have inherited the admin
50003+		   ACL */
50004+
50005+ if (type && (task->role != role ||
50006+ task->acl_role_id != acl_role_id))
50007+ continue;
50008+
50009+ task->acl_role_id = 0;
50010+ task->acl_sp_role = 0;
50011+
50012+ if (task->exec_file) {
50013+ cred = __task_cred(task);
50014+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50015+
50016+ ret = gr_apply_subject_to_task(task);
50017+ if (ret) {
50018+ read_unlock(&grsec_exec_file_lock);
50019+ read_unlock(&tasklist_lock);
50020+ rcu_read_unlock();
50021+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50022+ return ret;
50023+ }
50024+ } else {
50025+ // it's a kernel process
50026+ task->role = kernel_role;
50027+ task->acl = kernel_role->root_label;
50028+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50029+ task->acl->mode &= ~GR_PROCFIND;
50030+#endif
50031+ }
50032+ } while_each_thread(task2, task);
50033+ read_unlock(&grsec_exec_file_lock);
50034+ read_unlock(&tasklist_lock);
50035+ rcu_read_unlock();
50036+
50037+ return 0;
50038+}
50039+
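+/* learning support for resource limits: when a learning subject requests more
+   than its recorded limit, raise the subject's soft limit (and hard limit if
+   exceeded) to the request plus a per-resource bump and emit a learn record */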
50040+void
50041+gr_learn_resource(const struct task_struct *task,
50042+ const int res, const unsigned long wanted, const int gt)
50043+{
50044+ struct acl_subject_label *acl;
50045+ const struct cred *cred;
50046+
50047+ if (unlikely((gr_status & GR_READY) &&
50048+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50049+ goto skip_reslog;
50050+
50051+#ifdef CONFIG_GRKERNSEC_RESLOG
50052+ gr_log_resource(task, res, wanted, gt);
50053+#endif
50054+ skip_reslog:
50055+
50056+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50057+ return;
50058+
50059+ acl = task->acl;
50060+
50061+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50062+ !(acl->resmask & (1 << (unsigned short) res))))
50063+ return;
50064+
50065+ if (wanted >= acl->res[res].rlim_cur) {
50066+ unsigned long res_add;
50067+
50068+ res_add = wanted;
50069+ switch (res) {
50070+ case RLIMIT_CPU:
50071+ res_add += GR_RLIM_CPU_BUMP;
50072+ break;
50073+ case RLIMIT_FSIZE:
50074+ res_add += GR_RLIM_FSIZE_BUMP;
50075+ break;
50076+ case RLIMIT_DATA:
50077+ res_add += GR_RLIM_DATA_BUMP;
50078+ break;
50079+ case RLIMIT_STACK:
50080+ res_add += GR_RLIM_STACK_BUMP;
50081+ break;
50082+ case RLIMIT_CORE:
50083+ res_add += GR_RLIM_CORE_BUMP;
50084+ break;
50085+ case RLIMIT_RSS:
50086+ res_add += GR_RLIM_RSS_BUMP;
50087+ break;
50088+ case RLIMIT_NPROC:
50089+ res_add += GR_RLIM_NPROC_BUMP;
50090+ break;
50091+ case RLIMIT_NOFILE:
50092+ res_add += GR_RLIM_NOFILE_BUMP;
50093+ break;
50094+ case RLIMIT_MEMLOCK:
50095+ res_add += GR_RLIM_MEMLOCK_BUMP;
50096+ break;
50097+ case RLIMIT_AS:
50098+ res_add += GR_RLIM_AS_BUMP;
50099+ break;
50100+ case RLIMIT_LOCKS:
50101+ res_add += GR_RLIM_LOCKS_BUMP;
50102+ break;
50103+ case RLIMIT_SIGPENDING:
50104+ res_add += GR_RLIM_SIGPENDING_BUMP;
50105+ break;
50106+ case RLIMIT_MSGQUEUE:
50107+ res_add += GR_RLIM_MSGQUEUE_BUMP;
50108+ break;
50109+ case RLIMIT_NICE:
50110+ res_add += GR_RLIM_NICE_BUMP;
50111+ break;
50112+ case RLIMIT_RTPRIO:
50113+ res_add += GR_RLIM_RTPRIO_BUMP;
50114+ break;
50115+ case RLIMIT_RTTIME:
50116+ res_add += GR_RLIM_RTTIME_BUMP;
50117+ break;
50118+ }
50119+
50120+ acl->res[res].rlim_cur = res_add;
50121+
50122+ if (wanted > acl->res[res].rlim_max)
50123+ acl->res[res].rlim_max = res_add;
50124+
50125+ /* only log the subject filename, since resource logging is supported for
50126+ single-subject learning only */
50127+ rcu_read_lock();
50128+ cred = __task_cred(task);
50129+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50130+ task->role->roletype, cred->uid, cred->gid, acl->filename,
50131+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50132+ "", (unsigned long) res, &task->signal->saved_ip);
50133+ rcu_read_unlock();
50134+ }
50135+
50136+ return;
50137+}
50138+
50139+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50140+void
50141+pax_set_initial_flags(struct linux_binprm *bprm)
50142+{
50143+ struct task_struct *task = current;
50144+ struct acl_subject_label *proc;
50145+ unsigned long flags;
50146+
50147+ if (unlikely(!(gr_status & GR_READY)))
50148+ return;
50149+
50150+ flags = pax_get_flags(task);
50151+
50152+ proc = task->acl;
50153+
50154+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50155+ flags &= ~MF_PAX_PAGEEXEC;
50156+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50157+ flags &= ~MF_PAX_SEGMEXEC;
50158+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50159+ flags &= ~MF_PAX_RANDMMAP;
50160+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50161+ flags &= ~MF_PAX_EMUTRAMP;
50162+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50163+ flags &= ~MF_PAX_MPROTECT;
50164+
50165+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50166+ flags |= MF_PAX_PAGEEXEC;
50167+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50168+ flags |= MF_PAX_SEGMEXEC;
50169+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50170+ flags |= MF_PAX_RANDMMAP;
50171+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50172+ flags |= MF_PAX_EMUTRAMP;
50173+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50174+ flags |= MF_PAX_MPROTECT;
50175+
50176+ pax_set_flags(task, flags);
50177+
50178+ return;
50179+}
50180+#endif
50181+
50182+#ifdef CONFIG_SYSCTL
50183+/* Eric Biederman likes breaking userland ABI and every inode-based security
50184+ system to save 35kb of memory */
50185+
50186+/* we modify the passed in filename, but adjust it back before returning */
50187+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50188+{
50189+ struct name_entry *nmatch;
50190+ char *p, *lastp = NULL;
50191+ struct acl_object_label *obj = NULL, *tmp;
50192+ struct acl_subject_label *tmpsubj;
50193+ char c = '\0';
50194+
50195+ read_lock(&gr_inode_lock);
50196+
50197+ p = name + len - 1;
50198+ do {
50199+ nmatch = lookup_name_entry(name);
50200+ if (lastp != NULL)
50201+ *lastp = c;
50202+
50203+ if (nmatch == NULL)
50204+ goto next_component;
50205+ tmpsubj = current->acl;
50206+ do {
50207+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50208+ if (obj != NULL) {
50209+ tmp = obj->globbed;
50210+ while (tmp) {
50211+ if (!glob_match(tmp->filename, name)) {
50212+ obj = tmp;
50213+ goto found_obj;
50214+ }
50215+ tmp = tmp->next;
50216+ }
50217+ goto found_obj;
50218+ }
50219+ } while ((tmpsubj = tmpsubj->parent_subject));
50220+next_component:
50221+ /* end case */
50222+ if (p == name)
50223+ break;
50224+
50225+ while (*p != '/')
50226+ p--;
50227+ if (p == name)
50228+ lastp = p + 1;
50229+ else {
50230+ lastp = p;
50231+ p--;
50232+ }
50233+ c = *lastp;
50234+ *lastp = '\0';
50235+ } while (1);
50236+found_obj:
50237+ read_unlock(&gr_inode_lock);
50238+ /* obj returned will always be non-null */
50239+ return obj;
50240+}
50241+
50242+/* returns 0 when allowing, non-zero on error
50243+ op of 0 is used for readdir, so we don't log the names of hidden files
50244+*/
50245+__u32
50246+gr_handle_sysctl(const struct ctl_table *table, const int op)
50247+{
50248+ ctl_table *tmp;
50249+ const char *proc_sys = "/proc/sys";
50250+ char *path;
50251+ struct acl_object_label *obj;
50252+ unsigned short len = 0, pos = 0, depth = 0, i;
50253+ __u32 err = 0;
50254+ __u32 mode = 0;
50255+
50256+ if (unlikely(!(gr_status & GR_READY)))
50257+ return 0;
50258+
50259+ /* for now, ignore operations on non-sysctl entries if it's not a
50260+	   readdir */
50261+ if (table->child != NULL && op != 0)
50262+ return 0;
50263+
50264+ mode |= GR_FIND;
50265+ /* it's only a read if it's an entry, read on dirs is for readdir */
50266+ if (op & MAY_READ)
50267+ mode |= GR_READ;
50268+ if (op & MAY_WRITE)
50269+ mode |= GR_WRITE;
50270+
50271+ preempt_disable();
50272+
50273+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50274+
50275+ /* it's only a read/write if it's an actual entry, not a dir
50276+ (which are opened for readdir)
50277+ */
50278+
50279+ /* convert the requested sysctl entry into a pathname */
50280+
50281+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50282+ len += strlen(tmp->procname);
50283+ len++;
50284+ depth++;
50285+ }
50286+
50287+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50288+ /* deny */
50289+ goto out;
50290+ }
50291+
50292+ memset(path, 0, PAGE_SIZE);
50293+
50294+ memcpy(path, proc_sys, strlen(proc_sys));
50295+
50296+ pos += strlen(proc_sys);
50297+
50298+ for (; depth > 0; depth--) {
50299+ path[pos] = '/';
50300+ pos++;
50301+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50302+ if (depth == i) {
50303+ memcpy(path + pos, tmp->procname,
50304+ strlen(tmp->procname));
50305+ pos += strlen(tmp->procname);
50306+ }
50307+ i++;
50308+ }
50309+ }
50310+
50311+ obj = gr_lookup_by_name(path, pos);
50312+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50313+
50314+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50315+ ((err & mode) != mode))) {
50316+ __u32 new_mode = mode;
50317+
50318+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50319+
50320+ err = 0;
50321+ gr_log_learn_sysctl(path, new_mode);
50322+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50323+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50324+ err = -ENOENT;
50325+ } else if (!(err & GR_FIND)) {
50326+ err = -ENOENT;
50327+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50328+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50329+ path, (mode & GR_READ) ? " reading" : "",
50330+ (mode & GR_WRITE) ? " writing" : "");
50331+ err = -EACCES;
50332+ } else if ((err & mode) != mode) {
50333+ err = -EACCES;
50334+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50335+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50336+ path, (mode & GR_READ) ? " reading" : "",
50337+ (mode & GR_WRITE) ? " writing" : "");
50338+ err = 0;
50339+ } else
50340+ err = 0;
50341+
50342+ out:
50343+ preempt_enable();
50344+
50345+ return err;
50346+}
50347+#endif
50348+
50349+int
50350+gr_handle_proc_ptrace(struct task_struct *task)
50351+{
50352+ struct file *filp;
50353+ struct task_struct *tmp = task;
50354+ struct task_struct *curtemp = current;
50355+ __u32 retmode;
50356+
50357+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50358+ if (unlikely(!(gr_status & GR_READY)))
50359+ return 0;
50360+#endif
50361+
50362+ read_lock(&tasklist_lock);
50363+ read_lock(&grsec_exec_file_lock);
50364+ filp = task->exec_file;
50365+
50366+ while (tmp->pid > 0) {
50367+ if (tmp == curtemp)
50368+ break;
50369+ tmp = tmp->real_parent;
50370+ }
50371+
50372+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50373+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50374+ read_unlock(&grsec_exec_file_lock);
50375+ read_unlock(&tasklist_lock);
50376+ return 1;
50377+ }
50378+
50379+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50380+ if (!(gr_status & GR_READY)) {
50381+ read_unlock(&grsec_exec_file_lock);
50382+ read_unlock(&tasklist_lock);
50383+ return 0;
50384+ }
50385+#endif
50386+
50387+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50388+ read_unlock(&grsec_exec_file_lock);
50389+ read_unlock(&tasklist_lock);
50390+
50391+ if (retmode & GR_NOPTRACE)
50392+ return 1;
50393+
50394+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
50395+ && (current->acl != task->acl || (current->acl != current->role->root_label
50396+ && current->pid != task->pid)))
50397+ return 1;
50398+
50399+ return 0;
50400+}
50401+
50402+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
50403+{
50404+ if (unlikely(!(gr_status & GR_READY)))
50405+ return;
50406+
50407+ if (!(current->role->roletype & GR_ROLE_GOD))
50408+ return;
50409+
50410+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
50411+ p->role->rolename, gr_task_roletype_to_char(p),
50412+ p->acl->filename);
50413+}
50414+
50415+int
50416+gr_handle_ptrace(struct task_struct *task, const long request)
50417+{
50418+ struct task_struct *tmp = task;
50419+ struct task_struct *curtemp = current;
50420+ __u32 retmode;
50421+
50422+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50423+ if (unlikely(!(gr_status & GR_READY)))
50424+ return 0;
50425+#endif
50426+
50427+ read_lock(&tasklist_lock);
50428+ while (tmp->pid > 0) {
50429+ if (tmp == curtemp)
50430+ break;
50431+ tmp = tmp->real_parent;
50432+ }
50433+
50434+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50435+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
50436+ read_unlock(&tasklist_lock);
50437+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50438+ return 1;
50439+ }
50440+ read_unlock(&tasklist_lock);
50441+
50442+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50443+ if (!(gr_status & GR_READY))
50444+ return 0;
50445+#endif
50446+
50447+ read_lock(&grsec_exec_file_lock);
50448+ if (unlikely(!task->exec_file)) {
50449+ read_unlock(&grsec_exec_file_lock);
50450+ return 0;
50451+ }
50452+
50453+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
50454+ read_unlock(&grsec_exec_file_lock);
50455+
50456+ if (retmode & GR_NOPTRACE) {
50457+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50458+ return 1;
50459+ }
50460+
50461+ if (retmode & GR_PTRACERD) {
50462+ switch (request) {
50463+ case PTRACE_POKETEXT:
50464+ case PTRACE_POKEDATA:
50465+ case PTRACE_POKEUSR:
50466+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
50467+ case PTRACE_SETREGS:
50468+ case PTRACE_SETFPREGS:
50469+#endif
50470+#ifdef CONFIG_X86
50471+ case PTRACE_SETFPXREGS:
50472+#endif
50473+#ifdef CONFIG_ALTIVEC
50474+ case PTRACE_SETVRREGS:
50475+#endif
50476+ return 1;
50477+ default:
50478+ return 0;
50479+ }
50480+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
50481+ !(current->role->roletype & GR_ROLE_GOD) &&
50482+ (current->acl != task->acl)) {
50483+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50484+ return 1;
50485+ }
50486+
50487+ return 0;
50488+}
50489+
50490+static int is_writable_mmap(const struct file *filp)
50491+{
50492+ struct task_struct *task = current;
50493+ struct acl_object_label *obj, *obj2;
50494+
50495+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
50496+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
50497+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50498+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
50499+ task->role->root_label);
50500+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
50501+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
50502+ return 1;
50503+ }
50504+ }
50505+ return 0;
50506+}
50507+
50508+int
50509+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
50510+{
50511+ __u32 mode;
50512+
50513+ if (unlikely(!file || !(prot & PROT_EXEC)))
50514+ return 1;
50515+
50516+ if (is_writable_mmap(file))
50517+ return 0;
50518+
50519+ mode =
50520+ gr_search_file(file->f_path.dentry,
50521+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50522+ file->f_path.mnt);
50523+
50524+ if (!gr_tpe_allow(file))
50525+ return 0;
50526+
50527+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50528+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50529+ return 0;
50530+ } else if (unlikely(!(mode & GR_EXEC))) {
50531+ return 0;
50532+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50533+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50534+ return 1;
50535+ }
50536+
50537+ return 1;
50538+}
50539+
50540+int
50541+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50542+{
50543+ __u32 mode;
50544+
50545+ if (unlikely(!file || !(prot & PROT_EXEC)))
50546+ return 1;
50547+
50548+ if (is_writable_mmap(file))
50549+ return 0;
50550+
50551+ mode =
50552+ gr_search_file(file->f_path.dentry,
50553+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50554+ file->f_path.mnt);
50555+
50556+ if (!gr_tpe_allow(file))
50557+ return 0;
50558+
50559+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50560+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50561+ return 0;
50562+ } else if (unlikely(!(mode & GR_EXEC))) {
50563+ return 0;
50564+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50565+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50566+ return 1;
50567+ }
50568+
50569+ return 1;
50570+}
50571+
50572+void
50573+gr_acl_handle_psacct(struct task_struct *task, const long code)
50574+{
50575+ unsigned long runtime;
50576+ unsigned long cputime;
50577+ unsigned int wday, cday;
50578+ __u8 whr, chr;
50579+ __u8 wmin, cmin;
50580+ __u8 wsec, csec;
50581+ struct timespec timeval;
50582+
50583+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
50584+ !(task->acl->mode & GR_PROCACCT)))
50585+ return;
50586+
50587+ do_posix_clock_monotonic_gettime(&timeval);
50588+ runtime = timeval.tv_sec - task->start_time.tv_sec;
50589+ wday = runtime / (3600 * 24);
50590+ runtime -= wday * (3600 * 24);
50591+ whr = runtime / 3600;
50592+ runtime -= whr * 3600;
50593+ wmin = runtime / 60;
50594+ runtime -= wmin * 60;
50595+ wsec = runtime;
50596+
50597+ cputime = (task->utime + task->stime) / HZ;
50598+ cday = cputime / (3600 * 24);
50599+ cputime -= cday * (3600 * 24);
50600+ chr = cputime / 3600;
50601+ cputime -= chr * 3600;
50602+ cmin = cputime / 60;
50603+ cputime -= cmin * 60;
50604+ csec = cputime;
50605+
50606+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
50607+
50608+ return;
50609+}
50610+
50611+void gr_set_kernel_label(struct task_struct *task)
50612+{
50613+ if (gr_status & GR_READY) {
50614+ task->role = kernel_role;
50615+ task->acl = kernel_role->root_label;
50616+ }
50617+ return;
50618+}
50619+
50620+#ifdef CONFIG_TASKSTATS
50621+int gr_is_taskstats_denied(int pid)
50622+{
50623+ struct task_struct *task;
50624+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50625+ const struct cred *cred;
50626+#endif
50627+ int ret = 0;
50628+
50629+ /* restrict taskstats viewing to un-chrooted root users
50630+ who have the 'view' subject flag if the RBAC system is enabled
50631+ */
50632+
50633+ rcu_read_lock();
50634+ read_lock(&tasklist_lock);
50635+ task = find_task_by_vpid(pid);
50636+ if (task) {
50637+#ifdef CONFIG_GRKERNSEC_CHROOT
50638+ if (proc_is_chrooted(task))
50639+ ret = -EACCES;
50640+#endif
50641+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50642+ cred = __task_cred(task);
50643+#ifdef CONFIG_GRKERNSEC_PROC_USER
50644+ if (cred->uid != 0)
50645+ ret = -EACCES;
50646+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50647+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
50648+ ret = -EACCES;
50649+#endif
50650+#endif
50651+ if (gr_status & GR_READY) {
50652+ if (!(task->acl->mode & GR_VIEW))
50653+ ret = -EACCES;
50654+ }
50655+ } else
50656+ ret = -ENOENT;
50657+
50658+ read_unlock(&tasklist_lock);
50659+ rcu_read_unlock();
50660+
50661+ return ret;
50662+}
50663+#endif
50664+
50665+/* AUXV entries are filled via a descendant of search_binary_handler
50666+ after we've already applied the subject for the target
50667+*/
50668+int gr_acl_enable_at_secure(void)
50669+{
50670+ if (unlikely(!(gr_status & GR_READY)))
50671+ return 0;
50672+
50673+ if (current->acl->mode & GR_ATSECURE)
50674+ return 1;
50675+
50676+ return 0;
50677+}
50678+
50679+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
50680+{
50681+ struct task_struct *task = current;
50682+ struct dentry *dentry = file->f_path.dentry;
50683+ struct vfsmount *mnt = file->f_path.mnt;
50684+ struct acl_object_label *obj, *tmp;
50685+ struct acl_subject_label *subj;
50686+ unsigned int bufsize;
50687+ int is_not_root;
50688+ char *path;
50689+ dev_t dev = __get_dev(dentry);
50690+
50691+ if (unlikely(!(gr_status & GR_READY)))
50692+ return 1;
50693+
50694+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50695+ return 1;
50696+
50697+ /* ignore Eric Biederman */
50698+ if (IS_PRIVATE(dentry->d_inode))
50699+ return 1;
50700+
50701+ subj = task->acl;
50702+ do {
50703+ obj = lookup_acl_obj_label(ino, dev, subj);
50704+ if (obj != NULL)
50705+ return (obj->mode & GR_FIND) ? 1 : 0;
50706+ } while ((subj = subj->parent_subject));
50707+
50708+ /* this is purely an optimization since we're looking for an object
50709+ for the directory we're doing a readdir on
50710+ if it's possible for any globbed object to match the entry we're
50711+ filling into the directory, then the object we find here will be
50712+ an anchor point with attached globbed objects
50713+ */
50714+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
50715+ if (obj->globbed == NULL)
50716+ return (obj->mode & GR_FIND) ? 1 : 0;
50717+
50718+ is_not_root = ((obj->filename[0] == '/') &&
50719+ (obj->filename[1] == '\0')) ? 0 : 1;
50720+ bufsize = PAGE_SIZE - namelen - is_not_root;
50721+
50722+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
50723+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
50724+ return 1;
50725+
50726+ preempt_disable();
50727+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50728+ bufsize);
50729+
50730+ bufsize = strlen(path);
50731+
50732+ /* if base is "/", don't append an additional slash */
50733+ if (is_not_root)
50734+ *(path + bufsize) = '/';
50735+ memcpy(path + bufsize + is_not_root, name, namelen);
50736+ *(path + bufsize + namelen + is_not_root) = '\0';
50737+
50738+ tmp = obj->globbed;
50739+ while (tmp) {
50740+ if (!glob_match(tmp->filename, path)) {
50741+ preempt_enable();
50742+ return (tmp->mode & GR_FIND) ? 1 : 0;
50743+ }
50744+ tmp = tmp->next;
50745+ }
50746+ preempt_enable();
50747+ return (obj->mode & GR_FIND) ? 1 : 0;
50748+}
50749+
50750+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
50751+EXPORT_SYMBOL(gr_acl_is_enabled);
50752+#endif
50753+EXPORT_SYMBOL(gr_learn_resource);
50754+EXPORT_SYMBOL(gr_set_kernel_label);
50755+#ifdef CONFIG_SECURITY
50756+EXPORT_SYMBOL(gr_check_user_change);
50757+EXPORT_SYMBOL(gr_check_group_change);
50758+#endif
50759+
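The rlimit learning path above (gr_learn_resource) raises a subject's learned soft limit to the requested value plus a per-resource headroom, so repeated small overruns do not each produce a new learn entry. A minimal userspace sketch of that bump logic follows; the headroom values and resource names are made-up stand-ins for the GR_RLIM_*_BUMP constants, not the kernel's values.

#include <stdio.h>

enum res_id { RES_CPU, RES_FSIZE, RES_NOFILE, RES_MAX };

struct res_limit {
	unsigned long cur;	/* soft limit */
	unsigned long max;	/* hard limit */
};

/* Made-up headroom per resource; stand-ins for GR_RLIM_*_BUMP. */
static const unsigned long bump[RES_MAX] = {
	[RES_CPU]    = 60,
	[RES_FSIZE]  = 50000,
	[RES_NOFILE] = 5,
};

/* Raise the learned limit only when the request reaches the current one. */
static void learn_resource(struct res_limit *lim, enum res_id res,
			   unsigned long wanted)
{
	unsigned long new_cur;

	if (wanted < lim[res].cur)
		return;

	new_cur = wanted + bump[res];
	lim[res].cur = new_cur;
	if (wanted > lim[res].max)
		lim[res].max = new_cur;
}

int main(void)
{
	struct res_limit lim[RES_MAX] = {
		[RES_NOFILE] = { .cur = 64, .max = 64 },
	};

	learn_resource(lim, RES_NOFILE, 70);
	printf("learned NOFILE: cur=%lu max=%lu\n",
	       lim[RES_NOFILE].cur, lim[RES_NOFILE].max);
	return 0;
}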
50760diff -urNp linux-2.6.32.45/grsecurity/gracl_cap.c linux-2.6.32.45/grsecurity/gracl_cap.c
50761--- linux-2.6.32.45/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
50762+++ linux-2.6.32.45/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
50763@@ -0,0 +1,138 @@
50764+#include <linux/kernel.h>
50765+#include <linux/module.h>
50766+#include <linux/sched.h>
50767+#include <linux/gracl.h>
50768+#include <linux/grsecurity.h>
50769+#include <linux/grinternal.h>
50770+
50771+static const char *captab_log[] = {
50772+ "CAP_CHOWN",
50773+ "CAP_DAC_OVERRIDE",
50774+ "CAP_DAC_READ_SEARCH",
50775+ "CAP_FOWNER",
50776+ "CAP_FSETID",
50777+ "CAP_KILL",
50778+ "CAP_SETGID",
50779+ "CAP_SETUID",
50780+ "CAP_SETPCAP",
50781+ "CAP_LINUX_IMMUTABLE",
50782+ "CAP_NET_BIND_SERVICE",
50783+ "CAP_NET_BROADCAST",
50784+ "CAP_NET_ADMIN",
50785+ "CAP_NET_RAW",
50786+ "CAP_IPC_LOCK",
50787+ "CAP_IPC_OWNER",
50788+ "CAP_SYS_MODULE",
50789+ "CAP_SYS_RAWIO",
50790+ "CAP_SYS_CHROOT",
50791+ "CAP_SYS_PTRACE",
50792+ "CAP_SYS_PACCT",
50793+ "CAP_SYS_ADMIN",
50794+ "CAP_SYS_BOOT",
50795+ "CAP_SYS_NICE",
50796+ "CAP_SYS_RESOURCE",
50797+ "CAP_SYS_TIME",
50798+ "CAP_SYS_TTY_CONFIG",
50799+ "CAP_MKNOD",
50800+ "CAP_LEASE",
50801+ "CAP_AUDIT_WRITE",
50802+ "CAP_AUDIT_CONTROL",
50803+ "CAP_SETFCAP",
50804+ "CAP_MAC_OVERRIDE",
50805+ "CAP_MAC_ADMIN"
50806+};
50807+
50808+EXPORT_SYMBOL(gr_is_capable);
50809+EXPORT_SYMBOL(gr_is_capable_nolog);
50810+
50811+int
50812+gr_is_capable(const int cap)
50813+{
50814+ struct task_struct *task = current;
50815+ const struct cred *cred = current_cred();
50816+ struct acl_subject_label *curracl;
50817+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
50818+ kernel_cap_t cap_audit = __cap_empty_set;
50819+
50820+ if (!gr_acl_is_enabled())
50821+ return 1;
50822+
50823+ curracl = task->acl;
50824+
50825+ cap_drop = curracl->cap_lower;
50826+ cap_mask = curracl->cap_mask;
50827+ cap_audit = curracl->cap_invert_audit;
50828+
50829+ while ((curracl = curracl->parent_subject)) {
50830+ /* if the cap isn't specified in the current computed mask but is specified in the
50831+ current level subject, and is lowered in the current level subject, then add
50832+	     it to the set of dropped capabilities;
50833+ otherwise, add the current level subject's mask to the current computed mask
50834+ */
50835+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
50836+ cap_raise(cap_mask, cap);
50837+ if (cap_raised(curracl->cap_lower, cap))
50838+ cap_raise(cap_drop, cap);
50839+ if (cap_raised(curracl->cap_invert_audit, cap))
50840+ cap_raise(cap_audit, cap);
50841+ }
50842+ }
50843+
50844+ if (!cap_raised(cap_drop, cap)) {
50845+ if (cap_raised(cap_audit, cap))
50846+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
50847+ return 1;
50848+ }
50849+
50850+ curracl = task->acl;
50851+
50852+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
50853+ && cap_raised(cred->cap_effective, cap)) {
50854+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50855+ task->role->roletype, cred->uid,
50856+ cred->gid, task->exec_file ?
50857+ gr_to_filename(task->exec_file->f_path.dentry,
50858+ task->exec_file->f_path.mnt) : curracl->filename,
50859+ curracl->filename, 0UL,
50860+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
50861+ return 1;
50862+ }
50863+
50864+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
50865+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
50866+ return 0;
50867+}
50868+
50869+int
50870+gr_is_capable_nolog(const int cap)
50871+{
50872+ struct acl_subject_label *curracl;
50873+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
50874+
50875+ if (!gr_acl_is_enabled())
50876+ return 1;
50877+
50878+ curracl = current->acl;
50879+
50880+ cap_drop = curracl->cap_lower;
50881+ cap_mask = curracl->cap_mask;
50882+
50883+ while ((curracl = curracl->parent_subject)) {
50884+ /* if the cap isn't specified in the current computed mask but is specified in the
50885+ current level subject, and is lowered in the current level subject, then add
50886+	     it to the set of dropped capabilities;
50887+ otherwise, add the current level subject's mask to the current computed mask
50888+ */
50889+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
50890+ cap_raise(cap_mask, cap);
50891+ if (cap_raised(curracl->cap_lower, cap))
50892+ cap_raise(cap_drop, cap);
50893+ }
50894+ }
50895+
50896+ if (!cap_raised(cap_drop, cap))
50897+ return 1;
50898+
50899+ return 0;
50900+}
50901+
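gr_is_capable() above resolves a capability by walking the subject chain: the nearest subject that has an opinion on a capability decides whether it is dropped. A minimal sketch of that walk, using plain 64-bit masks in place of kernel_cap_t; the struct and field names here are illustrative, not the acl_subject_label layout.

#include <stdint.h>
#include <stdio.h>

struct subject {
	uint64_t mask;		/* caps this subject has an opinion on */
	uint64_t lower;		/* caps this subject drops */
	struct subject *parent;
};

/* Returns 1 if the capability is allowed after walking the subject chain. */
static int cap_allowed(const struct subject *s, int cap)
{
	uint64_t bit = 1ULL << cap;
	uint64_t seen = s->mask;	/* caps already decided by a nearer subject */
	uint64_t drop = s->lower;

	for (s = s->parent; s != NULL; s = s->parent) {
		if (!(seen & bit) && (s->mask & bit)) {
			seen |= bit;
			if (s->lower & bit)
				drop |= bit;
		}
	}
	return !(drop & bit);
}

int main(void)
{
	struct subject root  = { .mask = 1ULL << 21, .lower = 1ULL << 21, .parent = NULL };
	struct subject child = { .mask = 0, .lower = 0, .parent = &root };

	printf("cap 21: %d\n", cap_allowed(&child, 21));	/* inherited drop -> 0 */
	printf("cap  7: %d\n", cap_allowed(&child, 7));		/* never dropped  -> 1 */
	return 0;
}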
50902diff -urNp linux-2.6.32.45/grsecurity/gracl_fs.c linux-2.6.32.45/grsecurity/gracl_fs.c
50903--- linux-2.6.32.45/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
50904+++ linux-2.6.32.45/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
50905@@ -0,0 +1,431 @@
50906+#include <linux/kernel.h>
50907+#include <linux/sched.h>
50908+#include <linux/types.h>
50909+#include <linux/fs.h>
50910+#include <linux/file.h>
50911+#include <linux/stat.h>
50912+#include <linux/grsecurity.h>
50913+#include <linux/grinternal.h>
50914+#include <linux/gracl.h>
50915+
50916+__u32
50917+gr_acl_handle_hidden_file(const struct dentry * dentry,
50918+ const struct vfsmount * mnt)
50919+{
50920+ __u32 mode;
50921+
50922+ if (unlikely(!dentry->d_inode))
50923+ return GR_FIND;
50924+
50925+ mode =
50926+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
50927+
50928+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
50929+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
50930+ return mode;
50931+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
50932+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
50933+ return 0;
50934+ } else if (unlikely(!(mode & GR_FIND)))
50935+ return 0;
50936+
50937+ return GR_FIND;
50938+}
50939+
50940+__u32
50941+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50942+ const int fmode)
50943+{
50944+ __u32 reqmode = GR_FIND;
50945+ __u32 mode;
50946+
50947+ if (unlikely(!dentry->d_inode))
50948+ return reqmode;
50949+
50950+ if (unlikely(fmode & O_APPEND))
50951+ reqmode |= GR_APPEND;
50952+ else if (unlikely(fmode & FMODE_WRITE))
50953+ reqmode |= GR_WRITE;
50954+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
50955+ reqmode |= GR_READ;
50956+ if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
50957+ reqmode &= ~GR_READ;
50958+ mode =
50959+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
50960+ mnt);
50961+
50962+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
50963+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
50964+ reqmode & GR_READ ? " reading" : "",
50965+ reqmode & GR_WRITE ? " writing" : reqmode &
50966+ GR_APPEND ? " appending" : "");
50967+ return reqmode;
50968+ } else
50969+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
50970+ {
50971+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
50972+ reqmode & GR_READ ? " reading" : "",
50973+ reqmode & GR_WRITE ? " writing" : reqmode &
50974+ GR_APPEND ? " appending" : "");
50975+ return 0;
50976+ } else if (unlikely((mode & reqmode) != reqmode))
50977+ return 0;
50978+
50979+ return reqmode;
50980+}
50981+
50982+__u32
50983+gr_acl_handle_creat(const struct dentry * dentry,
50984+ const struct dentry * p_dentry,
50985+ const struct vfsmount * p_mnt, const int fmode,
50986+ const int imode)
50987+{
50988+ __u32 reqmode = GR_WRITE | GR_CREATE;
50989+ __u32 mode;
50990+
50991+ if (unlikely(fmode & O_APPEND))
50992+ reqmode |= GR_APPEND;
50993+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
50994+ reqmode |= GR_READ;
50995+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
50996+ reqmode |= GR_SETID;
50997+
50998+ mode =
50999+ gr_check_create(dentry, p_dentry, p_mnt,
51000+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51001+
51002+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51003+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51004+ reqmode & GR_READ ? " reading" : "",
51005+ reqmode & GR_WRITE ? " writing" : reqmode &
51006+ GR_APPEND ? " appending" : "");
51007+ return reqmode;
51008+ } else
51009+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51010+ {
51011+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51012+ reqmode & GR_READ ? " reading" : "",
51013+ reqmode & GR_WRITE ? " writing" : reqmode &
51014+ GR_APPEND ? " appending" : "");
51015+ return 0;
51016+ } else if (unlikely((mode & reqmode) != reqmode))
51017+ return 0;
51018+
51019+ return reqmode;
51020+}
51021+
51022+__u32
51023+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51024+ const int fmode)
51025+{
51026+ __u32 mode, reqmode = GR_FIND;
51027+
51028+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51029+ reqmode |= GR_EXEC;
51030+ if (fmode & S_IWOTH)
51031+ reqmode |= GR_WRITE;
51032+ if (fmode & S_IROTH)
51033+ reqmode |= GR_READ;
51034+
51035+ mode =
51036+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51037+ mnt);
51038+
51039+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51040+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51041+ reqmode & GR_READ ? " reading" : "",
51042+ reqmode & GR_WRITE ? " writing" : "",
51043+ reqmode & GR_EXEC ? " executing" : "");
51044+ return reqmode;
51045+ } else
51046+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51047+ {
51048+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51049+ reqmode & GR_READ ? " reading" : "",
51050+ reqmode & GR_WRITE ? " writing" : "",
51051+ reqmode & GR_EXEC ? " executing" : "");
51052+ return 0;
51053+ } else if (unlikely((mode & reqmode) != reqmode))
51054+ return 0;
51055+
51056+ return reqmode;
51057+}
51058+
51059+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51060+{
51061+ __u32 mode;
51062+
51063+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51064+
51065+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51066+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51067+ return mode;
51068+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51069+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51070+ return 0;
51071+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51072+ return 0;
51073+
51074+ return (reqmode);
51075+}
51076+
51077+__u32
51078+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51079+{
51080+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51081+}
51082+
51083+__u32
51084+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51085+{
51086+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51087+}
51088+
51089+__u32
51090+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51091+{
51092+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51093+}
51094+
51095+__u32
51096+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51097+{
51098+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51099+}
51100+
51101+__u32
51102+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51103+ mode_t mode)
51104+{
51105+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51106+ return 1;
51107+
51108+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51109+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51110+ GR_FCHMOD_ACL_MSG);
51111+ } else {
51112+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51113+ }
51114+}
51115+
51116+__u32
51117+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51118+ mode_t mode)
51119+{
51120+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51121+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51122+ GR_CHMOD_ACL_MSG);
51123+ } else {
51124+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51125+ }
51126+}
51127+
51128+__u32
51129+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51130+{
51131+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51132+}
51133+
51134+__u32
51135+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51136+{
51137+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51138+}
51139+
51140+__u32
51141+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51142+{
51143+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51144+}
51145+
51146+__u32
51147+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51148+{
51149+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51150+ GR_UNIXCONNECT_ACL_MSG);
51151+}
51152+
51153+/* hardlinks require at minimum create permission,
51154+ any additional privilege required is based on the
51155+ privilege of the file being linked to
51156+*/
51157+__u32
51158+gr_acl_handle_link(const struct dentry * new_dentry,
51159+ const struct dentry * parent_dentry,
51160+ const struct vfsmount * parent_mnt,
51161+ const struct dentry * old_dentry,
51162+ const struct vfsmount * old_mnt, const char *to)
51163+{
51164+ __u32 mode;
51165+ __u32 needmode = GR_CREATE | GR_LINK;
51166+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51167+
51168+ mode =
51169+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51170+ old_mnt);
51171+
51172+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51173+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51174+ return mode;
51175+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51176+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51177+ return 0;
51178+ } else if (unlikely((mode & needmode) != needmode))
51179+ return 0;
51180+
51181+ return 1;
51182+}
51183+
51184+__u32
51185+gr_acl_handle_symlink(const struct dentry * new_dentry,
51186+ const struct dentry * parent_dentry,
51187+ const struct vfsmount * parent_mnt, const char *from)
51188+{
51189+ __u32 needmode = GR_WRITE | GR_CREATE;
51190+ __u32 mode;
51191+
51192+ mode =
51193+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
51194+ GR_CREATE | GR_AUDIT_CREATE |
51195+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51196+
51197+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51198+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51199+ return mode;
51200+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51201+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51202+ return 0;
51203+ } else if (unlikely((mode & needmode) != needmode))
51204+ return 0;
51205+
51206+ return (GR_WRITE | GR_CREATE);
51207+}
51208+
51209+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51210+{
51211+ __u32 mode;
51212+
51213+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51214+
51215+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51216+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51217+ return mode;
51218+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51219+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51220+ return 0;
51221+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51222+ return 0;
51223+
51224+ return (reqmode);
51225+}
51226+
51227+__u32
51228+gr_acl_handle_mknod(const struct dentry * new_dentry,
51229+ const struct dentry * parent_dentry,
51230+ const struct vfsmount * parent_mnt,
51231+ const int mode)
51232+{
51233+ __u32 reqmode = GR_WRITE | GR_CREATE;
51234+ if (unlikely(mode & (S_ISUID | S_ISGID)))
51235+ reqmode |= GR_SETID;
51236+
51237+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51238+ reqmode, GR_MKNOD_ACL_MSG);
51239+}
51240+
51241+__u32
51242+gr_acl_handle_mkdir(const struct dentry *new_dentry,
51243+ const struct dentry *parent_dentry,
51244+ const struct vfsmount *parent_mnt)
51245+{
51246+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51247+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51248+}
51249+
51250+#define RENAME_CHECK_SUCCESS(old, new) \
51251+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51252+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51253+
51254+int
51255+gr_acl_handle_rename(struct dentry *new_dentry,
51256+ struct dentry *parent_dentry,
51257+ const struct vfsmount *parent_mnt,
51258+ struct dentry *old_dentry,
51259+ struct inode *old_parent_inode,
51260+ struct vfsmount *old_mnt, const char *newname)
51261+{
51262+ __u32 comp1, comp2;
51263+ int error = 0;
51264+
51265+ if (unlikely(!gr_acl_is_enabled()))
51266+ return 0;
51267+
51268+ if (!new_dentry->d_inode) {
51269+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51270+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51271+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51272+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51273+ GR_DELETE | GR_AUDIT_DELETE |
51274+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51275+ GR_SUPPRESS, old_mnt);
51276+ } else {
51277+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51278+ GR_CREATE | GR_DELETE |
51279+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51280+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51281+ GR_SUPPRESS, parent_mnt);
51282+ comp2 =
51283+ gr_search_file(old_dentry,
51284+ GR_READ | GR_WRITE | GR_AUDIT_READ |
51285+ GR_DELETE | GR_AUDIT_DELETE |
51286+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51287+ }
51288+
51289+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51290+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51291+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51292+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51293+ && !(comp2 & GR_SUPPRESS)) {
51294+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51295+ error = -EACCES;
51296+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51297+ error = -EACCES;
51298+
51299+ return error;
51300+}
51301+
51302+void
51303+gr_acl_handle_exit(void)
51304+{
51305+ u16 id;
51306+ char *rolename;
51307+ struct file *exec_file;
51308+
51309+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51310+ !(current->role->roletype & GR_ROLE_PERSIST))) {
51311+ id = current->acl_role_id;
51312+ rolename = current->role->rolename;
51313+ gr_set_acls(1);
51314+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51315+ }
51316+
51317+ write_lock(&grsec_exec_file_lock);
51318+ exec_file = current->exec_file;
51319+ current->exec_file = NULL;
51320+ write_unlock(&grsec_exec_file_lock);
51321+
51322+ if (exec_file)
51323+ fput(exec_file);
51324+}
51325+
51326+int
51327+gr_acl_handle_procpidmem(const struct task_struct *task)
51328+{
51329+ if (unlikely(!gr_acl_is_enabled()))
51330+ return 0;
51331+
51332+ if (task != current && task->acl->mode & GR_PROTPROCFD)
51333+ return -EACCES;
51334+
51335+ return 0;
51336+}
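The handlers in gracl_fs.c all reduce to the same comparison of required bits against granted bits, with separate audit and suppress flags controlling what gets logged. A small self-contained sketch of that decision pattern follows; the flag values and printf() calls are placeholders for the GR_* constants and gr_log_fs_rbac_generic(), not the real interfaces.

#include <stdio.h>

#define F_READ		0x1
#define F_WRITE		0x2
#define F_AUDIT		0x4	/* stand-in for the GR_AUDIT_* bits */
#define F_SUPPRESS	0x8	/* stand-in for GR_SUPPRESS */

/* Returns the granted reqmode on success, 0 on denial. */
static unsigned int fs_handler(unsigned int granted, unsigned int reqmode)
{
	if ((granted & reqmode) == reqmode && (granted & F_AUDIT)) {
		printf("audit: allowed\n");	/* successful, audited access */
		return reqmode;
	}
	if ((granted & reqmode) != reqmode && !(granted & F_SUPPRESS)) {
		printf("log: denied\n");	/* denial, not suppressed */
		return 0;
	}
	if ((granted & reqmode) != reqmode)
		return 0;			/* suppressed denial: stay quiet */

	return reqmode;				/* plain allow, no logging */
}

int main(void)
{
	fs_handler(F_READ | F_WRITE | F_AUDIT, F_WRITE);	/* audited allow */
	fs_handler(F_READ, F_WRITE);				/* logged denial */
	fs_handler(F_READ | F_SUPPRESS, F_WRITE);		/* silent denial */
	return 0;
}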
51337diff -urNp linux-2.6.32.45/grsecurity/gracl_ip.c linux-2.6.32.45/grsecurity/gracl_ip.c
51338--- linux-2.6.32.45/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51339+++ linux-2.6.32.45/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
51340@@ -0,0 +1,382 @@
51341+#include <linux/kernel.h>
51342+#include <asm/uaccess.h>
51343+#include <asm/errno.h>
51344+#include <net/sock.h>
51345+#include <linux/file.h>
51346+#include <linux/fs.h>
51347+#include <linux/net.h>
51348+#include <linux/in.h>
51349+#include <linux/skbuff.h>
51350+#include <linux/ip.h>
51351+#include <linux/udp.h>
51352+#include <linux/smp_lock.h>
51353+#include <linux/types.h>
51354+#include <linux/sched.h>
51355+#include <linux/netdevice.h>
51356+#include <linux/inetdevice.h>
51357+#include <linux/gracl.h>
51358+#include <linux/grsecurity.h>
51359+#include <linux/grinternal.h>
51360+
51361+#define GR_BIND 0x01
51362+#define GR_CONNECT 0x02
51363+#define GR_INVERT 0x04
51364+#define GR_BINDOVERRIDE 0x08
51365+#define GR_CONNECTOVERRIDE 0x10
51366+#define GR_SOCK_FAMILY 0x20
51367+
51368+static const char * gr_protocols[IPPROTO_MAX] = {
51369+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51370+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51371+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51372+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51373+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51374+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51375+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51376+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51377+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51378+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51379+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51380+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51381+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51382+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51383+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51384+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51385+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
51386+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51387+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51388+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51389+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51390+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51391+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51392+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51393+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51394+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51395+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51396+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51397+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51398+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51399+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51400+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51401+ };
51402+
51403+static const char * gr_socktypes[SOCK_MAX] = {
51404+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51405+ "unknown:7", "unknown:8", "unknown:9", "packet"
51406+ };
51407+
51408+static const char * gr_sockfamilies[AF_MAX+1] = {
51409+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51410+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51411+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51412+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
51413+ };
51414+
51415+const char *
51416+gr_proto_to_name(unsigned char proto)
51417+{
51418+ return gr_protocols[proto];
51419+}
51420+
51421+const char *
51422+gr_socktype_to_name(unsigned char type)
51423+{
51424+ return gr_socktypes[type];
51425+}
51426+
51427+const char *
51428+gr_sockfamily_to_name(unsigned char family)
51429+{
51430+ return gr_sockfamilies[family];
51431+}
51432+
51433+int
51434+gr_search_socket(const int domain, const int type, const int protocol)
51435+{
51436+ struct acl_subject_label *curr;
51437+ const struct cred *cred = current_cred();
51438+
51439+ if (unlikely(!gr_acl_is_enabled()))
51440+ goto exit;
51441+
51442+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
51443+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
51444+ goto exit; // let the kernel handle it
51445+
51446+ curr = current->acl;
51447+
51448+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
51449+		/* the family is allowed; if this is PF_INET, allow it only if
51450+ the extra sock type/protocol checks pass */
51451+ if (domain == PF_INET)
51452+ goto inet_check;
51453+ goto exit;
51454+ } else {
51455+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51456+ __u32 fakeip = 0;
51457+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51458+ current->role->roletype, cred->uid,
51459+ cred->gid, current->exec_file ?
51460+ gr_to_filename(current->exec_file->f_path.dentry,
51461+ current->exec_file->f_path.mnt) :
51462+ curr->filename, curr->filename,
51463+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
51464+ &current->signal->saved_ip);
51465+ goto exit;
51466+ }
51467+ goto exit_fail;
51468+ }
51469+
51470+inet_check:
51471+ /* the rest of this checking is for IPv4 only */
51472+ if (!curr->ips)
51473+ goto exit;
51474+
51475+ if ((curr->ip_type & (1 << type)) &&
51476+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
51477+ goto exit;
51478+
51479+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51480+		/* we don't place acls on raw sockets, and sometimes
51481+ dgram/ip sockets are opened for ioctl and not
51482+ bind/connect, so we'll fake a bind learn log */
51483+ if (type == SOCK_RAW || type == SOCK_PACKET) {
51484+ __u32 fakeip = 0;
51485+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51486+ current->role->roletype, cred->uid,
51487+ cred->gid, current->exec_file ?
51488+ gr_to_filename(current->exec_file->f_path.dentry,
51489+ current->exec_file->f_path.mnt) :
51490+ curr->filename, curr->filename,
51491+ &fakeip, 0, type,
51492+ protocol, GR_CONNECT, &current->signal->saved_ip);
51493+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
51494+ __u32 fakeip = 0;
51495+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51496+ current->role->roletype, cred->uid,
51497+ cred->gid, current->exec_file ?
51498+ gr_to_filename(current->exec_file->f_path.dentry,
51499+ current->exec_file->f_path.mnt) :
51500+ curr->filename, curr->filename,
51501+ &fakeip, 0, type,
51502+ protocol, GR_BIND, &current->signal->saved_ip);
51503+ }
51504+ /* we'll log when they use connect or bind */
51505+ goto exit;
51506+ }
51507+
51508+exit_fail:
51509+ if (domain == PF_INET)
51510+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
51511+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
51512+ else
51513+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
51514+ gr_socktype_to_name(type), protocol);
51515+
51516+ return 0;
51517+exit:
51518+ return 1;
51519+}
51520+
51521+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
51522+{
51523+ if ((ip->mode & mode) &&
51524+ (ip_port >= ip->low) &&
51525+ (ip_port <= ip->high) &&
51526+ ((ntohl(ip_addr) & our_netmask) ==
51527+ (ntohl(our_addr) & our_netmask))
51528+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
51529+ && (ip->type & (1 << type))) {
51530+ if (ip->mode & GR_INVERT)
51531+ return 2; // specifically denied
51532+ else
51533+ return 1; // allowed
51534+ }
51535+
51536+ return 0; // not specifically allowed, may continue parsing
51537+}
51538+
51539+static int
51540+gr_search_connectbind(const int full_mode, struct sock *sk,
51541+ struct sockaddr_in *addr, const int type)
51542+{
51543+ char iface[IFNAMSIZ] = {0};
51544+ struct acl_subject_label *curr;
51545+ struct acl_ip_label *ip;
51546+ struct inet_sock *isk;
51547+ struct net_device *dev;
51548+ struct in_device *idev;
51549+ unsigned long i;
51550+ int ret;
51551+ int mode = full_mode & (GR_BIND | GR_CONNECT);
51552+ __u32 ip_addr = 0;
51553+ __u32 our_addr;
51554+ __u32 our_netmask;
51555+ char *p;
51556+ __u16 ip_port = 0;
51557+ const struct cred *cred = current_cred();
51558+
51559+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
51560+ return 0;
51561+
51562+ curr = current->acl;
51563+ isk = inet_sk(sk);
51564+
51565+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
51566+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
51567+ addr->sin_addr.s_addr = curr->inaddr_any_override;
51568+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
51569+ struct sockaddr_in saddr;
51570+ int err;
51571+
51572+ saddr.sin_family = AF_INET;
51573+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
51574+ saddr.sin_port = isk->sport;
51575+
51576+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51577+ if (err)
51578+ return err;
51579+
51580+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51581+ if (err)
51582+ return err;
51583+ }
51584+
51585+ if (!curr->ips)
51586+ return 0;
51587+
51588+ ip_addr = addr->sin_addr.s_addr;
51589+ ip_port = ntohs(addr->sin_port);
51590+
51591+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51592+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51593+ current->role->roletype, cred->uid,
51594+ cred->gid, current->exec_file ?
51595+ gr_to_filename(current->exec_file->f_path.dentry,
51596+ current->exec_file->f_path.mnt) :
51597+ curr->filename, curr->filename,
51598+ &ip_addr, ip_port, type,
51599+ sk->sk_protocol, mode, &current->signal->saved_ip);
51600+ return 0;
51601+ }
51602+
51603+ for (i = 0; i < curr->ip_num; i++) {
51604+ ip = *(curr->ips + i);
51605+ if (ip->iface != NULL) {
51606+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
51607+ p = strchr(iface, ':');
51608+ if (p != NULL)
51609+ *p = '\0';
51610+ dev = dev_get_by_name(sock_net(sk), iface);
51611+ if (dev == NULL)
51612+ continue;
51613+ idev = in_dev_get(dev);
51614+ if (idev == NULL) {
51615+ dev_put(dev);
51616+ continue;
51617+ }
51618+ rcu_read_lock();
51619+ for_ifa(idev) {
51620+ if (!strcmp(ip->iface, ifa->ifa_label)) {
51621+ our_addr = ifa->ifa_address;
51622+ our_netmask = 0xffffffff;
51623+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51624+ if (ret == 1) {
51625+ rcu_read_unlock();
51626+ in_dev_put(idev);
51627+ dev_put(dev);
51628+ return 0;
51629+ } else if (ret == 2) {
51630+ rcu_read_unlock();
51631+ in_dev_put(idev);
51632+ dev_put(dev);
51633+ goto denied;
51634+ }
51635+ }
51636+ } endfor_ifa(idev);
51637+ rcu_read_unlock();
51638+ in_dev_put(idev);
51639+ dev_put(dev);
51640+ } else {
51641+ our_addr = ip->addr;
51642+ our_netmask = ip->netmask;
51643+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51644+ if (ret == 1)
51645+ return 0;
51646+ else if (ret == 2)
51647+ goto denied;
51648+ }
51649+ }
51650+
51651+denied:
51652+ if (mode == GR_BIND)
51653+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51654+ else if (mode == GR_CONNECT)
51655+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51656+
51657+ return -EACCES;
51658+}
51659+
51660+int
51661+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
51662+{
51663+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
51664+}
51665+
51666+int
51667+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
51668+{
51669+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
51670+}
51671+
51672+int gr_search_listen(struct socket *sock)
51673+{
51674+ struct sock *sk = sock->sk;
51675+ struct sockaddr_in addr;
51676+
51677+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51678+ addr.sin_port = inet_sk(sk)->sport;
51679+
51680+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51681+}
51682+
51683+int gr_search_accept(struct socket *sock)
51684+{
51685+ struct sock *sk = sock->sk;
51686+ struct sockaddr_in addr;
51687+
51688+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51689+ addr.sin_port = inet_sk(sk)->sport;
51690+
51691+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51692+}
51693+
51694+int
51695+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
51696+{
51697+ if (addr)
51698+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
51699+ else {
51700+ struct sockaddr_in sin;
51701+ const struct inet_sock *inet = inet_sk(sk);
51702+
51703+ sin.sin_addr.s_addr = inet->daddr;
51704+ sin.sin_port = inet->dport;
51705+
51706+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51707+ }
51708+}
51709+
51710+int
51711+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
51712+{
51713+ struct sockaddr_in sin;
51714+
51715+ if (unlikely(skb->len < sizeof (struct udphdr)))
51716+ return 0; // skip this packet
51717+
51718+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
51719+ sin.sin_port = udp_hdr(skb)->source;
51720+
51721+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51722+}
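check_ip_policy() above matches a single socket rule: the operation mode, port range, masked address, protocol bitmap and socket-type bitmap must all agree, and a matching rule carrying the invert flag is an explicit deny. A standalone sketch of that test, with simplified field names standing in for struct acl_ip_label and a protocol bitmap limited to 0..31 for brevity.

#include <stdint.h>
#include <stdio.h>

#define MODE_BIND	0x01
#define MODE_CONNECT	0x02
#define MODE_INVERT	0x04

struct ip_rule {
	uint32_t addr;		/* rule address (host byte order, for simplicity) */
	uint32_t netmask;
	uint16_t low, high;	/* allowed port range */
	uint32_t proto_mask;	/* one bit per IP protocol (sketch: 0..31 only) */
	uint32_t type_mask;	/* one bit per socket type */
	unsigned int mode;
};

/* 0 = no opinion, 1 = allow, 2 = explicit deny (inverted match) */
static int check_rule(const struct ip_rule *r, uint32_t addr, uint16_t port,
		      unsigned int proto, unsigned int type, unsigned int mode)
{
	if ((r->mode & mode) &&
	    port >= r->low && port <= r->high &&
	    (addr & r->netmask) == (r->addr & r->netmask) &&
	    (r->proto_mask & (1u << proto)) &&
	    (r->type_mask & (1u << type)))
		return (r->mode & MODE_INVERT) ? 2 : 1;
	return 0;
}

int main(void)
{
	struct ip_rule r = {
		.addr = 0xC0A80000u, .netmask = 0xFFFF0000u,	/* 192.168.0.0/16 */
		.low = 1024, .high = 65535,
		.proto_mask = 1u << 6,				/* TCP */
		.type_mask = 1u << 1,				/* stream sockets */
		.mode = MODE_CONNECT,
	};

	printf("%d\n", check_rule(&r, 0xC0A80101u, 8080, 6, 1, MODE_CONNECT));	/* 1 */
	printf("%d\n", check_rule(&r, 0x0A000001u, 8080, 6, 1, MODE_CONNECT));	/* 0 */
	return 0;
}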
51723diff -urNp linux-2.6.32.45/grsecurity/gracl_learn.c linux-2.6.32.45/grsecurity/gracl_learn.c
51724--- linux-2.6.32.45/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
51725+++ linux-2.6.32.45/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
51726@@ -0,0 +1,208 @@
51727+#include <linux/kernel.h>
51728+#include <linux/mm.h>
51729+#include <linux/sched.h>
51730+#include <linux/poll.h>
51731+#include <linux/smp_lock.h>
51732+#include <linux/string.h>
51733+#include <linux/file.h>
51734+#include <linux/types.h>
51735+#include <linux/vmalloc.h>
51736+#include <linux/grinternal.h>
51737+
51738+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
51739+ size_t count, loff_t *ppos);
51740+extern int gr_acl_is_enabled(void);
51741+
51742+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
51743+static int gr_learn_attached;
51744+
51745+/* use a 512k buffer */
51746+#define LEARN_BUFFER_SIZE (512 * 1024)
51747+
51748+static DEFINE_SPINLOCK(gr_learn_lock);
51749+static DEFINE_MUTEX(gr_learn_user_mutex);
51750+
51751+/* we need to maintain two buffers, so that the kernel context of grlearn
51752+ uses a semaphore around the userspace copying, and the other kernel contexts
51753+ use a spinlock when copying into the buffer, since they cannot sleep
51754+*/
51755+static char *learn_buffer;
51756+static char *learn_buffer_user;
51757+static int learn_buffer_len;
51758+static int learn_buffer_user_len;
51759+
51760+static ssize_t
51761+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
51762+{
51763+ DECLARE_WAITQUEUE(wait, current);
51764+ ssize_t retval = 0;
51765+
51766+ add_wait_queue(&learn_wait, &wait);
51767+ set_current_state(TASK_INTERRUPTIBLE);
51768+ do {
51769+ mutex_lock(&gr_learn_user_mutex);
51770+ spin_lock(&gr_learn_lock);
51771+ if (learn_buffer_len)
51772+ break;
51773+ spin_unlock(&gr_learn_lock);
51774+ mutex_unlock(&gr_learn_user_mutex);
51775+ if (file->f_flags & O_NONBLOCK) {
51776+ retval = -EAGAIN;
51777+ goto out;
51778+ }
51779+ if (signal_pending(current)) {
51780+ retval = -ERESTARTSYS;
51781+ goto out;
51782+ }
51783+
51784+ schedule();
51785+ } while (1);
51786+
51787+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
51788+ learn_buffer_user_len = learn_buffer_len;
51789+ retval = learn_buffer_len;
51790+ learn_buffer_len = 0;
51791+
51792+ spin_unlock(&gr_learn_lock);
51793+
51794+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
51795+ retval = -EFAULT;
51796+
51797+ mutex_unlock(&gr_learn_user_mutex);
51798+out:
51799+ set_current_state(TASK_RUNNING);
51800+ remove_wait_queue(&learn_wait, &wait);
51801+ return retval;
51802+}
51803+
51804+static unsigned int
51805+poll_learn(struct file * file, poll_table * wait)
51806+{
51807+ poll_wait(file, &learn_wait, wait);
51808+
51809+ if (learn_buffer_len)
51810+ return (POLLIN | POLLRDNORM);
51811+
51812+ return 0;
51813+}
51814+
51815+void
51816+gr_clear_learn_entries(void)
51817+{
51818+ char *tmp;
51819+
51820+ mutex_lock(&gr_learn_user_mutex);
51821+ spin_lock(&gr_learn_lock);
51822+ tmp = learn_buffer;
51823+ learn_buffer = NULL;
51824+ spin_unlock(&gr_learn_lock);
51825+ if (tmp)
51826+ vfree(tmp);
51827+ if (learn_buffer_user != NULL) {
51828+ vfree(learn_buffer_user);
51829+ learn_buffer_user = NULL;
51830+ }
51831+ learn_buffer_len = 0;
51832+ mutex_unlock(&gr_learn_user_mutex);
51833+
51834+ return;
51835+}
51836+
51837+void
51838+gr_add_learn_entry(const char *fmt, ...)
51839+{
51840+ va_list args;
51841+ unsigned int len;
51842+
51843+ if (!gr_learn_attached)
51844+ return;
51845+
51846+ spin_lock(&gr_learn_lock);
51847+
51848+ /* leave a gap at the end so we know when it's "full" but don't have to
51849+ compute the exact length of the string we're trying to append
51850+ */
51851+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
51852+ spin_unlock(&gr_learn_lock);
51853+ wake_up_interruptible(&learn_wait);
51854+ return;
51855+ }
51856+ if (learn_buffer == NULL) {
51857+ spin_unlock(&gr_learn_lock);
51858+ return;
51859+ }
51860+
51861+ va_start(args, fmt);
51862+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
51863+ va_end(args);
51864+
51865+ learn_buffer_len += len + 1;
51866+
51867+ spin_unlock(&gr_learn_lock);
51868+ wake_up_interruptible(&learn_wait);
51869+
51870+ return;
51871+}
51872+
51873+static int
51874+open_learn(struct inode *inode, struct file *file)
51875+{
51876+ if (file->f_mode & FMODE_READ && gr_learn_attached)
51877+ return -EBUSY;
51878+ if (file->f_mode & FMODE_READ) {
51879+ int retval = 0;
51880+ mutex_lock(&gr_learn_user_mutex);
51881+ if (learn_buffer == NULL)
51882+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
51883+ if (learn_buffer_user == NULL)
51884+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
51885+ if (learn_buffer == NULL) {
51886+ retval = -ENOMEM;
51887+ goto out_error;
51888+ }
51889+ if (learn_buffer_user == NULL) {
51890+ retval = -ENOMEM;
51891+ goto out_error;
51892+ }
51893+ learn_buffer_len = 0;
51894+ learn_buffer_user_len = 0;
51895+ gr_learn_attached = 1;
51896+out_error:
51897+ mutex_unlock(&gr_learn_user_mutex);
51898+ return retval;
51899+ }
51900+ return 0;
51901+}
51902+
51903+static int
51904+close_learn(struct inode *inode, struct file *file)
51905+{
51906+ if (file->f_mode & FMODE_READ) {
51907+ char *tmp = NULL;
51908+ mutex_lock(&gr_learn_user_mutex);
51909+ spin_lock(&gr_learn_lock);
51910+ tmp = learn_buffer;
51911+ learn_buffer = NULL;
51912+ spin_unlock(&gr_learn_lock);
51913+ if (tmp)
51914+ vfree(tmp);
51915+ if (learn_buffer_user != NULL) {
51916+ vfree(learn_buffer_user);
51917+ learn_buffer_user = NULL;
51918+ }
51919+ learn_buffer_len = 0;
51920+ learn_buffer_user_len = 0;
51921+ gr_learn_attached = 0;
51922+ mutex_unlock(&gr_learn_user_mutex);
51923+ }
51924+
51925+ return 0;
51926+}
51927+
51928+const struct file_operations grsec_fops = {
51929+ .read = read_learn,
51930+ .write = write_grsec_handler,
51931+ .open = open_learn,
51932+ .release = close_learn,
51933+ .poll = poll_learn,
51934+};
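
The gracl_learn.c hunk above implements a two-lock learn buffer: producing contexts append records under a spinlock and never sleep, while the single reader drains the buffer into a second, reader-owned buffer under a mutex and a wait queue before the slow copy to userspace. A minimal userspace analogue of that pattern, assuming pthreads stand in for the kernel spinlock, mutex and wait queue (illustrative sketch only, not part of the patch):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUF_SZ 4096

static char learn_buf[BUF_SZ];
static size_t learn_len;
static pthread_mutex_t produce_lock = PTHREAD_MUTEX_INITIALIZER; /* plays the spinlock */
static pthread_mutex_t reader_lock  = PTHREAD_MUTEX_INITIALIZER; /* plays the mutex */
static pthread_cond_t  data_ready   = PTHREAD_COND_INITIALIZER;  /* plays the wait queue */

/* producer side: append a record without ever blocking on the reader */
static void add_entry(const char *msg)
{
	pthread_mutex_lock(&produce_lock);
	if (learn_len + strlen(msg) + 1 < BUF_SZ) {
		strcpy(learn_buf + learn_len, msg);
		learn_len += strlen(msg) + 1;
	}
	pthread_mutex_unlock(&produce_lock);
	pthread_cond_signal(&data_ready);
}

/* reader side: wait for data, swap it into a private buffer, then do the
 * slow work (the kernel's copy_to_user) outside the producer lock */
static size_t read_entries(char *out, size_t out_sz)
{
	size_t n;

	pthread_mutex_lock(&reader_lock);
	pthread_mutex_lock(&produce_lock);
	while (learn_len == 0)
		pthread_cond_wait(&data_ready, &produce_lock);
	n = learn_len < out_sz ? learn_len : out_sz;
	memcpy(out, learn_buf, n);
	learn_len = 0;
	pthread_mutex_unlock(&produce_lock);
	/* slow consumer-side copying would happen here */
	pthread_mutex_unlock(&reader_lock);
	return n;
}

int main(void)
{
	char out[BUF_SZ];
	size_t n;

	add_entry("subject /bin/true mode rx");
	n = read_entries(out, sizeof(out));
	printf("drained %zu bytes: %s\n", n, out);
	return 0;
}
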
51935diff -urNp linux-2.6.32.45/grsecurity/gracl_res.c linux-2.6.32.45/grsecurity/gracl_res.c
51936--- linux-2.6.32.45/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
51937+++ linux-2.6.32.45/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
51938@@ -0,0 +1,67 @@
51939+#include <linux/kernel.h>
51940+#include <linux/sched.h>
51941+#include <linux/gracl.h>
51942+#include <linux/grinternal.h>
51943+
51944+static const char *restab_log[] = {
51945+ [RLIMIT_CPU] = "RLIMIT_CPU",
51946+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
51947+ [RLIMIT_DATA] = "RLIMIT_DATA",
51948+ [RLIMIT_STACK] = "RLIMIT_STACK",
51949+ [RLIMIT_CORE] = "RLIMIT_CORE",
51950+ [RLIMIT_RSS] = "RLIMIT_RSS",
51951+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
51952+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
51953+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
51954+ [RLIMIT_AS] = "RLIMIT_AS",
51955+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
51956+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
51957+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
51958+ [RLIMIT_NICE] = "RLIMIT_NICE",
51959+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
51960+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
51961+ [GR_CRASH_RES] = "RLIMIT_CRASH"
51962+};
51963+
51964+void
51965+gr_log_resource(const struct task_struct *task,
51966+ const int res, const unsigned long wanted, const int gt)
51967+{
51968+ const struct cred *cred;
51969+ unsigned long rlim;
51970+
51971+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
51972+ return;
51973+
51974+ // not yet supported resource
51975+ if (unlikely(!restab_log[res]))
51976+ return;
51977+
51978+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
51979+ rlim = task->signal->rlim[res].rlim_max;
51980+ else
51981+ rlim = task->signal->rlim[res].rlim_cur;
51982+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
51983+ return;
51984+
51985+ rcu_read_lock();
51986+ cred = __task_cred(task);
51987+
51988+ if (res == RLIMIT_NPROC &&
51989+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
51990+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
51991+ goto out_rcu_unlock;
51992+ else if (res == RLIMIT_MEMLOCK &&
51993+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
51994+ goto out_rcu_unlock;
51995+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
51996+ goto out_rcu_unlock;
51997+ rcu_read_unlock();
51998+
51999+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52000+
52001+ return;
52002+out_rcu_unlock:
52003+ rcu_read_unlock();
52004+ return;
52005+}
52006diff -urNp linux-2.6.32.45/grsecurity/gracl_segv.c linux-2.6.32.45/grsecurity/gracl_segv.c
52007--- linux-2.6.32.45/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52008+++ linux-2.6.32.45/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
52009@@ -0,0 +1,284 @@
52010+#include <linux/kernel.h>
52011+#include <linux/mm.h>
52012+#include <asm/uaccess.h>
52013+#include <asm/errno.h>
52014+#include <asm/mman.h>
52015+#include <net/sock.h>
52016+#include <linux/file.h>
52017+#include <linux/fs.h>
52018+#include <linux/net.h>
52019+#include <linux/in.h>
52020+#include <linux/smp_lock.h>
52021+#include <linux/slab.h>
52022+#include <linux/types.h>
52023+#include <linux/sched.h>
52024+#include <linux/timer.h>
52025+#include <linux/gracl.h>
52026+#include <linux/grsecurity.h>
52027+#include <linux/grinternal.h>
52028+
52029+static struct crash_uid *uid_set;
52030+static unsigned short uid_used;
52031+static DEFINE_SPINLOCK(gr_uid_lock);
52032+extern rwlock_t gr_inode_lock;
52033+extern struct acl_subject_label *
52034+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52035+ struct acl_role_label *role);
52036+extern int gr_fake_force_sig(int sig, struct task_struct *t);
52037+
52038+int
52039+gr_init_uidset(void)
52040+{
52041+ uid_set =
52042+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52043+ uid_used = 0;
52044+
52045+ return uid_set ? 1 : 0;
52046+}
52047+
52048+void
52049+gr_free_uidset(void)
52050+{
52051+ if (uid_set)
52052+ kfree(uid_set);
52053+
52054+ return;
52055+}
52056+
52057+int
52058+gr_find_uid(const uid_t uid)
52059+{
52060+ struct crash_uid *tmp = uid_set;
52061+ uid_t buid;
52062+ int low = 0, high = uid_used - 1, mid;
52063+
52064+ while (high >= low) {
52065+ mid = (low + high) >> 1;
52066+ buid = tmp[mid].uid;
52067+ if (buid == uid)
52068+ return mid;
52069+ if (buid > uid)
52070+ high = mid - 1;
52071+ if (buid < uid)
52072+ low = mid + 1;
52073+ }
52074+
52075+ return -1;
52076+}
52077+
52078+static __inline__ void
52079+gr_insertsort(void)
52080+{
52081+ unsigned short i, j;
52082+ struct crash_uid index;
52083+
52084+ for (i = 1; i < uid_used; i++) {
52085+ index = uid_set[i];
52086+ j = i;
52087+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52088+ uid_set[j] = uid_set[j - 1];
52089+ j--;
52090+ }
52091+ uid_set[j] = index;
52092+ }
52093+
52094+ return;
52095+}
52096+
52097+static __inline__ void
52098+gr_insert_uid(const uid_t uid, const unsigned long expires)
52099+{
52100+ int loc;
52101+
52102+ if (uid_used == GR_UIDTABLE_MAX)
52103+ return;
52104+
52105+ loc = gr_find_uid(uid);
52106+
52107+ if (loc >= 0) {
52108+ uid_set[loc].expires = expires;
52109+ return;
52110+ }
52111+
52112+ uid_set[uid_used].uid = uid;
52113+ uid_set[uid_used].expires = expires;
52114+ uid_used++;
52115+
52116+ gr_insertsort();
52117+
52118+ return;
52119+}
52120+
52121+void
52122+gr_remove_uid(const unsigned short loc)
52123+{
52124+ unsigned short i;
52125+
52126+ for (i = loc + 1; i < uid_used; i++)
52127+ uid_set[i - 1] = uid_set[i];
52128+
52129+ uid_used--;
52130+
52131+ return;
52132+}
52133+
52134+int
52135+gr_check_crash_uid(const uid_t uid)
52136+{
52137+ int loc;
52138+ int ret = 0;
52139+
52140+ if (unlikely(!gr_acl_is_enabled()))
52141+ return 0;
52142+
52143+ spin_lock(&gr_uid_lock);
52144+ loc = gr_find_uid(uid);
52145+
52146+ if (loc < 0)
52147+ goto out_unlock;
52148+
52149+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
52150+ gr_remove_uid(loc);
52151+ else
52152+ ret = 1;
52153+
52154+out_unlock:
52155+ spin_unlock(&gr_uid_lock);
52156+ return ret;
52157+}
52158+
52159+static __inline__ int
52160+proc_is_setxid(const struct cred *cred)
52161+{
52162+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
52163+ cred->uid != cred->fsuid)
52164+ return 1;
52165+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52166+ cred->gid != cred->fsgid)
52167+ return 1;
52168+
52169+ return 0;
52170+}
52171+
52172+void
52173+gr_handle_crash(struct task_struct *task, const int sig)
52174+{
52175+ struct acl_subject_label *curr;
52176+ struct acl_subject_label *curr2;
52177+ struct task_struct *tsk, *tsk2;
52178+ const struct cred *cred;
52179+ const struct cred *cred2;
52180+
52181+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52182+ return;
52183+
52184+ if (unlikely(!gr_acl_is_enabled()))
52185+ return;
52186+
52187+ curr = task->acl;
52188+
52189+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
52190+ return;
52191+
52192+ if (time_before_eq(curr->expires, get_seconds())) {
52193+ curr->expires = 0;
52194+ curr->crashes = 0;
52195+ }
52196+
52197+ curr->crashes++;
52198+
52199+ if (!curr->expires)
52200+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52201+
52202+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52203+ time_after(curr->expires, get_seconds())) {
52204+ rcu_read_lock();
52205+ cred = __task_cred(task);
52206+ if (cred->uid && proc_is_setxid(cred)) {
52207+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52208+ spin_lock(&gr_uid_lock);
52209+ gr_insert_uid(cred->uid, curr->expires);
52210+ spin_unlock(&gr_uid_lock);
52211+ curr->expires = 0;
52212+ curr->crashes = 0;
52213+ read_lock(&tasklist_lock);
52214+ do_each_thread(tsk2, tsk) {
52215+ cred2 = __task_cred(tsk);
52216+ if (tsk != task && cred2->uid == cred->uid)
52217+ gr_fake_force_sig(SIGKILL, tsk);
52218+ } while_each_thread(tsk2, tsk);
52219+ read_unlock(&tasklist_lock);
52220+ } else {
52221+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52222+ read_lock(&tasklist_lock);
52223+ do_each_thread(tsk2, tsk) {
52224+ if (likely(tsk != task)) {
52225+ curr2 = tsk->acl;
52226+
52227+ if (curr2->device == curr->device &&
52228+ curr2->inode == curr->inode)
52229+ gr_fake_force_sig(SIGKILL, tsk);
52230+ }
52231+ } while_each_thread(tsk2, tsk);
52232+ read_unlock(&tasklist_lock);
52233+ }
52234+ rcu_read_unlock();
52235+ }
52236+
52237+ return;
52238+}
52239+
52240+int
52241+gr_check_crash_exec(const struct file *filp)
52242+{
52243+ struct acl_subject_label *curr;
52244+
52245+ if (unlikely(!gr_acl_is_enabled()))
52246+ return 0;
52247+
52248+ read_lock(&gr_inode_lock);
52249+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52250+ filp->f_path.dentry->d_inode->i_sb->s_dev,
52251+ current->role);
52252+ read_unlock(&gr_inode_lock);
52253+
52254+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52255+ (!curr->crashes && !curr->expires))
52256+ return 0;
52257+
52258+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52259+ time_after(curr->expires, get_seconds()))
52260+ return 1;
52261+ else if (time_before_eq(curr->expires, get_seconds())) {
52262+ curr->crashes = 0;
52263+ curr->expires = 0;
52264+ }
52265+
52266+ return 0;
52267+}
52268+
52269+void
52270+gr_handle_alertkill(struct task_struct *task)
52271+{
52272+ struct acl_subject_label *curracl;
52273+ __u32 curr_ip;
52274+ struct task_struct *p, *p2;
52275+
52276+ if (unlikely(!gr_acl_is_enabled()))
52277+ return;
52278+
52279+ curracl = task->acl;
52280+ curr_ip = task->signal->curr_ip;
52281+
52282+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52283+ read_lock(&tasklist_lock);
52284+ do_each_thread(p2, p) {
52285+ if (p->signal->curr_ip == curr_ip)
52286+ gr_fake_force_sig(SIGKILL, p);
52287+ } while_each_thread(p2, p);
52288+ read_unlock(&tasklist_lock);
52289+ } else if (curracl->mode & GR_KILLPROC)
52290+ gr_fake_force_sig(SIGKILL, task);
52291+
52292+ return;
52293+}
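
gracl_segv.c above keeps recently crashed setuid users in a small array that is searched with a binary search (gr_find_uid) and re-sorted with an insertion sort after each append (gr_insert_uid); an entry only counts while its expiry time lies in the future. A standalone sketch of that table logic, using time() in place of get_seconds() (illustrative only, not part of the patch):

#include <stdio.h>
#include <time.h>

#define TABLE_MAX 16

struct crash_entry { unsigned int uid; time_t expires; };

static struct crash_entry table[TABLE_MAX];
static int used;

/* binary search over the uid-sorted array, -1 if absent */
static int find_uid(unsigned int uid)
{
	int low = 0, high = used - 1;

	while (high >= low) {
		int mid = (low + high) / 2;
		if (table[mid].uid == uid)
			return mid;
		if (table[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert_uid(unsigned int uid, time_t expires)
{
	int loc = find_uid(uid), i, j;

	if (loc >= 0) {			/* already present: refresh the window */
		table[loc].expires = expires;
		return;
	}
	if (used == TABLE_MAX)
		return;
	table[used].uid = uid;
	table[used].expires = expires;
	used++;
	for (i = 1; i < used; i++) {	/* insertion sort keeps the array ordered */
		struct crash_entry key = table[i];
		j = i;
		while (j > 0 && table[j - 1].uid > key.uid) {
			table[j] = table[j - 1];
			j--;
		}
		table[j] = key;
	}
}

static int uid_is_banned(unsigned int uid)
{
	int loc = find_uid(uid);

	return loc >= 0 && table[loc].expires > time(NULL);
}

int main(void)
{
	insert_uid(1000, time(NULL) + 30);
	insert_uid(33, time(NULL) + 30);
	printf("uid 33 banned: %d, uid 99 banned: %d\n",
	       uid_is_banned(33), uid_is_banned(99));
	return 0;
}
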
52294diff -urNp linux-2.6.32.45/grsecurity/gracl_shm.c linux-2.6.32.45/grsecurity/gracl_shm.c
52295--- linux-2.6.32.45/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52296+++ linux-2.6.32.45/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
52297@@ -0,0 +1,40 @@
52298+#include <linux/kernel.h>
52299+#include <linux/mm.h>
52300+#include <linux/sched.h>
52301+#include <linux/file.h>
52302+#include <linux/ipc.h>
52303+#include <linux/gracl.h>
52304+#include <linux/grsecurity.h>
52305+#include <linux/grinternal.h>
52306+
52307+int
52308+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52309+ const time_t shm_createtime, const uid_t cuid, const int shmid)
52310+{
52311+ struct task_struct *task;
52312+
52313+ if (!gr_acl_is_enabled())
52314+ return 1;
52315+
52316+ rcu_read_lock();
52317+ read_lock(&tasklist_lock);
52318+
52319+ task = find_task_by_vpid(shm_cprid);
52320+
52321+ if (unlikely(!task))
52322+ task = find_task_by_vpid(shm_lapid);
52323+
52324+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52325+ (task->pid == shm_lapid)) &&
52326+ (task->acl->mode & GR_PROTSHM) &&
52327+ (task->acl != current->acl))) {
52328+ read_unlock(&tasklist_lock);
52329+ rcu_read_unlock();
52330+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52331+ return 0;
52332+ }
52333+ read_unlock(&tasklist_lock);
52334+ rcu_read_unlock();
52335+
52336+ return 1;
52337+}
52338diff -urNp linux-2.6.32.45/grsecurity/grsec_chdir.c linux-2.6.32.45/grsecurity/grsec_chdir.c
52339--- linux-2.6.32.45/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52340+++ linux-2.6.32.45/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
52341@@ -0,0 +1,19 @@
52342+#include <linux/kernel.h>
52343+#include <linux/sched.h>
52344+#include <linux/fs.h>
52345+#include <linux/file.h>
52346+#include <linux/grsecurity.h>
52347+#include <linux/grinternal.h>
52348+
52349+void
52350+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52351+{
52352+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52353+ if ((grsec_enable_chdir && grsec_enable_group &&
52354+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52355+ !grsec_enable_group)) {
52356+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52357+ }
52358+#endif
52359+ return;
52360+}
52361diff -urNp linux-2.6.32.45/grsecurity/grsec_chroot.c linux-2.6.32.45/grsecurity/grsec_chroot.c
52362--- linux-2.6.32.45/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52363+++ linux-2.6.32.45/grsecurity/grsec_chroot.c 2011-07-18 17:14:10.000000000 -0400
52364@@ -0,0 +1,384 @@
52365+#include <linux/kernel.h>
52366+#include <linux/module.h>
52367+#include <linux/sched.h>
52368+#include <linux/file.h>
52369+#include <linux/fs.h>
52370+#include <linux/mount.h>
52371+#include <linux/types.h>
52372+#include <linux/pid_namespace.h>
52373+#include <linux/grsecurity.h>
52374+#include <linux/grinternal.h>
52375+
52376+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52377+{
52378+#ifdef CONFIG_GRKERNSEC
52379+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52380+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52381+ task->gr_is_chrooted = 1;
52382+ else
52383+ task->gr_is_chrooted = 0;
52384+
52385+ task->gr_chroot_dentry = path->dentry;
52386+#endif
52387+ return;
52388+}
52389+
52390+void gr_clear_chroot_entries(struct task_struct *task)
52391+{
52392+#ifdef CONFIG_GRKERNSEC
52393+ task->gr_is_chrooted = 0;
52394+ task->gr_chroot_dentry = NULL;
52395+#endif
52396+ return;
52397+}
52398+
52399+int
52400+gr_handle_chroot_unix(const pid_t pid)
52401+{
52402+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52403+ struct task_struct *p;
52404+
52405+ if (unlikely(!grsec_enable_chroot_unix))
52406+ return 1;
52407+
52408+ if (likely(!proc_is_chrooted(current)))
52409+ return 1;
52410+
52411+ rcu_read_lock();
52412+ read_lock(&tasklist_lock);
52413+
52414+ p = find_task_by_vpid_unrestricted(pid);
52415+ if (unlikely(p && !have_same_root(current, p))) {
52416+ read_unlock(&tasklist_lock);
52417+ rcu_read_unlock();
52418+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
52419+ return 0;
52420+ }
52421+ read_unlock(&tasklist_lock);
52422+ rcu_read_unlock();
52423+#endif
52424+ return 1;
52425+}
52426+
52427+int
52428+gr_handle_chroot_nice(void)
52429+{
52430+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52431+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
52432+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
52433+ return -EPERM;
52434+ }
52435+#endif
52436+ return 0;
52437+}
52438+
52439+int
52440+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
52441+{
52442+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52443+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
52444+ && proc_is_chrooted(current)) {
52445+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
52446+ return -EACCES;
52447+ }
52448+#endif
52449+ return 0;
52450+}
52451+
52452+int
52453+gr_handle_chroot_rawio(const struct inode *inode)
52454+{
52455+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52456+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
52457+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
52458+ return 1;
52459+#endif
52460+ return 0;
52461+}
52462+
52463+int
52464+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
52465+{
52466+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52467+ struct task_struct *p;
52468+ int ret = 0;
52469+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
52470+ return ret;
52471+
52472+ read_lock(&tasklist_lock);
52473+ do_each_pid_task(pid, type, p) {
52474+ if (!have_same_root(current, p)) {
52475+ ret = 1;
52476+ goto out;
52477+ }
52478+ } while_each_pid_task(pid, type, p);
52479+out:
52480+ read_unlock(&tasklist_lock);
52481+ return ret;
52482+#endif
52483+ return 0;
52484+}
52485+
52486+int
52487+gr_pid_is_chrooted(struct task_struct *p)
52488+{
52489+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52490+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
52491+ return 0;
52492+
52493+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
52494+ !have_same_root(current, p)) {
52495+ return 1;
52496+ }
52497+#endif
52498+ return 0;
52499+}
52500+
52501+EXPORT_SYMBOL(gr_pid_is_chrooted);
52502+
52503+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
52504+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
52505+{
52506+ struct dentry *dentry = (struct dentry *)u_dentry;
52507+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
52508+ struct dentry *realroot;
52509+ struct vfsmount *realrootmnt;
52510+ struct dentry *currentroot;
52511+ struct vfsmount *currentmnt;
52512+ struct task_struct *reaper = &init_task;
52513+ int ret = 1;
52514+
52515+ read_lock(&reaper->fs->lock);
52516+ realrootmnt = mntget(reaper->fs->root.mnt);
52517+ realroot = dget(reaper->fs->root.dentry);
52518+ read_unlock(&reaper->fs->lock);
52519+
52520+ read_lock(&current->fs->lock);
52521+ currentmnt = mntget(current->fs->root.mnt);
52522+ currentroot = dget(current->fs->root.dentry);
52523+ read_unlock(&current->fs->lock);
52524+
52525+ spin_lock(&dcache_lock);
52526+ for (;;) {
52527+ if (unlikely((dentry == realroot && mnt == realrootmnt)
52528+ || (dentry == currentroot && mnt == currentmnt)))
52529+ break;
52530+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
52531+ if (mnt->mnt_parent == mnt)
52532+ break;
52533+ dentry = mnt->mnt_mountpoint;
52534+ mnt = mnt->mnt_parent;
52535+ continue;
52536+ }
52537+ dentry = dentry->d_parent;
52538+ }
52539+ spin_unlock(&dcache_lock);
52540+
52541+ dput(currentroot);
52542+ mntput(currentmnt);
52543+
52544+ /* access is outside of chroot */
52545+ if (dentry == realroot && mnt == realrootmnt)
52546+ ret = 0;
52547+
52548+ dput(realroot);
52549+ mntput(realrootmnt);
52550+ return ret;
52551+}
52552+#endif
52553+
52554+int
52555+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
52556+{
52557+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52558+ if (!grsec_enable_chroot_fchdir)
52559+ return 1;
52560+
52561+ if (!proc_is_chrooted(current))
52562+ return 1;
52563+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
52564+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
52565+ return 0;
52566+ }
52567+#endif
52568+ return 1;
52569+}
52570+
52571+int
52572+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52573+ const time_t shm_createtime)
52574+{
52575+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52576+ struct task_struct *p;
52577+ time_t starttime;
52578+
52579+ if (unlikely(!grsec_enable_chroot_shmat))
52580+ return 1;
52581+
52582+ if (likely(!proc_is_chrooted(current)))
52583+ return 1;
52584+
52585+ rcu_read_lock();
52586+ read_lock(&tasklist_lock);
52587+
52588+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
52589+ starttime = p->start_time.tv_sec;
52590+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
52591+ if (have_same_root(current, p)) {
52592+ goto allow;
52593+ } else {
52594+ read_unlock(&tasklist_lock);
52595+ rcu_read_unlock();
52596+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52597+ return 0;
52598+ }
52599+ }
52600+ /* creator exited, pid reuse, fall through to next check */
52601+ }
52602+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
52603+ if (unlikely(!have_same_root(current, p))) {
52604+ read_unlock(&tasklist_lock);
52605+ rcu_read_unlock();
52606+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52607+ return 0;
52608+ }
52609+ }
52610+
52611+allow:
52612+ read_unlock(&tasklist_lock);
52613+ rcu_read_unlock();
52614+#endif
52615+ return 1;
52616+}
52617+
52618+void
52619+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
52620+{
52621+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52622+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
52623+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
52624+#endif
52625+ return;
52626+}
52627+
52628+int
52629+gr_handle_chroot_mknod(const struct dentry *dentry,
52630+ const struct vfsmount *mnt, const int mode)
52631+{
52632+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52633+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
52634+ proc_is_chrooted(current)) {
52635+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
52636+ return -EPERM;
52637+ }
52638+#endif
52639+ return 0;
52640+}
52641+
52642+int
52643+gr_handle_chroot_mount(const struct dentry *dentry,
52644+ const struct vfsmount *mnt, const char *dev_name)
52645+{
52646+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52647+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
52648+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
52649+ return -EPERM;
52650+ }
52651+#endif
52652+ return 0;
52653+}
52654+
52655+int
52656+gr_handle_chroot_pivot(void)
52657+{
52658+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52659+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
52660+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
52661+ return -EPERM;
52662+ }
52663+#endif
52664+ return 0;
52665+}
52666+
52667+int
52668+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
52669+{
52670+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52671+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
52672+ !gr_is_outside_chroot(dentry, mnt)) {
52673+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
52674+ return -EPERM;
52675+ }
52676+#endif
52677+ return 0;
52678+}
52679+
52680+int
52681+gr_handle_chroot_caps(struct path *path)
52682+{
52683+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52684+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
52685+ (init_task.fs->root.dentry != path->dentry) &&
52686+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
52687+
52688+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52689+ const struct cred *old = current_cred();
52690+ struct cred *new = prepare_creds();
52691+ if (new == NULL)
52692+ return 1;
52693+
52694+ new->cap_permitted = cap_drop(old->cap_permitted,
52695+ chroot_caps);
52696+ new->cap_inheritable = cap_drop(old->cap_inheritable,
52697+ chroot_caps);
52698+ new->cap_effective = cap_drop(old->cap_effective,
52699+ chroot_caps);
52700+
52701+ commit_creds(new);
52702+
52703+ return 0;
52704+ }
52705+#endif
52706+ return 0;
52707+}
52708+
52709+int
52710+gr_handle_chroot_sysctl(const int op)
52711+{
52712+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52713+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
52714+ && (op & MAY_WRITE))
52715+ return -EACCES;
52716+#endif
52717+ return 0;
52718+}
52719+
52720+void
52721+gr_handle_chroot_chdir(struct path *path)
52722+{
52723+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52724+ if (grsec_enable_chroot_chdir)
52725+ set_fs_pwd(current->fs, path);
52726+#endif
52727+ return;
52728+}
52729+
52730+int
52731+gr_handle_chroot_chmod(const struct dentry *dentry,
52732+ const struct vfsmount *mnt, const int mode)
52733+{
52734+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52735+ /* allow chmod +s on directories, but not on files */
52736+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
52737+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
52738+ proc_is_chrooted(current)) {
52739+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
52740+ return -EPERM;
52741+ }
52742+#endif
52743+ return 0;
52744+}
52745+
52746+#ifdef CONFIG_SECURITY
52747+EXPORT_SYMBOL(gr_handle_chroot_caps);
52748+#endif
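
gr_is_outside_chroot() above decides containment by walking dentries and mount points upward until it reaches either the caller's root or the real root. A rough userspace analogue of the same question, answered instead by canonicalizing both paths with realpath() and comparing prefixes (a loose approximation for illustration only, not part of the patch and not equivalent to the in-kernel dentry walk, which also handles bind mounts and lazy unmounts):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return 1 if target resolves outside root, 0 if inside, -1 on error. */
static int is_outside_root(const char *root, const char *target)
{
	char rroot[PATH_MAX], rtarget[PATH_MAX];
	size_t len;

	if (!realpath(root, rroot) || !realpath(target, rtarget))
		return -1;
	if (strcmp(rroot, "/") == 0 || strcmp(rroot, rtarget) == 0)
		return 0;
	len = strlen(rroot);
	if (strncmp(rroot, rtarget, len) == 0 && rtarget[len] == '/')
		return 0;
	return 1;
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <root> <target>\n", argv[0]);
		return 2;
	}
	printf("%d\n", is_outside_root(argv[1], argv[2]));
	return 0;
}
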
52749diff -urNp linux-2.6.32.45/grsecurity/grsec_disabled.c linux-2.6.32.45/grsecurity/grsec_disabled.c
52750--- linux-2.6.32.45/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
52751+++ linux-2.6.32.45/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
52752@@ -0,0 +1,447 @@
52753+#include <linux/kernel.h>
52754+#include <linux/module.h>
52755+#include <linux/sched.h>
52756+#include <linux/file.h>
52757+#include <linux/fs.h>
52758+#include <linux/kdev_t.h>
52759+#include <linux/net.h>
52760+#include <linux/in.h>
52761+#include <linux/ip.h>
52762+#include <linux/skbuff.h>
52763+#include <linux/sysctl.h>
52764+
52765+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
52766+void
52767+pax_set_initial_flags(struct linux_binprm *bprm)
52768+{
52769+ return;
52770+}
52771+#endif
52772+
52773+#ifdef CONFIG_SYSCTL
52774+__u32
52775+gr_handle_sysctl(const struct ctl_table * table, const int op)
52776+{
52777+ return 0;
52778+}
52779+#endif
52780+
52781+#ifdef CONFIG_TASKSTATS
52782+int gr_is_taskstats_denied(int pid)
52783+{
52784+ return 0;
52785+}
52786+#endif
52787+
52788+int
52789+gr_acl_is_enabled(void)
52790+{
52791+ return 0;
52792+}
52793+
52794+int
52795+gr_handle_rawio(const struct inode *inode)
52796+{
52797+ return 0;
52798+}
52799+
52800+void
52801+gr_acl_handle_psacct(struct task_struct *task, const long code)
52802+{
52803+ return;
52804+}
52805+
52806+int
52807+gr_handle_ptrace(struct task_struct *task, const long request)
52808+{
52809+ return 0;
52810+}
52811+
52812+int
52813+gr_handle_proc_ptrace(struct task_struct *task)
52814+{
52815+ return 0;
52816+}
52817+
52818+void
52819+gr_learn_resource(const struct task_struct *task,
52820+ const int res, const unsigned long wanted, const int gt)
52821+{
52822+ return;
52823+}
52824+
52825+int
52826+gr_set_acls(const int type)
52827+{
52828+ return 0;
52829+}
52830+
52831+int
52832+gr_check_hidden_task(const struct task_struct *tsk)
52833+{
52834+ return 0;
52835+}
52836+
52837+int
52838+gr_check_protected_task(const struct task_struct *task)
52839+{
52840+ return 0;
52841+}
52842+
52843+int
52844+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52845+{
52846+ return 0;
52847+}
52848+
52849+void
52850+gr_copy_label(struct task_struct *tsk)
52851+{
52852+ return;
52853+}
52854+
52855+void
52856+gr_set_pax_flags(struct task_struct *task)
52857+{
52858+ return;
52859+}
52860+
52861+int
52862+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52863+ const int unsafe_share)
52864+{
52865+ return 0;
52866+}
52867+
52868+void
52869+gr_handle_delete(const ino_t ino, const dev_t dev)
52870+{
52871+ return;
52872+}
52873+
52874+void
52875+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52876+{
52877+ return;
52878+}
52879+
52880+void
52881+gr_handle_crash(struct task_struct *task, const int sig)
52882+{
52883+ return;
52884+}
52885+
52886+int
52887+gr_check_crash_exec(const struct file *filp)
52888+{
52889+ return 0;
52890+}
52891+
52892+int
52893+gr_check_crash_uid(const uid_t uid)
52894+{
52895+ return 0;
52896+}
52897+
52898+void
52899+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52900+ struct dentry *old_dentry,
52901+ struct dentry *new_dentry,
52902+ struct vfsmount *mnt, const __u8 replace)
52903+{
52904+ return;
52905+}
52906+
52907+int
52908+gr_search_socket(const int family, const int type, const int protocol)
52909+{
52910+ return 1;
52911+}
52912+
52913+int
52914+gr_search_connectbind(const int mode, const struct socket *sock,
52915+ const struct sockaddr_in *addr)
52916+{
52917+ return 0;
52918+}
52919+
52920+int
52921+gr_is_capable(const int cap)
52922+{
52923+ return 1;
52924+}
52925+
52926+int
52927+gr_is_capable_nolog(const int cap)
52928+{
52929+ return 1;
52930+}
52931+
52932+void
52933+gr_handle_alertkill(struct task_struct *task)
52934+{
52935+ return;
52936+}
52937+
52938+__u32
52939+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
52940+{
52941+ return 1;
52942+}
52943+
52944+__u32
52945+gr_acl_handle_hidden_file(const struct dentry * dentry,
52946+ const struct vfsmount * mnt)
52947+{
52948+ return 1;
52949+}
52950+
52951+__u32
52952+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52953+ const int fmode)
52954+{
52955+ return 1;
52956+}
52957+
52958+__u32
52959+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52960+{
52961+ return 1;
52962+}
52963+
52964+__u32
52965+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
52966+{
52967+ return 1;
52968+}
52969+
52970+int
52971+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
52972+ unsigned int *vm_flags)
52973+{
52974+ return 1;
52975+}
52976+
52977+__u32
52978+gr_acl_handle_truncate(const struct dentry * dentry,
52979+ const struct vfsmount * mnt)
52980+{
52981+ return 1;
52982+}
52983+
52984+__u32
52985+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
52986+{
52987+ return 1;
52988+}
52989+
52990+__u32
52991+gr_acl_handle_access(const struct dentry * dentry,
52992+ const struct vfsmount * mnt, const int fmode)
52993+{
52994+ return 1;
52995+}
52996+
52997+__u32
52998+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
52999+ mode_t mode)
53000+{
53001+ return 1;
53002+}
53003+
53004+__u32
53005+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53006+ mode_t mode)
53007+{
53008+ return 1;
53009+}
53010+
53011+__u32
53012+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53013+{
53014+ return 1;
53015+}
53016+
53017+__u32
53018+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53019+{
53020+ return 1;
53021+}
53022+
53023+void
53024+grsecurity_init(void)
53025+{
53026+ return;
53027+}
53028+
53029+__u32
53030+gr_acl_handle_mknod(const struct dentry * new_dentry,
53031+ const struct dentry * parent_dentry,
53032+ const struct vfsmount * parent_mnt,
53033+ const int mode)
53034+{
53035+ return 1;
53036+}
53037+
53038+__u32
53039+gr_acl_handle_mkdir(const struct dentry * new_dentry,
53040+ const struct dentry * parent_dentry,
53041+ const struct vfsmount * parent_mnt)
53042+{
53043+ return 1;
53044+}
53045+
53046+__u32
53047+gr_acl_handle_symlink(const struct dentry * new_dentry,
53048+ const struct dentry * parent_dentry,
53049+ const struct vfsmount * parent_mnt, const char *from)
53050+{
53051+ return 1;
53052+}
53053+
53054+__u32
53055+gr_acl_handle_link(const struct dentry * new_dentry,
53056+ const struct dentry * parent_dentry,
53057+ const struct vfsmount * parent_mnt,
53058+ const struct dentry * old_dentry,
53059+ const struct vfsmount * old_mnt, const char *to)
53060+{
53061+ return 1;
53062+}
53063+
53064+int
53065+gr_acl_handle_rename(const struct dentry *new_dentry,
53066+ const struct dentry *parent_dentry,
53067+ const struct vfsmount *parent_mnt,
53068+ const struct dentry *old_dentry,
53069+ const struct inode *old_parent_inode,
53070+ const struct vfsmount *old_mnt, const char *newname)
53071+{
53072+ return 0;
53073+}
53074+
53075+int
53076+gr_acl_handle_filldir(const struct file *file, const char *name,
53077+ const int namelen, const ino_t ino)
53078+{
53079+ return 1;
53080+}
53081+
53082+int
53083+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53084+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53085+{
53086+ return 1;
53087+}
53088+
53089+int
53090+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53091+{
53092+ return 0;
53093+}
53094+
53095+int
53096+gr_search_accept(const struct socket *sock)
53097+{
53098+ return 0;
53099+}
53100+
53101+int
53102+gr_search_listen(const struct socket *sock)
53103+{
53104+ return 0;
53105+}
53106+
53107+int
53108+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53109+{
53110+ return 0;
53111+}
53112+
53113+__u32
53114+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53115+{
53116+ return 1;
53117+}
53118+
53119+__u32
53120+gr_acl_handle_creat(const struct dentry * dentry,
53121+ const struct dentry * p_dentry,
53122+ const struct vfsmount * p_mnt, const int fmode,
53123+ const int imode)
53124+{
53125+ return 1;
53126+}
53127+
53128+void
53129+gr_acl_handle_exit(void)
53130+{
53131+ return;
53132+}
53133+
53134+int
53135+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53136+{
53137+ return 1;
53138+}
53139+
53140+void
53141+gr_set_role_label(const uid_t uid, const gid_t gid)
53142+{
53143+ return;
53144+}
53145+
53146+int
53147+gr_acl_handle_procpidmem(const struct task_struct *task)
53148+{
53149+ return 0;
53150+}
53151+
53152+int
53153+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53154+{
53155+ return 0;
53156+}
53157+
53158+int
53159+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53160+{
53161+ return 0;
53162+}
53163+
53164+void
53165+gr_set_kernel_label(struct task_struct *task)
53166+{
53167+ return;
53168+}
53169+
53170+int
53171+gr_check_user_change(int real, int effective, int fs)
53172+{
53173+ return 0;
53174+}
53175+
53176+int
53177+gr_check_group_change(int real, int effective, int fs)
53178+{
53179+ return 0;
53180+}
53181+
53182+int gr_acl_enable_at_secure(void)
53183+{
53184+ return 0;
53185+}
53186+
53187+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53188+{
53189+ return dentry->d_inode->i_sb->s_dev;
53190+}
53191+
53192+EXPORT_SYMBOL(gr_is_capable);
53193+EXPORT_SYMBOL(gr_is_capable_nolog);
53194+EXPORT_SYMBOL(gr_learn_resource);
53195+EXPORT_SYMBOL(gr_set_kernel_label);
53196+#ifdef CONFIG_SECURITY
53197+EXPORT_SYMBOL(gr_check_user_change);
53198+EXPORT_SYMBOL(gr_check_group_change);
53199+#endif
53200diff -urNp linux-2.6.32.45/grsecurity/grsec_exec.c linux-2.6.32.45/grsecurity/grsec_exec.c
53201--- linux-2.6.32.45/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53202+++ linux-2.6.32.45/grsecurity/grsec_exec.c 2011-08-11 19:57:19.000000000 -0400
53203@@ -0,0 +1,132 @@
53204+#include <linux/kernel.h>
53205+#include <linux/sched.h>
53206+#include <linux/file.h>
53207+#include <linux/binfmts.h>
53208+#include <linux/smp_lock.h>
53209+#include <linux/fs.h>
53210+#include <linux/types.h>
53211+#include <linux/grdefs.h>
53212+#include <linux/grinternal.h>
53213+#include <linux/capability.h>
53214+#include <linux/compat.h>
53215+
53216+#include <asm/uaccess.h>
53217+
53218+#ifdef CONFIG_GRKERNSEC_EXECLOG
53219+static char gr_exec_arg_buf[132];
53220+static DEFINE_MUTEX(gr_exec_arg_mutex);
53221+#endif
53222+
53223+void
53224+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
53225+{
53226+#ifdef CONFIG_GRKERNSEC_EXECLOG
53227+ char *grarg = gr_exec_arg_buf;
53228+ unsigned int i, x, execlen = 0;
53229+ char c;
53230+
53231+ if (!((grsec_enable_execlog && grsec_enable_group &&
53232+ in_group_p(grsec_audit_gid))
53233+ || (grsec_enable_execlog && !grsec_enable_group)))
53234+ return;
53235+
53236+ mutex_lock(&gr_exec_arg_mutex);
53237+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53238+
53239+ if (unlikely(argv == NULL))
53240+ goto log;
53241+
53242+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53243+ const char __user *p;
53244+ unsigned int len;
53245+
53246+ if (copy_from_user(&p, argv + i, sizeof(p)))
53247+ goto log;
53248+ if (!p)
53249+ goto log;
53250+ len = strnlen_user(p, 128 - execlen);
53251+ if (len > 128 - execlen)
53252+ len = 128 - execlen;
53253+ else if (len > 0)
53254+ len--;
53255+ if (copy_from_user(grarg + execlen, p, len))
53256+ goto log;
53257+
53258+ /* rewrite unprintable characters */
53259+ for (x = 0; x < len; x++) {
53260+ c = *(grarg + execlen + x);
53261+ if (c < 32 || c > 126)
53262+ *(grarg + execlen + x) = ' ';
53263+ }
53264+
53265+ execlen += len;
53266+ *(grarg + execlen) = ' ';
53267+ *(grarg + execlen + 1) = '\0';
53268+ execlen++;
53269+ }
53270+
53271+ log:
53272+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53273+ bprm->file->f_path.mnt, grarg);
53274+ mutex_unlock(&gr_exec_arg_mutex);
53275+#endif
53276+ return;
53277+}
53278+
53279+#ifdef CONFIG_COMPAT
53280+void
53281+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
53282+{
53283+#ifdef CONFIG_GRKERNSEC_EXECLOG
53284+ char *grarg = gr_exec_arg_buf;
53285+ unsigned int i, x, execlen = 0;
53286+ char c;
53287+
53288+ if (!((grsec_enable_execlog && grsec_enable_group &&
53289+ in_group_p(grsec_audit_gid))
53290+ || (grsec_enable_execlog && !grsec_enable_group)))
53291+ return;
53292+
53293+ mutex_lock(&gr_exec_arg_mutex);
53294+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53295+
53296+ if (unlikely(argv == NULL))
53297+ goto log;
53298+
53299+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53300+ compat_uptr_t p;
53301+ unsigned int len;
53302+
53303+ if (get_user(p, argv + i))
53304+ goto log;
53305+ len = strnlen_user(compat_ptr(p), 128 - execlen);
53306+ if (len > 128 - execlen)
53307+ len = 128 - execlen;
53308+ else if (len > 0)
53309+ len--;
53310+ else
53311+ goto log;
53312+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
53313+ goto log;
53314+
53315+ /* rewrite unprintable characters */
53316+ for (x = 0; x < len; x++) {
53317+ c = *(grarg + execlen + x);
53318+ if (c < 32 || c > 126)
53319+ *(grarg + execlen + x) = ' ';
53320+ }
53321+
53322+ execlen += len;
53323+ *(grarg + execlen) = ' ';
53324+ *(grarg + execlen + 1) = '\0';
53325+ execlen++;
53326+ }
53327+
53328+ log:
53329+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53330+ bprm->file->f_path.mnt, grarg);
53331+ mutex_unlock(&gr_exec_arg_mutex);
53332+#endif
53333+ return;
53334+}
53335+#endif
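
The exec-logging code above copies argv from userspace into a fixed 128-byte window, truncating once the window is full and rewriting unprintable bytes so the result is safe to emit in a single log line. A userspace sketch of that formatting loop (illustrative only, not part of the patch):

#include <stdio.h>
#include <string.h>

#define LOG_MAX 128

/* Concatenate argv into out, space-separated, capped at LOG_MAX bytes of
 * argument text, with unprintable characters replaced by spaces. */
static void format_args(char *out, size_t out_sz, int argc, char **argv)
{
	size_t used = 0;
	int i;

	memset(out, 0, out_sz);
	for (i = 0; i < argc && used < LOG_MAX && used + 1 < out_sz; i++) {
		size_t len = strlen(argv[i]), x;

		if (len > LOG_MAX - used)
			len = LOG_MAX - used;
		if (len > out_sz - used - 2)
			len = out_sz - used - 2;
		memcpy(out + used, argv[i], len);
		for (x = 0; x < len; x++)	/* rewrite unprintable characters */
			if (out[used + x] < 32 || out[used + x] > 126)
				out[used + x] = ' ';
		used += len;
		out[used++] = ' ';
		out[used] = '\0';
	}
}

int main(int argc, char **argv)
{
	char line[LOG_MAX + 2];

	format_args(line, sizeof(line), argc, argv);
	printf("exec: %s\n", line);
	return 0;
}
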
53336diff -urNp linux-2.6.32.45/grsecurity/grsec_fifo.c linux-2.6.32.45/grsecurity/grsec_fifo.c
53337--- linux-2.6.32.45/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53338+++ linux-2.6.32.45/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
53339@@ -0,0 +1,24 @@
53340+#include <linux/kernel.h>
53341+#include <linux/sched.h>
53342+#include <linux/fs.h>
53343+#include <linux/file.h>
53344+#include <linux/grinternal.h>
53345+
53346+int
53347+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53348+ const struct dentry *dir, const int flag, const int acc_mode)
53349+{
53350+#ifdef CONFIG_GRKERNSEC_FIFO
53351+ const struct cred *cred = current_cred();
53352+
53353+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53354+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53355+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53356+ (cred->fsuid != dentry->d_inode->i_uid)) {
53357+ if (!inode_permission(dentry->d_inode, acc_mode))
53358+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53359+ return -EACCES;
53360+ }
53361+#endif
53362+ return 0;
53363+}
53364diff -urNp linux-2.6.32.45/grsecurity/grsec_fork.c linux-2.6.32.45/grsecurity/grsec_fork.c
53365--- linux-2.6.32.45/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53366+++ linux-2.6.32.45/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
53367@@ -0,0 +1,23 @@
53368+#include <linux/kernel.h>
53369+#include <linux/sched.h>
53370+#include <linux/grsecurity.h>
53371+#include <linux/grinternal.h>
53372+#include <linux/errno.h>
53373+
53374+void
53375+gr_log_forkfail(const int retval)
53376+{
53377+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53378+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53379+ switch (retval) {
53380+ case -EAGAIN:
53381+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53382+ break;
53383+ case -ENOMEM:
53384+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53385+ break;
53386+ }
53387+ }
53388+#endif
53389+ return;
53390+}
53391diff -urNp linux-2.6.32.45/grsecurity/grsec_init.c linux-2.6.32.45/grsecurity/grsec_init.c
53392--- linux-2.6.32.45/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53393+++ linux-2.6.32.45/grsecurity/grsec_init.c 2011-08-11 19:57:42.000000000 -0400
53394@@ -0,0 +1,270 @@
53395+#include <linux/kernel.h>
53396+#include <linux/sched.h>
53397+#include <linux/mm.h>
53398+#include <linux/smp_lock.h>
53399+#include <linux/gracl.h>
53400+#include <linux/slab.h>
53401+#include <linux/vmalloc.h>
53402+#include <linux/percpu.h>
53403+#include <linux/module.h>
53404+
53405+int grsec_enable_brute;
53406+int grsec_enable_link;
53407+int grsec_enable_dmesg;
53408+int grsec_enable_harden_ptrace;
53409+int grsec_enable_fifo;
53410+int grsec_enable_execlog;
53411+int grsec_enable_signal;
53412+int grsec_enable_forkfail;
53413+int grsec_enable_audit_ptrace;
53414+int grsec_enable_time;
53415+int grsec_enable_audit_textrel;
53416+int grsec_enable_group;
53417+int grsec_audit_gid;
53418+int grsec_enable_chdir;
53419+int grsec_enable_mount;
53420+int grsec_enable_rofs;
53421+int grsec_enable_chroot_findtask;
53422+int grsec_enable_chroot_mount;
53423+int grsec_enable_chroot_shmat;
53424+int grsec_enable_chroot_fchdir;
53425+int grsec_enable_chroot_double;
53426+int grsec_enable_chroot_pivot;
53427+int grsec_enable_chroot_chdir;
53428+int grsec_enable_chroot_chmod;
53429+int grsec_enable_chroot_mknod;
53430+int grsec_enable_chroot_nice;
53431+int grsec_enable_chroot_execlog;
53432+int grsec_enable_chroot_caps;
53433+int grsec_enable_chroot_sysctl;
53434+int grsec_enable_chroot_unix;
53435+int grsec_enable_tpe;
53436+int grsec_tpe_gid;
53437+int grsec_enable_blackhole;
53438+#ifdef CONFIG_IPV6_MODULE
53439+EXPORT_SYMBOL(grsec_enable_blackhole);
53440+#endif
53441+int grsec_lastack_retries;
53442+int grsec_enable_tpe_all;
53443+int grsec_enable_tpe_invert;
53444+int grsec_enable_socket_all;
53445+int grsec_socket_all_gid;
53446+int grsec_enable_socket_client;
53447+int grsec_socket_client_gid;
53448+int grsec_enable_socket_server;
53449+int grsec_socket_server_gid;
53450+int grsec_resource_logging;
53451+int grsec_disable_privio;
53452+int grsec_enable_log_rwxmaps;
53453+int grsec_lock;
53454+
53455+DEFINE_SPINLOCK(grsec_alert_lock);
53456+unsigned long grsec_alert_wtime = 0;
53457+unsigned long grsec_alert_fyet = 0;
53458+
53459+DEFINE_SPINLOCK(grsec_audit_lock);
53460+
53461+DEFINE_RWLOCK(grsec_exec_file_lock);
53462+
53463+char *gr_shared_page[4];
53464+
53465+char *gr_alert_log_fmt;
53466+char *gr_audit_log_fmt;
53467+char *gr_alert_log_buf;
53468+char *gr_audit_log_buf;
53469+
53470+extern struct gr_arg *gr_usermode;
53471+extern unsigned char *gr_system_salt;
53472+extern unsigned char *gr_system_sum;
53473+
53474+void __init
53475+grsecurity_init(void)
53476+{
53477+ int j;
53478+ /* create the per-cpu shared pages */
53479+
53480+#ifdef CONFIG_X86
53481+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
53482+#endif
53483+
53484+ for (j = 0; j < 4; j++) {
53485+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
53486+ if (gr_shared_page[j] == NULL) {
53487+ panic("Unable to allocate grsecurity shared page");
53488+ return;
53489+ }
53490+ }
53491+
53492+ /* allocate log buffers */
53493+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
53494+ if (!gr_alert_log_fmt) {
53495+ panic("Unable to allocate grsecurity alert log format buffer");
53496+ return;
53497+ }
53498+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
53499+ if (!gr_audit_log_fmt) {
53500+ panic("Unable to allocate grsecurity audit log format buffer");
53501+ return;
53502+ }
53503+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53504+ if (!gr_alert_log_buf) {
53505+ panic("Unable to allocate grsecurity alert log buffer");
53506+ return;
53507+ }
53508+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53509+ if (!gr_audit_log_buf) {
53510+ panic("Unable to allocate grsecurity audit log buffer");
53511+ return;
53512+ }
53513+
53514+ /* allocate memory for authentication structure */
53515+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
53516+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
53517+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
53518+
53519+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
53520+ panic("Unable to allocate grsecurity authentication structure");
53521+ return;
53522+ }
53523+
53524+
53525+#ifdef CONFIG_GRKERNSEC_IO
53526+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
53527+ grsec_disable_privio = 1;
53528+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53529+ grsec_disable_privio = 1;
53530+#else
53531+ grsec_disable_privio = 0;
53532+#endif
53533+#endif
53534+
53535+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53536+ /* for backward compatibility, tpe_invert always defaults to on if
53537+ enabled in the kernel
53538+ */
53539+ grsec_enable_tpe_invert = 1;
53540+#endif
53541+
53542+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53543+#ifndef CONFIG_GRKERNSEC_SYSCTL
53544+ grsec_lock = 1;
53545+#endif
53546+
53547+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53548+ grsec_enable_audit_textrel = 1;
53549+#endif
53550+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53551+ grsec_enable_log_rwxmaps = 1;
53552+#endif
53553+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53554+ grsec_enable_group = 1;
53555+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
53556+#endif
53557+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53558+ grsec_enable_chdir = 1;
53559+#endif
53560+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53561+ grsec_enable_harden_ptrace = 1;
53562+#endif
53563+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53564+ grsec_enable_mount = 1;
53565+#endif
53566+#ifdef CONFIG_GRKERNSEC_LINK
53567+ grsec_enable_link = 1;
53568+#endif
53569+#ifdef CONFIG_GRKERNSEC_BRUTE
53570+ grsec_enable_brute = 1;
53571+#endif
53572+#ifdef CONFIG_GRKERNSEC_DMESG
53573+ grsec_enable_dmesg = 1;
53574+#endif
53575+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53576+ grsec_enable_blackhole = 1;
53577+ grsec_lastack_retries = 4;
53578+#endif
53579+#ifdef CONFIG_GRKERNSEC_FIFO
53580+ grsec_enable_fifo = 1;
53581+#endif
53582+#ifdef CONFIG_GRKERNSEC_EXECLOG
53583+ grsec_enable_execlog = 1;
53584+#endif
53585+#ifdef CONFIG_GRKERNSEC_SIGNAL
53586+ grsec_enable_signal = 1;
53587+#endif
53588+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53589+ grsec_enable_forkfail = 1;
53590+#endif
53591+#ifdef CONFIG_GRKERNSEC_TIME
53592+ grsec_enable_time = 1;
53593+#endif
53594+#ifdef CONFIG_GRKERNSEC_RESLOG
53595+ grsec_resource_logging = 1;
53596+#endif
53597+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53598+ grsec_enable_chroot_findtask = 1;
53599+#endif
53600+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53601+ grsec_enable_chroot_unix = 1;
53602+#endif
53603+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53604+ grsec_enable_chroot_mount = 1;
53605+#endif
53606+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53607+ grsec_enable_chroot_fchdir = 1;
53608+#endif
53609+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53610+ grsec_enable_chroot_shmat = 1;
53611+#endif
53612+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53613+ grsec_enable_audit_ptrace = 1;
53614+#endif
53615+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53616+ grsec_enable_chroot_double = 1;
53617+#endif
53618+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53619+ grsec_enable_chroot_pivot = 1;
53620+#endif
53621+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53622+ grsec_enable_chroot_chdir = 1;
53623+#endif
53624+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53625+ grsec_enable_chroot_chmod = 1;
53626+#endif
53627+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53628+ grsec_enable_chroot_mknod = 1;
53629+#endif
53630+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53631+ grsec_enable_chroot_nice = 1;
53632+#endif
53633+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53634+ grsec_enable_chroot_execlog = 1;
53635+#endif
53636+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53637+ grsec_enable_chroot_caps = 1;
53638+#endif
53639+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53640+ grsec_enable_chroot_sysctl = 1;
53641+#endif
53642+#ifdef CONFIG_GRKERNSEC_TPE
53643+ grsec_enable_tpe = 1;
53644+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
53645+#ifdef CONFIG_GRKERNSEC_TPE_ALL
53646+ grsec_enable_tpe_all = 1;
53647+#endif
53648+#endif
53649+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53650+ grsec_enable_socket_all = 1;
53651+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
53652+#endif
53653+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53654+ grsec_enable_socket_client = 1;
53655+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
53656+#endif
53657+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53658+ grsec_enable_socket_server = 1;
53659+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
53660+#endif
53661+#endif
53662+
53663+ return;
53664+}
53665diff -urNp linux-2.6.32.45/grsecurity/grsec_link.c linux-2.6.32.45/grsecurity/grsec_link.c
53666--- linux-2.6.32.45/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
53667+++ linux-2.6.32.45/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
53668@@ -0,0 +1,43 @@
53669+#include <linux/kernel.h>
53670+#include <linux/sched.h>
53671+#include <linux/fs.h>
53672+#include <linux/file.h>
53673+#include <linux/grinternal.h>
53674+
53675+int
53676+gr_handle_follow_link(const struct inode *parent,
53677+ const struct inode *inode,
53678+ const struct dentry *dentry, const struct vfsmount *mnt)
53679+{
53680+#ifdef CONFIG_GRKERNSEC_LINK
53681+ const struct cred *cred = current_cred();
53682+
53683+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
53684+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
53685+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
53686+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
53687+ return -EACCES;
53688+ }
53689+#endif
53690+ return 0;
53691+}
53692+
53693+int
53694+gr_handle_hardlink(const struct dentry *dentry,
53695+ const struct vfsmount *mnt,
53696+ struct inode *inode, const int mode, const char *to)
53697+{
53698+#ifdef CONFIG_GRKERNSEC_LINK
53699+ const struct cred *cred = current_cred();
53700+
53701+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
53702+ (!S_ISREG(mode) || (mode & S_ISUID) ||
53703+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
53704+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
53705+ !capable(CAP_FOWNER) && cred->uid) {
53706+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
53707+ return -EPERM;
53708+ }
53709+#endif
53710+ return 0;
53711+}
53712diff -urNp linux-2.6.32.45/grsecurity/grsec_log.c linux-2.6.32.45/grsecurity/grsec_log.c
53713--- linux-2.6.32.45/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
53714+++ linux-2.6.32.45/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
53715@@ -0,0 +1,310 @@
53716+#include <linux/kernel.h>
53717+#include <linux/sched.h>
53718+#include <linux/file.h>
53719+#include <linux/tty.h>
53720+#include <linux/fs.h>
53721+#include <linux/grinternal.h>
53722+
53723+#ifdef CONFIG_TREE_PREEMPT_RCU
53724+#define DISABLE_PREEMPT() preempt_disable()
53725+#define ENABLE_PREEMPT() preempt_enable()
53726+#else
53727+#define DISABLE_PREEMPT()
53728+#define ENABLE_PREEMPT()
53729+#endif
53730+
53731+#define BEGIN_LOCKS(x) \
53732+ DISABLE_PREEMPT(); \
53733+ rcu_read_lock(); \
53734+ read_lock(&tasklist_lock); \
53735+ read_lock(&grsec_exec_file_lock); \
53736+ if (x != GR_DO_AUDIT) \
53737+ spin_lock(&grsec_alert_lock); \
53738+ else \
53739+ spin_lock(&grsec_audit_lock)
53740+
53741+#define END_LOCKS(x) \
53742+ if (x != GR_DO_AUDIT) \
53743+ spin_unlock(&grsec_alert_lock); \
53744+ else \
53745+ spin_unlock(&grsec_audit_lock); \
53746+ read_unlock(&grsec_exec_file_lock); \
53747+ read_unlock(&tasklist_lock); \
53748+ rcu_read_unlock(); \
53749+ ENABLE_PREEMPT(); \
53750+ if (x == GR_DONT_AUDIT) \
53751+ gr_handle_alertkill(current)
53752+
53753+enum {
53754+ FLOODING,
53755+ NO_FLOODING
53756+};
53757+
53758+extern char *gr_alert_log_fmt;
53759+extern char *gr_audit_log_fmt;
53760+extern char *gr_alert_log_buf;
53761+extern char *gr_audit_log_buf;
53762+
53763+static int gr_log_start(int audit)
53764+{
53765+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
53766+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
53767+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53768+
53769+ if (audit == GR_DO_AUDIT)
53770+ goto set_fmt;
53771+
53772+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
53773+ grsec_alert_wtime = jiffies;
53774+ grsec_alert_fyet = 0;
53775+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
53776+ grsec_alert_fyet++;
53777+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
53778+ grsec_alert_wtime = jiffies;
53779+ grsec_alert_fyet++;
53780+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
53781+ return FLOODING;
53782+ } else return FLOODING;
53783+
53784+set_fmt:
53785+ memset(buf, 0, PAGE_SIZE);
53786+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
53787+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
53788+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53789+ } else if (current->signal->curr_ip) {
53790+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
53791+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
53792+ } else if (gr_acl_is_enabled()) {
53793+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
53794+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53795+ } else {
53796+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
53797+ strcpy(buf, fmt);
53798+ }
53799+
53800+ return NO_FLOODING;
53801+}
53802+
53803+static void gr_log_middle(int audit, const char *msg, va_list ap)
53804+ __attribute__ ((format (printf, 2, 0)));
53805+
53806+static void gr_log_middle(int audit, const char *msg, va_list ap)
53807+{
53808+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53809+ unsigned int len = strlen(buf);
53810+
53811+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53812+
53813+ return;
53814+}
53815+
53816+static void gr_log_middle_varargs(int audit, const char *msg, ...)
53817+ __attribute__ ((format (printf, 2, 3)));
53818+
53819+static void gr_log_middle_varargs(int audit, const char *msg, ...)
53820+{
53821+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53822+ unsigned int len = strlen(buf);
53823+ va_list ap;
53824+
53825+ va_start(ap, msg);
53826+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53827+ va_end(ap);
53828+
53829+ return;
53830+}
53831+
53832+static void gr_log_end(int audit)
53833+{
53834+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53835+ unsigned int len = strlen(buf);
53836+
53837+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
53838+ printk("%s\n", buf);
53839+
53840+ return;
53841+}
53842+
53843+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
53844+{
53845+ int logtype;
53846+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
53847+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
53848+ void *voidptr = NULL;
53849+ int num1 = 0, num2 = 0;
53850+ unsigned long ulong1 = 0, ulong2 = 0;
53851+ struct dentry *dentry = NULL;
53852+ struct vfsmount *mnt = NULL;
53853+ struct file *file = NULL;
53854+ struct task_struct *task = NULL;
53855+ const struct cred *cred, *pcred;
53856+ va_list ap;
53857+
53858+ BEGIN_LOCKS(audit);
53859+ logtype = gr_log_start(audit);
53860+ if (logtype == FLOODING) {
53861+ END_LOCKS(audit);
53862+ return;
53863+ }
53864+ va_start(ap, argtypes);
53865+ switch (argtypes) {
53866+ case GR_TTYSNIFF:
53867+ task = va_arg(ap, struct task_struct *);
53868+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
53869+ break;
53870+ case GR_SYSCTL_HIDDEN:
53871+ str1 = va_arg(ap, char *);
53872+ gr_log_middle_varargs(audit, msg, result, str1);
53873+ break;
53874+ case GR_RBAC:
53875+ dentry = va_arg(ap, struct dentry *);
53876+ mnt = va_arg(ap, struct vfsmount *);
53877+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
53878+ break;
53879+ case GR_RBAC_STR:
53880+ dentry = va_arg(ap, struct dentry *);
53881+ mnt = va_arg(ap, struct vfsmount *);
53882+ str1 = va_arg(ap, char *);
53883+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
53884+ break;
53885+ case GR_STR_RBAC:
53886+ str1 = va_arg(ap, char *);
53887+ dentry = va_arg(ap, struct dentry *);
53888+ mnt = va_arg(ap, struct vfsmount *);
53889+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
53890+ break;
53891+ case GR_RBAC_MODE2:
53892+ dentry = va_arg(ap, struct dentry *);
53893+ mnt = va_arg(ap, struct vfsmount *);
53894+ str1 = va_arg(ap, char *);
53895+ str2 = va_arg(ap, char *);
53896+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
53897+ break;
53898+ case GR_RBAC_MODE3:
53899+ dentry = va_arg(ap, struct dentry *);
53900+ mnt = va_arg(ap, struct vfsmount *);
53901+ str1 = va_arg(ap, char *);
53902+ str2 = va_arg(ap, char *);
53903+ str3 = va_arg(ap, char *);
53904+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
53905+ break;
53906+ case GR_FILENAME:
53907+ dentry = va_arg(ap, struct dentry *);
53908+ mnt = va_arg(ap, struct vfsmount *);
53909+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
53910+ break;
53911+ case GR_STR_FILENAME:
53912+ str1 = va_arg(ap, char *);
53913+ dentry = va_arg(ap, struct dentry *);
53914+ mnt = va_arg(ap, struct vfsmount *);
53915+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
53916+ break;
53917+ case GR_FILENAME_STR:
53918+ dentry = va_arg(ap, struct dentry *);
53919+ mnt = va_arg(ap, struct vfsmount *);
53920+ str1 = va_arg(ap, char *);
53921+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
53922+ break;
53923+ case GR_FILENAME_TWO_INT:
53924+ dentry = va_arg(ap, struct dentry *);
53925+ mnt = va_arg(ap, struct vfsmount *);
53926+ num1 = va_arg(ap, int);
53927+ num2 = va_arg(ap, int);
53928+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
53929+ break;
53930+ case GR_FILENAME_TWO_INT_STR:
53931+ dentry = va_arg(ap, struct dentry *);
53932+ mnt = va_arg(ap, struct vfsmount *);
53933+ num1 = va_arg(ap, int);
53934+ num2 = va_arg(ap, int);
53935+ str1 = va_arg(ap, char *);
53936+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
53937+ break;
53938+ case GR_TEXTREL:
53939+ file = va_arg(ap, struct file *);
53940+ ulong1 = va_arg(ap, unsigned long);
53941+ ulong2 = va_arg(ap, unsigned long);
53942+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
53943+ break;
53944+ case GR_PTRACE:
53945+ task = va_arg(ap, struct task_struct *);
53946+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
53947+ break;
53948+ case GR_RESOURCE:
53949+ task = va_arg(ap, struct task_struct *);
53950+ cred = __task_cred(task);
53951+ pcred = __task_cred(task->real_parent);
53952+ ulong1 = va_arg(ap, unsigned long);
53953+ str1 = va_arg(ap, char *);
53954+ ulong2 = va_arg(ap, unsigned long);
53955+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53956+ break;
53957+ case GR_CAP:
53958+ task = va_arg(ap, struct task_struct *);
53959+ cred = __task_cred(task);
53960+ pcred = __task_cred(task->real_parent);
53961+ str1 = va_arg(ap, char *);
53962+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53963+ break;
53964+ case GR_SIG:
53965+ str1 = va_arg(ap, char *);
53966+ voidptr = va_arg(ap, void *);
53967+ gr_log_middle_varargs(audit, msg, str1, voidptr);
53968+ break;
53969+ case GR_SIG2:
53970+ task = va_arg(ap, struct task_struct *);
53971+ cred = __task_cred(task);
53972+ pcred = __task_cred(task->real_parent);
53973+ num1 = va_arg(ap, int);
53974+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53975+ break;
53976+ case GR_CRASH1:
53977+ task = va_arg(ap, struct task_struct *);
53978+ cred = __task_cred(task);
53979+ pcred = __task_cred(task->real_parent);
53980+ ulong1 = va_arg(ap, unsigned long);
53981+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
53982+ break;
53983+ case GR_CRASH2:
53984+ task = va_arg(ap, struct task_struct *);
53985+ cred = __task_cred(task);
53986+ pcred = __task_cred(task->real_parent);
53987+ ulong1 = va_arg(ap, unsigned long);
53988+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
53989+ break;
53990+ case GR_RWXMAP:
53991+ file = va_arg(ap, struct file *);
53992+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
53993+ break;
53994+ case GR_PSACCT:
53995+ {
53996+ unsigned int wday, cday;
53997+ __u8 whr, chr;
53998+ __u8 wmin, cmin;
53999+ __u8 wsec, csec;
54000+ char cur_tty[64] = { 0 };
54001+ char parent_tty[64] = { 0 };
54002+
54003+ task = va_arg(ap, struct task_struct *);
54004+ wday = va_arg(ap, unsigned int);
54005+ cday = va_arg(ap, unsigned int);
54006+ whr = va_arg(ap, int);
54007+ chr = va_arg(ap, int);
54008+ wmin = va_arg(ap, int);
54009+ cmin = va_arg(ap, int);
54010+ wsec = va_arg(ap, int);
54011+ csec = va_arg(ap, int);
54012+ ulong1 = va_arg(ap, unsigned long);
54013+ cred = __task_cred(task);
54014+ pcred = __task_cred(task->real_parent);
54015+
54016+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54017+ }
54018+ break;
54019+ default:
54020+ gr_log_middle(audit, msg, ap);
54021+ }
54022+ va_end(ap);
54023+ gr_log_end(audit);
54024+ END_LOCKS(audit);
54025+}
54026diff -urNp linux-2.6.32.45/grsecurity/grsec_mem.c linux-2.6.32.45/grsecurity/grsec_mem.c
54027--- linux-2.6.32.45/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54028+++ linux-2.6.32.45/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
54029@@ -0,0 +1,33 @@
54030+#include <linux/kernel.h>
54031+#include <linux/sched.h>
54032+#include <linux/mm.h>
54033+#include <linux/mman.h>
54034+#include <linux/grinternal.h>
54035+
54036+void
54037+gr_handle_ioperm(void)
54038+{
54039+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54040+ return;
54041+}
54042+
54043+void
54044+gr_handle_iopl(void)
54045+{
54046+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54047+ return;
54048+}
54049+
54050+void
54051+gr_handle_mem_readwrite(u64 from, u64 to)
54052+{
54053+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54054+ return;
54055+}
54056+
54057+void
54058+gr_handle_vm86(void)
54059+{
54060+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54061+ return;
54062+}
54063diff -urNp linux-2.6.32.45/grsecurity/grsec_mount.c linux-2.6.32.45/grsecurity/grsec_mount.c
54064--- linux-2.6.32.45/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54065+++ linux-2.6.32.45/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
54066@@ -0,0 +1,62 @@
54067+#include <linux/kernel.h>
54068+#include <linux/sched.h>
54069+#include <linux/mount.h>
54070+#include <linux/grsecurity.h>
54071+#include <linux/grinternal.h>
54072+
54073+void
54074+gr_log_remount(const char *devname, const int retval)
54075+{
54076+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54077+ if (grsec_enable_mount && (retval >= 0))
54078+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54079+#endif
54080+ return;
54081+}
54082+
54083+void
54084+gr_log_unmount(const char *devname, const int retval)
54085+{
54086+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54087+ if (grsec_enable_mount && (retval >= 0))
54088+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54089+#endif
54090+ return;
54091+}
54092+
54093+void
54094+gr_log_mount(const char *from, const char *to, const int retval)
54095+{
54096+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54097+ if (grsec_enable_mount && (retval >= 0))
54098+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54099+#endif
54100+ return;
54101+}
54102+
54103+int
54104+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54105+{
54106+#ifdef CONFIG_GRKERNSEC_ROFS
54107+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54108+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54109+ return -EPERM;
54110+ } else
54111+ return 0;
54112+#endif
54113+ return 0;
54114+}
54115+
54116+int
54117+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54118+{
54119+#ifdef CONFIG_GRKERNSEC_ROFS
54120+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54121+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54122+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54123+ return -EPERM;
54124+ } else
54125+ return 0;
54126+#endif
54127+ return 0;
54128+}
54129diff -urNp linux-2.6.32.45/grsecurity/grsec_pax.c linux-2.6.32.45/grsecurity/grsec_pax.c
54130--- linux-2.6.32.45/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54131+++ linux-2.6.32.45/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
54132@@ -0,0 +1,36 @@
54133+#include <linux/kernel.h>
54134+#include <linux/sched.h>
54135+#include <linux/mm.h>
54136+#include <linux/file.h>
54137+#include <linux/grinternal.h>
54138+#include <linux/grsecurity.h>
54139+
54140+void
54141+gr_log_textrel(struct vm_area_struct * vma)
54142+{
54143+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54144+ if (grsec_enable_audit_textrel)
54145+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54146+#endif
54147+ return;
54148+}
54149+
54150+void
54151+gr_log_rwxmmap(struct file *file)
54152+{
54153+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54154+ if (grsec_enable_log_rwxmaps)
54155+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54156+#endif
54157+ return;
54158+}
54159+
54160+void
54161+gr_log_rwxmprotect(struct file *file)
54162+{
54163+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54164+ if (grsec_enable_log_rwxmaps)
54165+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54166+#endif
54167+ return;
54168+}
54169diff -urNp linux-2.6.32.45/grsecurity/grsec_ptrace.c linux-2.6.32.45/grsecurity/grsec_ptrace.c
54170--- linux-2.6.32.45/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54171+++ linux-2.6.32.45/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
54172@@ -0,0 +1,14 @@
54173+#include <linux/kernel.h>
54174+#include <linux/sched.h>
54175+#include <linux/grinternal.h>
54176+#include <linux/grsecurity.h>
54177+
54178+void
54179+gr_audit_ptrace(struct task_struct *task)
54180+{
54181+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54182+ if (grsec_enable_audit_ptrace)
54183+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54184+#endif
54185+ return;
54186+}
54187diff -urNp linux-2.6.32.45/grsecurity/grsec_sig.c linux-2.6.32.45/grsecurity/grsec_sig.c
54188--- linux-2.6.32.45/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54189+++ linux-2.6.32.45/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
54190@@ -0,0 +1,205 @@
54191+#include <linux/kernel.h>
54192+#include <linux/sched.h>
54193+#include <linux/delay.h>
54194+#include <linux/grsecurity.h>
54195+#include <linux/grinternal.h>
54196+#include <linux/hardirq.h>
54197+
54198+char *signames[] = {
54199+ [SIGSEGV] = "Segmentation fault",
54200+ [SIGILL] = "Illegal instruction",
54201+ [SIGABRT] = "Abort",
54202+ [SIGBUS] = "Invalid alignment/Bus error"
54203+};
54204+
54205+void
54206+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54207+{
54208+#ifdef CONFIG_GRKERNSEC_SIGNAL
54209+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54210+ (sig == SIGABRT) || (sig == SIGBUS))) {
54211+ if (t->pid == current->pid) {
54212+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54213+ } else {
54214+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54215+ }
54216+ }
54217+#endif
54218+ return;
54219+}
54220+
54221+int
54222+gr_handle_signal(const struct task_struct *p, const int sig)
54223+{
54224+#ifdef CONFIG_GRKERNSEC
54225+ if (current->pid > 1 && gr_check_protected_task(p)) {
54226+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54227+ return -EPERM;
54228+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54229+ return -EPERM;
54230+ }
54231+#endif
54232+ return 0;
54233+}
54234+
54235+#ifdef CONFIG_GRKERNSEC
54236+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54237+
54238+int gr_fake_force_sig(int sig, struct task_struct *t)
54239+{
54240+ unsigned long int flags;
54241+ int ret, blocked, ignored;
54242+ struct k_sigaction *action;
54243+
54244+ spin_lock_irqsave(&t->sighand->siglock, flags);
54245+ action = &t->sighand->action[sig-1];
54246+ ignored = action->sa.sa_handler == SIG_IGN;
54247+ blocked = sigismember(&t->blocked, sig);
54248+ if (blocked || ignored) {
54249+ action->sa.sa_handler = SIG_DFL;
54250+ if (blocked) {
54251+ sigdelset(&t->blocked, sig);
54252+ recalc_sigpending_and_wake(t);
54253+ }
54254+ }
54255+ if (action->sa.sa_handler == SIG_DFL)
54256+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
54257+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54258+
54259+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
54260+
54261+ return ret;
54262+}
54263+#endif
54264+
54265+#ifdef CONFIG_GRKERNSEC_BRUTE
54266+#define GR_USER_BAN_TIME (15 * 60)
54267+
54268+static int __get_dumpable(unsigned long mm_flags)
54269+{
54270+ int ret;
54271+
54272+ ret = mm_flags & MMF_DUMPABLE_MASK;
54273+ return (ret >= 2) ? 2 : ret;
54274+}
54275+#endif
54276+
54277+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54278+{
54279+#ifdef CONFIG_GRKERNSEC_BRUTE
54280+ uid_t uid = 0;
54281+
54282+ if (!grsec_enable_brute)
54283+ return;
54284+
54285+ rcu_read_lock();
54286+ read_lock(&tasklist_lock);
54287+ read_lock(&grsec_exec_file_lock);
54288+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54289+ p->real_parent->brute = 1;
54290+ else {
54291+ const struct cred *cred = __task_cred(p), *cred2;
54292+ struct task_struct *tsk, *tsk2;
54293+
54294+ if (!__get_dumpable(mm_flags) && cred->uid) {
54295+ struct user_struct *user;
54296+
54297+ uid = cred->uid;
54298+
54299+ /* this is put upon execution past expiration */
54300+ user = find_user(uid);
54301+ if (user == NULL)
54302+ goto unlock;
54303+ user->banned = 1;
54304+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54305+ if (user->ban_expires == ~0UL)
54306+ user->ban_expires--;
54307+
54308+ do_each_thread(tsk2, tsk) {
54309+ cred2 = __task_cred(tsk);
54310+ if (tsk != p && cred2->uid == uid)
54311+ gr_fake_force_sig(SIGKILL, tsk);
54312+ } while_each_thread(tsk2, tsk);
54313+ }
54314+ }
54315+unlock:
54316+ read_unlock(&grsec_exec_file_lock);
54317+ read_unlock(&tasklist_lock);
54318+ rcu_read_unlock();
54319+
54320+ if (uid)
54321+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54322+#endif
54323+ return;
54324+}
54325+
54326+void gr_handle_brute_check(void)
54327+{
54328+#ifdef CONFIG_GRKERNSEC_BRUTE
54329+ if (current->brute)
54330+ msleep(30 * 1000);
54331+#endif
54332+ return;
54333+}
54334+
54335+void gr_handle_kernel_exploit(void)
54336+{
54337+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54338+ const struct cred *cred;
54339+ struct task_struct *tsk, *tsk2;
54340+ struct user_struct *user;
54341+ uid_t uid;
54342+
54343+ if (in_irq() || in_serving_softirq() || in_nmi())
54344+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54345+
54346+ uid = current_uid();
54347+
54348+ if (uid == 0)
54349+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
54350+ else {
54351+ /* kill all the processes of this user, hold a reference
54352+ to their creds struct, and prevent them from creating
54353+ another process until system reset
54354+ */
54355+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54356+ /* we intentionally leak this ref */
54357+ user = get_uid(current->cred->user);
54358+ if (user) {
54359+ user->banned = 1;
54360+ user->ban_expires = ~0UL;
54361+ }
54362+
54363+ read_lock(&tasklist_lock);
54364+ do_each_thread(tsk2, tsk) {
54365+ cred = __task_cred(tsk);
54366+ if (cred->uid == uid)
54367+ gr_fake_force_sig(SIGKILL, tsk);
54368+ } while_each_thread(tsk2, tsk);
54369+ read_unlock(&tasklist_lock);
54370+ }
54371+#endif
54372+}
54373+
54374+int __gr_process_user_ban(struct user_struct *user)
54375+{
54376+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54377+ if (unlikely(user->banned)) {
54378+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54379+ user->banned = 0;
54380+ user->ban_expires = 0;
54381+ free_uid(user);
54382+ } else
54383+ return -EPERM;
54384+ }
54385+#endif
54386+ return 0;
54387+}
54388+
54389+int gr_process_user_ban(void)
54390+{
54391+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54392+ return __gr_process_user_ban(current->cred->user);
54393+#endif
54394+ return 0;
54395+}
54396diff -urNp linux-2.6.32.45/grsecurity/grsec_sock.c linux-2.6.32.45/grsecurity/grsec_sock.c
54397--- linux-2.6.32.45/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54398+++ linux-2.6.32.45/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
54399@@ -0,0 +1,275 @@
54400+#include <linux/kernel.h>
54401+#include <linux/module.h>
54402+#include <linux/sched.h>
54403+#include <linux/file.h>
54404+#include <linux/net.h>
54405+#include <linux/in.h>
54406+#include <linux/ip.h>
54407+#include <net/sock.h>
54408+#include <net/inet_sock.h>
54409+#include <linux/grsecurity.h>
54410+#include <linux/grinternal.h>
54411+#include <linux/gracl.h>
54412+
54413+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
54414+EXPORT_SYMBOL(gr_cap_rtnetlink);
54415+
54416+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54417+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54418+
54419+EXPORT_SYMBOL(gr_search_udp_recvmsg);
54420+EXPORT_SYMBOL(gr_search_udp_sendmsg);
54421+
54422+#ifdef CONFIG_UNIX_MODULE
54423+EXPORT_SYMBOL(gr_acl_handle_unix);
54424+EXPORT_SYMBOL(gr_acl_handle_mknod);
54425+EXPORT_SYMBOL(gr_handle_chroot_unix);
54426+EXPORT_SYMBOL(gr_handle_create);
54427+#endif
54428+
54429+#ifdef CONFIG_GRKERNSEC
54430+#define gr_conn_table_size 32749
54431+struct conn_table_entry {
54432+ struct conn_table_entry *next;
54433+ struct signal_struct *sig;
54434+};
54435+
54436+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
54437+DEFINE_SPINLOCK(gr_conn_table_lock);
54438+
54439+extern const char * gr_socktype_to_name(unsigned char type);
54440+extern const char * gr_proto_to_name(unsigned char proto);
54441+extern const char * gr_sockfamily_to_name(unsigned char family);
54442+
54443+static __inline__ int
54444+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
54445+{
54446+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
54447+}
54448+
54449+static __inline__ int
54450+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
54451+ __u16 sport, __u16 dport)
54452+{
54453+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
54454+ sig->gr_sport == sport && sig->gr_dport == dport))
54455+ return 1;
54456+ else
54457+ return 0;
54458+}
54459+
54460+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
54461+{
54462+ struct conn_table_entry **match;
54463+ unsigned int index;
54464+
54465+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54466+ sig->gr_sport, sig->gr_dport,
54467+ gr_conn_table_size);
54468+
54469+ newent->sig = sig;
54470+
54471+ match = &gr_conn_table[index];
54472+ newent->next = *match;
54473+ *match = newent;
54474+
54475+ return;
54476+}
54477+
54478+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
54479+{
54480+ struct conn_table_entry *match, *last = NULL;
54481+ unsigned int index;
54482+
54483+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54484+ sig->gr_sport, sig->gr_dport,
54485+ gr_conn_table_size);
54486+
54487+ match = gr_conn_table[index];
54488+ while (match && !conn_match(match->sig,
54489+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
54490+ sig->gr_dport)) {
54491+ last = match;
54492+ match = match->next;
54493+ }
54494+
54495+ if (match) {
54496+ if (last)
54497+ last->next = match->next;
54498+ else
54499+ gr_conn_table[index] = NULL;
54500+ kfree(match);
54501+ }
54502+
54503+ return;
54504+}
54505+
54506+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
54507+ __u16 sport, __u16 dport)
54508+{
54509+ struct conn_table_entry *match;
54510+ unsigned int index;
54511+
54512+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
54513+
54514+ match = gr_conn_table[index];
54515+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
54516+ match = match->next;
54517+
54518+ if (match)
54519+ return match->sig;
54520+ else
54521+ return NULL;
54522+}
54523+
54524+#endif
54525+
54526+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
54527+{
54528+#ifdef CONFIG_GRKERNSEC
54529+ struct signal_struct *sig = task->signal;
54530+ struct conn_table_entry *newent;
54531+
54532+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
54533+ if (newent == NULL)
54534+ return;
54535+ /* no bh lock needed since we are called with bh disabled */
54536+ spin_lock(&gr_conn_table_lock);
54537+ gr_del_task_from_ip_table_nolock(sig);
54538+ sig->gr_saddr = inet->rcv_saddr;
54539+ sig->gr_daddr = inet->daddr;
54540+ sig->gr_sport = inet->sport;
54541+ sig->gr_dport = inet->dport;
54542+ gr_add_to_task_ip_table_nolock(sig, newent);
54543+ spin_unlock(&gr_conn_table_lock);
54544+#endif
54545+ return;
54546+}
54547+
54548+void gr_del_task_from_ip_table(struct task_struct *task)
54549+{
54550+#ifdef CONFIG_GRKERNSEC
54551+ spin_lock_bh(&gr_conn_table_lock);
54552+ gr_del_task_from_ip_table_nolock(task->signal);
54553+ spin_unlock_bh(&gr_conn_table_lock);
54554+#endif
54555+ return;
54556+}
54557+
54558+void
54559+gr_attach_curr_ip(const struct sock *sk)
54560+{
54561+#ifdef CONFIG_GRKERNSEC
54562+ struct signal_struct *p, *set;
54563+ const struct inet_sock *inet = inet_sk(sk);
54564+
54565+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
54566+ return;
54567+
54568+ set = current->signal;
54569+
54570+ spin_lock_bh(&gr_conn_table_lock);
54571+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
54572+ inet->dport, inet->sport);
54573+ if (unlikely(p != NULL)) {
54574+ set->curr_ip = p->curr_ip;
54575+ set->used_accept = 1;
54576+ gr_del_task_from_ip_table_nolock(p);
54577+ spin_unlock_bh(&gr_conn_table_lock);
54578+ return;
54579+ }
54580+ spin_unlock_bh(&gr_conn_table_lock);
54581+
54582+ set->curr_ip = inet->daddr;
54583+ set->used_accept = 1;
54584+#endif
54585+ return;
54586+}
54587+
54588+int
54589+gr_handle_sock_all(const int family, const int type, const int protocol)
54590+{
54591+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54592+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
54593+ (family != AF_UNIX)) {
54594+ if (family == AF_INET)
54595+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
54596+ else
54597+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
54598+ return -EACCES;
54599+ }
54600+#endif
54601+ return 0;
54602+}
54603+
54604+int
54605+gr_handle_sock_server(const struct sockaddr *sck)
54606+{
54607+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54608+ if (grsec_enable_socket_server &&
54609+ in_group_p(grsec_socket_server_gid) &&
54610+ sck && (sck->sa_family != AF_UNIX) &&
54611+ (sck->sa_family != AF_LOCAL)) {
54612+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54613+ return -EACCES;
54614+ }
54615+#endif
54616+ return 0;
54617+}
54618+
54619+int
54620+gr_handle_sock_server_other(const struct sock *sck)
54621+{
54622+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54623+ if (grsec_enable_socket_server &&
54624+ in_group_p(grsec_socket_server_gid) &&
54625+ sck && (sck->sk_family != AF_UNIX) &&
54626+ (sck->sk_family != AF_LOCAL)) {
54627+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54628+ return -EACCES;
54629+ }
54630+#endif
54631+ return 0;
54632+}
54633+
54634+int
54635+gr_handle_sock_client(const struct sockaddr *sck)
54636+{
54637+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54638+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
54639+ sck && (sck->sa_family != AF_UNIX) &&
54640+ (sck->sa_family != AF_LOCAL)) {
54641+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
54642+ return -EACCES;
54643+ }
54644+#endif
54645+ return 0;
54646+}
54647+
54648+kernel_cap_t
54649+gr_cap_rtnetlink(struct sock *sock)
54650+{
54651+#ifdef CONFIG_GRKERNSEC
54652+ if (!gr_acl_is_enabled())
54653+ return current_cap();
54654+ else if (sock->sk_protocol == NETLINK_ISCSI &&
54655+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
54656+ gr_is_capable(CAP_SYS_ADMIN))
54657+ return current_cap();
54658+ else if (sock->sk_protocol == NETLINK_AUDIT &&
54659+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
54660+ gr_is_capable(CAP_AUDIT_WRITE) &&
54661+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
54662+ gr_is_capable(CAP_AUDIT_CONTROL))
54663+ return current_cap();
54664+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
54665+ ((sock->sk_protocol == NETLINK_ROUTE) ?
54666+ gr_is_capable_nolog(CAP_NET_ADMIN) :
54667+ gr_is_capable(CAP_NET_ADMIN)))
54668+ return current_cap();
54669+ else
54670+ return __cap_empty_set;
54671+#else
54672+ return current_cap();
54673+#endif
54674+}
54675diff -urNp linux-2.6.32.45/grsecurity/grsec_sysctl.c linux-2.6.32.45/grsecurity/grsec_sysctl.c
54676--- linux-2.6.32.45/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
54677+++ linux-2.6.32.45/grsecurity/grsec_sysctl.c 2011-08-11 19:57:54.000000000 -0400
54678@@ -0,0 +1,479 @@
54679+#include <linux/kernel.h>
54680+#include <linux/sched.h>
54681+#include <linux/sysctl.h>
54682+#include <linux/grsecurity.h>
54683+#include <linux/grinternal.h>
54684+
54685+int
54686+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
54687+{
54688+#ifdef CONFIG_GRKERNSEC_SYSCTL
54689+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
54690+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
54691+ return -EACCES;
54692+ }
54693+#endif
54694+ return 0;
54695+}
54696+
54697+#ifdef CONFIG_GRKERNSEC_ROFS
54698+static int __maybe_unused one = 1;
54699+#endif
54700+
54701+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
54702+ctl_table grsecurity_table[] = {
54703+#ifdef CONFIG_GRKERNSEC_SYSCTL
54704+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
54705+#ifdef CONFIG_GRKERNSEC_IO
54706+ {
54707+ .ctl_name = CTL_UNNUMBERED,
54708+ .procname = "disable_priv_io",
54709+ .data = &grsec_disable_privio,
54710+ .maxlen = sizeof(int),
54711+ .mode = 0600,
54712+ .proc_handler = &proc_dointvec,
54713+ },
54714+#endif
54715+#endif
54716+#ifdef CONFIG_GRKERNSEC_LINK
54717+ {
54718+ .ctl_name = CTL_UNNUMBERED,
54719+ .procname = "linking_restrictions",
54720+ .data = &grsec_enable_link,
54721+ .maxlen = sizeof(int),
54722+ .mode = 0600,
54723+ .proc_handler = &proc_dointvec,
54724+ },
54725+#endif
54726+#ifdef CONFIG_GRKERNSEC_BRUTE
54727+ {
54728+ .ctl_name = CTL_UNNUMBERED,
54729+ .procname = "deter_bruteforce",
54730+ .data = &grsec_enable_brute,
54731+ .maxlen = sizeof(int),
54732+ .mode = 0600,
54733+ .proc_handler = &proc_dointvec,
54734+ },
54735+#endif
54736+#ifdef CONFIG_GRKERNSEC_FIFO
54737+ {
54738+ .ctl_name = CTL_UNNUMBERED,
54739+ .procname = "fifo_restrictions",
54740+ .data = &grsec_enable_fifo,
54741+ .maxlen = sizeof(int),
54742+ .mode = 0600,
54743+ .proc_handler = &proc_dointvec,
54744+ },
54745+#endif
54746+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54747+ {
54748+ .ctl_name = CTL_UNNUMBERED,
54749+ .procname = "ip_blackhole",
54750+ .data = &grsec_enable_blackhole,
54751+ .maxlen = sizeof(int),
54752+ .mode = 0600,
54753+ .proc_handler = &proc_dointvec,
54754+ },
54755+ {
54756+ .ctl_name = CTL_UNNUMBERED,
54757+ .procname = "lastack_retries",
54758+ .data = &grsec_lastack_retries,
54759+ .maxlen = sizeof(int),
54760+ .mode = 0600,
54761+ .proc_handler = &proc_dointvec,
54762+ },
54763+#endif
54764+#ifdef CONFIG_GRKERNSEC_EXECLOG
54765+ {
54766+ .ctl_name = CTL_UNNUMBERED,
54767+ .procname = "exec_logging",
54768+ .data = &grsec_enable_execlog,
54769+ .maxlen = sizeof(int),
54770+ .mode = 0600,
54771+ .proc_handler = &proc_dointvec,
54772+ },
54773+#endif
54774+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54775+ {
54776+ .ctl_name = CTL_UNNUMBERED,
54777+ .procname = "rwxmap_logging",
54778+ .data = &grsec_enable_log_rwxmaps,
54779+ .maxlen = sizeof(int),
54780+ .mode = 0600,
54781+ .proc_handler = &proc_dointvec,
54782+ },
54783+#endif
54784+#ifdef CONFIG_GRKERNSEC_SIGNAL
54785+ {
54786+ .ctl_name = CTL_UNNUMBERED,
54787+ .procname = "signal_logging",
54788+ .data = &grsec_enable_signal,
54789+ .maxlen = sizeof(int),
54790+ .mode = 0600,
54791+ .proc_handler = &proc_dointvec,
54792+ },
54793+#endif
54794+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54795+ {
54796+ .ctl_name = CTL_UNNUMBERED,
54797+ .procname = "forkfail_logging",
54798+ .data = &grsec_enable_forkfail,
54799+ .maxlen = sizeof(int),
54800+ .mode = 0600,
54801+ .proc_handler = &proc_dointvec,
54802+ },
54803+#endif
54804+#ifdef CONFIG_GRKERNSEC_TIME
54805+ {
54806+ .ctl_name = CTL_UNNUMBERED,
54807+ .procname = "timechange_logging",
54808+ .data = &grsec_enable_time,
54809+ .maxlen = sizeof(int),
54810+ .mode = 0600,
54811+ .proc_handler = &proc_dointvec,
54812+ },
54813+#endif
54814+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54815+ {
54816+ .ctl_name = CTL_UNNUMBERED,
54817+ .procname = "chroot_deny_shmat",
54818+ .data = &grsec_enable_chroot_shmat,
54819+ .maxlen = sizeof(int),
54820+ .mode = 0600,
54821+ .proc_handler = &proc_dointvec,
54822+ },
54823+#endif
54824+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54825+ {
54826+ .ctl_name = CTL_UNNUMBERED,
54827+ .procname = "chroot_deny_unix",
54828+ .data = &grsec_enable_chroot_unix,
54829+ .maxlen = sizeof(int),
54830+ .mode = 0600,
54831+ .proc_handler = &proc_dointvec,
54832+ },
54833+#endif
54834+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54835+ {
54836+ .ctl_name = CTL_UNNUMBERED,
54837+ .procname = "chroot_deny_mount",
54838+ .data = &grsec_enable_chroot_mount,
54839+ .maxlen = sizeof(int),
54840+ .mode = 0600,
54841+ .proc_handler = &proc_dointvec,
54842+ },
54843+#endif
54844+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54845+ {
54846+ .ctl_name = CTL_UNNUMBERED,
54847+ .procname = "chroot_deny_fchdir",
54848+ .data = &grsec_enable_chroot_fchdir,
54849+ .maxlen = sizeof(int),
54850+ .mode = 0600,
54851+ .proc_handler = &proc_dointvec,
54852+ },
54853+#endif
54854+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54855+ {
54856+ .ctl_name = CTL_UNNUMBERED,
54857+ .procname = "chroot_deny_chroot",
54858+ .data = &grsec_enable_chroot_double,
54859+ .maxlen = sizeof(int),
54860+ .mode = 0600,
54861+ .proc_handler = &proc_dointvec,
54862+ },
54863+#endif
54864+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54865+ {
54866+ .ctl_name = CTL_UNNUMBERED,
54867+ .procname = "chroot_deny_pivot",
54868+ .data = &grsec_enable_chroot_pivot,
54869+ .maxlen = sizeof(int),
54870+ .mode = 0600,
54871+ .proc_handler = &proc_dointvec,
54872+ },
54873+#endif
54874+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54875+ {
54876+ .ctl_name = CTL_UNNUMBERED,
54877+ .procname = "chroot_enforce_chdir",
54878+ .data = &grsec_enable_chroot_chdir,
54879+ .maxlen = sizeof(int),
54880+ .mode = 0600,
54881+ .proc_handler = &proc_dointvec,
54882+ },
54883+#endif
54884+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54885+ {
54886+ .ctl_name = CTL_UNNUMBERED,
54887+ .procname = "chroot_deny_chmod",
54888+ .data = &grsec_enable_chroot_chmod,
54889+ .maxlen = sizeof(int),
54890+ .mode = 0600,
54891+ .proc_handler = &proc_dointvec,
54892+ },
54893+#endif
54894+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54895+ {
54896+ .ctl_name = CTL_UNNUMBERED,
54897+ .procname = "chroot_deny_mknod",
54898+ .data = &grsec_enable_chroot_mknod,
54899+ .maxlen = sizeof(int),
54900+ .mode = 0600,
54901+ .proc_handler = &proc_dointvec,
54902+ },
54903+#endif
54904+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54905+ {
54906+ .ctl_name = CTL_UNNUMBERED,
54907+ .procname = "chroot_restrict_nice",
54908+ .data = &grsec_enable_chroot_nice,
54909+ .maxlen = sizeof(int),
54910+ .mode = 0600,
54911+ .proc_handler = &proc_dointvec,
54912+ },
54913+#endif
54914+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54915+ {
54916+ .ctl_name = CTL_UNNUMBERED,
54917+ .procname = "chroot_execlog",
54918+ .data = &grsec_enable_chroot_execlog,
54919+ .maxlen = sizeof(int),
54920+ .mode = 0600,
54921+ .proc_handler = &proc_dointvec,
54922+ },
54923+#endif
54924+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54925+ {
54926+ .ctl_name = CTL_UNNUMBERED,
54927+ .procname = "chroot_caps",
54928+ .data = &grsec_enable_chroot_caps,
54929+ .maxlen = sizeof(int),
54930+ .mode = 0600,
54931+ .proc_handler = &proc_dointvec,
54932+ },
54933+#endif
54934+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54935+ {
54936+ .ctl_name = CTL_UNNUMBERED,
54937+ .procname = "chroot_deny_sysctl",
54938+ .data = &grsec_enable_chroot_sysctl,
54939+ .maxlen = sizeof(int),
54940+ .mode = 0600,
54941+ .proc_handler = &proc_dointvec,
54942+ },
54943+#endif
54944+#ifdef CONFIG_GRKERNSEC_TPE
54945+ {
54946+ .ctl_name = CTL_UNNUMBERED,
54947+ .procname = "tpe",
54948+ .data = &grsec_enable_tpe,
54949+ .maxlen = sizeof(int),
54950+ .mode = 0600,
54951+ .proc_handler = &proc_dointvec,
54952+ },
54953+ {
54954+ .ctl_name = CTL_UNNUMBERED,
54955+ .procname = "tpe_gid",
54956+ .data = &grsec_tpe_gid,
54957+ .maxlen = sizeof(int),
54958+ .mode = 0600,
54959+ .proc_handler = &proc_dointvec,
54960+ },
54961+#endif
54962+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54963+ {
54964+ .ctl_name = CTL_UNNUMBERED,
54965+ .procname = "tpe_invert",
54966+ .data = &grsec_enable_tpe_invert,
54967+ .maxlen = sizeof(int),
54968+ .mode = 0600,
54969+ .proc_handler = &proc_dointvec,
54970+ },
54971+#endif
54972+#ifdef CONFIG_GRKERNSEC_TPE_ALL
54973+ {
54974+ .ctl_name = CTL_UNNUMBERED,
54975+ .procname = "tpe_restrict_all",
54976+ .data = &grsec_enable_tpe_all,
54977+ .maxlen = sizeof(int),
54978+ .mode = 0600,
54979+ .proc_handler = &proc_dointvec,
54980+ },
54981+#endif
54982+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54983+ {
54984+ .ctl_name = CTL_UNNUMBERED,
54985+ .procname = "socket_all",
54986+ .data = &grsec_enable_socket_all,
54987+ .maxlen = sizeof(int),
54988+ .mode = 0600,
54989+ .proc_handler = &proc_dointvec,
54990+ },
54991+ {
54992+ .ctl_name = CTL_UNNUMBERED,
54993+ .procname = "socket_all_gid",
54994+ .data = &grsec_socket_all_gid,
54995+ .maxlen = sizeof(int),
54996+ .mode = 0600,
54997+ .proc_handler = &proc_dointvec,
54998+ },
54999+#endif
55000+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55001+ {
55002+ .ctl_name = CTL_UNNUMBERED,
55003+ .procname = "socket_client",
55004+ .data = &grsec_enable_socket_client,
55005+ .maxlen = sizeof(int),
55006+ .mode = 0600,
55007+ .proc_handler = &proc_dointvec,
55008+ },
55009+ {
55010+ .ctl_name = CTL_UNNUMBERED,
55011+ .procname = "socket_client_gid",
55012+ .data = &grsec_socket_client_gid,
55013+ .maxlen = sizeof(int),
55014+ .mode = 0600,
55015+ .proc_handler = &proc_dointvec,
55016+ },
55017+#endif
55018+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55019+ {
55020+ .ctl_name = CTL_UNNUMBERED,
55021+ .procname = "socket_server",
55022+ .data = &grsec_enable_socket_server,
55023+ .maxlen = sizeof(int),
55024+ .mode = 0600,
55025+ .proc_handler = &proc_dointvec,
55026+ },
55027+ {
55028+ .ctl_name = CTL_UNNUMBERED,
55029+ .procname = "socket_server_gid",
55030+ .data = &grsec_socket_server_gid,
55031+ .maxlen = sizeof(int),
55032+ .mode = 0600,
55033+ .proc_handler = &proc_dointvec,
55034+ },
55035+#endif
55036+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55037+ {
55038+ .ctl_name = CTL_UNNUMBERED,
55039+ .procname = "audit_group",
55040+ .data = &grsec_enable_group,
55041+ .maxlen = sizeof(int),
55042+ .mode = 0600,
55043+ .proc_handler = &proc_dointvec,
55044+ },
55045+ {
55046+ .ctl_name = CTL_UNNUMBERED,
55047+ .procname = "audit_gid",
55048+ .data = &grsec_audit_gid,
55049+ .maxlen = sizeof(int),
55050+ .mode = 0600,
55051+ .proc_handler = &proc_dointvec,
55052+ },
55053+#endif
55054+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55055+ {
55056+ .ctl_name = CTL_UNNUMBERED,
55057+ .procname = "audit_chdir",
55058+ .data = &grsec_enable_chdir,
55059+ .maxlen = sizeof(int),
55060+ .mode = 0600,
55061+ .proc_handler = &proc_dointvec,
55062+ },
55063+#endif
55064+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55065+ {
55066+ .ctl_name = CTL_UNNUMBERED,
55067+ .procname = "audit_mount",
55068+ .data = &grsec_enable_mount,
55069+ .maxlen = sizeof(int),
55070+ .mode = 0600,
55071+ .proc_handler = &proc_dointvec,
55072+ },
55073+#endif
55074+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55075+ {
55076+ .ctl_name = CTL_UNNUMBERED,
55077+ .procname = "audit_textrel",
55078+ .data = &grsec_enable_audit_textrel,
55079+ .maxlen = sizeof(int),
55080+ .mode = 0600,
55081+ .proc_handler = &proc_dointvec,
55082+ },
55083+#endif
55084+#ifdef CONFIG_GRKERNSEC_DMESG
55085+ {
55086+ .ctl_name = CTL_UNNUMBERED,
55087+ .procname = "dmesg",
55088+ .data = &grsec_enable_dmesg,
55089+ .maxlen = sizeof(int),
55090+ .mode = 0600,
55091+ .proc_handler = &proc_dointvec,
55092+ },
55093+#endif
55094+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55095+ {
55096+ .ctl_name = CTL_UNNUMBERED,
55097+ .procname = "chroot_findtask",
55098+ .data = &grsec_enable_chroot_findtask,
55099+ .maxlen = sizeof(int),
55100+ .mode = 0600,
55101+ .proc_handler = &proc_dointvec,
55102+ },
55103+#endif
55104+#ifdef CONFIG_GRKERNSEC_RESLOG
55105+ {
55106+ .ctl_name = CTL_UNNUMBERED,
55107+ .procname = "resource_logging",
55108+ .data = &grsec_resource_logging,
55109+ .maxlen = sizeof(int),
55110+ .mode = 0600,
55111+ .proc_handler = &proc_dointvec,
55112+ },
55113+#endif
55114+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55115+ {
55116+ .ctl_name = CTL_UNNUMBERED,
55117+ .procname = "audit_ptrace",
55118+ .data = &grsec_enable_audit_ptrace,
55119+ .maxlen = sizeof(int),
55120+ .mode = 0600,
55121+ .proc_handler = &proc_dointvec,
55122+ },
55123+#endif
55124+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55125+ {
55126+ .ctl_name = CTL_UNNUMBERED,
55127+ .procname = "harden_ptrace",
55128+ .data = &grsec_enable_harden_ptrace,
55129+ .maxlen = sizeof(int),
55130+ .mode = 0600,
55131+ .proc_handler = &proc_dointvec,
55132+ },
55133+#endif
55134+ {
55135+ .ctl_name = CTL_UNNUMBERED,
55136+ .procname = "grsec_lock",
55137+ .data = &grsec_lock,
55138+ .maxlen = sizeof(int),
55139+ .mode = 0600,
55140+ .proc_handler = &proc_dointvec,
55141+ },
55142+#endif
55143+#ifdef CONFIG_GRKERNSEC_ROFS
55144+ {
55145+ .ctl_name = CTL_UNNUMBERED,
55146+ .procname = "romount_protect",
55147+ .data = &grsec_enable_rofs,
55148+ .maxlen = sizeof(int),
55149+ .mode = 0600,
55150+ .proc_handler = &proc_dointvec_minmax,
55151+ .extra1 = &one,
55152+ .extra2 = &one,
55153+ },
55154+#endif
55155+ { .ctl_name = 0 }
55156+};
55157+#endif
55158diff -urNp linux-2.6.32.45/grsecurity/grsec_time.c linux-2.6.32.45/grsecurity/grsec_time.c
55159--- linux-2.6.32.45/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55160+++ linux-2.6.32.45/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
55161@@ -0,0 +1,16 @@
55162+#include <linux/kernel.h>
55163+#include <linux/sched.h>
55164+#include <linux/grinternal.h>
55165+#include <linux/module.h>
55166+
55167+void
55168+gr_log_timechange(void)
55169+{
55170+#ifdef CONFIG_GRKERNSEC_TIME
55171+ if (grsec_enable_time)
55172+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55173+#endif
55174+ return;
55175+}
55176+
55177+EXPORT_SYMBOL(gr_log_timechange);
55178diff -urNp linux-2.6.32.45/grsecurity/grsec_tpe.c linux-2.6.32.45/grsecurity/grsec_tpe.c
55179--- linux-2.6.32.45/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55180+++ linux-2.6.32.45/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
55181@@ -0,0 +1,39 @@
55182+#include <linux/kernel.h>
55183+#include <linux/sched.h>
55184+#include <linux/file.h>
55185+#include <linux/fs.h>
55186+#include <linux/grinternal.h>
55187+
55188+extern int gr_acl_tpe_check(void);
55189+
55190+int
55191+gr_tpe_allow(const struct file *file)
55192+{
55193+#ifdef CONFIG_GRKERNSEC
55194+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55195+ const struct cred *cred = current_cred();
55196+
55197+ if (cred->uid && ((grsec_enable_tpe &&
55198+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55199+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55200+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55201+#else
55202+ in_group_p(grsec_tpe_gid)
55203+#endif
55204+ ) || gr_acl_tpe_check()) &&
55205+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55206+ (inode->i_mode & S_IWOTH))))) {
55207+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55208+ return 0;
55209+ }
55210+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55211+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55212+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55213+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55214+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55215+ return 0;
55216+ }
55217+#endif
55218+#endif
55219+ return 1;
55220+}
55221diff -urNp linux-2.6.32.45/grsecurity/grsum.c linux-2.6.32.45/grsecurity/grsum.c
55222--- linux-2.6.32.45/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55223+++ linux-2.6.32.45/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
55224@@ -0,0 +1,61 @@
55225+#include <linux/err.h>
55226+#include <linux/kernel.h>
55227+#include <linux/sched.h>
55228+#include <linux/mm.h>
55229+#include <linux/scatterlist.h>
55230+#include <linux/crypto.h>
55231+#include <linux/gracl.h>
55232+
55233+
55234+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55235+#error "crypto and sha256 must be built into the kernel"
55236+#endif
55237+
55238+int
55239+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55240+{
55241+ char *p;
55242+ struct crypto_hash *tfm;
55243+ struct hash_desc desc;
55244+ struct scatterlist sg;
55245+ unsigned char temp_sum[GR_SHA_LEN];
55246+ volatile int retval = 0;
55247+ volatile int dummy = 0;
55248+ unsigned int i;
55249+
55250+ sg_init_table(&sg, 1);
55251+
55252+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55253+ if (IS_ERR(tfm)) {
55254+ /* should never happen, since sha256 should be built in */
55255+ return 1;
55256+ }
55257+
55258+ desc.tfm = tfm;
55259+ desc.flags = 0;
55260+
55261+ crypto_hash_init(&desc);
55262+
55263+ p = salt;
55264+ sg_set_buf(&sg, p, GR_SALT_LEN);
55265+ crypto_hash_update(&desc, &sg, sg.length);
55266+
55267+ p = entry->pw;
55268+ sg_set_buf(&sg, p, strlen(p));
55269+
55270+ crypto_hash_update(&desc, &sg, sg.length);
55271+
55272+ crypto_hash_final(&desc, temp_sum);
55273+
55274+ memset(entry->pw, 0, GR_PW_LEN);
55275+
55276+ for (i = 0; i < GR_SHA_LEN; i++)
55277+ if (sum[i] != temp_sum[i])
55278+ retval = 1;
55279+ else
55280+ dummy = 1; // waste a cycle
55281+
55282+ crypto_free_hash(tfm);
55283+
55284+ return retval;
55285+}
55286diff -urNp linux-2.6.32.45/grsecurity/Kconfig linux-2.6.32.45/grsecurity/Kconfig
55287--- linux-2.6.32.45/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55288+++ linux-2.6.32.45/grsecurity/Kconfig 2011-08-17 19:04:25.000000000 -0400
55289@@ -0,0 +1,1037 @@
55290+#
55291+# grsecurity configuration
55292+#
55293+
55294+menu "Grsecurity"
55295+
55296+config GRKERNSEC
55297+ bool "Grsecurity"
55298+ select CRYPTO
55299+ select CRYPTO_SHA256
55300+ help
55301+ If you say Y here, you will be able to configure many features
55302+ that will enhance the security of your system. It is highly
55303+ recommended that you say Y here and read through the help
55304+ for each option so that you fully understand the features and
55305+ can evaluate their usefulness for your machine.
55306+
55307+choice
55308+ prompt "Security Level"
55309+ depends on GRKERNSEC
55310+ default GRKERNSEC_CUSTOM
55311+
55312+config GRKERNSEC_LOW
55313+ bool "Low"
55314+ select GRKERNSEC_LINK
55315+ select GRKERNSEC_FIFO
55316+ select GRKERNSEC_RANDNET
55317+ select GRKERNSEC_DMESG
55318+ select GRKERNSEC_CHROOT
55319+ select GRKERNSEC_CHROOT_CHDIR
55320+
55321+ help
55322+ If you choose this option, several of the grsecurity options will
55323+ be enabled that will give you greater protection against a number
55324+ of attacks, while ensuring that none of your software will have any
55325+ conflicts with the additional security measures. If you run a lot
55326+ of unusual software, or you are having problems with the higher
55327+ security levels, you should say Y here. With this option, the
55328+ following features are enabled:
55329+
55330+ - Linking restrictions
55331+ - FIFO restrictions
55332+ - Restricted dmesg
55333+ - Enforced chdir("/") on chroot
55334+ - Runtime module disabling
55335+
55336+config GRKERNSEC_MEDIUM
55337+ bool "Medium"
55338+ select PAX
55339+ select PAX_EI_PAX
55340+ select PAX_PT_PAX_FLAGS
55341+ select PAX_HAVE_ACL_FLAGS
55342+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55343+ select GRKERNSEC_CHROOT
55344+ select GRKERNSEC_CHROOT_SYSCTL
55345+ select GRKERNSEC_LINK
55346+ select GRKERNSEC_FIFO
55347+ select GRKERNSEC_DMESG
55348+ select GRKERNSEC_RANDNET
55349+ select GRKERNSEC_FORKFAIL
55350+ select GRKERNSEC_TIME
55351+ select GRKERNSEC_SIGNAL
55352+ select GRKERNSEC_CHROOT
55353+ select GRKERNSEC_CHROOT_UNIX
55354+ select GRKERNSEC_CHROOT_MOUNT
55355+ select GRKERNSEC_CHROOT_PIVOT
55356+ select GRKERNSEC_CHROOT_DOUBLE
55357+ select GRKERNSEC_CHROOT_CHDIR
55358+ select GRKERNSEC_CHROOT_MKNOD
55359+ select GRKERNSEC_PROC
55360+ select GRKERNSEC_PROC_USERGROUP
55361+ select PAX_RANDUSTACK
55362+ select PAX_ASLR
55363+ select PAX_RANDMMAP
55364+ select PAX_REFCOUNT if (X86 || SPARC64)
55365+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55366+
55367+ help
55368+ If you say Y here, several features in addition to those included
55369+ in the low additional security level will be enabled. These
55370+ features provide even more security to your system, though in rare
55371+ cases they may be incompatible with very old or poorly written
55372+ software. If you enable this option, make sure that your auth
55373+ service (identd) is running as gid 1001. With this option,
55374+ the following features (in addition to those provided in the
55375+ low additional security level) will be enabled:
55376+
55377+ - Failed fork logging
55378+ - Time change logging
55379+ - Signal logging
55380+ - Deny mounts in chroot
55381+ - Deny double chrooting
55382+ - Deny sysctl writes in chroot
55383+ - Deny mknod in chroot
55384+ - Deny access to abstract AF_UNIX sockets out of chroot
55385+ - Deny pivot_root in chroot
55386+ - Deny writes to /dev/kmem, /dev/mem, and /dev/port
55387+ - /proc restrictions with special GID set to 10 (usually wheel)
55388+ - Address Space Layout Randomization (ASLR)
55389+ - Prevent exploitation of most refcount overflows
55390+ - Bounds checking of copying between the kernel and userland
55391+
55392+config GRKERNSEC_HIGH
55393+ bool "High"
55394+ select GRKERNSEC_LINK
55395+ select GRKERNSEC_FIFO
55396+ select GRKERNSEC_DMESG
55397+ select GRKERNSEC_FORKFAIL
55398+ select GRKERNSEC_TIME
55399+ select GRKERNSEC_SIGNAL
55400+ select GRKERNSEC_CHROOT
55401+ select GRKERNSEC_CHROOT_SHMAT
55402+ select GRKERNSEC_CHROOT_UNIX
55403+ select GRKERNSEC_CHROOT_MOUNT
55404+ select GRKERNSEC_CHROOT_FCHDIR
55405+ select GRKERNSEC_CHROOT_PIVOT
55406+ select GRKERNSEC_CHROOT_DOUBLE
55407+ select GRKERNSEC_CHROOT_CHDIR
55408+ select GRKERNSEC_CHROOT_MKNOD
55409+ select GRKERNSEC_CHROOT_CAPS
55410+ select GRKERNSEC_CHROOT_SYSCTL
55411+ select GRKERNSEC_CHROOT_FINDTASK
55412+ select GRKERNSEC_SYSFS_RESTRICT
55413+ select GRKERNSEC_PROC
55414+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55415+ select GRKERNSEC_HIDESYM
55416+ select GRKERNSEC_BRUTE
55417+ select GRKERNSEC_PROC_USERGROUP
55418+ select GRKERNSEC_KMEM
55419+ select GRKERNSEC_RESLOG
55420+ select GRKERNSEC_RANDNET
55421+ select GRKERNSEC_PROC_ADD
55422+ select GRKERNSEC_CHROOT_CHMOD
55423+ select GRKERNSEC_CHROOT_NICE
55424+ select GRKERNSEC_AUDIT_MOUNT
55425+ select GRKERNSEC_MODHARDEN if (MODULES)
55426+ select GRKERNSEC_HARDEN_PTRACE
55427+ select GRKERNSEC_VM86 if (X86_32)
55428+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55429+ select PAX
55430+ select PAX_RANDUSTACK
55431+ select PAX_ASLR
55432+ select PAX_RANDMMAP
55433+ select PAX_NOEXEC
55434+ select PAX_MPROTECT
55435+ select PAX_EI_PAX
55436+ select PAX_PT_PAX_FLAGS
55437+ select PAX_HAVE_ACL_FLAGS
55438+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55439+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
55440+ select PAX_RANDKSTACK if (X86_TSC && X86)
55441+ select PAX_SEGMEXEC if (X86_32)
55442+ select PAX_PAGEEXEC
55443+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55444+ select PAX_EMUTRAMP if (PARISC)
55445+ select PAX_EMUSIGRT if (PARISC)
55446+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55447+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55448+ select PAX_REFCOUNT if (X86 || SPARC64)
55449+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55450+ help
55451+ If you say Y here, many of the features of grsecurity will be
55452+ enabled, which will protect you against many kinds of attacks
55453+ against your system. The heightened security comes at the cost
55454+ of an increased chance of incompatibilities with rare software
55455+ on your machine. Since this security level enables PaX, you should
55456+ view <http://pax.grsecurity.net> and read about the PaX
55457+ project. While you are there, download chpax and run it on
55458+ binaries that cause problems with PaX. Also remember that
55459+ since the /proc restrictions are enabled, you must run your
55460+ identd as gid 1001. This security level enables the following
55461+ features in addition to those listed in the low and medium
55462+ security levels:
55463+
55464+ - Additional /proc restrictions
55465+ - Chmod restrictions in chroot
55466+ - No signals, ptrace, or viewing of processes outside of chroot
55467+ - Capability restrictions in chroot
55468+ - Deny fchdir out of chroot
55469+ - Priority restrictions in chroot
55470+ - Segmentation-based implementation of PaX
55471+ - Mprotect restrictions
55472+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55473+ - Kernel stack randomization
55474+ - Mount/unmount/remount logging
55475+ - Kernel symbol hiding
55476+ - Prevention of memory exhaustion-based exploits
55477+ - Hardening of module auto-loading
55478+ - Ptrace restrictions
55479+ - Restricted vm86 mode
55480+ - Restricted sysfs/debugfs
55481+ - Active kernel exploit response
55482+
55483+config GRKERNSEC_CUSTOM
55484+ bool "Custom"
55485+ help
55486+ If you say Y here, you will be able to configure every grsecurity
55487+ option, which allows you to enable many more features that aren't
55488+ covered in the basic security levels. These additional features
55489+ include TPE, socket restrictions, and the sysctl system for
55490+ grsecurity. It is advised that you read through the help for
55491+ each option to determine its usefulness in your situation.
55492+
55493+endchoice
55494+
55495+menu "Address Space Protection"
55496+depends on GRKERNSEC
55497+
55498+config GRKERNSEC_KMEM
55499+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
55500+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55501+ help
55502+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55503+ be written to via mmap or otherwise to modify the running kernel.
55504+ /dev/port will also not be allowed to be opened. If you have module
55505+ support disabled, enabling this will close up four ways that are
55506+ currently used to insert malicious code into the running kernel.
55507+ Even with all these features enabled, we still highly recommend that
55508+ you use the RBAC system, as it is still possible for an attacker to
55509+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55510+ If you are not using XFree86, you may be able to stop this additional
55511+ case by enabling the 'Disable privileged I/O' option. Though nothing
55512+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55513+ but only to video memory, which is the only writing we allow in this
55514+ case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
55515+ will not be allowed to be changed to PROT_WRITE with mprotect later.
55516+ It is highly recommended that you say Y here if you meet all the
55517+ conditions above.
55518+
55519+config GRKERNSEC_VM86
55520+ bool "Restrict VM86 mode"
55521+ depends on X86_32
55522+
55523+ help
55524+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55525+ make use of a special execution mode on 32bit x86 processors called
55526+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55527+ video cards and will still work with this option enabled. The purpose
55528+ of the option is to prevent exploitation of emulation errors in
55529+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
55530+ Nearly all users should be able to enable this option.
55531+
55532+config GRKERNSEC_IO
55533+ bool "Disable privileged I/O"
55534+ depends on X86
55535+ select RTC_CLASS
55536+ select RTC_INTF_DEV
55537+ select RTC_DRV_CMOS
55538+
55539+ help
55540+ If you say Y here, all ioperm and iopl calls will return an error.
55541+ Ioperm and iopl can be used to modify the running kernel.
55542+ Unfortunately, some programs need this access to operate properly,
55543+ the most notable of which are XFree86 and hwclock. The hwclock case is
55544+ remedied by having RTC support in the kernel, so real-time
55545+ clock support is enabled whenever this option is enabled, to ensure
55546+ that hwclock operates correctly. XFree86 still will not
55547+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55548+ IF YOU USE XFree86. If you use XFree86 and you still want to
55549+ protect your kernel against modification, use the RBAC system.
55550+
55551+config GRKERNSEC_PROC_MEMMAP
55552+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55553+ default y if (PAX_NOEXEC || PAX_ASLR)
55554+ depends on PAX_NOEXEC || PAX_ASLR
55555+ help
55556+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55557+ give no information about the addresses of a task's mappings if
55558+ PaX features that rely on random addresses are enabled for that task.
55559+ If you use PaX it is strongly recommended that you say Y here, as it
55560+ closes a hole that would otherwise make full ASLR useless for suid
55561+ binaries.
55562+
55563+config GRKERNSEC_BRUTE
55564+ bool "Deter exploit bruteforcing"
55565+ help
55566+ If you say Y here, attempts to bruteforce exploits against forking
55567+ daemons such as apache or sshd, as well as against suid/sgid binaries
55568+ will be deterred. When a child of a forking daemon is killed by PaX
55569+ or crashes due to an illegal instruction or other suspicious signal,
55570+ the parent process will be delayed 30 seconds upon every subsequent
55571+ fork until the administrator is able to assess the situation and
55572+ restart the daemon.
55573+ In the suid/sgid case, the attempt is logged, the user has all their
55574+ processes terminated, and they are prevented from executing any further
55575+ processes for 15 minutes.
55576+ It is recommended that you also enable signal logging in the auditing
55577+ section so that logs are generated when a process triggers a suspicious
55578+ signal.
55579+ If the sysctl option is enabled, a sysctl option with name
55580+ "deter_bruteforce" is created.
55581+
55582+config GRKERNSEC_MODHARDEN
55583+ bool "Harden module auto-loading"
55584+ depends on MODULES
55585+ help
55586+ If you say Y here, module auto-loading in response to use of some
55587+ feature implemented by an unloaded module will be restricted to
55588+ root users. Enabling this option helps defend against attacks
55589+ by unprivileged users who abuse the auto-loading behavior to
55590+ cause a vulnerable module to load that is then exploited.
55591+
55592+ If this option prevents a legitimate use of auto-loading for a
55593+ non-root user, the administrator can execute modprobe manually
55594+ with the exact name of the module mentioned in the alert log.
55595+ Alternatively, the administrator can add the module to the list
55596+ of modules loaded at boot by modifying init scripts.
55597+
55598+ Modification of init scripts will most likely be needed on
55599+ Ubuntu servers with encrypted home directory support enabled,
55600+ as the first non-root user logging in will cause the ecb(aes),
55601+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55602+
55603+config GRKERNSEC_HIDESYM
55604+ bool "Hide kernel symbols"
55605+ help
55606+ If you say Y here, getting information on loaded modules, and
55607+ displaying all kernel symbols through a syscall will be restricted
55608+ to users with CAP_SYS_MODULE. For software compatibility reasons,
55609+ /proc/kallsyms will be restricted to the root user. The RBAC
55610+ system can hide that entry even from root.
55611+
55612+ This option also prevents leaking of kernel addresses through
55613+ several /proc entries.
55614+
55615+ Note that this option is only effective provided the following
55616+ conditions are met:
55617+ 1) The kernel using grsecurity is not precompiled by some distribution
55618+ 2) You have also enabled GRKERNSEC_DMESG
55619+ 3) You are using the RBAC system and hiding other files such as your
55620+ kernel image and System.map. Alternatively, enabling this option
55621+ causes the permissions on /boot, /lib/modules, and the kernel
55622+ source directory to change at compile time to prevent
55623+ reading by non-root users.
55624+ If the above conditions are met, this option will aid in providing a
55625+ useful protection against local kernel exploitation of overflows
55626+ and arbitrary read/write vulnerabilities.
55627+
55628+config GRKERNSEC_KERN_LOCKOUT
55629+ bool "Active kernel exploit response"
55630+ depends on X86 || ARM || PPC || SPARC
55631+ help
55632+ If you say Y here, when a PaX alert is triggered due to suspicious
55633+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55634+ or an OOPs occurs due to bad memory accesses, instead of just
55635+ terminating the offending process (and potentially allowing
55636+ a subsequent exploit from the same user), we will take one of two
55637+ actions:
55638+ If the user was root, we will panic the system
55639+ If the user was non-root, we will log the attempt, terminate
55640+ all processes owned by the user, then prevent them from creating
55641+ any new processes until the system is restarted
55642+ This deters repeated kernel exploitation/bruteforcing attempts
55643+ and is useful for later forensics.
55644+
55645+endmenu
55646+menu "Role Based Access Control Options"
55647+depends on GRKERNSEC
55648+
55649+config GRKERNSEC_RBAC_DEBUG
55650+ bool
55651+
55652+config GRKERNSEC_NO_RBAC
55653+ bool "Disable RBAC system"
55654+ help
55655+ If you say Y here, the /dev/grsec device will be removed from the kernel,
55656+ preventing the RBAC system from being enabled. You should only say Y
55657+ here if you have no intention of using the RBAC system, so as to prevent
55658+ an attacker with root access from misusing the RBAC system to hide files
55659+ and processes when loadable module support and /dev/[k]mem have been
55660+ locked down.
55661+
55662+config GRKERNSEC_ACL_HIDEKERN
55663+ bool "Hide kernel processes"
55664+ help
55665+ If you say Y here, all kernel threads will be hidden from all
55666+ processes except those whose subject has the "view hidden processes"
55667+ flag.
55668+
55669+config GRKERNSEC_ACL_MAXTRIES
55670+ int "Maximum tries before password lockout"
55671+ default 3
55672+ help
55673+ This option enforces the maximum number of times a user can attempt
55674+ to authorize themselves with the grsecurity RBAC system before being
55675+ denied the ability to attempt authorization again for a specified time.
55676+ The lower the number, the harder it will be to brute-force a password.
55677+
55678+config GRKERNSEC_ACL_TIMEOUT
55679+ int "Time to wait after max password tries, in seconds"
55680+ default 30
55681+ help
55682+ This option specifies the time a user must wait after submitting the
55683+ maximum number of invalid passwords before being allowed to attempt
55684+ authorization to the RBAC system again. The higher the number, the
55685+ harder it will be to brute-force a password.
55686+
55687+endmenu
55688+menu "Filesystem Protections"
55689+depends on GRKERNSEC
55690+
55691+config GRKERNSEC_PROC
55692+ bool "Proc restrictions"
55693+ help
55694+ If you say Y here, the permissions of the /proc filesystem
55695+ will be altered to enhance system security and privacy. You MUST
55696+ choose either a user-only restriction or a user and group restriction.
55697+ With the user-only restriction, users can see only the processes they
55698+ themselves run; with the user and group restriction, you additionally
55699+ choose a group whose members can view all processes and files normally
55700+ restricted to root. NOTE: If you're running identd as
55701+ a non-root user, you will have to run it as the group you specify here.
55702+
55703+config GRKERNSEC_PROC_USER
55704+ bool "Restrict /proc to user only"
55705+ depends on GRKERNSEC_PROC
55706+ help
55707+ If you say Y here, non-root users will only be able to view their own
55708+ processes, and will be restricted from viewing network-related
55709+ information and kernel symbol and module information.
55710+
55711+config GRKERNSEC_PROC_USERGROUP
55712+ bool "Allow special group"
55713+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55714+ help
55715+ If you say Y here, you will be able to select a group that will be
55716+ able to view all processes and network-related information. If you've
55717+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55718+ remain hidden. This option is useful if you want to run identd as
55719+ a non-root user.
55720+
55721+config GRKERNSEC_PROC_GID
55722+ int "GID for special group"
55723+ depends on GRKERNSEC_PROC_USERGROUP
55724+ default 1001
55725+
55726+config GRKERNSEC_PROC_ADD
55727+ bool "Additional restrictions"
55728+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55729+ help
55730+ If you say Y here, additional restrictions will be placed on
55731+ /proc that keep normal users from viewing device information and
55732+ slabinfo information that could be useful for exploits.
55733+
55734+config GRKERNSEC_LINK
55735+ bool "Linking restrictions"
55736+ help
55737+ If you say Y here, /tmp race exploits will be prevented, since users
55738+ will no longer be able to follow symlinks owned by other users in
55739+ world-writable +t directories (e.g. /tmp), unless the owner of the
55740+ symlink is the owner of the directory. Users will also not be
55741+ able to hardlink to files they do not own. If the sysctl option is
55742+ enabled, a sysctl option with name "linking_restrictions" is created.
55743+
55744+config GRKERNSEC_FIFO
55745+ bool "FIFO restrictions"
55746+ help
55747+ If you say Y here, users will not be able to write to FIFOs they don't
55748+ own in world-writable +t directories (e.g. /tmp), unless the owner of
55749+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
55750+ option is enabled, a sysctl option with name "fifo_restrictions" is
55751+ created.
55752+
55753+config GRKERNSEC_SYSFS_RESTRICT
55754+ bool "Sysfs/debugfs restriction"
55755+ depends on SYSFS
55756+ help
55757+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55758+ any filesystem normally mounted under it (e.g. debugfs) will only
55759+ be accessible by root. These filesystems generally provide access
55760+ to hardware and debug information that isn't appropriate for unprivileged
55761+ users of the system. Sysfs and debugfs have also become a large source
55762+ of new vulnerabilities, ranging from infoleaks to local compromise.
55763+ There has been very little oversight with an eye toward security involved
55764+ in adding new exporters of information to these filesystems, so their
55765+ use is discouraged.
55766+ This option is equivalent to a chmod 0700 of the mount paths.
55767+
55768+config GRKERNSEC_ROFS
55769+ bool "Runtime read-only mount protection"
55770+ help
55771+ If you say Y here, a sysctl option with name "romount_protect" will
55772+ be created. By setting this option to 1 at runtime, filesystems
55773+ will be protected in the following ways:
55774+ * No new writable mounts will be allowed
55775+ * Existing read-only mounts won't be able to be remounted read/write
55776+ * Write operations will be denied on all block devices
55777+ This option acts independently of grsec_lock: once it is set to 1,
55778+ it cannot be turned off. Therefore, please be mindful of the resulting
55779+ behavior if this option is enabled in an init script on a read-only
55780+ filesystem. This feature is mainly intended for secure embedded systems.
55781+
55782+config GRKERNSEC_CHROOT
55783+ bool "Chroot jail restrictions"
55784+ help
55785+ If you say Y here, you will be able to choose several options that will
55786+ make breaking out of a chrooted jail much more difficult. If you
55787+ encounter no software incompatibilities with the following options, it
55788+ is recommended that you enable each one.
55789+
55790+config GRKERNSEC_CHROOT_MOUNT
55791+ bool "Deny mounts"
55792+ depends on GRKERNSEC_CHROOT
55793+ help
55794+ If you say Y here, processes inside a chroot will not be able to
55795+ mount or remount filesystems. If the sysctl option is enabled, a
55796+ sysctl option with name "chroot_deny_mount" is created.
55797+
55798+config GRKERNSEC_CHROOT_DOUBLE
55799+ bool "Deny double-chroots"
55800+ depends on GRKERNSEC_CHROOT
55801+ help
55802+ If you say Y here, processes inside a chroot will not be able to chroot
55803+ again outside the chroot. This is a widely used method of breaking
55804+ out of a chroot jail and should not be allowed. If the sysctl
55805+ option is enabled, a sysctl option with name
55806+ "chroot_deny_chroot" is created.
55807+
55808+config GRKERNSEC_CHROOT_PIVOT
55809+ bool "Deny pivot_root in chroot"
55810+ depends on GRKERNSEC_CHROOT
55811+ help
55812+ If you say Y here, processes inside a chroot will not be able to use
55813+ a function called pivot_root() that was introduced in Linux 2.3.41. It
55814+ works similarly to chroot in that it changes the root filesystem. This
55815+ function could be misused in a chrooted process to attempt to break out
55816+ of the chroot, and therefore should not be allowed. If the sysctl
55817+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
55818+ created.
55819+
55820+config GRKERNSEC_CHROOT_CHDIR
55821+ bool "Enforce chdir(\"/\") on all chroots"
55822+ depends on GRKERNSEC_CHROOT
55823+ help
55824+ If you say Y here, the current working directory of all newly-chrooted
55825+ applications will be set to the root directory of the chroot.
55826+ The man page on chroot(2) states:
55827+ Note that this call does not change the current working
55828+ directory, so that `.' can be outside the tree rooted at
55829+ `/'. In particular, the super-user can escape from a
55830+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55831+
55832+ It is recommended that you say Y here, since it's not known to break
55833+ any software. If the sysctl option is enabled, a sysctl option with
55834+ name "chroot_enforce_chdir" is created.
55835+
55836+config GRKERNSEC_CHROOT_CHMOD
55837+ bool "Deny (f)chmod +s"
55838+ depends on GRKERNSEC_CHROOT
55839+ help
55840+ If you say Y here, processes inside a chroot will not be able to chmod
55841+ or fchmod files to make them have suid or sgid bits. This protects
55842+ against another published method of breaking a chroot. If the sysctl
55843+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
55844+ created.
55845+
55846+config GRKERNSEC_CHROOT_FCHDIR
55847+ bool "Deny fchdir out of chroot"
55848+ depends on GRKERNSEC_CHROOT
55849+ help
55850+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
55851+ to a file descriptor of the chrooting process that points to a directory
55852+ outside the filesystem will be stopped. If the sysctl option
55853+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55854+
55855+config GRKERNSEC_CHROOT_MKNOD
55856+ bool "Deny mknod"
55857+ depends on GRKERNSEC_CHROOT
55858+ help
55859+ If you say Y here, processes inside a chroot will not be allowed to
55860+ mknod. The problem with using mknod inside a chroot is that it
55861+ would allow an attacker to create a device entry that is the same
55862+ as one on the physical root of your system, which could be anything
55863+ from the console device to a device for your hard drive (which
55864+ they could then use to wipe the drive or steal data). It is recommended
55865+ that you say Y here, unless you run into software incompatibilities.
55866+ If the sysctl option is enabled, a sysctl option with name
55867+ "chroot_deny_mknod" is created.
55868+
55869+config GRKERNSEC_CHROOT_SHMAT
55870+ bool "Deny shmat() out of chroot"
55871+ depends on GRKERNSEC_CHROOT
55872+ help
55873+ If you say Y here, processes inside a chroot will not be able to attach
55874+ to shared memory segments that were created outside of the chroot jail.
55875+ It is recommended that you say Y here. If the sysctl option is enabled,
55876+ a sysctl option with name "chroot_deny_shmat" is created.
55877+
55878+config GRKERNSEC_CHROOT_UNIX
55879+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
55880+ depends on GRKERNSEC_CHROOT
55881+ help
55882+ If you say Y here, processes inside a chroot will not be able to
55883+ connect to abstract (meaning not belonging to a filesystem) Unix
55884+ domain sockets that were bound outside of a chroot. It is recommended
55885+ that you say Y here. If the sysctl option is enabled, a sysctl option
55886+ with name "chroot_deny_unix" is created.
55887+
55888+config GRKERNSEC_CHROOT_FINDTASK
55889+ bool "Protect outside processes"
55890+ depends on GRKERNSEC_CHROOT
55891+ help
55892+ If you say Y here, processes inside a chroot will not be able to
55893+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55894+ getsid, or view any process outside of the chroot. If the sysctl
55895+ option is enabled, a sysctl option with name "chroot_findtask" is
55896+ created.
55897+
55898+config GRKERNSEC_CHROOT_NICE
55899+ bool "Restrict priority changes"
55900+ depends on GRKERNSEC_CHROOT
55901+ help
55902+ If you say Y here, processes inside a chroot will not be able to raise
55903+ the priority of processes in the chroot, or alter the priority of
55904+ processes outside the chroot. This provides more security than simply
55905+ removing CAP_SYS_NICE from the process' capability set. If the
55906+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55907+ is created.
55908+
55909+config GRKERNSEC_CHROOT_SYSCTL
55910+ bool "Deny sysctl writes"
55911+ depends on GRKERNSEC_CHROOT
55912+ help
55913+ If you say Y here, an attacker in a chroot will not be able to
55914+ write to sysctl entries, either by sysctl(2) or through a /proc
55915+ interface. It is strongly recommended that you say Y here. If the
55916+ sysctl option is enabled, a sysctl option with name
55917+ "chroot_deny_sysctl" is created.
55918+
55919+config GRKERNSEC_CHROOT_CAPS
55920+ bool "Capability restrictions"
55921+ depends on GRKERNSEC_CHROOT
55922+ help
55923+ If you say Y here, the capabilities on all root processes within a
55924+ chroot jail will be lowered to stop module insertion, raw i/o,
55925+ system and net admin tasks, rebooting the system, modifying immutable
55926+ files, modifying IPC owned by another, and changing the system time.
55927+ This is left as an option because it can break some apps. Disable this
55928+ if your chrooted apps are having problems performing those kinds of
55929+ tasks. If the sysctl option is enabled, a sysctl option with
55930+ name "chroot_caps" is created.
55931+
55932+endmenu
55933+menu "Kernel Auditing"
55934+depends on GRKERNSEC
55935+
55936+config GRKERNSEC_AUDIT_GROUP
55937+ bool "Single group for auditing"
55938+ help
55939+ If you say Y here, the exec, chdir, and (un)mount logging features
55940+ will only operate on a group you specify. This option is recommended
55941+ if you only want to watch certain users instead of having a large
55942+ amount of logs from the entire system. If the sysctl option is enabled,
55943+ a sysctl option with name "audit_group" is created.
55944+
55945+config GRKERNSEC_AUDIT_GID
55946+ int "GID for auditing"
55947+ depends on GRKERNSEC_AUDIT_GROUP
55948+ default 1007
55949+
55950+config GRKERNSEC_EXECLOG
55951+ bool "Exec logging"
55952+ help
55953+ If you say Y here, all execve() calls will be logged (since the
55954+ other exec*() calls are frontends to execve(), all execution
55955+ will be logged). Useful for shell-servers that like to keep track
55956+ of their users. If the sysctl option is enabled, a sysctl option with
55957+ name "exec_logging" is created.
55958+ WARNING: This option when enabled will produce a LOT of logs, especially
55959+ on an active system.
55960+
55961+config GRKERNSEC_RESLOG
55962+ bool "Resource logging"
55963+ help
55964+ If you say Y here, all attempts to overstep resource limits will
55965+ be logged with the resource name, the requested size, and the current
55966+ limit. It is highly recommended that you say Y here. If the sysctl
55967+ option is enabled, a sysctl option with name "resource_logging" is
55968+ created. If the RBAC system is enabled, the sysctl value is ignored.
55969+
55970+config GRKERNSEC_CHROOT_EXECLOG
55971+ bool "Log execs within chroot"
55972+ help
55973+ If you say Y here, all executions inside a chroot jail will be logged
55974+ to syslog. This can cause a large amount of logs if certain
55975+ applications (e.g. djb's daemontools) are installed on the system, and
55976+ is therefore left as an option. If the sysctl option is enabled, a
55977+ sysctl option with name "chroot_execlog" is created.
55978+
55979+config GRKERNSEC_AUDIT_PTRACE
55980+ bool "Ptrace logging"
55981+ help
55982+ If you say Y here, all attempts to attach to a process via ptrace
55983+ will be logged. If the sysctl option is enabled, a sysctl option
55984+ with name "audit_ptrace" is created.
55985+
55986+config GRKERNSEC_AUDIT_CHDIR
55987+ bool "Chdir logging"
55988+ help
55989+ If you say Y here, all chdir() calls will be logged. If the sysctl
55990+ option is enabled, a sysctl option with name "audit_chdir" is created.
55991+
55992+config GRKERNSEC_AUDIT_MOUNT
55993+ bool "(Un)Mount logging"
55994+ help
55995+ If you say Y here, all mounts and unmounts will be logged. If the
55996+ sysctl option is enabled, a sysctl option with name "audit_mount" is
55997+ created.
55998+
55999+config GRKERNSEC_SIGNAL
56000+ bool "Signal logging"
56001+ help
56002+ If you say Y here, certain important signals will be logged, such as
56003+ SIGSEGV, which will as a result inform you when an error in a program
56004+ occurred, which in some cases could indicate a possible exploit attempt.
56005+ If the sysctl option is enabled, a sysctl option with name
56006+ "signal_logging" is created.
56007+
56008+config GRKERNSEC_FORKFAIL
56009+ bool "Fork failure logging"
56010+ help
56011+ If you say Y here, all failed fork() attempts will be logged.
56012+ This could suggest a fork bomb, or someone attempting to overstep
56013+ their process limit. If the sysctl option is enabled, a sysctl option
56014+ with name "forkfail_logging" is created.
56015+
56016+config GRKERNSEC_TIME
56017+ bool "Time change logging"
56018+ help
56019+ If you say Y here, any changes of the system clock will be logged.
56020+ If the sysctl option is enabled, a sysctl option with name
56021+ "timechange_logging" is created.
56022+
56023+config GRKERNSEC_PROC_IPADDR
56024+ bool "/proc/<pid>/ipaddr support"
56025+ help
56026+ If you say Y here, a new entry will be added to each /proc/<pid>
56027+ directory that contains the IP address of the person using the task.
56028+ The IP is carried across local TCP and AF_UNIX stream sockets.
56029+ This information can be useful for IDS/IPSes to perform remote response
56030+ to a local attack. The entry is readable by only the owner of the
56031+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56032+ the RBAC system), and thus does not create privacy concerns.
56033+
56034+config GRKERNSEC_RWXMAP_LOG
56035+ bool 'Denied RWX mmap/mprotect logging'
56036+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56037+ help
56038+ If you say Y here, calls to mmap() and mprotect() with explicit
56039+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56040+ denied by the PAX_MPROTECT feature. If the sysctl option is
56041+ enabled, a sysctl option with name "rwxmap_logging" is created.
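
 For illustration only (not part of the patch): the kind of userspace request
 this option reports can be reproduced with a minimal C program such as the
 sketch below. On a PAX_MPROTECT kernel both calls are refused and, with
 rwxmap_logging enabled, logged; a stock kernel normally grants them.

 #include <stdio.h>
 #include <sys/mman.h>

 int main(void)
 {
         /* A mapping requested writable and executable at the same time:
          * PAX_MPROTECT refuses it, and GRKERNSEC_RWXMAP_LOG records the
          * refusal; a stock kernel normally grants it. */
         void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
         if (p == MAP_FAILED)
                 perror("mmap(RWX)");

         /* Adding PROT_EXEC to an already-writable mapping is refused and
          * logged in the same way. */
         void *q = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
         if (q != MAP_FAILED &&
             mprotect(q, 4096, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
                 perror("mprotect(RWX)");
         return 0;
 }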
56042+
56043+config GRKERNSEC_AUDIT_TEXTREL
56044+ bool 'ELF text relocations logging (READ HELP)'
56045+ depends on PAX_MPROTECT
56046+ help
56047+ If you say Y here, text relocations will be logged with the filename
56048+ of the offending library or binary. The purpose of the feature is
56049+ to help Linux distribution developers get rid of libraries and
56050+ binaries that need text relocations which hinder the future progress
56051+ of PaX. Only Linux distribution developers should say Y here, and
56052+ never on a production machine, as this option creates an information
56053+ leak that could aid an attacker in defeating the randomization of
56054+ a single memory region. If the sysctl option is enabled, a sysctl
56055+ option with name "audit_textrel" is created.
56056+
56057+endmenu
56058+
56059+menu "Executable Protections"
56060+depends on GRKERNSEC
56061+
56062+config GRKERNSEC_DMESG
56063+ bool "Dmesg(8) restriction"
56064+ help
56065+ If you say Y here, non-root users will not be able to use dmesg(8)
56066+ to view up to the last 4kb of messages in the kernel's log buffer.
56067+ The kernel's log buffer often contains kernel addresses and other
56068+ identifying information useful to an attacker in fingerprinting a
56069+ system for a targeted exploit.
56070+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56071+ created.
56072+
56073+config GRKERNSEC_HARDEN_PTRACE
56074+ bool "Deter ptrace-based process snooping"
56075+ help
56076+ If you say Y here, TTY sniffers and other malicious monitoring
56077+ programs implemented through ptrace will be defeated. If you
56078+ have been using the RBAC system, this option has already been
56079+ enabled for several years for all users, with the ability to make
56080+ fine-grained exceptions.
56081+
56082+ This option only affects the ability of non-root users to ptrace
56083+ processes that are not a descendant of the ptracing process.
56084+ This means that strace ./binary and gdb ./binary will still work,
56085+ but attaching to arbitrary processes will not. If the sysctl
56086+ option is enabled, a sysctl option with name "harden_ptrace" is
56087+ created.
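
 A rough userspace sketch (not part of the patch) of what changes: a direct
 PTRACE_ATTACH to an unrelated process owned by the same user succeeds on a
 stock kernel, but is expected to fail once harden_ptrace is enabled, because
 the target is not a descendant of the attaching process (the exact errno may
 vary).

 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/ptrace.h>
 #include <sys/types.h>

 int main(int argc, char **argv)
 {
         if (argc != 2) {
                 fprintf(stderr, "usage: %s <pid of an unrelated process you own>\n", argv[0]);
                 return 1;
         }
         pid_t pid = (pid_t)atoi(argv[1]);

         /* Stock kernel: attach succeeds for any process with the same uid.
          * harden_ptrace: attach fails unless <pid> descends from us. */
         if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
                 perror("PTRACE_ATTACH");
                 return 1;
         }
         printf("attached to %d\n", (int)pid);
         return 0;
 }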
56088+
56089+config GRKERNSEC_TPE
56090+ bool "Trusted Path Execution (TPE)"
56091+ help
56092+ If you say Y here, you will be able to choose a gid to add to the
56093+ supplementary groups of users you want to mark as "untrusted."
56094+ These users will not be able to execute any files that are not in
56095+ root-owned directories writable only by root. If the sysctl option
56096+ is enabled, a sysctl option with name "tpe" is created.
56097+
56098+config GRKERNSEC_TPE_ALL
56099+ bool "Partially restrict all non-root users"
56100+ depends on GRKERNSEC_TPE
56101+ help
56102+ If you say Y here, all non-root users will be covered under
56103+ a weaker TPE restriction. This is separate from, and in addition to,
56104+ the main TPE options that you have selected elsewhere. Thus, if a
56105+ "trusted" GID is chosen, this restriction applies to even that GID.
56106+ Under this restriction, all non-root users will only be allowed to
56107+ execute files in directories they own that are not group or
56108+ world-writable, or in directories owned by root and writable only by
56109+ root. If the sysctl option is enabled, a sysctl option with name
56110+ "tpe_restrict_all" is created.
56111+
56112+config GRKERNSEC_TPE_INVERT
56113+ bool "Invert GID option"
56114+ depends on GRKERNSEC_TPE
56115+ help
56116+ If you say Y here, the group you specify in the TPE configuration will
56117+ decide what group TPE restrictions will be *disabled* for. This
56118+ option is useful if you want TPE restrictions to be applied to most
56119+ users on the system. If the sysctl option is enabled, a sysctl option
56120+ with name "tpe_invert" is created. Unlike other sysctl options, this
56121+ entry will default to on for backward-compatibility.
56122+
56123+config GRKERNSEC_TPE_GID
56124+ int "GID for untrusted users"
56125+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56126+ default 1005
56127+ help
56128+ Setting this GID determines what group TPE restrictions will be
56129+ *enabled* for. If the sysctl option is enabled, a sysctl option
56130+ with name "tpe_gid" is created.
56131+
56132+config GRKERNSEC_TPE_GID
56133+ int "GID for trusted users"
56134+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56135+ default 1005
56136+ help
56137+ Setting this GID determines what group TPE restrictions will be
56138+ *disabled* for. If the sysctl option is enabled, a sysctl option
56139+ with name "tpe_gid" is created.
56140+
56141+endmenu
56142+menu "Network Protections"
56143+depends on GRKERNSEC
56144+
56145+config GRKERNSEC_RANDNET
56146+ bool "Larger entropy pools"
56147+ help
56148+ If you say Y here, the entropy pools used for many features of Linux
56149+ and grsecurity will be doubled in size. Since several grsecurity
56150+ features use additional randomness, it is recommended that you say Y
56151+ here. Saying Y here has a similar effect as modifying
56152+ /proc/sys/kernel/random/poolsize.
56153+
56154+config GRKERNSEC_BLACKHOLE
56155+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56156+ depends on NET
56157+ help
56158+ If you say Y here, neither TCP resets nor ICMP
56159+ destination-unreachable packets will be sent in response to packets
56160+ sent to ports for which no associated listening process exists.
56161+ This feature supports both IPv4 and IPv6 and exempts the
56162+ loopback interface from blackholing. Enabling this feature
56163+ makes a host more resilient to DoS attacks and reduces network
56164+ visibility against scanners.
56165+
56166+ The blackhole feature as-implemented is equivalent to the FreeBSD
56167+ blackhole feature, as it prevents RST responses to all packets, not
56168+ just SYNs. Under most application behavior this causes no
56169+ problems, but applications (like haproxy) may not close certain
56170+ connections in a way that cleanly terminates them on the remote
56171+ end, leaving the remote host in LAST_ACK state. Because of this
56172+ side-effect and to prevent intentional LAST_ACK DoSes, this
56173+ feature also adds automatic mitigation against such attacks.
56174+ The mitigation drastically reduces the amount of time a socket
56175+ can spend in LAST_ACK state. If you're using haproxy and not
56176+ all servers it connects to have this option enabled, consider
56177+ disabling this feature on the haproxy host.
56178+
56179+ If the sysctl option is enabled, two sysctl options with names
56180+ "ip_blackhole" and "lastack_retries" will be created.
56181+ While "ip_blackhole" takes the standard zero/non-zero on/off
56182+ toggle, "lastack_retries" uses the same kinds of values as
56183+ "tcp_retries1" and "tcp_retries2". The default value of 4
56184+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56185+ state.
56186+
56187+config GRKERNSEC_SOCKET
56188+ bool "Socket restrictions"
56189+ depends on NET
56190+ help
56191+ If you say Y here, you will be able to choose from several options.
56192+ If you assign a GID on your system and add it to the supplementary
56193+ groups of users you want to restrict socket access to, this patch
56194+ will perform up to three things, based on the option(s) you choose.
56195+
56196+config GRKERNSEC_SOCKET_ALL
56197+ bool "Deny any sockets to group"
56198+ depends on GRKERNSEC_SOCKET
56199+ help
56200+ If you say Y here, you will be able to choose a GID whose users will
56201+ be unable to connect to other hosts from your machine or run server
56202+ applications from your machine. If the sysctl option is enabled, a
56203+ sysctl option with name "socket_all" is created.
56204+
56205+config GRKERNSEC_SOCKET_ALL_GID
56206+ int "GID to deny all sockets for"
56207+ depends on GRKERNSEC_SOCKET_ALL
56208+ default 1004
56209+ help
56210+ Here you can choose the GID to disable socket access for. Remember to
56211+ add the users you want socket access disabled for to the GID
56212+ specified here. If the sysctl option is enabled, a sysctl option
56213+ with name "socket_all_gid" is created.
56214+
56215+config GRKERNSEC_SOCKET_CLIENT
56216+ bool "Deny client sockets to group"
56217+ depends on GRKERNSEC_SOCKET
56218+ help
56219+ If you say Y here, you will be able to choose a GID whose users will
56220+ be unable to connect to other hosts from your machine, but will be
56221+ able to run servers. If this option is enabled, all users in the group
56222+ you specify will have to use passive mode when initiating ftp transfers
56223+ from the shell on your machine. If the sysctl option is enabled, a
56224+ sysctl option with name "socket_client" is created.
56225+
56226+config GRKERNSEC_SOCKET_CLIENT_GID
56227+ int "GID to deny client sockets for"
56228+ depends on GRKERNSEC_SOCKET_CLIENT
56229+ default 1003
56230+ help
56231+ Here you can choose the GID to disable client socket access for.
56232+ Remember to add the users you want client socket access disabled for to
56233+ the GID specified here. If the sysctl option is enabled, a sysctl
56234+ option with name "socket_client_gid" is created.
56235+
56236+config GRKERNSEC_SOCKET_SERVER
56237+ bool "Deny server sockets to group"
56238+ depends on GRKERNSEC_SOCKET
56239+ help
56240+ If you say Y here, you will be able to choose a GID whose users will
56241+ be unable to run server applications from your machine. If the sysctl
56242+ option is enabled, a sysctl option with name "socket_server" is created.
56243+
56244+config GRKERNSEC_SOCKET_SERVER_GID
56245+ int "GID to deny server sockets for"
56246+ depends on GRKERNSEC_SOCKET_SERVER
56247+ default 1002
56248+ help
56249+ Here you can choose the GID to disable server socket access for.
56250+ Remember to add the users you want server socket access disabled for to
56251+ the GID specified here. If the sysctl option is enabled, a sysctl
56252+ option with name "socket_server_gid" is created.
56253+
56254+endmenu
56255+menu "Sysctl support"
56256+depends on GRKERNSEC && SYSCTL
56257+
56258+config GRKERNSEC_SYSCTL
56259+ bool "Sysctl support"
56260+ help
56261+ If you say Y here, you will be able to change the options that
56262+ grsecurity runs with at bootup, without having to recompile your
56263+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56264+ to enable (1) or disable (0) various features. All the sysctl entries
56265+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56266+ All features enabled in the kernel configuration are disabled at boot
56267+ if you do not say Y to the "Turn on features by default" option.
56268+ All options should be set at startup, and the grsec_lock entry should
56269+ be set to a non-zero value after all the options are set.
56270+ *THIS IS EXTREMELY IMPORTANT*
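
 As a hedged illustration (not part of the patch), an init-time helper that
 follows this advice would set each tunable and only then set grsec_lock; the
 entry names used below are ones this Kconfig text says exist
 ("linking_restrictions" from the linking restrictions option, and
 "grsec_lock"), and the program simply does in C what an init script would do
 with echo.

 #include <stdio.h>

 /* Write one value into a grsecurity sysctl entry; must run as root on a
  * kernel built with GRKERNSEC_SYSCTL. */
 static int grsec_set(const char *entry, const char *value)
 {
         char path[256];
         FILE *f;

         snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", entry);
         f = fopen(path, "w");
         if (!f) {
                 perror(path);
                 return -1;
         }
         fputs(value, f);
         return fclose(f);
 }

 int main(void)
 {
         /* Enable the desired features first... */
         grsec_set("linking_restrictions", "1");
         /* ...then make the configuration immutable, as stressed above. */
         return grsec_set("grsec_lock", "1") ? 1 : 0;
 }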
56271+
56272+config GRKERNSEC_SYSCTL_DISTRO
56273+ bool "Extra sysctl support for distro makers (READ HELP)"
56274+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56275+ help
56276+ If you say Y here, additional sysctl options will be created
56277+ for features that affect processes running as root. Therefore,
56278+ it is critical when using this option that the grsec_lock entry be
56279+ enabled after boot. Only distros that ship prebuilt kernel packages
56280+ with this option enabled and that can ensure grsec_lock is enabled
56281+ after boot should use this option.
56282+ *Failure to set grsec_lock after boot makes all grsec features
56283+ this option covers useless*
56284+
56285+ Currently this option creates the following sysctl entries:
56286+ "Disable Privileged I/O": "disable_priv_io"
56287+
56288+config GRKERNSEC_SYSCTL_ON
56289+ bool "Turn on features by default"
56290+ depends on GRKERNSEC_SYSCTL
56291+ help
56292+ If you say Y here, instead of having all features enabled in the
56293+ kernel configuration disabled at boot time, the features will be
56294+ enabled at boot time. It is recommended you say Y here unless
56295+ there is some reason you would want all sysctl-tunable features to
56296+ be disabled by default. As mentioned elsewhere, it is important
56297+ to enable the grsec_lock entry once you have finished modifying
56298+ the sysctl entries.
56299+
56300+endmenu
56301+menu "Logging Options"
56302+depends on GRKERNSEC
56303+
56304+config GRKERNSEC_FLOODTIME
56305+ int "Seconds in between log messages (minimum)"
56306+ default 10
56307+ help
56308+ This option allows you to enforce the number of seconds between
56309+ grsecurity log messages. The default should be suitable for most
56310+ people; however, if you choose to change it, choose a value small enough
56311+ to allow informative logs to be produced, but large enough to
56312+ prevent flooding.
56313+
56314+config GRKERNSEC_FLOODBURST
56315+ int "Number of messages in a burst (maximum)"
56316+ default 4
56317+ help
56318+ This option allows you to choose the maximum number of messages allowed
56319+ within the flood time interval you chose in a separate option. The
56320+ default should be suitable for most people; however, if you find that
56321+ many of your logs are being interpreted as flooding, you may want to
56322+ raise this value.
56323+
56324+endmenu
56325+
56326+endmenu
56327diff -urNp linux-2.6.32.45/grsecurity/Makefile linux-2.6.32.45/grsecurity/Makefile
56328--- linux-2.6.32.45/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56329+++ linux-2.6.32.45/grsecurity/Makefile 2011-08-21 18:54:34.000000000 -0400
56330@@ -0,0 +1,34 @@
56331+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56332+# during 2001-2009 it has been completely redesigned by Brad Spengler
56333+# into an RBAC system
56334+#
56335+# All code in this directory and various hooks inserted throughout the kernel
56336+# are copyright Brad Spengler - Open Source Security, Inc., and released
56337+# under the GPL v2 or higher
56338+
56339+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56340+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56341+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56342+
56343+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56344+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56345+ gracl_learn.o grsec_log.o
56346+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56347+
56348+ifdef CONFIG_NET
56349+obj-y += grsec_sock.o
56350+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56351+endif
56352+
56353+ifndef CONFIG_GRKERNSEC
56354+obj-y += grsec_disabled.o
56355+endif
56356+
56357+ifdef CONFIG_GRKERNSEC_HIDESYM
56358+extra-y := grsec_hidesym.o
56359+$(obj)/grsec_hidesym.o:
56360+ @-chmod -f 500 /boot
56361+ @-chmod -f 500 /lib/modules
56362+ @-chmod -f 700 .
56363+ @echo ' grsec: protected kernel image paths'
56364+endif
56365diff -urNp linux-2.6.32.45/include/acpi/acpi_bus.h linux-2.6.32.45/include/acpi/acpi_bus.h
56366--- linux-2.6.32.45/include/acpi/acpi_bus.h 2011-03-27 14:31:47.000000000 -0400
56367+++ linux-2.6.32.45/include/acpi/acpi_bus.h 2011-08-05 20:33:55.000000000 -0400
56368@@ -107,7 +107,7 @@ struct acpi_device_ops {
56369 acpi_op_bind bind;
56370 acpi_op_unbind unbind;
56371 acpi_op_notify notify;
56372-};
56373+} __no_const;
56374
56375 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56376
56377diff -urNp linux-2.6.32.45/include/acpi/acpi_drivers.h linux-2.6.32.45/include/acpi/acpi_drivers.h
56378--- linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
56379+++ linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
56380@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
56381 Dock Station
56382 -------------------------------------------------------------------------- */
56383 struct acpi_dock_ops {
56384- acpi_notify_handler handler;
56385- acpi_notify_handler uevent;
56386+ const acpi_notify_handler handler;
56387+ const acpi_notify_handler uevent;
56388 };
56389
56390 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
56391@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
56392 extern int register_dock_notifier(struct notifier_block *nb);
56393 extern void unregister_dock_notifier(struct notifier_block *nb);
56394 extern int register_hotplug_dock_device(acpi_handle handle,
56395- struct acpi_dock_ops *ops,
56396+ const struct acpi_dock_ops *ops,
56397 void *context);
56398 extern void unregister_hotplug_dock_device(acpi_handle handle);
56399 #else
56400@@ -144,7 +144,7 @@ static inline void unregister_dock_notif
56401 {
56402 }
56403 static inline int register_hotplug_dock_device(acpi_handle handle,
56404- struct acpi_dock_ops *ops,
56405+ const struct acpi_dock_ops *ops,
56406 void *context)
56407 {
56408 return -ENODEV;
56409diff -urNp linux-2.6.32.45/include/asm-generic/atomic-long.h linux-2.6.32.45/include/asm-generic/atomic-long.h
56410--- linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
56411+++ linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
56412@@ -22,6 +22,12 @@
56413
56414 typedef atomic64_t atomic_long_t;
56415
56416+#ifdef CONFIG_PAX_REFCOUNT
56417+typedef atomic64_unchecked_t atomic_long_unchecked_t;
56418+#else
56419+typedef atomic64_t atomic_long_unchecked_t;
56420+#endif
56421+
56422 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56423
56424 static inline long atomic_long_read(atomic_long_t *l)
56425@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56426 return (long)atomic64_read(v);
56427 }
56428
56429+#ifdef CONFIG_PAX_REFCOUNT
56430+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56431+{
56432+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56433+
56434+ return (long)atomic64_read_unchecked(v);
56435+}
56436+#endif
56437+
56438 static inline void atomic_long_set(atomic_long_t *l, long i)
56439 {
56440 atomic64_t *v = (atomic64_t *)l;
56441@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56442 atomic64_set(v, i);
56443 }
56444
56445+#ifdef CONFIG_PAX_REFCOUNT
56446+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56447+{
56448+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56449+
56450+ atomic64_set_unchecked(v, i);
56451+}
56452+#endif
56453+
56454 static inline void atomic_long_inc(atomic_long_t *l)
56455 {
56456 atomic64_t *v = (atomic64_t *)l;
56457@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56458 atomic64_inc(v);
56459 }
56460
56461+#ifdef CONFIG_PAX_REFCOUNT
56462+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56463+{
56464+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56465+
56466+ atomic64_inc_unchecked(v);
56467+}
56468+#endif
56469+
56470 static inline void atomic_long_dec(atomic_long_t *l)
56471 {
56472 atomic64_t *v = (atomic64_t *)l;
56473@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56474 atomic64_dec(v);
56475 }
56476
56477+#ifdef CONFIG_PAX_REFCOUNT
56478+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56479+{
56480+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56481+
56482+ atomic64_dec_unchecked(v);
56483+}
56484+#endif
56485+
56486 static inline void atomic_long_add(long i, atomic_long_t *l)
56487 {
56488 atomic64_t *v = (atomic64_t *)l;
56489@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56490 atomic64_add(i, v);
56491 }
56492
56493+#ifdef CONFIG_PAX_REFCOUNT
56494+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56495+{
56496+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56497+
56498+ atomic64_add_unchecked(i, v);
56499+}
56500+#endif
56501+
56502 static inline void atomic_long_sub(long i, atomic_long_t *l)
56503 {
56504 atomic64_t *v = (atomic64_t *)l;
56505@@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
56506 return (long)atomic64_inc_return(v);
56507 }
56508
56509+#ifdef CONFIG_PAX_REFCOUNT
56510+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56511+{
56512+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56513+
56514+ return (long)atomic64_inc_return_unchecked(v);
56515+}
56516+#endif
56517+
56518 static inline long atomic_long_dec_return(atomic_long_t *l)
56519 {
56520 atomic64_t *v = (atomic64_t *)l;
56521@@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
56522
56523 typedef atomic_t atomic_long_t;
56524
56525+#ifdef CONFIG_PAX_REFCOUNT
56526+typedef atomic_unchecked_t atomic_long_unchecked_t;
56527+#else
56528+typedef atomic_t atomic_long_unchecked_t;
56529+#endif
56530+
56531 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56532 static inline long atomic_long_read(atomic_long_t *l)
56533 {
56534@@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
56535 return (long)atomic_read(v);
56536 }
56537
56538+#ifdef CONFIG_PAX_REFCOUNT
56539+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56540+{
56541+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56542+
56543+ return (long)atomic_read_unchecked(v);
56544+}
56545+#endif
56546+
56547 static inline void atomic_long_set(atomic_long_t *l, long i)
56548 {
56549 atomic_t *v = (atomic_t *)l;
56550@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
56551 atomic_set(v, i);
56552 }
56553
56554+#ifdef CONFIG_PAX_REFCOUNT
56555+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56556+{
56557+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56558+
56559+ atomic_set_unchecked(v, i);
56560+}
56561+#endif
56562+
56563 static inline void atomic_long_inc(atomic_long_t *l)
56564 {
56565 atomic_t *v = (atomic_t *)l;
56566@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
56567 atomic_inc(v);
56568 }
56569
56570+#ifdef CONFIG_PAX_REFCOUNT
56571+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56572+{
56573+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56574+
56575+ atomic_inc_unchecked(v);
56576+}
56577+#endif
56578+
56579 static inline void atomic_long_dec(atomic_long_t *l)
56580 {
56581 atomic_t *v = (atomic_t *)l;
56582@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
56583 atomic_dec(v);
56584 }
56585
56586+#ifdef CONFIG_PAX_REFCOUNT
56587+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56588+{
56589+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56590+
56591+ atomic_dec_unchecked(v);
56592+}
56593+#endif
56594+
56595 static inline void atomic_long_add(long i, atomic_long_t *l)
56596 {
56597 atomic_t *v = (atomic_t *)l;
56598@@ -176,6 +278,15 @@ static inline void atomic_long_add(long
56599 atomic_add(i, v);
56600 }
56601
56602+#ifdef CONFIG_PAX_REFCOUNT
56603+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56604+{
56605+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56606+
56607+ atomic_add_unchecked(i, v);
56608+}
56609+#endif
56610+
56611 static inline void atomic_long_sub(long i, atomic_long_t *l)
56612 {
56613 atomic_t *v = (atomic_t *)l;
56614@@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
56615 return (long)atomic_inc_return(v);
56616 }
56617
56618+#ifdef CONFIG_PAX_REFCOUNT
56619+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56620+{
56621+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56622+
56623+ return (long)atomic_inc_return_unchecked(v);
56624+}
56625+#endif
56626+
56627 static inline long atomic_long_dec_return(atomic_long_t *l)
56628 {
56629 atomic_t *v = (atomic_t *)l;
56630@@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
56631
56632 #endif /* BITS_PER_LONG == 64 */
56633
56634+#ifdef CONFIG_PAX_REFCOUNT
56635+static inline void pax_refcount_needs_these_functions(void)
56636+{
56637+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
56638+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56639+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56640+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56641+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56642+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56643+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56644+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56645+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56646+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56647+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56648+
56649+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56650+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56651+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56652+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56653+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56654+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56655+}
56656+#else
56657+#define atomic_read_unchecked(v) atomic_read(v)
56658+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56659+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56660+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56661+#define atomic_inc_unchecked(v) atomic_inc(v)
56662+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56663+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56664+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56665+#define atomic_dec_unchecked(v) atomic_dec(v)
56666+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56667+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56668+
56669+#define atomic_long_read_unchecked(v) atomic_long_read(v)
56670+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56671+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56672+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56673+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56674+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56675+#endif
56676+
56677 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
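
 To make the intent of the *_unchecked additions above concrete, a short
 kernel-context sketch follows (not part of the patch; the struct and helpers
 are invented, and it assumes the atomic_unchecked_t type this patch introduces
 in the arch headers). A true reference count stays on atomic_t so
 PAX_REFCOUNT can trap its overflow, while a counter whose wrap-around is
 harmless opts out through the unchecked variant.

 #include <asm/atomic.h>

 /* Hypothetical object mixing a protected refcount with an unchecked
  * statistics counter. */
 struct conn {
         atomic_t           refcnt;     /* overflow here would be exploitable: checked */
         atomic_unchecked_t rx_packets; /* pure statistics: wrapping is harmless */
 };

 static inline void conn_get(struct conn *c)
 {
         atomic_inc(&c->refcnt);               /* trips PAX_REFCOUNT on overflow */
 }

 static inline void conn_count_rx(struct conn *c)
 {
         atomic_inc_unchecked(&c->rx_packets); /* keeps the old wrapping behaviour */
 }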
56678diff -urNp linux-2.6.32.45/include/asm-generic/bug.h linux-2.6.32.45/include/asm-generic/bug.h
56679--- linux-2.6.32.45/include/asm-generic/bug.h 2011-07-13 17:23:04.000000000 -0400
56680+++ linux-2.6.32.45/include/asm-generic/bug.h 2011-08-21 17:56:07.000000000 -0400
56681@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const cha
56682
56683 #else /* !CONFIG_BUG */
56684 #ifndef HAVE_ARCH_BUG
56685-#define BUG() do {} while(0)
56686+#define BUG() do { for (;;) ; } while(0)
56687 #endif
56688
56689 #ifndef HAVE_ARCH_BUG_ON
56690-#define BUG_ON(condition) do { if (condition) ; } while(0)
56691+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
56692 #endif
56693
56694 #ifndef HAVE_ARCH_WARN_ON
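
 A brief kernel-context illustration (not part of the patch; the function is
 invented) of why the !CONFIG_BUG stubs above are turned into infinite loops:
 callers routinely rely on BUG()/BUG_ON() never returning, and an empty stub
 lets execution continue past a state the author declared impossible.

 #include <linux/bug.h>

 /* Callers guarantee 0 <= idx < len; BUG() documents that assumption.
  * With the old empty !CONFIG_BUG stub an out-of-range idx would fall
  * through to an out-of-bounds read; with for (;;) it cannot. */
 static int table_lookup(const int *table, int len, int idx)
 {
         if (idx < 0 || idx >= len)
                 BUG();
         return table[idx];
 }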
56695diff -urNp linux-2.6.32.45/include/asm-generic/cache.h linux-2.6.32.45/include/asm-generic/cache.h
56696--- linux-2.6.32.45/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
56697+++ linux-2.6.32.45/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
56698@@ -6,7 +6,7 @@
56699 * cache lines need to provide their own cache.h.
56700 */
56701
56702-#define L1_CACHE_SHIFT 5
56703-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
56704+#define L1_CACHE_SHIFT 5UL
56705+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
56706
56707 #endif /* __ASM_GENERIC_CACHE_H */
56708diff -urNp linux-2.6.32.45/include/asm-generic/dma-mapping-common.h linux-2.6.32.45/include/asm-generic/dma-mapping-common.h
56709--- linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
56710+++ linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
56711@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
56712 enum dma_data_direction dir,
56713 struct dma_attrs *attrs)
56714 {
56715- struct dma_map_ops *ops = get_dma_ops(dev);
56716+ const struct dma_map_ops *ops = get_dma_ops(dev);
56717 dma_addr_t addr;
56718
56719 kmemcheck_mark_initialized(ptr, size);
56720@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
56721 enum dma_data_direction dir,
56722 struct dma_attrs *attrs)
56723 {
56724- struct dma_map_ops *ops = get_dma_ops(dev);
56725+ const struct dma_map_ops *ops = get_dma_ops(dev);
56726
56727 BUG_ON(!valid_dma_direction(dir));
56728 if (ops->unmap_page)
56729@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
56730 int nents, enum dma_data_direction dir,
56731 struct dma_attrs *attrs)
56732 {
56733- struct dma_map_ops *ops = get_dma_ops(dev);
56734+ const struct dma_map_ops *ops = get_dma_ops(dev);
56735 int i, ents;
56736 struct scatterlist *s;
56737
56738@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
56739 int nents, enum dma_data_direction dir,
56740 struct dma_attrs *attrs)
56741 {
56742- struct dma_map_ops *ops = get_dma_ops(dev);
56743+ const struct dma_map_ops *ops = get_dma_ops(dev);
56744
56745 BUG_ON(!valid_dma_direction(dir));
56746 debug_dma_unmap_sg(dev, sg, nents, dir);
56747@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
56748 size_t offset, size_t size,
56749 enum dma_data_direction dir)
56750 {
56751- struct dma_map_ops *ops = get_dma_ops(dev);
56752+ const struct dma_map_ops *ops = get_dma_ops(dev);
56753 dma_addr_t addr;
56754
56755 kmemcheck_mark_initialized(page_address(page) + offset, size);
56756@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
56757 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
56758 size_t size, enum dma_data_direction dir)
56759 {
56760- struct dma_map_ops *ops = get_dma_ops(dev);
56761+ const struct dma_map_ops *ops = get_dma_ops(dev);
56762
56763 BUG_ON(!valid_dma_direction(dir));
56764 if (ops->unmap_page)
56765@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
56766 size_t size,
56767 enum dma_data_direction dir)
56768 {
56769- struct dma_map_ops *ops = get_dma_ops(dev);
56770+ const struct dma_map_ops *ops = get_dma_ops(dev);
56771
56772 BUG_ON(!valid_dma_direction(dir));
56773 if (ops->sync_single_for_cpu)
56774@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
56775 dma_addr_t addr, size_t size,
56776 enum dma_data_direction dir)
56777 {
56778- struct dma_map_ops *ops = get_dma_ops(dev);
56779+ const struct dma_map_ops *ops = get_dma_ops(dev);
56780
56781 BUG_ON(!valid_dma_direction(dir));
56782 if (ops->sync_single_for_device)
56783@@ -123,7 +123,7 @@ static inline void dma_sync_single_range
56784 size_t size,
56785 enum dma_data_direction dir)
56786 {
56787- struct dma_map_ops *ops = get_dma_ops(dev);
56788+ const struct dma_map_ops *ops = get_dma_ops(dev);
56789
56790 BUG_ON(!valid_dma_direction(dir));
56791 if (ops->sync_single_range_for_cpu) {
56792@@ -140,7 +140,7 @@ static inline void dma_sync_single_range
56793 size_t size,
56794 enum dma_data_direction dir)
56795 {
56796- struct dma_map_ops *ops = get_dma_ops(dev);
56797+ const struct dma_map_ops *ops = get_dma_ops(dev);
56798
56799 BUG_ON(!valid_dma_direction(dir));
56800 if (ops->sync_single_range_for_device) {
56801@@ -155,7 +155,7 @@ static inline void
56802 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
56803 int nelems, enum dma_data_direction dir)
56804 {
56805- struct dma_map_ops *ops = get_dma_ops(dev);
56806+ const struct dma_map_ops *ops = get_dma_ops(dev);
56807
56808 BUG_ON(!valid_dma_direction(dir));
56809 if (ops->sync_sg_for_cpu)
56810@@ -167,7 +167,7 @@ static inline void
56811 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
56812 int nelems, enum dma_data_direction dir)
56813 {
56814- struct dma_map_ops *ops = get_dma_ops(dev);
56815+ const struct dma_map_ops *ops = get_dma_ops(dev);
56816
56817 BUG_ON(!valid_dma_direction(dir));
56818 if (ops->sync_sg_for_device)
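Every helper in this file now takes its ops table through a pointer-to-const, so an accidental write through the pointer (for example overwriting a callback at run time) becomes a compile-time error instead of a silent function-pointer overwrite. A reduced illustration, not taken from the patch:

struct ops {
    int (*do_thing)(int);
};

static int real_do_thing(int x) { return x + 1; }

static const struct ops the_ops = { .do_thing = real_do_thing };

static int call_it(int x)
{
    const struct ops *ops = &the_ops;   /* as in the patched helpers */

    /* ops->do_thing = NULL; */         /* now rejected by the compiler */
    return ops->do_thing(x);
}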
56819diff -urNp linux-2.6.32.45/include/asm-generic/emergency-restart.h linux-2.6.32.45/include/asm-generic/emergency-restart.h
56820--- linux-2.6.32.45/include/asm-generic/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
56821+++ linux-2.6.32.45/include/asm-generic/emergency-restart.h 2011-08-21 19:17:17.000000000 -0400
56822@@ -1,7 +1,7 @@
56823 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
56824 #define _ASM_GENERIC_EMERGENCY_RESTART_H
56825
56826-static inline void machine_emergency_restart(void)
56827+static inline __noreturn void machine_emergency_restart(void)
56828 {
56829 machine_restart(NULL);
56830 }
56831diff -urNp linux-2.6.32.45/include/asm-generic/futex.h linux-2.6.32.45/include/asm-generic/futex.h
56832--- linux-2.6.32.45/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
56833+++ linux-2.6.32.45/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
56834@@ -6,7 +6,7 @@
56835 #include <asm/errno.h>
56836
56837 static inline int
56838-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
56839+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
56840 {
56841 int op = (encoded_op >> 28) & 7;
56842 int cmp = (encoded_op >> 24) & 15;
56843@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
56844 }
56845
56846 static inline int
56847-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
56848+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
56849 {
56850 return -ENOSYS;
56851 }
56852diff -urNp linux-2.6.32.45/include/asm-generic/int-l64.h linux-2.6.32.45/include/asm-generic/int-l64.h
56853--- linux-2.6.32.45/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
56854+++ linux-2.6.32.45/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
56855@@ -46,6 +46,8 @@ typedef unsigned int u32;
56856 typedef signed long s64;
56857 typedef unsigned long u64;
56858
56859+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
56860+
56861 #define S8_C(x) x
56862 #define U8_C(x) x ## U
56863 #define S16_C(x) x
56864diff -urNp linux-2.6.32.45/include/asm-generic/int-ll64.h linux-2.6.32.45/include/asm-generic/int-ll64.h
56865--- linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
56866+++ linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
56867@@ -51,6 +51,8 @@ typedef unsigned int u32;
56868 typedef signed long long s64;
56869 typedef unsigned long long u64;
56870
56871+typedef unsigned long long intoverflow_t;
56872+
56873 #define S8_C(x) x
56874 #define U8_C(x) x ## U
56875 #define S16_C(x) x
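intoverflow_t is an integer type meant to be wide enough to hold the result of size arithmetic before it is truncated back to unsigned long: unsigned long long in this header, and a 128-bit mode(TI) type in the int-l64.h variant above. A hedged sketch of the wrap-around check this enables, assuming a configuration where unsigned long is 32 bits wide so the 64-bit intoverflow_t can hold the full product (the helper name is made up):

#include <limits.h>

typedef unsigned long long intoverflow_t;   /* the int-ll64.h definition */

/* returns 1 if n * size fits in an unsigned long, 0 if it would wrap */
static int mul_fits(unsigned long n, unsigned long size)
{
    if ((intoverflow_t)n * size > ULONG_MAX)
        return 0;
    return 1;
}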
56876diff -urNp linux-2.6.32.45/include/asm-generic/kmap_types.h linux-2.6.32.45/include/asm-generic/kmap_types.h
56877--- linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
56878+++ linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
56879@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
56880 KMAP_D(16) KM_IRQ_PTE,
56881 KMAP_D(17) KM_NMI,
56882 KMAP_D(18) KM_NMI_PTE,
56883-KMAP_D(19) KM_TYPE_NR
56884+KMAP_D(19) KM_CLEARPAGE,
56885+KMAP_D(20) KM_TYPE_NR
56886 };
56887
56888 #undef KMAP_D
56889diff -urNp linux-2.6.32.45/include/asm-generic/pgtable.h linux-2.6.32.45/include/asm-generic/pgtable.h
56890--- linux-2.6.32.45/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
56891+++ linux-2.6.32.45/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
56892@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
56893 unsigned long size);
56894 #endif
56895
56896+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
56897+static inline unsigned long pax_open_kernel(void) { return 0; }
56898+#endif
56899+
56900+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
56901+static inline unsigned long pax_close_kernel(void) { return 0; }
56902+#endif
56903+
56904 #endif /* !__ASSEMBLY__ */
56905
56906 #endif /* _ASM_GENERIC_PGTABLE_H */
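Architectures that do not provide their own implementations get no-op pax_open_kernel()/pax_close_kernel() stubs, so generic code can always bracket a write to normally write-protected kernel data with the same pair, and the bracketing only costs something where an architecture actually re-enables write access. An illustrative, purely hypothetical caller:

/* hypothetical helper: update a value that normally sits in
 * write-protected kernel data; on architectures using the generic
 * stubs above the two calls compile to nothing */
static void set_protected_limit(unsigned long *limit, unsigned long val)
{
    pax_open_kernel();
    *limit = val;
    pax_close_kernel();
}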
56907diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h
56908--- linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
56909+++ linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
56910@@ -1,14 +1,19 @@
56911 #ifndef _PGTABLE_NOPMD_H
56912 #define _PGTABLE_NOPMD_H
56913
56914-#ifndef __ASSEMBLY__
56915-
56916 #include <asm-generic/pgtable-nopud.h>
56917
56918-struct mm_struct;
56919-
56920 #define __PAGETABLE_PMD_FOLDED
56921
56922+#define PMD_SHIFT PUD_SHIFT
56923+#define PTRS_PER_PMD 1
56924+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
56925+#define PMD_MASK (~(PMD_SIZE-1))
56926+
56927+#ifndef __ASSEMBLY__
56928+
56929+struct mm_struct;
56930+
56931 /*
56932 * Having the pmd type consist of a pud gets the size right, and allows
56933 * us to conceptually access the pud entry that this pmd is folded into
56934@@ -16,11 +21,6 @@ struct mm_struct;
56935 */
56936 typedef struct { pud_t pud; } pmd_t;
56937
56938-#define PMD_SHIFT PUD_SHIFT
56939-#define PTRS_PER_PMD 1
56940-#define PMD_SIZE (1UL << PMD_SHIFT)
56941-#define PMD_MASK (~(PMD_SIZE-1))
56942-
56943 /*
56944 * The "pud_xxx()" functions here are trivial for a folded two-level
56945 * setup: the pmd is never bad, and a pmd always exists (as it's folded
56946diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopud.h linux-2.6.32.45/include/asm-generic/pgtable-nopud.h
56947--- linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
56948+++ linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
56949@@ -1,10 +1,15 @@
56950 #ifndef _PGTABLE_NOPUD_H
56951 #define _PGTABLE_NOPUD_H
56952
56953-#ifndef __ASSEMBLY__
56954-
56955 #define __PAGETABLE_PUD_FOLDED
56956
56957+#define PUD_SHIFT PGDIR_SHIFT
56958+#define PTRS_PER_PUD 1
56959+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
56960+#define PUD_MASK (~(PUD_SIZE-1))
56961+
56962+#ifndef __ASSEMBLY__
56963+
56964 /*
56965 * Having the pud type consist of a pgd gets the size right, and allows
56966 * us to conceptually access the pgd entry that this pud is folded into
56967@@ -12,11 +17,6 @@
56968 */
56969 typedef struct { pgd_t pgd; } pud_t;
56970
56971-#define PUD_SHIFT PGDIR_SHIFT
56972-#define PTRS_PER_PUD 1
56973-#define PUD_SIZE (1UL << PUD_SHIFT)
56974-#define PUD_MASK (~(PUD_SIZE-1))
56975-
56976 /*
56977 * The "pgd_xxx()" functions here are trivial for a folded two-level
56978 * setup: the pud is never bad, and a pud always exists (as it's folded
56979diff -urNp linux-2.6.32.45/include/asm-generic/vmlinux.lds.h linux-2.6.32.45/include/asm-generic/vmlinux.lds.h
56980--- linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
56981+++ linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
56982@@ -199,6 +199,7 @@
56983 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
56984 VMLINUX_SYMBOL(__start_rodata) = .; \
56985 *(.rodata) *(.rodata.*) \
56986+ *(.data.read_only) \
56987 *(__vermagic) /* Kernel version magic */ \
56988 *(__markers_strings) /* Markers: strings */ \
56989 *(__tracepoints_strings)/* Tracepoints: strings */ \
56990@@ -656,22 +657,24 @@
56991 * section in the linker script will go there too. @phdr should have
56992 * a leading colon.
56993 *
56994- * Note that this macros defines __per_cpu_load as an absolute symbol.
56995+ * Note that this macros defines per_cpu_load as an absolute symbol.
56996 * If there is no need to put the percpu section at a predetermined
56997 * address, use PERCPU().
56998 */
56999 #define PERCPU_VADDR(vaddr, phdr) \
57000- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57001- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57002+ per_cpu_load = .; \
57003+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57004 - LOAD_OFFSET) { \
57005+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57006 VMLINUX_SYMBOL(__per_cpu_start) = .; \
57007 *(.data.percpu.first) \
57008- *(.data.percpu.page_aligned) \
57009 *(.data.percpu) \
57010+ . = ALIGN(PAGE_SIZE); \
57011+ *(.data.percpu.page_aligned) \
57012 *(.data.percpu.shared_aligned) \
57013 VMLINUX_SYMBOL(__per_cpu_end) = .; \
57014 } phdr \
57015- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
57016+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
57017
57018 /**
57019 * PERCPU - define output section for percpu area, simple version
57020diff -urNp linux-2.6.32.45/include/drm/drm_crtc_helper.h linux-2.6.32.45/include/drm/drm_crtc_helper.h
57021--- linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-03-27 14:31:47.000000000 -0400
57022+++ linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-08-05 20:33:55.000000000 -0400
57023@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
57024
57025 /* reload the current crtc LUT */
57026 void (*load_lut)(struct drm_crtc *crtc);
57027-};
57028+} __no_const;
57029
57030 struct drm_encoder_helper_funcs {
57031 void (*dpms)(struct drm_encoder *encoder, int mode);
57032@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
57033 struct drm_connector *connector);
57034 /* disable encoder when not in use - more explicit than dpms off */
57035 void (*disable)(struct drm_encoder *encoder);
57036-};
57037+} __no_const;
57038
57039 struct drm_connector_helper_funcs {
57040 int (*get_modes)(struct drm_connector *connector);
57041diff -urNp linux-2.6.32.45/include/drm/drmP.h linux-2.6.32.45/include/drm/drmP.h
57042--- linux-2.6.32.45/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
57043+++ linux-2.6.32.45/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
57044@@ -71,6 +71,7 @@
57045 #include <linux/workqueue.h>
57046 #include <linux/poll.h>
57047 #include <asm/pgalloc.h>
57048+#include <asm/local.h>
57049 #include "drm.h"
57050
57051 #include <linux/idr.h>
57052@@ -814,7 +815,7 @@ struct drm_driver {
57053 void (*vgaarb_irq)(struct drm_device *dev, bool state);
57054
57055 /* Driver private ops for this object */
57056- struct vm_operations_struct *gem_vm_ops;
57057+ const struct vm_operations_struct *gem_vm_ops;
57058
57059 int major;
57060 int minor;
57061@@ -917,7 +918,7 @@ struct drm_device {
57062
57063 /** \name Usage Counters */
57064 /*@{ */
57065- int open_count; /**< Outstanding files open */
57066+ local_t open_count; /**< Outstanding files open */
57067 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57068 atomic_t vma_count; /**< Outstanding vma areas open */
57069 int buf_use; /**< Buffers in use -- cannot alloc */
57070@@ -928,7 +929,7 @@ struct drm_device {
57071 /*@{ */
57072 unsigned long counters;
57073 enum drm_stat_type types[15];
57074- atomic_t counts[15];
57075+ atomic_unchecked_t counts[15];
57076 /*@} */
57077
57078 struct list_head filelist;
57079@@ -1016,7 +1017,7 @@ struct drm_device {
57080 struct pci_controller *hose;
57081 #endif
57082 struct drm_sg_mem *sg; /**< Scatter gather memory */
57083- unsigned int num_crtcs; /**< Number of CRTCs on this device */
57084+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
57085 void *dev_private; /**< device private data */
57086 void *mm_private;
57087 struct address_space *dev_mapping;
57088@@ -1042,11 +1043,11 @@ struct drm_device {
57089 spinlock_t object_name_lock;
57090 struct idr object_name_idr;
57091 atomic_t object_count;
57092- atomic_t object_memory;
57093+ atomic_unchecked_t object_memory;
57094 atomic_t pin_count;
57095- atomic_t pin_memory;
57096+ atomic_unchecked_t pin_memory;
57097 atomic_t gtt_count;
57098- atomic_t gtt_memory;
57099+ atomic_unchecked_t gtt_memory;
57100 uint32_t gtt_total;
57101 uint32_t invalidate_domains; /* domains pending invalidation */
57102 uint32_t flush_domains; /* domains pending flush */
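The DRM statistics counters (counts[], object_memory, pin_memory, gtt_memory) move to atomic_unchecked_t, the patch's variant that is exempt from atomic overflow detection, presumably because wrap-around of a pure accounting value is harmless, while reference-count style fields keep the checked atomic_t. A reduced sketch of the two types side by side (the field and function names are invented, and it assumes the patch's atomic_*_unchecked helpers are in scope):

struct drm_stats_sketch {
    atomic_t           refcount;      /* overflow would be dangerous: checked */
    atomic_unchecked_t bytes_mapped;  /* statistic only: wrap is tolerated */
};

static void account_map(struct drm_stats_sketch *s, int bytes)
{
    atomic_inc(&s->refcount);
    atomic_add_unchecked(bytes, &s->bytes_mapped);
}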
57103diff -urNp linux-2.6.32.45/include/drm/ttm/ttm_memory.h linux-2.6.32.45/include/drm/ttm/ttm_memory.h
57104--- linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-03-27 14:31:47.000000000 -0400
57105+++ linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-08-05 20:33:55.000000000 -0400
57106@@ -47,7 +47,7 @@
57107
57108 struct ttm_mem_shrink {
57109 int (*do_shrink) (struct ttm_mem_shrink *);
57110-};
57111+} __no_const;
57112
57113 /**
57114 * struct ttm_mem_global - Global memory accounting structure.
57115diff -urNp linux-2.6.32.45/include/linux/a.out.h linux-2.6.32.45/include/linux/a.out.h
57116--- linux-2.6.32.45/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
57117+++ linux-2.6.32.45/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
57118@@ -39,6 +39,14 @@ enum machine_type {
57119 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57120 };
57121
57122+/* Constants for the N_FLAGS field */
57123+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57124+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57125+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57126+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57127+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57128+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57129+
57130 #if !defined (N_MAGIC)
57131 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57132 #endif
57133diff -urNp linux-2.6.32.45/include/linux/atmdev.h linux-2.6.32.45/include/linux/atmdev.h
57134--- linux-2.6.32.45/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
57135+++ linux-2.6.32.45/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
57136@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57137 #endif
57138
57139 struct k_atm_aal_stats {
57140-#define __HANDLE_ITEM(i) atomic_t i
57141+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57142 __AAL_STAT_ITEMS
57143 #undef __HANDLE_ITEM
57144 };
57145diff -urNp linux-2.6.32.45/include/linux/backlight.h linux-2.6.32.45/include/linux/backlight.h
57146--- linux-2.6.32.45/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
57147+++ linux-2.6.32.45/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
57148@@ -36,18 +36,18 @@ struct backlight_device;
57149 struct fb_info;
57150
57151 struct backlight_ops {
57152- unsigned int options;
57153+ const unsigned int options;
57154
57155 #define BL_CORE_SUSPENDRESUME (1 << 0)
57156
57157 /* Notify the backlight driver some property has changed */
57158- int (*update_status)(struct backlight_device *);
57159+ int (* const update_status)(struct backlight_device *);
57160 /* Return the current backlight brightness (accounting for power,
57161 fb_blank etc.) */
57162- int (*get_brightness)(struct backlight_device *);
57163+ int (* const get_brightness)(struct backlight_device *);
57164 /* Check if given framebuffer device is the one bound to this backlight;
57165 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
57166- int (*check_fb)(struct fb_info *);
57167+ int (* const check_fb)(struct fb_info *);
57168 };
57169
57170 /* This structure defines all the properties of a backlight */
57171@@ -86,7 +86,7 @@ struct backlight_device {
57172 registered this device has been unloaded, and if class_get_devdata()
57173 points to something in the body of that driver, it is also invalid. */
57174 struct mutex ops_lock;
57175- struct backlight_ops *ops;
57176+ const struct backlight_ops *ops;
57177
57178 /* The framebuffer notifier block */
57179 struct notifier_block fb_notif;
57180@@ -103,7 +103,7 @@ static inline void backlight_update_stat
57181 }
57182
57183 extern struct backlight_device *backlight_device_register(const char *name,
57184- struct device *dev, void *devdata, struct backlight_ops *ops);
57185+ struct device *dev, void *devdata, const struct backlight_ops *ops);
57186 extern void backlight_device_unregister(struct backlight_device *bd);
57187 extern void backlight_force_update(struct backlight_device *bd,
57188 enum backlight_update_reason reason);
57189diff -urNp linux-2.6.32.45/include/linux/binfmts.h linux-2.6.32.45/include/linux/binfmts.h
57190--- linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
57191+++ linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
57192@@ -83,6 +83,7 @@ struct linux_binfmt {
57193 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57194 int (*load_shlib)(struct file *);
57195 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
57196+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57197 unsigned long min_coredump; /* minimal dump size */
57198 int hasvdso;
57199 };
57200diff -urNp linux-2.6.32.45/include/linux/blkdev.h linux-2.6.32.45/include/linux/blkdev.h
57201--- linux-2.6.32.45/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
57202+++ linux-2.6.32.45/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
57203@@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
57204 #endif /* CONFIG_BLK_DEV_INTEGRITY */
57205
57206 struct block_device_operations {
57207- int (*open) (struct block_device *, fmode_t);
57208- int (*release) (struct gendisk *, fmode_t);
57209- int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57210- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57211- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57212- int (*direct_access) (struct block_device *, sector_t,
57213+ int (* const open) (struct block_device *, fmode_t);
57214+ int (* const release) (struct gendisk *, fmode_t);
57215+ int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57216+ int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57217+ int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57218+ int (* const direct_access) (struct block_device *, sector_t,
57219 void **, unsigned long *);
57220- int (*media_changed) (struct gendisk *);
57221- unsigned long long (*set_capacity) (struct gendisk *,
57222+ int (* const media_changed) (struct gendisk *);
57223+ unsigned long long (* const set_capacity) (struct gendisk *,
57224 unsigned long long);
57225- int (*revalidate_disk) (struct gendisk *);
57226- int (*getgeo)(struct block_device *, struct hd_geometry *);
57227- struct module *owner;
57228+ int (* const revalidate_disk) (struct gendisk *);
57229+ int (*const getgeo)(struct block_device *, struct hd_geometry *);
57230+ struct module * const owner;
57231 };
57232
57233 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57234diff -urNp linux-2.6.32.45/include/linux/blktrace_api.h linux-2.6.32.45/include/linux/blktrace_api.h
57235--- linux-2.6.32.45/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
57236+++ linux-2.6.32.45/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
57237@@ -160,7 +160,7 @@ struct blk_trace {
57238 struct dentry *dir;
57239 struct dentry *dropped_file;
57240 struct dentry *msg_file;
57241- atomic_t dropped;
57242+ atomic_unchecked_t dropped;
57243 };
57244
57245 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57246diff -urNp linux-2.6.32.45/include/linux/byteorder/little_endian.h linux-2.6.32.45/include/linux/byteorder/little_endian.h
57247--- linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
57248+++ linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
57249@@ -42,51 +42,51 @@
57250
57251 static inline __le64 __cpu_to_le64p(const __u64 *p)
57252 {
57253- return (__force __le64)*p;
57254+ return (__force const __le64)*p;
57255 }
57256 static inline __u64 __le64_to_cpup(const __le64 *p)
57257 {
57258- return (__force __u64)*p;
57259+ return (__force const __u64)*p;
57260 }
57261 static inline __le32 __cpu_to_le32p(const __u32 *p)
57262 {
57263- return (__force __le32)*p;
57264+ return (__force const __le32)*p;
57265 }
57266 static inline __u32 __le32_to_cpup(const __le32 *p)
57267 {
57268- return (__force __u32)*p;
57269+ return (__force const __u32)*p;
57270 }
57271 static inline __le16 __cpu_to_le16p(const __u16 *p)
57272 {
57273- return (__force __le16)*p;
57274+ return (__force const __le16)*p;
57275 }
57276 static inline __u16 __le16_to_cpup(const __le16 *p)
57277 {
57278- return (__force __u16)*p;
57279+ return (__force const __u16)*p;
57280 }
57281 static inline __be64 __cpu_to_be64p(const __u64 *p)
57282 {
57283- return (__force __be64)__swab64p(p);
57284+ return (__force const __be64)__swab64p(p);
57285 }
57286 static inline __u64 __be64_to_cpup(const __be64 *p)
57287 {
57288- return __swab64p((__u64 *)p);
57289+ return __swab64p((const __u64 *)p);
57290 }
57291 static inline __be32 __cpu_to_be32p(const __u32 *p)
57292 {
57293- return (__force __be32)__swab32p(p);
57294+ return (__force const __be32)__swab32p(p);
57295 }
57296 static inline __u32 __be32_to_cpup(const __be32 *p)
57297 {
57298- return __swab32p((__u32 *)p);
57299+ return __swab32p((const __u32 *)p);
57300 }
57301 static inline __be16 __cpu_to_be16p(const __u16 *p)
57302 {
57303- return (__force __be16)__swab16p(p);
57304+ return (__force const __be16)__swab16p(p);
57305 }
57306 static inline __u16 __be16_to_cpup(const __be16 *p)
57307 {
57308- return __swab16p((__u16 *)p);
57309+ return __swab16p((const __u16 *)p);
57310 }
57311 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57312 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57313diff -urNp linux-2.6.32.45/include/linux/cache.h linux-2.6.32.45/include/linux/cache.h
57314--- linux-2.6.32.45/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
57315+++ linux-2.6.32.45/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
57316@@ -16,6 +16,10 @@
57317 #define __read_mostly
57318 #endif
57319
57320+#ifndef __read_only
57321+#define __read_only __read_mostly
57322+#endif
57323+
57324 #ifndef ____cacheline_aligned
57325 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57326 #endif
57327diff -urNp linux-2.6.32.45/include/linux/capability.h linux-2.6.32.45/include/linux/capability.h
57328--- linux-2.6.32.45/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
57329+++ linux-2.6.32.45/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
57330@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
57331 (security_real_capable_noaudit((t), (cap)) == 0)
57332
57333 extern int capable(int cap);
57334+int capable_nolog(int cap);
57335
57336 /* audit system wants to get cap info from files as well */
57337 struct dentry;
57338diff -urNp linux-2.6.32.45/include/linux/compiler-gcc4.h linux-2.6.32.45/include/linux/compiler-gcc4.h
57339--- linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
57340+++ linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-08-05 20:33:55.000000000 -0400
57341@@ -36,4 +36,13 @@
57342 the kernel context */
57343 #define __cold __attribute__((__cold__))
57344
57345+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57346+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57347+#define __bos0(ptr) __bos((ptr), 0)
57348+#define __bos1(ptr) __bos((ptr), 1)
57349+
57350+#if __GNUC_MINOR__ >= 5
57351+#define __no_const __attribute__((no_const))
57352+#endif
57353+
57354 #endif
57355diff -urNp linux-2.6.32.45/include/linux/compiler.h linux-2.6.32.45/include/linux/compiler.h
57356--- linux-2.6.32.45/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
57357+++ linux-2.6.32.45/include/linux/compiler.h 2011-08-05 20:33:55.000000000 -0400
57358@@ -247,6 +247,10 @@ void ftrace_likely_update(struct ftrace_
57359 # define __attribute_const__ /* unimplemented */
57360 #endif
57361
57362+#ifndef __no_const
57363+# define __no_const
57364+#endif
57365+
57366 /*
57367 * Tell gcc if a function is cold. The compiler will assume any path
57368 * directly leading to the call is unlikely.
57369@@ -256,6 +260,22 @@ void ftrace_likely_update(struct ftrace_
57370 #define __cold
57371 #endif
57372
57373+#ifndef __alloc_size
57374+#define __alloc_size(...)
57375+#endif
57376+
57377+#ifndef __bos
57378+#define __bos(ptr, arg)
57379+#endif
57380+
57381+#ifndef __bos0
57382+#define __bos0(ptr)
57383+#endif
57384+
57385+#ifndef __bos1
57386+#define __bos1(ptr)
57387+#endif
57388+
57389 /* Simple shorthand for a section definition */
57390 #ifndef __section
57391 # define __section(S) __attribute__ ((__section__(#S)))
57392@@ -278,6 +298,7 @@ void ftrace_likely_update(struct ftrace_
57393 * use is to mediate communication between process-level code and irq/NMI
57394 * handlers, all running on the same CPU.
57395 */
57396-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57397+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57398+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57399
57400 #endif /* __LINUX_COMPILER_H */
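ACCESS_ONCE() now casts through a const-qualified volatile pointer, which keeps it usable for reads but makes the result a non-modifiable lvalue; code that needs a once-only write has to say so explicitly with the new ACCESS_ONCE_RW(). A user-space sketch of the distinction:

#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

static int shared_flag;

static int reader(void)
{
    return ACCESS_ONCE(shared_flag);     /* read: allowed by either macro */
}

static void writer(int v)
{
    ACCESS_ONCE_RW(shared_flag) = v;     /* write: needs the RW variant */
    /* ACCESS_ONCE(shared_flag) = v; */  /* rejected: assignment to const */
}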
57401diff -urNp linux-2.6.32.45/include/linux/crypto.h linux-2.6.32.45/include/linux/crypto.h
57402--- linux-2.6.32.45/include/linux/crypto.h 2011-03-27 14:31:47.000000000 -0400
57403+++ linux-2.6.32.45/include/linux/crypto.h 2011-08-05 20:33:55.000000000 -0400
57404@@ -394,7 +394,7 @@ struct cipher_tfm {
57405 const u8 *key, unsigned int keylen);
57406 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57407 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57408-};
57409+} __no_const;
57410
57411 struct hash_tfm {
57412 int (*init)(struct hash_desc *desc);
57413@@ -415,13 +415,13 @@ struct compress_tfm {
57414 int (*cot_decompress)(struct crypto_tfm *tfm,
57415 const u8 *src, unsigned int slen,
57416 u8 *dst, unsigned int *dlen);
57417-};
57418+} __no_const;
57419
57420 struct rng_tfm {
57421 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57422 unsigned int dlen);
57423 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57424-};
57425+} __no_const;
57426
57427 #define crt_ablkcipher crt_u.ablkcipher
57428 #define crt_aead crt_u.aead
57429diff -urNp linux-2.6.32.45/include/linux/dcache.h linux-2.6.32.45/include/linux/dcache.h
57430--- linux-2.6.32.45/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
57431+++ linux-2.6.32.45/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
57432@@ -119,6 +119,8 @@ struct dentry {
57433 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
57434 };
57435
57436+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
57437+
57438 /*
57439 * dentry->d_lock spinlock nesting subclasses:
57440 *
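Since d_iname is the last member of struct dentry, sizeof(struct dentry) minus its offset covers every byte from the start of d_iname to the end of the structure, so DNAME_INLINE_LEN reflects the real inline-name capacity including any trailing padding. The same trick on a toy structure:

#include <stdio.h>
#include <stddef.h>

struct toy {
    long meta;
    unsigned char name[7];   /* last member, like d_iname */
};

#define TOY_INLINE_LEN (sizeof(struct toy) - offsetof(struct toy, name))

int main(void)
{
    /* on a typical LP64 ABI the structure is padded to 16 bytes,
       so 8 bytes are usable even though only 7 were declared */
    printf("declared %zu, usable %zu\n",
           sizeof(((struct toy *)0)->name), TOY_INLINE_LEN);
    return 0;
}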
57441diff -urNp linux-2.6.32.45/include/linux/decompress/mm.h linux-2.6.32.45/include/linux/decompress/mm.h
57442--- linux-2.6.32.45/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
57443+++ linux-2.6.32.45/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
57444@@ -78,7 +78,7 @@ static void free(void *where)
57445 * warnings when not needed (indeed large_malloc / large_free are not
57446 * needed by inflate */
57447
57448-#define malloc(a) kmalloc(a, GFP_KERNEL)
57449+#define malloc(a) kmalloc((a), GFP_KERNEL)
57450 #define free(a) kfree(a)
57451
57452 #define large_malloc(a) vmalloc(a)
57453diff -urNp linux-2.6.32.45/include/linux/dma-mapping.h linux-2.6.32.45/include/linux/dma-mapping.h
57454--- linux-2.6.32.45/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
57455+++ linux-2.6.32.45/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
57456@@ -16,50 +16,50 @@ enum dma_data_direction {
57457 };
57458
57459 struct dma_map_ops {
57460- void* (*alloc_coherent)(struct device *dev, size_t size,
57461+ void* (* const alloc_coherent)(struct device *dev, size_t size,
57462 dma_addr_t *dma_handle, gfp_t gfp);
57463- void (*free_coherent)(struct device *dev, size_t size,
57464+ void (* const free_coherent)(struct device *dev, size_t size,
57465 void *vaddr, dma_addr_t dma_handle);
57466- dma_addr_t (*map_page)(struct device *dev, struct page *page,
57467+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
57468 unsigned long offset, size_t size,
57469 enum dma_data_direction dir,
57470 struct dma_attrs *attrs);
57471- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
57472+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
57473 size_t size, enum dma_data_direction dir,
57474 struct dma_attrs *attrs);
57475- int (*map_sg)(struct device *dev, struct scatterlist *sg,
57476+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
57477 int nents, enum dma_data_direction dir,
57478 struct dma_attrs *attrs);
57479- void (*unmap_sg)(struct device *dev,
57480+ void (* const unmap_sg)(struct device *dev,
57481 struct scatterlist *sg, int nents,
57482 enum dma_data_direction dir,
57483 struct dma_attrs *attrs);
57484- void (*sync_single_for_cpu)(struct device *dev,
57485+ void (* const sync_single_for_cpu)(struct device *dev,
57486 dma_addr_t dma_handle, size_t size,
57487 enum dma_data_direction dir);
57488- void (*sync_single_for_device)(struct device *dev,
57489+ void (* const sync_single_for_device)(struct device *dev,
57490 dma_addr_t dma_handle, size_t size,
57491 enum dma_data_direction dir);
57492- void (*sync_single_range_for_cpu)(struct device *dev,
57493+ void (* const sync_single_range_for_cpu)(struct device *dev,
57494 dma_addr_t dma_handle,
57495 unsigned long offset,
57496 size_t size,
57497 enum dma_data_direction dir);
57498- void (*sync_single_range_for_device)(struct device *dev,
57499+ void (* const sync_single_range_for_device)(struct device *dev,
57500 dma_addr_t dma_handle,
57501 unsigned long offset,
57502 size_t size,
57503 enum dma_data_direction dir);
57504- void (*sync_sg_for_cpu)(struct device *dev,
57505+ void (* const sync_sg_for_cpu)(struct device *dev,
57506 struct scatterlist *sg, int nents,
57507 enum dma_data_direction dir);
57508- void (*sync_sg_for_device)(struct device *dev,
57509+ void (* const sync_sg_for_device)(struct device *dev,
57510 struct scatterlist *sg, int nents,
57511 enum dma_data_direction dir);
57512- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
57513- int (*dma_supported)(struct device *dev, u64 mask);
57514+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
57515+ int (* const dma_supported)(struct device *dev, u64 mask);
57516 int (*set_dma_mask)(struct device *dev, u64 mask);
57517- int is_phys;
57518+ const int is_phys;
57519 };
57520
57521 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57522diff -urNp linux-2.6.32.45/include/linux/dst.h linux-2.6.32.45/include/linux/dst.h
57523--- linux-2.6.32.45/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
57524+++ linux-2.6.32.45/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
57525@@ -380,7 +380,7 @@ struct dst_node
57526 struct thread_pool *pool;
57527
57528 /* Transaction IDs live here */
57529- atomic_long_t gen;
57530+ atomic_long_unchecked_t gen;
57531
57532 /*
57533 * How frequently and how many times transaction
57534diff -urNp linux-2.6.32.45/include/linux/elf.h linux-2.6.32.45/include/linux/elf.h
57535--- linux-2.6.32.45/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
57536+++ linux-2.6.32.45/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
57537@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57538 #define PT_GNU_EH_FRAME 0x6474e550
57539
57540 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57541+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57542+
57543+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57544+
57545+/* Constants for the e_flags field */
57546+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57547+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57548+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57549+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57550+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57551+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57552
57553 /* These constants define the different elf file types */
57554 #define ET_NONE 0
57555@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
57556 #define DT_DEBUG 21
57557 #define DT_TEXTREL 22
57558 #define DT_JMPREL 23
57559+#define DT_FLAGS 30
57560+ #define DF_TEXTREL 0x00000004
57561 #define DT_ENCODING 32
57562 #define OLD_DT_LOOS 0x60000000
57563 #define DT_LOOS 0x6000000d
57564@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
57565 #define PF_W 0x2
57566 #define PF_X 0x1
57567
57568+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57569+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57570+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57571+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57572+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57573+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57574+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57575+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57576+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57577+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57578+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57579+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57580+
57581 typedef struct elf32_phdr{
57582 Elf32_Word p_type;
57583 Elf32_Off p_offset;
57584@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
57585 #define EI_OSABI 7
57586 #define EI_PAD 8
57587
57588+#define EI_PAX 14
57589+
57590 #define ELFMAG0 0x7f /* EI_MAG */
57591 #define ELFMAG1 'E'
57592 #define ELFMAG2 'L'
57593@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
57594 #define elf_phdr elf32_phdr
57595 #define elf_note elf32_note
57596 #define elf_addr_t Elf32_Off
57597+#define elf_dyn Elf32_Dyn
57598
57599 #else
57600
57601@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
57602 #define elf_phdr elf64_phdr
57603 #define elf_note elf64_note
57604 #define elf_addr_t Elf64_Off
57605+#define elf_dyn Elf64_Dyn
57606
57607 #endif
57608
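The new PT_PAX_FLAGS program header and the PF_*/PF_NO* bit pairs allow a per-binary PaX policy to be carried in the ELF file itself, with separate enable and disable bits so that "explicitly off" can be distinguished from "not marked at all". A hedged sketch of how a loader might interpret one such pair (the function is invented for illustration):

#include <elf.h>

#define PF_PAGEEXEC   (1U << 4)
#define PF_NOPAGEEXEC (1U << 5)

/* 1 = force on, 0 = force off, -1 = unmarked, fall back to the global policy */
static int pageexec_marking(const Elf64_Phdr *pax_phdr)
{
    if (pax_phdr->p_flags & PF_PAGEEXEC)
        return 1;
    if (pax_phdr->p_flags & PF_NOPAGEEXEC)
        return 0;
    return -1;
}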
57609diff -urNp linux-2.6.32.45/include/linux/fscache-cache.h linux-2.6.32.45/include/linux/fscache-cache.h
57610--- linux-2.6.32.45/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
57611+++ linux-2.6.32.45/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
57612@@ -116,7 +116,7 @@ struct fscache_operation {
57613 #endif
57614 };
57615
57616-extern atomic_t fscache_op_debug_id;
57617+extern atomic_unchecked_t fscache_op_debug_id;
57618 extern const struct slow_work_ops fscache_op_slow_work_ops;
57619
57620 extern void fscache_enqueue_operation(struct fscache_operation *);
57621@@ -134,7 +134,7 @@ static inline void fscache_operation_ini
57622 fscache_operation_release_t release)
57623 {
57624 atomic_set(&op->usage, 1);
57625- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57626+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57627 op->release = release;
57628 INIT_LIST_HEAD(&op->pend_link);
57629 fscache_set_op_state(op, "Init");
57630diff -urNp linux-2.6.32.45/include/linux/fs.h linux-2.6.32.45/include/linux/fs.h
57631--- linux-2.6.32.45/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
57632+++ linux-2.6.32.45/include/linux/fs.h 2011-08-23 21:22:32.000000000 -0400
57633@@ -90,6 +90,11 @@ struct inodes_stat_t {
57634 /* Expect random access pattern */
57635 #define FMODE_RANDOM ((__force fmode_t)4096)
57636
57637+/* Hack for grsec so as not to require read permission simply to execute
57638+ * a binary
57639+ */
57640+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
57641+
57642 /*
57643 * The below are the various read and write types that we support. Some of
57644 * them include behavioral modifiers that send information down to the
57645@@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
57646 unsigned long, unsigned long);
57647
57648 struct address_space_operations {
57649- int (*writepage)(struct page *page, struct writeback_control *wbc);
57650- int (*readpage)(struct file *, struct page *);
57651- void (*sync_page)(struct page *);
57652+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
57653+ int (* const readpage)(struct file *, struct page *);
57654+ void (* const sync_page)(struct page *);
57655
57656 /* Write back some dirty pages from this mapping. */
57657- int (*writepages)(struct address_space *, struct writeback_control *);
57658+ int (* const writepages)(struct address_space *, struct writeback_control *);
57659
57660 /* Set a page dirty. Return true if this dirtied it */
57661- int (*set_page_dirty)(struct page *page);
57662+ int (* const set_page_dirty)(struct page *page);
57663
57664- int (*readpages)(struct file *filp, struct address_space *mapping,
57665+ int (* const readpages)(struct file *filp, struct address_space *mapping,
57666 struct list_head *pages, unsigned nr_pages);
57667
57668- int (*write_begin)(struct file *, struct address_space *mapping,
57669+ int (* const write_begin)(struct file *, struct address_space *mapping,
57670 loff_t pos, unsigned len, unsigned flags,
57671 struct page **pagep, void **fsdata);
57672- int (*write_end)(struct file *, struct address_space *mapping,
57673+ int (* const write_end)(struct file *, struct address_space *mapping,
57674 loff_t pos, unsigned len, unsigned copied,
57675 struct page *page, void *fsdata);
57676
57677 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
57678- sector_t (*bmap)(struct address_space *, sector_t);
57679- void (*invalidatepage) (struct page *, unsigned long);
57680- int (*releasepage) (struct page *, gfp_t);
57681- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
57682+ sector_t (* const bmap)(struct address_space *, sector_t);
57683+ void (* const invalidatepage) (struct page *, unsigned long);
57684+ int (* const releasepage) (struct page *, gfp_t);
57685+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
57686 loff_t offset, unsigned long nr_segs);
57687- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
57688+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
57689 void **, unsigned long *);
57690 /* migrate the contents of a page to the specified target */
57691- int (*migratepage) (struct address_space *,
57692+ int (* const migratepage) (struct address_space *,
57693 struct page *, struct page *);
57694- int (*launder_page) (struct page *);
57695- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
57696+ int (* const launder_page) (struct page *);
57697+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
57698 unsigned long);
57699- int (*error_remove_page)(struct address_space *, struct page *);
57700+ int (* const error_remove_page)(struct address_space *, struct page *);
57701 };
57702
57703 /*
57704@@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
57705 typedef struct files_struct *fl_owner_t;
57706
57707 struct file_lock_operations {
57708- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57709- void (*fl_release_private)(struct file_lock *);
57710+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57711+ void (* const fl_release_private)(struct file_lock *);
57712 };
57713
57714 struct lock_manager_operations {
57715- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
57716- void (*fl_notify)(struct file_lock *); /* unblock callback */
57717- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
57718- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57719- void (*fl_release_private)(struct file_lock *);
57720- void (*fl_break)(struct file_lock *);
57721- int (*fl_mylease)(struct file_lock *, struct file_lock *);
57722- int (*fl_change)(struct file_lock **, int);
57723+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
57724+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
57725+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
57726+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57727+ void (* const fl_release_private)(struct file_lock *);
57728+ void (* const fl_break)(struct file_lock *);
57729+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
57730+ int (* const fl_change)(struct file_lock **, int);
57731 };
57732
57733 struct lock_manager {
57734@@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
57735 unsigned int fi_flags; /* Flags as passed from user */
57736 unsigned int fi_extents_mapped; /* Number of mapped extents */
57737 unsigned int fi_extents_max; /* Size of fiemap_extent array */
57738- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
57739+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
57740 * array */
57741 };
57742 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
57743@@ -1486,7 +1491,7 @@ struct block_device_operations;
57744 * can be called without the big kernel lock held in all filesystems.
57745 */
57746 struct file_operations {
57747- struct module *owner;
57748+ struct module * const owner;
57749 loff_t (*llseek) (struct file *, loff_t, int);
57750 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
57751 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
57752@@ -1513,6 +1518,7 @@ struct file_operations {
57753 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
57754 int (*setlease)(struct file *, long, struct file_lock **);
57755 };
57756+typedef struct file_operations __no_const file_operations_no_const;
57757
57758 struct inode_operations {
57759 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
57760@@ -1559,30 +1565,30 @@ extern ssize_t vfs_writev(struct file *,
57761 unsigned long, loff_t *);
57762
57763 struct super_operations {
57764- struct inode *(*alloc_inode)(struct super_block *sb);
57765- void (*destroy_inode)(struct inode *);
57766+ struct inode *(* const alloc_inode)(struct super_block *sb);
57767+ void (* const destroy_inode)(struct inode *);
57768
57769- void (*dirty_inode) (struct inode *);
57770- int (*write_inode) (struct inode *, int);
57771- void (*drop_inode) (struct inode *);
57772- void (*delete_inode) (struct inode *);
57773- void (*put_super) (struct super_block *);
57774- void (*write_super) (struct super_block *);
57775- int (*sync_fs)(struct super_block *sb, int wait);
57776- int (*freeze_fs) (struct super_block *);
57777- int (*unfreeze_fs) (struct super_block *);
57778- int (*statfs) (struct dentry *, struct kstatfs *);
57779- int (*remount_fs) (struct super_block *, int *, char *);
57780- void (*clear_inode) (struct inode *);
57781- void (*umount_begin) (struct super_block *);
57782+ void (* const dirty_inode) (struct inode *);
57783+ int (* const write_inode) (struct inode *, int);
57784+ void (* const drop_inode) (struct inode *);
57785+ void (* const delete_inode) (struct inode *);
57786+ void (* const put_super) (struct super_block *);
57787+ void (* const write_super) (struct super_block *);
57788+ int (* const sync_fs)(struct super_block *sb, int wait);
57789+ int (* const freeze_fs) (struct super_block *);
57790+ int (* const unfreeze_fs) (struct super_block *);
57791+ int (* const statfs) (struct dentry *, struct kstatfs *);
57792+ int (* const remount_fs) (struct super_block *, int *, char *);
57793+ void (* const clear_inode) (struct inode *);
57794+ void (* const umount_begin) (struct super_block *);
57795
57796- int (*show_options)(struct seq_file *, struct vfsmount *);
57797- int (*show_stats)(struct seq_file *, struct vfsmount *);
57798+ int (* const show_options)(struct seq_file *, struct vfsmount *);
57799+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
57800 #ifdef CONFIG_QUOTA
57801- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
57802- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
57803+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
57804+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
57805 #endif
57806- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
57807+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
57808 };
57809
57810 /*
57811diff -urNp linux-2.6.32.45/include/linux/fs_struct.h linux-2.6.32.45/include/linux/fs_struct.h
57812--- linux-2.6.32.45/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
57813+++ linux-2.6.32.45/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
57814@@ -4,7 +4,7 @@
57815 #include <linux/path.h>
57816
57817 struct fs_struct {
57818- int users;
57819+ atomic_t users;
57820 rwlock_t lock;
57821 int umask;
57822 int in_exec;
57823diff -urNp linux-2.6.32.45/include/linux/ftrace_event.h linux-2.6.32.45/include/linux/ftrace_event.h
57824--- linux-2.6.32.45/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
57825+++ linux-2.6.32.45/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
57826@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
57827 int filter_type);
57828 extern int trace_define_common_fields(struct ftrace_event_call *call);
57829
57830-#define is_signed_type(type) (((type)(-1)) < 0)
57831+#define is_signed_type(type) (((type)(-1)) < (type)1)
57832
57833 int trace_set_clr_event(const char *system, const char *event, int set);
57834
57835diff -urNp linux-2.6.32.45/include/linux/genhd.h linux-2.6.32.45/include/linux/genhd.h
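Writing the signedness test as ((type)(-1)) < (type)1 gives the same answer as the old comparison against 0 (true for signed types, false for unsigned ones) but, presumably, avoids the gcc "comparison of unsigned expression < 0 is always false" warning when the macro is instantiated with an unsigned type. A quick user-space check:

#include <stdio.h>

#define is_signed_type(type) (((type)(-1)) < (type)1)

int main(void)
{
    printf("int:          %d\n", is_signed_type(int));           /* 1 */
    printf("unsigned int: %d\n", is_signed_type(unsigned int));  /* 0 */
    printf("plain char:   %d\n", is_signed_type(char));          /* implementation-defined */
    return 0;
}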
57836--- linux-2.6.32.45/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
57837+++ linux-2.6.32.45/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
57838@@ -161,7 +161,7 @@ struct gendisk {
57839
57840 struct timer_rand_state *random;
57841
57842- atomic_t sync_io; /* RAID */
57843+ atomic_unchecked_t sync_io; /* RAID */
57844 struct work_struct async_notify;
57845 #ifdef CONFIG_BLK_DEV_INTEGRITY
57846 struct blk_integrity *integrity;
57847diff -urNp linux-2.6.32.45/include/linux/gracl.h linux-2.6.32.45/include/linux/gracl.h
57848--- linux-2.6.32.45/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
57849+++ linux-2.6.32.45/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
57850@@ -0,0 +1,317 @@
57851+#ifndef GR_ACL_H
57852+#define GR_ACL_H
57853+
57854+#include <linux/grdefs.h>
57855+#include <linux/resource.h>
57856+#include <linux/capability.h>
57857+#include <linux/dcache.h>
57858+#include <asm/resource.h>
57859+
57860+/* Major status information */
57861+
57862+#define GR_VERSION "grsecurity 2.2.2"
57863+#define GRSECURITY_VERSION 0x2202
57864+
57865+enum {
57866+ GR_SHUTDOWN = 0,
57867+ GR_ENABLE = 1,
57868+ GR_SPROLE = 2,
57869+ GR_RELOAD = 3,
57870+ GR_SEGVMOD = 4,
57871+ GR_STATUS = 5,
57872+ GR_UNSPROLE = 6,
57873+ GR_PASSSET = 7,
57874+ GR_SPROLEPAM = 8,
57875+};
57876+
57877+/* Password setup definitions
57878+ * kernel/grhash.c */
57879+enum {
57880+ GR_PW_LEN = 128,
57881+ GR_SALT_LEN = 16,
57882+ GR_SHA_LEN = 32,
57883+};
57884+
57885+enum {
57886+ GR_SPROLE_LEN = 64,
57887+};
57888+
57889+enum {
57890+ GR_NO_GLOB = 0,
57891+ GR_REG_GLOB,
57892+ GR_CREATE_GLOB
57893+};
57894+
57895+#define GR_NLIMITS 32
57896+
57897+/* Begin Data Structures */
57898+
57899+struct sprole_pw {
57900+ unsigned char *rolename;
57901+ unsigned char salt[GR_SALT_LEN];
57902+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
57903+};
57904+
57905+struct name_entry {
57906+ __u32 key;
57907+ ino_t inode;
57908+ dev_t device;
57909+ char *name;
57910+ __u16 len;
57911+ __u8 deleted;
57912+ struct name_entry *prev;
57913+ struct name_entry *next;
57914+};
57915+
57916+struct inodev_entry {
57917+ struct name_entry *nentry;
57918+ struct inodev_entry *prev;
57919+ struct inodev_entry *next;
57920+};
57921+
57922+struct acl_role_db {
57923+ struct acl_role_label **r_hash;
57924+ __u32 r_size;
57925+};
57926+
57927+struct inodev_db {
57928+ struct inodev_entry **i_hash;
57929+ __u32 i_size;
57930+};
57931+
57932+struct name_db {
57933+ struct name_entry **n_hash;
57934+ __u32 n_size;
57935+};
57936+
57937+struct crash_uid {
57938+ uid_t uid;
57939+ unsigned long expires;
57940+};
57941+
57942+struct gr_hash_struct {
57943+ void **table;
57944+ void **nametable;
57945+ void *first;
57946+ __u32 table_size;
57947+ __u32 used_size;
57948+ int type;
57949+};
57950+
57951+/* Userspace Grsecurity ACL data structures */
57952+
57953+struct acl_subject_label {
57954+ char *filename;
57955+ ino_t inode;
57956+ dev_t device;
57957+ __u32 mode;
57958+ kernel_cap_t cap_mask;
57959+ kernel_cap_t cap_lower;
57960+ kernel_cap_t cap_invert_audit;
57961+
57962+ struct rlimit res[GR_NLIMITS];
57963+ __u32 resmask;
57964+
57965+ __u8 user_trans_type;
57966+ __u8 group_trans_type;
57967+ uid_t *user_transitions;
57968+ gid_t *group_transitions;
57969+ __u16 user_trans_num;
57970+ __u16 group_trans_num;
57971+
57972+ __u32 sock_families[2];
57973+ __u32 ip_proto[8];
57974+ __u32 ip_type;
57975+ struct acl_ip_label **ips;
57976+ __u32 ip_num;
57977+ __u32 inaddr_any_override;
57978+
57979+ __u32 crashes;
57980+ unsigned long expires;
57981+
57982+ struct acl_subject_label *parent_subject;
57983+ struct gr_hash_struct *hash;
57984+ struct acl_subject_label *prev;
57985+ struct acl_subject_label *next;
57986+
57987+ struct acl_object_label **obj_hash;
57988+ __u32 obj_hash_size;
57989+ __u16 pax_flags;
57990+};
57991+
57992+struct role_allowed_ip {
57993+ __u32 addr;
57994+ __u32 netmask;
57995+
57996+ struct role_allowed_ip *prev;
57997+ struct role_allowed_ip *next;
57998+};
57999+
58000+struct role_transition {
58001+ char *rolename;
58002+
58003+ struct role_transition *prev;
58004+ struct role_transition *next;
58005+};
58006+
58007+struct acl_role_label {
58008+ char *rolename;
58009+ uid_t uidgid;
58010+ __u16 roletype;
58011+
58012+ __u16 auth_attempts;
58013+ unsigned long expires;
58014+
58015+ struct acl_subject_label *root_label;
58016+ struct gr_hash_struct *hash;
58017+
58018+ struct acl_role_label *prev;
58019+ struct acl_role_label *next;
58020+
58021+ struct role_transition *transitions;
58022+ struct role_allowed_ip *allowed_ips;
58023+ uid_t *domain_children;
58024+ __u16 domain_child_num;
58025+
58026+ struct acl_subject_label **subj_hash;
58027+ __u32 subj_hash_size;
58028+};
58029+
58030+struct user_acl_role_db {
58031+ struct acl_role_label **r_table;
58032+ __u32 num_pointers; /* Number of allocations to track */
58033+ __u32 num_roles; /* Number of roles */
58034+ __u32 num_domain_children; /* Number of domain children */
58035+ __u32 num_subjects; /* Number of subjects */
58036+ __u32 num_objects; /* Number of objects */
58037+};
58038+
58039+struct acl_object_label {
58040+ char *filename;
58041+ ino_t inode;
58042+ dev_t device;
58043+ __u32 mode;
58044+
58045+ struct acl_subject_label *nested;
58046+ struct acl_object_label *globbed;
58047+
58048+ /* next two structures not used */
58049+
58050+ struct acl_object_label *prev;
58051+ struct acl_object_label *next;
58052+};
58053+
58054+struct acl_ip_label {
58055+ char *iface;
58056+ __u32 addr;
58057+ __u32 netmask;
58058+ __u16 low, high;
58059+ __u8 mode;
58060+ __u32 type;
58061+ __u32 proto[8];
58062+
58063+ /* next two structures not used */
58064+
58065+ struct acl_ip_label *prev;
58066+ struct acl_ip_label *next;
58067+};
58068+
58069+struct gr_arg {
58070+ struct user_acl_role_db role_db;
58071+ unsigned char pw[GR_PW_LEN];
58072+ unsigned char salt[GR_SALT_LEN];
58073+ unsigned char sum[GR_SHA_LEN];
58074+ unsigned char sp_role[GR_SPROLE_LEN];
58075+ struct sprole_pw *sprole_pws;
58076+ dev_t segv_device;
58077+ ino_t segv_inode;
58078+ uid_t segv_uid;
58079+ __u16 num_sprole_pws;
58080+ __u16 mode;
58081+};
58082+
58083+struct gr_arg_wrapper {
58084+ struct gr_arg *arg;
58085+ __u32 version;
58086+ __u32 size;
58087+};
58088+
58089+struct subject_map {
58090+ struct acl_subject_label *user;
58091+ struct acl_subject_label *kernel;
58092+ struct subject_map *prev;
58093+ struct subject_map *next;
58094+};
58095+
58096+struct acl_subj_map_db {
58097+ struct subject_map **s_hash;
58098+ __u32 s_size;
58099+};
58100+
58101+/* End Data Structures Section */
58102+
58103+/* Hash functions generated by empirical testing by Brad Spengler
58104+ Makes good use of the low bits of the inode. Generally 0-1 times
58105+ in loop for successful match. 0-3 for unsuccessful match.
58106+ Shift/add algorithm with modulus of table size and an XOR*/
58107+
58108+static __inline__ unsigned int
58109+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58110+{
58111+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58112+}
58113+
58114+ static __inline__ unsigned int
58115+shash(const struct acl_subject_label *userp, const unsigned int sz)
58116+{
58117+ return ((const unsigned long)userp % sz);
58118+}
58119+
58120+static __inline__ unsigned int
58121+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58122+{
58123+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58124+}
58125+
58126+static __inline__ unsigned int
58127+nhash(const char *name, const __u16 len, const unsigned int sz)
58128+{
58129+ return full_name_hash((const unsigned char *)name, len) % sz;
58130+}
58131+
58132+#define FOR_EACH_ROLE_START(role) \
58133+ role = role_list; \
58134+ while (role) {
58135+
58136+#define FOR_EACH_ROLE_END(role) \
58137+ role = role->prev; \
58138+ }
58139+
58140+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58141+ subj = NULL; \
58142+ iter = 0; \
58143+ while (iter < role->subj_hash_size) { \
58144+ if (subj == NULL) \
58145+ subj = role->subj_hash[iter]; \
58146+ if (subj == NULL) { \
58147+ iter++; \
58148+ continue; \
58149+ }
58150+
58151+#define FOR_EACH_SUBJECT_END(subj,iter) \
58152+ subj = subj->next; \
58153+ if (subj == NULL) \
58154+ iter++; \
58155+ }
58156+
58157+
58158+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58159+ subj = role->hash->first; \
58160+ while (subj != NULL) {
58161+
58162+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58163+ subj = subj->next; \
58164+ }
58165+
58166+#endif
58167+
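The FOR_EACH_* macro pairs open and close a walk over the ACL hash tables; each _START macro leaves an unbalanced brace that the matching _END macro closes, with the loop body written in between. A hedged usage sketch, assuming an already populated struct acl_role_label *role:

/* illustrative only: count every subject attached to "role";
 * "subj" and "iter" follow the contract expected by the macros */
static unsigned int count_subjects(struct acl_role_label *role)
{
    struct acl_subject_label *subj;
    unsigned int iter;
    unsigned int count = 0;

    FOR_EACH_SUBJECT_START(role, subj, iter)
        count++;
    FOR_EACH_SUBJECT_END(subj, iter)

    return count;
}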
58168diff -urNp linux-2.6.32.45/include/linux/gralloc.h linux-2.6.32.45/include/linux/gralloc.h
58169--- linux-2.6.32.45/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58170+++ linux-2.6.32.45/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
58171@@ -0,0 +1,9 @@
58172+#ifndef __GRALLOC_H
58173+#define __GRALLOC_H
58174+
58175+void acl_free_all(void);
58176+int acl_alloc_stack_init(unsigned long size);
58177+void *acl_alloc(unsigned long len);
58178+void *acl_alloc_num(unsigned long num, unsigned long len);
58179+
58180+#endif
58181diff -urNp linux-2.6.32.45/include/linux/grdefs.h linux-2.6.32.45/include/linux/grdefs.h
58182--- linux-2.6.32.45/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58183+++ linux-2.6.32.45/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
58184@@ -0,0 +1,140 @@
58185+#ifndef GRDEFS_H
58186+#define GRDEFS_H
58187+
58188+/* Begin grsecurity status declarations */
58189+
58190+enum {
58191+ GR_READY = 0x01,
58192+ GR_STATUS_INIT = 0x00 // disabled state
58193+};
58194+
58195+/* Begin ACL declarations */
58196+
58197+/* Role flags */
58198+
58199+enum {
58200+ GR_ROLE_USER = 0x0001,
58201+ GR_ROLE_GROUP = 0x0002,
58202+ GR_ROLE_DEFAULT = 0x0004,
58203+ GR_ROLE_SPECIAL = 0x0008,
58204+ GR_ROLE_AUTH = 0x0010,
58205+ GR_ROLE_NOPW = 0x0020,
58206+ GR_ROLE_GOD = 0x0040,
58207+ GR_ROLE_LEARN = 0x0080,
58208+ GR_ROLE_TPE = 0x0100,
58209+ GR_ROLE_DOMAIN = 0x0200,
58210+ GR_ROLE_PAM = 0x0400,
58211+ GR_ROLE_PERSIST = 0x800
58212+};
58213+
58214+/* ACL Subject and Object mode flags */
58215+enum {
58216+ GR_DELETED = 0x80000000
58217+};
58218+
58219+/* ACL Object-only mode flags */
58220+enum {
58221+ GR_READ = 0x00000001,
58222+ GR_APPEND = 0x00000002,
58223+ GR_WRITE = 0x00000004,
58224+ GR_EXEC = 0x00000008,
58225+ GR_FIND = 0x00000010,
58226+ GR_INHERIT = 0x00000020,
58227+ GR_SETID = 0x00000040,
58228+ GR_CREATE = 0x00000080,
58229+ GR_DELETE = 0x00000100,
58230+ GR_LINK = 0x00000200,
58231+ GR_AUDIT_READ = 0x00000400,
58232+ GR_AUDIT_APPEND = 0x00000800,
58233+ GR_AUDIT_WRITE = 0x00001000,
58234+ GR_AUDIT_EXEC = 0x00002000,
58235+ GR_AUDIT_FIND = 0x00004000,
58236+ GR_AUDIT_INHERIT= 0x00008000,
58237+ GR_AUDIT_SETID = 0x00010000,
58238+ GR_AUDIT_CREATE = 0x00020000,
58239+ GR_AUDIT_DELETE = 0x00040000,
58240+ GR_AUDIT_LINK = 0x00080000,
58241+ GR_PTRACERD = 0x00100000,
58242+ GR_NOPTRACE = 0x00200000,
58243+ GR_SUPPRESS = 0x00400000,
58244+ GR_NOLEARN = 0x00800000,
58245+ GR_INIT_TRANSFER= 0x01000000
58246+};
58247+
58248+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58249+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58250+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58251+
58252+/* ACL subject-only mode flags */
58253+enum {
58254+ GR_KILL = 0x00000001,
58255+ GR_VIEW = 0x00000002,
58256+ GR_PROTECTED = 0x00000004,
58257+ GR_LEARN = 0x00000008,
58258+ GR_OVERRIDE = 0x00000010,
58259+ /* just a placeholder, this mode is only used in userspace */
58260+ GR_DUMMY = 0x00000020,
58261+ GR_PROTSHM = 0x00000040,
58262+ GR_KILLPROC = 0x00000080,
58263+ GR_KILLIPPROC = 0x00000100,
58264+ /* just a placeholder, this mode is only used in userspace */
58265+ GR_NOTROJAN = 0x00000200,
58266+ GR_PROTPROCFD = 0x00000400,
58267+ GR_PROCACCT = 0x00000800,
58268+ GR_RELAXPTRACE = 0x00001000,
58269+ GR_NESTED = 0x00002000,
58270+ GR_INHERITLEARN = 0x00004000,
58271+ GR_PROCFIND = 0x00008000,
58272+ GR_POVERRIDE = 0x00010000,
58273+ GR_KERNELAUTH = 0x00020000,
58274+ GR_ATSECURE = 0x00040000,
58275+ GR_SHMEXEC = 0x00080000
58276+};
58277+
58278+enum {
58279+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58280+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58281+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58282+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58283+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58284+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58285+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58286+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58287+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58288+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58289+};
58290+
58291+enum {
58292+ GR_ID_USER = 0x01,
58293+ GR_ID_GROUP = 0x02,
58294+};
58295+
58296+enum {
58297+ GR_ID_ALLOW = 0x01,
58298+ GR_ID_DENY = 0x02,
58299+};
58300+
58301+#define GR_CRASH_RES 31
58302+#define GR_UIDTABLE_MAX 500
58303+
58304+/* begin resource learning section */
58305+enum {
58306+ GR_RLIM_CPU_BUMP = 60,
58307+ GR_RLIM_FSIZE_BUMP = 50000,
58308+ GR_RLIM_DATA_BUMP = 10000,
58309+ GR_RLIM_STACK_BUMP = 1000,
58310+ GR_RLIM_CORE_BUMP = 10000,
58311+ GR_RLIM_RSS_BUMP = 500000,
58312+ GR_RLIM_NPROC_BUMP = 1,
58313+ GR_RLIM_NOFILE_BUMP = 5,
58314+ GR_RLIM_MEMLOCK_BUMP = 50000,
58315+ GR_RLIM_AS_BUMP = 500000,
58316+ GR_RLIM_LOCKS_BUMP = 2,
58317+ GR_RLIM_SIGPENDING_BUMP = 5,
58318+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58319+ GR_RLIM_NICE_BUMP = 1,
58320+ GR_RLIM_RTPRIO_BUMP = 1,
58321+ GR_RLIM_RTTIME_BUMP = 1000000
58322+};
58323+
58324+#endif
58325diff -urNp linux-2.6.32.45/include/linux/grinternal.h linux-2.6.32.45/include/linux/grinternal.h
58326--- linux-2.6.32.45/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58327+++ linux-2.6.32.45/include/linux/grinternal.h 2011-08-11 19:58:37.000000000 -0400
58328@@ -0,0 +1,217 @@
58329+#ifndef __GRINTERNAL_H
58330+#define __GRINTERNAL_H
58331+
58332+#ifdef CONFIG_GRKERNSEC
58333+
58334+#include <linux/fs.h>
58335+#include <linux/mnt_namespace.h>
58336+#include <linux/nsproxy.h>
58337+#include <linux/gracl.h>
58338+#include <linux/grdefs.h>
58339+#include <linux/grmsg.h>
58340+
58341+void gr_add_learn_entry(const char *fmt, ...)
58342+ __attribute__ ((format (printf, 1, 2)));
58343+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58344+ const struct vfsmount *mnt);
58345+__u32 gr_check_create(const struct dentry *new_dentry,
58346+ const struct dentry *parent,
58347+ const struct vfsmount *mnt, const __u32 mode);
58348+int gr_check_protected_task(const struct task_struct *task);
58349+__u32 to_gr_audit(const __u32 reqmode);
58350+int gr_set_acls(const int type);
58351+int gr_apply_subject_to_task(struct task_struct *task);
58352+int gr_acl_is_enabled(void);
58353+char gr_roletype_to_char(void);
58354+
58355+void gr_handle_alertkill(struct task_struct *task);
58356+char *gr_to_filename(const struct dentry *dentry,
58357+ const struct vfsmount *mnt);
58358+char *gr_to_filename1(const struct dentry *dentry,
58359+ const struct vfsmount *mnt);
58360+char *gr_to_filename2(const struct dentry *dentry,
58361+ const struct vfsmount *mnt);
58362+char *gr_to_filename3(const struct dentry *dentry,
58363+ const struct vfsmount *mnt);
58364+
58365+extern int grsec_enable_harden_ptrace;
58366+extern int grsec_enable_link;
58367+extern int grsec_enable_fifo;
58368+extern int grsec_enable_shm;
58369+extern int grsec_enable_execlog;
58370+extern int grsec_enable_signal;
58371+extern int grsec_enable_audit_ptrace;
58372+extern int grsec_enable_forkfail;
58373+extern int grsec_enable_time;
58374+extern int grsec_enable_rofs;
58375+extern int grsec_enable_chroot_shmat;
58376+extern int grsec_enable_chroot_mount;
58377+extern int grsec_enable_chroot_double;
58378+extern int grsec_enable_chroot_pivot;
58379+extern int grsec_enable_chroot_chdir;
58380+extern int grsec_enable_chroot_chmod;
58381+extern int grsec_enable_chroot_mknod;
58382+extern int grsec_enable_chroot_fchdir;
58383+extern int grsec_enable_chroot_nice;
58384+extern int grsec_enable_chroot_execlog;
58385+extern int grsec_enable_chroot_caps;
58386+extern int grsec_enable_chroot_sysctl;
58387+extern int grsec_enable_chroot_unix;
58388+extern int grsec_enable_tpe;
58389+extern int grsec_tpe_gid;
58390+extern int grsec_enable_tpe_all;
58391+extern int grsec_enable_tpe_invert;
58392+extern int grsec_enable_socket_all;
58393+extern int grsec_socket_all_gid;
58394+extern int grsec_enable_socket_client;
58395+extern int grsec_socket_client_gid;
58396+extern int grsec_enable_socket_server;
58397+extern int grsec_socket_server_gid;
58398+extern int grsec_audit_gid;
58399+extern int grsec_enable_group;
58400+extern int grsec_enable_audit_textrel;
58401+extern int grsec_enable_log_rwxmaps;
58402+extern int grsec_enable_mount;
58403+extern int grsec_enable_chdir;
58404+extern int grsec_resource_logging;
58405+extern int grsec_enable_blackhole;
58406+extern int grsec_lastack_retries;
58407+extern int grsec_enable_brute;
58408+extern int grsec_lock;
58409+
58410+extern spinlock_t grsec_alert_lock;
58411+extern unsigned long grsec_alert_wtime;
58412+extern unsigned long grsec_alert_fyet;
58413+
58414+extern spinlock_t grsec_audit_lock;
58415+
58416+extern rwlock_t grsec_exec_file_lock;
58417+
58418+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58419+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58420+ (tsk)->exec_file->f_vfsmnt) : "/")
58421+
58422+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58423+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58424+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58425+
58426+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58427+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58428+ (tsk)->exec_file->f_vfsmnt) : "/")
58429+
58430+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58431+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58432+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58433+
58434+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58435+
58436+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58437+
58438+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58439+ (task)->pid, (cred)->uid, \
58440+ (cred)->euid, (cred)->gid, (cred)->egid, \
58441+ gr_parent_task_fullpath(task), \
58442+ (task)->real_parent->comm, (task)->real_parent->pid, \
58443+ (pcred)->uid, (pcred)->euid, \
58444+ (pcred)->gid, (pcred)->egid
58445+
58446+#define GR_CHROOT_CAPS {{ \
58447+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58448+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58449+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58450+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58451+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58452+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
58453+
58454+#define security_learn(normal_msg,args...) \
58455+({ \
58456+ read_lock(&grsec_exec_file_lock); \
58457+ gr_add_learn_entry(normal_msg "\n", ## args); \
58458+ read_unlock(&grsec_exec_file_lock); \
58459+})
58460+
58461+enum {
58462+ GR_DO_AUDIT,
58463+ GR_DONT_AUDIT,
58464+ GR_DONT_AUDIT_GOOD
58465+};
58466+
58467+enum {
58468+ GR_TTYSNIFF,
58469+ GR_RBAC,
58470+ GR_RBAC_STR,
58471+ GR_STR_RBAC,
58472+ GR_RBAC_MODE2,
58473+ GR_RBAC_MODE3,
58474+ GR_FILENAME,
58475+ GR_SYSCTL_HIDDEN,
58476+ GR_NOARGS,
58477+ GR_ONE_INT,
58478+ GR_ONE_INT_TWO_STR,
58479+ GR_ONE_STR,
58480+ GR_STR_INT,
58481+ GR_TWO_STR_INT,
58482+ GR_TWO_INT,
58483+ GR_TWO_U64,
58484+ GR_THREE_INT,
58485+ GR_FIVE_INT_TWO_STR,
58486+ GR_TWO_STR,
58487+ GR_THREE_STR,
58488+ GR_FOUR_STR,
58489+ GR_STR_FILENAME,
58490+ GR_FILENAME_STR,
58491+ GR_FILENAME_TWO_INT,
58492+ GR_FILENAME_TWO_INT_STR,
58493+ GR_TEXTREL,
58494+ GR_PTRACE,
58495+ GR_RESOURCE,
58496+ GR_CAP,
58497+ GR_SIG,
58498+ GR_SIG2,
58499+ GR_CRASH1,
58500+ GR_CRASH2,
58501+ GR_PSACCT,
58502+ GR_RWXMAP
58503+};
58504+
58505+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58506+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58507+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58508+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58509+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58510+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58511+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58512+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58513+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58514+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58515+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58516+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58517+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58518+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58519+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58520+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58521+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58522+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58523+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58524+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58525+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58526+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58527+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58528+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58529+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58530+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58531+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58532+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58533+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58534+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58535+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58536+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58537+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58538+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58539+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58540+
58541+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58542+
58543+#endif
58544+
58545+#endif
58546diff -urNp linux-2.6.32.45/include/linux/grmsg.h linux-2.6.32.45/include/linux/grmsg.h
58547--- linux-2.6.32.45/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58548+++ linux-2.6.32.45/include/linux/grmsg.h 2011-08-25 17:28:11.000000000 -0400
58549@@ -0,0 +1,107 @@
58550+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58551+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58552+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58553+#define GR_STOPMOD_MSG "denied modification of module state by "
58554+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58555+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58556+#define GR_IOPERM_MSG "denied use of ioperm() by "
58557+#define GR_IOPL_MSG "denied use of iopl() by "
58558+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58559+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58560+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58561+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58562+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58563+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58564+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58565+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58566+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58567+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58568+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58569+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58570+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58571+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58572+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58573+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58574+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58575+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58576+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58577+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58578+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58579+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58580+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58581+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58582+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58583+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58584+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58585+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58586+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58587+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58588+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58589+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58590+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58591+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58592+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58593+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58594+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58595+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58596+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58597+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58598+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58599+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58600+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58601+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58602+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58603+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58604+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58605+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58606+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58607+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58608+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58609+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58610+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58611+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58612+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58613+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58614+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58615+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58616+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58617+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58618+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58619+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58620+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58621+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58622+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58623+#define GR_NICE_CHROOT_MSG "denied priority change by "
58624+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58625+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58626+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58627+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58628+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58629+#define GR_TIME_MSG "time set by "
58630+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58631+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58632+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58633+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58634+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58635+#define GR_BIND_MSG "denied bind() by "
58636+#define GR_CONNECT_MSG "denied connect() by "
58637+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58638+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58639+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58640+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58641+#define GR_CAP_ACL_MSG "use of %s denied for "
58642+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58643+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58644+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58645+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58646+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58647+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58648+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58649+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58650+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58651+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58652+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58653+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58654+#define GR_VM86_MSG "denied use of vm86 by "
58655+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58656+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58657diff -urNp linux-2.6.32.45/include/linux/grsecurity.h linux-2.6.32.45/include/linux/grsecurity.h
58658--- linux-2.6.32.45/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58659+++ linux-2.6.32.45/include/linux/grsecurity.h 2011-08-11 19:58:57.000000000 -0400
58660@@ -0,0 +1,217 @@
58661+#ifndef GR_SECURITY_H
58662+#define GR_SECURITY_H
58663+#include <linux/fs.h>
58664+#include <linux/fs_struct.h>
58665+#include <linux/binfmts.h>
58666+#include <linux/gracl.h>
58667+#include <linux/compat.h>
58668+
58669+/* notify of brain-dead configs */
58670+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58671+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58672+#endif
58673+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58674+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58675+#endif
58676+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58677+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58678+#endif
58679+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58680+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58681+#endif
58682+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58683+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58684+#endif
58685+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58686+#error "CONFIG_PAX enabled, but no PaX options are enabled."
58687+#endif
58688+
58689+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58690+void gr_handle_brute_check(void);
58691+void gr_handle_kernel_exploit(void);
58692+int gr_process_user_ban(void);
58693+
58694+char gr_roletype_to_char(void);
58695+
58696+int gr_acl_enable_at_secure(void);
58697+
58698+int gr_check_user_change(int real, int effective, int fs);
58699+int gr_check_group_change(int real, int effective, int fs);
58700+
58701+void gr_del_task_from_ip_table(struct task_struct *p);
58702+
58703+int gr_pid_is_chrooted(struct task_struct *p);
58704+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58705+int gr_handle_chroot_nice(void);
58706+int gr_handle_chroot_sysctl(const int op);
58707+int gr_handle_chroot_setpriority(struct task_struct *p,
58708+ const int niceval);
58709+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58710+int gr_handle_chroot_chroot(const struct dentry *dentry,
58711+ const struct vfsmount *mnt);
58712+int gr_handle_chroot_caps(struct path *path);
58713+void gr_handle_chroot_chdir(struct path *path);
58714+int gr_handle_chroot_chmod(const struct dentry *dentry,
58715+ const struct vfsmount *mnt, const int mode);
58716+int gr_handle_chroot_mknod(const struct dentry *dentry,
58717+ const struct vfsmount *mnt, const int mode);
58718+int gr_handle_chroot_mount(const struct dentry *dentry,
58719+ const struct vfsmount *mnt,
58720+ const char *dev_name);
58721+int gr_handle_chroot_pivot(void);
58722+int gr_handle_chroot_unix(const pid_t pid);
58723+
58724+int gr_handle_rawio(const struct inode *inode);
58725+
58726+void gr_handle_ioperm(void);
58727+void gr_handle_iopl(void);
58728+
58729+int gr_tpe_allow(const struct file *file);
58730+
58731+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58732+void gr_clear_chroot_entries(struct task_struct *task);
58733+
58734+void gr_log_forkfail(const int retval);
58735+void gr_log_timechange(void);
58736+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58737+void gr_log_chdir(const struct dentry *dentry,
58738+ const struct vfsmount *mnt);
58739+void gr_log_chroot_exec(const struct dentry *dentry,
58740+ const struct vfsmount *mnt);
58741+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
58742+#ifdef CONFIG_COMPAT
58743+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
58744+#endif
58745+void gr_log_remount(const char *devname, const int retval);
58746+void gr_log_unmount(const char *devname, const int retval);
58747+void gr_log_mount(const char *from, const char *to, const int retval);
58748+void gr_log_textrel(struct vm_area_struct *vma);
58749+void gr_log_rwxmmap(struct file *file);
58750+void gr_log_rwxmprotect(struct file *file);
58751+
58752+int gr_handle_follow_link(const struct inode *parent,
58753+ const struct inode *inode,
58754+ const struct dentry *dentry,
58755+ const struct vfsmount *mnt);
58756+int gr_handle_fifo(const struct dentry *dentry,
58757+ const struct vfsmount *mnt,
58758+ const struct dentry *dir, const int flag,
58759+ const int acc_mode);
58760+int gr_handle_hardlink(const struct dentry *dentry,
58761+ const struct vfsmount *mnt,
58762+ struct inode *inode,
58763+ const int mode, const char *to);
58764+
58765+int gr_is_capable(const int cap);
58766+int gr_is_capable_nolog(const int cap);
58767+void gr_learn_resource(const struct task_struct *task, const int limit,
58768+ const unsigned long wanted, const int gt);
58769+void gr_copy_label(struct task_struct *tsk);
58770+void gr_handle_crash(struct task_struct *task, const int sig);
58771+int gr_handle_signal(const struct task_struct *p, const int sig);
58772+int gr_check_crash_uid(const uid_t uid);
58773+int gr_check_protected_task(const struct task_struct *task);
58774+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58775+int gr_acl_handle_mmap(const struct file *file,
58776+ const unsigned long prot);
58777+int gr_acl_handle_mprotect(const struct file *file,
58778+ const unsigned long prot);
58779+int gr_check_hidden_task(const struct task_struct *tsk);
58780+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
58781+ const struct vfsmount *mnt);
58782+__u32 gr_acl_handle_utime(const struct dentry *dentry,
58783+ const struct vfsmount *mnt);
58784+__u32 gr_acl_handle_access(const struct dentry *dentry,
58785+ const struct vfsmount *mnt, const int fmode);
58786+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
58787+ const struct vfsmount *mnt, mode_t mode);
58788+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
58789+ const struct vfsmount *mnt, mode_t mode);
58790+__u32 gr_acl_handle_chown(const struct dentry *dentry,
58791+ const struct vfsmount *mnt);
58792+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
58793+ const struct vfsmount *mnt);
58794+int gr_handle_ptrace(struct task_struct *task, const long request);
58795+int gr_handle_proc_ptrace(struct task_struct *task);
58796+__u32 gr_acl_handle_execve(const struct dentry *dentry,
58797+ const struct vfsmount *mnt);
58798+int gr_check_crash_exec(const struct file *filp);
58799+int gr_acl_is_enabled(void);
58800+void gr_set_kernel_label(struct task_struct *task);
58801+void gr_set_role_label(struct task_struct *task, const uid_t uid,
58802+ const gid_t gid);
58803+int gr_set_proc_label(const struct dentry *dentry,
58804+ const struct vfsmount *mnt,
58805+ const int unsafe_share);
58806+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
58807+ const struct vfsmount *mnt);
58808+__u32 gr_acl_handle_open(const struct dentry *dentry,
58809+ const struct vfsmount *mnt, const int fmode);
58810+__u32 gr_acl_handle_creat(const struct dentry *dentry,
58811+ const struct dentry *p_dentry,
58812+ const struct vfsmount *p_mnt, const int fmode,
58813+ const int imode);
58814+void gr_handle_create(const struct dentry *dentry,
58815+ const struct vfsmount *mnt);
58816+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
58817+ const struct dentry *parent_dentry,
58818+ const struct vfsmount *parent_mnt,
58819+ const int mode);
58820+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
58821+ const struct dentry *parent_dentry,
58822+ const struct vfsmount *parent_mnt);
58823+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
58824+ const struct vfsmount *mnt);
58825+void gr_handle_delete(const ino_t ino, const dev_t dev);
58826+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
58827+ const struct vfsmount *mnt);
58828+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
58829+ const struct dentry *parent_dentry,
58830+ const struct vfsmount *parent_mnt,
58831+ const char *from);
58832+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
58833+ const struct dentry *parent_dentry,
58834+ const struct vfsmount *parent_mnt,
58835+ const struct dentry *old_dentry,
58836+ const struct vfsmount *old_mnt, const char *to);
58837+int gr_acl_handle_rename(struct dentry *new_dentry,
58838+ struct dentry *parent_dentry,
58839+ const struct vfsmount *parent_mnt,
58840+ struct dentry *old_dentry,
58841+ struct inode *old_parent_inode,
58842+ struct vfsmount *old_mnt, const char *newname);
58843+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58844+ struct dentry *old_dentry,
58845+ struct dentry *new_dentry,
58846+ struct vfsmount *mnt, const __u8 replace);
58847+__u32 gr_check_link(const struct dentry *new_dentry,
58848+ const struct dentry *parent_dentry,
58849+ const struct vfsmount *parent_mnt,
58850+ const struct dentry *old_dentry,
58851+ const struct vfsmount *old_mnt);
58852+int gr_acl_handle_filldir(const struct file *file, const char *name,
58853+ const unsigned int namelen, const ino_t ino);
58854+
58855+__u32 gr_acl_handle_unix(const struct dentry *dentry,
58856+ const struct vfsmount *mnt);
58857+void gr_acl_handle_exit(void);
58858+void gr_acl_handle_psacct(struct task_struct *task, const long code);
58859+int gr_acl_handle_procpidmem(const struct task_struct *task);
58860+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
58861+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
58862+void gr_audit_ptrace(struct task_struct *task);
58863+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
58864+
58865+#ifdef CONFIG_GRKERNSEC
58866+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
58867+void gr_handle_vm86(void);
58868+void gr_handle_mem_readwrite(u64 from, u64 to);
58869+
58870+extern int grsec_enable_dmesg;
58871+extern int grsec_disable_privio;
58872+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58873+extern int grsec_enable_chroot_findtask;
58874+#endif
58875+#endif
58876+
58877+#endif
58878diff -urNp linux-2.6.32.45/include/linux/hdpu_features.h linux-2.6.32.45/include/linux/hdpu_features.h
58879--- linux-2.6.32.45/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
58880+++ linux-2.6.32.45/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
58881@@ -3,7 +3,7 @@
58882 struct cpustate_t {
58883 spinlock_t lock;
58884 int excl;
58885- int open_count;
58886+ atomic_t open_count;
58887 unsigned char cached_val;
58888 int inited;
58889 unsigned long *set_addr;
58890diff -urNp linux-2.6.32.45/include/linux/highmem.h linux-2.6.32.45/include/linux/highmem.h
58891--- linux-2.6.32.45/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
58892+++ linux-2.6.32.45/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
58893@@ -137,6 +137,18 @@ static inline void clear_highpage(struct
58894 kunmap_atomic(kaddr, KM_USER0);
58895 }
58896
58897+static inline void sanitize_highpage(struct page *page)
58898+{
58899+ void *kaddr;
58900+ unsigned long flags;
58901+
58902+ local_irq_save(flags);
58903+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
58904+ clear_page(kaddr);
58905+ kunmap_atomic(kaddr, KM_CLEARPAGE);
58906+ local_irq_restore(flags);
58907+}
58908+
58909 static inline void zero_user_segments(struct page *page,
58910 unsigned start1, unsigned end1,
58911 unsigned start2, unsigned end2)
58912diff -urNp linux-2.6.32.45/include/linux/i2c.h linux-2.6.32.45/include/linux/i2c.h
58913--- linux-2.6.32.45/include/linux/i2c.h 2011-03-27 14:31:47.000000000 -0400
58914+++ linux-2.6.32.45/include/linux/i2c.h 2011-08-23 21:22:38.000000000 -0400
58915@@ -325,6 +325,7 @@ struct i2c_algorithm {
58916 /* To determine what the adapter supports */
58917 u32 (*functionality) (struct i2c_adapter *);
58918 };
58919+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
58920
58921 /*
58922 * i2c_adapter is the structure used to identify a physical i2c bus along
58923diff -urNp linux-2.6.32.45/include/linux/i2o.h linux-2.6.32.45/include/linux/i2o.h
58924--- linux-2.6.32.45/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
58925+++ linux-2.6.32.45/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
58926@@ -564,7 +564,7 @@ struct i2o_controller {
58927 struct i2o_device *exec; /* Executive */
58928 #if BITS_PER_LONG == 64
58929 spinlock_t context_list_lock; /* lock for context_list */
58930- atomic_t context_list_counter; /* needed for unique contexts */
58931+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
58932 struct list_head context_list; /* list of context id's
58933 and pointers */
58934 #endif
58935diff -urNp linux-2.6.32.45/include/linux/init_task.h linux-2.6.32.45/include/linux/init_task.h
58936--- linux-2.6.32.45/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
58937+++ linux-2.6.32.45/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
58938@@ -83,6 +83,12 @@ extern struct group_info init_groups;
58939 #define INIT_IDS
58940 #endif
58941
58942+#ifdef CONFIG_X86
58943+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
58944+#else
58945+#define INIT_TASK_THREAD_INFO
58946+#endif
58947+
58948 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
58949 /*
58950 * Because of the reduced scope of CAP_SETPCAP when filesystem
58951@@ -156,6 +162,7 @@ extern struct cred init_cred;
58952 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
58953 .comm = "swapper", \
58954 .thread = INIT_THREAD, \
58955+ INIT_TASK_THREAD_INFO \
58956 .fs = &init_fs, \
58957 .files = &init_files, \
58958 .signal = &init_signals, \
58959diff -urNp linux-2.6.32.45/include/linux/intel-iommu.h linux-2.6.32.45/include/linux/intel-iommu.h
58960--- linux-2.6.32.45/include/linux/intel-iommu.h 2011-03-27 14:31:47.000000000 -0400
58961+++ linux-2.6.32.45/include/linux/intel-iommu.h 2011-08-05 20:33:55.000000000 -0400
58962@@ -296,7 +296,7 @@ struct iommu_flush {
58963 u8 fm, u64 type);
58964 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
58965 unsigned int size_order, u64 type);
58966-};
58967+} __no_const;
58968
58969 enum {
58970 SR_DMAR_FECTL_REG,
58971diff -urNp linux-2.6.32.45/include/linux/interrupt.h linux-2.6.32.45/include/linux/interrupt.h
58972--- linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
58973+++ linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
58974@@ -363,7 +363,7 @@ enum
58975 /* map softirq index to softirq name. update 'softirq_to_name' in
58976 * kernel/softirq.c when adding a new softirq.
58977 */
58978-extern char *softirq_to_name[NR_SOFTIRQS];
58979+extern const char * const softirq_to_name[NR_SOFTIRQS];
58980
58981 /* softirq mask and active fields moved to irq_cpustat_t in
58982 * asm/hardirq.h to get better cache usage. KAO
58983@@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
58984
58985 struct softirq_action
58986 {
58987- void (*action)(struct softirq_action *);
58988+ void (*action)(void);
58989 };
58990
58991 asmlinkage void do_softirq(void);
58992 asmlinkage void __do_softirq(void);
58993-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
58994+extern void open_softirq(int nr, void (*action)(void));
58995 extern void softirq_init(void);
58996 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
58997 extern void raise_softirq_irqoff(unsigned int nr);
58998diff -urNp linux-2.6.32.45/include/linux/irq.h linux-2.6.32.45/include/linux/irq.h
58999--- linux-2.6.32.45/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
59000+++ linux-2.6.32.45/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
59001@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
59002 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
59003 bool boot)
59004 {
59005+#ifdef CONFIG_CPUMASK_OFFSTACK
59006 gfp_t gfp = GFP_ATOMIC;
59007
59008 if (boot)
59009 gfp = GFP_NOWAIT;
59010
59011-#ifdef CONFIG_CPUMASK_OFFSTACK
59012 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
59013 return false;
59014
59015diff -urNp linux-2.6.32.45/include/linux/kallsyms.h linux-2.6.32.45/include/linux/kallsyms.h
59016--- linux-2.6.32.45/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
59017+++ linux-2.6.32.45/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
59018@@ -15,7 +15,8 @@
59019
59020 struct module;
59021
59022-#ifdef CONFIG_KALLSYMS
59023+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59024+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59025 /* Lookup the address for a symbol. Returns 0 if not found. */
59026 unsigned long kallsyms_lookup_name(const char *name);
59027
59028@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
59029 /* Stupid that this does nothing, but I didn't create this mess. */
59030 #define __print_symbol(fmt, addr)
59031 #endif /*CONFIG_KALLSYMS*/
59032+#else /* when included by kallsyms.c, vsnprintf.c, or
59033+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59034+extern void __print_symbol(const char *fmt, unsigned long address);
59035+extern int sprint_symbol(char *buffer, unsigned long address);
59036+const char *kallsyms_lookup(unsigned long addr,
59037+ unsigned long *symbolsize,
59038+ unsigned long *offset,
59039+ char **modname, char *namebuf);
59040+#endif
59041
59042 /* This macro allows us to keep printk typechecking */
59043 static void __check_printsym_format(const char *fmt, ...)
59044diff -urNp linux-2.6.32.45/include/linux/kgdb.h linux-2.6.32.45/include/linux/kgdb.h
59045--- linux-2.6.32.45/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
59046+++ linux-2.6.32.45/include/linux/kgdb.h 2011-08-05 20:33:55.000000000 -0400
59047@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
59048
59049 extern int kgdb_connected;
59050
59051-extern atomic_t kgdb_setting_breakpoint;
59052-extern atomic_t kgdb_cpu_doing_single_step;
59053+extern atomic_unchecked_t kgdb_setting_breakpoint;
59054+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59055
59056 extern struct task_struct *kgdb_usethread;
59057 extern struct task_struct *kgdb_contthread;
59058@@ -226,8 +226,8 @@ extern int kgdb_arch_remove_breakpoint(u
59059 * hardware debug registers.
59060 */
59061 struct kgdb_arch {
59062- unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59063- unsigned long flags;
59064+ const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59065+ const unsigned long flags;
59066
59067 int (*set_breakpoint)(unsigned long, char *);
59068 int (*remove_breakpoint)(unsigned long, char *);
59069@@ -251,20 +251,20 @@ struct kgdb_arch {
59070 */
59071 struct kgdb_io {
59072 const char *name;
59073- int (*read_char) (void);
59074- void (*write_char) (u8);
59075- void (*flush) (void);
59076- int (*init) (void);
59077- void (*pre_exception) (void);
59078- void (*post_exception) (void);
59079+ int (* const read_char) (void);
59080+ void (* const write_char) (u8);
59081+ void (* const flush) (void);
59082+ int (* const init) (void);
59083+ void (* const pre_exception) (void);
59084+ void (* const post_exception) (void);
59085 };
59086
59087-extern struct kgdb_arch arch_kgdb_ops;
59088+extern const struct kgdb_arch arch_kgdb_ops;
59089
59090 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
59091
59092-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
59093-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
59094+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
59095+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
59096
59097 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
59098 extern int kgdb_mem2hex(char *mem, char *buf, int count);
59099diff -urNp linux-2.6.32.45/include/linux/kmod.h linux-2.6.32.45/include/linux/kmod.h
59100--- linux-2.6.32.45/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
59101+++ linux-2.6.32.45/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
59102@@ -31,6 +31,8 @@
59103 * usually useless though. */
59104 extern int __request_module(bool wait, const char *name, ...) \
59105 __attribute__((format(printf, 2, 3)));
59106+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59107+ __attribute__((format(printf, 3, 4)));
59108 #define request_module(mod...) __request_module(true, mod)
59109 #define request_module_nowait(mod...) __request_module(false, mod)
59110 #define try_then_request_module(x, mod...) \
59111diff -urNp linux-2.6.32.45/include/linux/kobject.h linux-2.6.32.45/include/linux/kobject.h
59112--- linux-2.6.32.45/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
59113+++ linux-2.6.32.45/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
59114@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
59115
59116 struct kobj_type {
59117 void (*release)(struct kobject *kobj);
59118- struct sysfs_ops *sysfs_ops;
59119+ const struct sysfs_ops *sysfs_ops;
59120 struct attribute **default_attrs;
59121 };
59122
59123@@ -118,9 +118,9 @@ struct kobj_uevent_env {
59124 };
59125
59126 struct kset_uevent_ops {
59127- int (*filter)(struct kset *kset, struct kobject *kobj);
59128- const char *(*name)(struct kset *kset, struct kobject *kobj);
59129- int (*uevent)(struct kset *kset, struct kobject *kobj,
59130+ int (* const filter)(struct kset *kset, struct kobject *kobj);
59131+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
59132+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
59133 struct kobj_uevent_env *env);
59134 };
59135
59136@@ -132,7 +132,7 @@ struct kobj_attribute {
59137 const char *buf, size_t count);
59138 };
59139
59140-extern struct sysfs_ops kobj_sysfs_ops;
59141+extern const struct sysfs_ops kobj_sysfs_ops;
59142
59143 /**
59144 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
59145@@ -155,14 +155,14 @@ struct kset {
59146 struct list_head list;
59147 spinlock_t list_lock;
59148 struct kobject kobj;
59149- struct kset_uevent_ops *uevent_ops;
59150+ const struct kset_uevent_ops *uevent_ops;
59151 };
59152
59153 extern void kset_init(struct kset *kset);
59154 extern int __must_check kset_register(struct kset *kset);
59155 extern void kset_unregister(struct kset *kset);
59156 extern struct kset * __must_check kset_create_and_add(const char *name,
59157- struct kset_uevent_ops *u,
59158+ const struct kset_uevent_ops *u,
59159 struct kobject *parent_kobj);
59160
59161 static inline struct kset *to_kset(struct kobject *kobj)
59162diff -urNp linux-2.6.32.45/include/linux/kvm_host.h linux-2.6.32.45/include/linux/kvm_host.h
59163--- linux-2.6.32.45/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
59164+++ linux-2.6.32.45/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
59165@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59166 void vcpu_load(struct kvm_vcpu *vcpu);
59167 void vcpu_put(struct kvm_vcpu *vcpu);
59168
59169-int kvm_init(void *opaque, unsigned int vcpu_size,
59170+int kvm_init(const void *opaque, unsigned int vcpu_size,
59171 struct module *module);
59172 void kvm_exit(void);
59173
59174@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59175 struct kvm_guest_debug *dbg);
59176 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59177
59178-int kvm_arch_init(void *opaque);
59179+int kvm_arch_init(const void *opaque);
59180 void kvm_arch_exit(void);
59181
59182 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59183diff -urNp linux-2.6.32.45/include/linux/libata.h linux-2.6.32.45/include/linux/libata.h
59184--- linux-2.6.32.45/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
59185+++ linux-2.6.32.45/include/linux/libata.h 2011-08-05 20:33:55.000000000 -0400
59186@@ -525,11 +525,11 @@ struct ata_ioports {
59187
59188 struct ata_host {
59189 spinlock_t lock;
59190- struct device *dev;
59191+ struct device *dev;
59192 void __iomem * const *iomap;
59193 unsigned int n_ports;
59194 void *private_data;
59195- struct ata_port_operations *ops;
59196+ const struct ata_port_operations *ops;
59197 unsigned long flags;
59198 #ifdef CONFIG_ATA_ACPI
59199 acpi_handle acpi_handle;
59200@@ -710,7 +710,7 @@ struct ata_link {
59201
59202 struct ata_port {
59203 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
59204- struct ata_port_operations *ops;
59205+ const struct ata_port_operations *ops;
59206 spinlock_t *lock;
59207 /* Flags owned by the EH context. Only EH should touch these once the
59208 port is active */
59209@@ -883,7 +883,7 @@ struct ata_port_operations {
59210 * ->inherits must be the last field and all the preceding
59211 * fields must be pointers.
59212 */
59213- const struct ata_port_operations *inherits;
59214+ const struct ata_port_operations * const inherits;
59215 };
59216
59217 struct ata_port_info {
59218@@ -892,7 +892,7 @@ struct ata_port_info {
59219 unsigned long pio_mask;
59220 unsigned long mwdma_mask;
59221 unsigned long udma_mask;
59222- struct ata_port_operations *port_ops;
59223+ const struct ata_port_operations *port_ops;
59224 void *private_data;
59225 };
59226
59227@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
59228 extern const unsigned long sata_deb_timing_hotplug[];
59229 extern const unsigned long sata_deb_timing_long[];
59230
59231-extern struct ata_port_operations ata_dummy_port_ops;
59232+extern const struct ata_port_operations ata_dummy_port_ops;
59233 extern const struct ata_port_info ata_dummy_port_info;
59234
59235 static inline const unsigned long *
59236@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
59237 struct scsi_host_template *sht);
59238 extern void ata_host_detach(struct ata_host *host);
59239 extern void ata_host_init(struct ata_host *, struct device *,
59240- unsigned long, struct ata_port_operations *);
59241+ unsigned long, const struct ata_port_operations *);
59242 extern int ata_scsi_detect(struct scsi_host_template *sht);
59243 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
59244 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
59245diff -urNp linux-2.6.32.45/include/linux/lockd/bind.h linux-2.6.32.45/include/linux/lockd/bind.h
59246--- linux-2.6.32.45/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
59247+++ linux-2.6.32.45/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
59248@@ -23,13 +23,13 @@ struct svc_rqst;
59249 * This is the set of functions for lockd->nfsd communication
59250 */
59251 struct nlmsvc_binding {
59252- __be32 (*fopen)(struct svc_rqst *,
59253+ __be32 (* const fopen)(struct svc_rqst *,
59254 struct nfs_fh *,
59255 struct file **);
59256- void (*fclose)(struct file *);
59257+ void (* const fclose)(struct file *);
59258 };
59259
59260-extern struct nlmsvc_binding * nlmsvc_ops;
59261+extern const struct nlmsvc_binding * nlmsvc_ops;
59262
59263 /*
59264 * Similar to nfs_client_initdata, but without the NFS-specific
59265diff -urNp linux-2.6.32.45/include/linux/mca.h linux-2.6.32.45/include/linux/mca.h
59266--- linux-2.6.32.45/include/linux/mca.h 2011-03-27 14:31:47.000000000 -0400
59267+++ linux-2.6.32.45/include/linux/mca.h 2011-08-05 20:33:55.000000000 -0400
59268@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59269 int region);
59270 void * (*mca_transform_memory)(struct mca_device *,
59271 void *memory);
59272-};
59273+} __no_const;
59274
59275 struct mca_bus {
59276 u64 default_dma_mask;
59277diff -urNp linux-2.6.32.45/include/linux/memory.h linux-2.6.32.45/include/linux/memory.h
59278--- linux-2.6.32.45/include/linux/memory.h 2011-03-27 14:31:47.000000000 -0400
59279+++ linux-2.6.32.45/include/linux/memory.h 2011-08-05 20:33:55.000000000 -0400
59280@@ -108,7 +108,7 @@ struct memory_accessor {
59281 size_t count);
59282 ssize_t (*write)(struct memory_accessor *, const char *buf,
59283 off_t offset, size_t count);
59284-};
59285+} __no_const;
59286
59287 /*
59288 * Kernel text modification mutex, used for code patching. Users of this lock
59289diff -urNp linux-2.6.32.45/include/linux/mm.h linux-2.6.32.45/include/linux/mm.h
59290--- linux-2.6.32.45/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
59291+++ linux-2.6.32.45/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
59292@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
59293
59294 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59295 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59296+
59297+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59298+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59299+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59300+#else
59301 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59302+#endif
59303+
59304 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59305 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59306
59307@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
59308 int set_page_dirty_lock(struct page *page);
59309 int clear_page_dirty_for_io(struct page *page);
59310
59311-/* Is the vma a continuation of the stack vma above it? */
59312-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
59313-{
59314- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59315-}
59316-
59317 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59318 unsigned long old_addr, struct vm_area_struct *new_vma,
59319 unsigned long new_addr, unsigned long len);
59320@@ -890,6 +891,8 @@ struct shrinker {
59321 extern void register_shrinker(struct shrinker *);
59322 extern void unregister_shrinker(struct shrinker *);
59323
59324+pgprot_t vm_get_page_prot(unsigned long vm_flags);
59325+
59326 int vma_wants_writenotify(struct vm_area_struct *vma);
59327
59328 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
59329@@ -1162,6 +1165,7 @@ out:
59330 }
59331
59332 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59333+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59334
59335 extern unsigned long do_brk(unsigned long, unsigned long);
59336
59337@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
59338 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59339 struct vm_area_struct **pprev);
59340
59341+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59342+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59343+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59344+
59345 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59346 NULL if none. Assume start_addr < end_addr. */
59347 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59348@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
59349 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59350 }
59351
59352-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59353 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59354 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59355 unsigned long pfn, unsigned long size, pgprot_t);
59356@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
59357 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
59358 extern int sysctl_memory_failure_early_kill;
59359 extern int sysctl_memory_failure_recovery;
59360-extern atomic_long_t mce_bad_pages;
59361+extern atomic_long_unchecked_t mce_bad_pages;
59362+
59363+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59364+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59365+#else
59366+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59367+#endif
59368
59369 #endif /* __KERNEL__ */
59370 #endif /* _LINUX_MM_H */
59371diff -urNp linux-2.6.32.45/include/linux/mm_types.h linux-2.6.32.45/include/linux/mm_types.h
59372--- linux-2.6.32.45/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
59373+++ linux-2.6.32.45/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
59374@@ -186,6 +186,8 @@ struct vm_area_struct {
59375 #ifdef CONFIG_NUMA
59376 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59377 #endif
59378+
59379+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59380 };
59381
59382 struct core_thread {
59383@@ -287,6 +289,24 @@ struct mm_struct {
59384 #ifdef CONFIG_MMU_NOTIFIER
59385 struct mmu_notifier_mm *mmu_notifier_mm;
59386 #endif
59387+
59388+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59389+ unsigned long pax_flags;
59390+#endif
59391+
59392+#ifdef CONFIG_PAX_DLRESOLVE
59393+ unsigned long call_dl_resolve;
59394+#endif
59395+
59396+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59397+ unsigned long call_syscall;
59398+#endif
59399+
59400+#ifdef CONFIG_PAX_ASLR
59401+ unsigned long delta_mmap; /* randomized offset */
59402+ unsigned long delta_stack; /* randomized offset */
59403+#endif
59404+
59405 };
59406
59407 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
59408diff -urNp linux-2.6.32.45/include/linux/mmu_notifier.h linux-2.6.32.45/include/linux/mmu_notifier.h
59409--- linux-2.6.32.45/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
59410+++ linux-2.6.32.45/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
59411@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
59412 */
59413 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59414 ({ \
59415- pte_t __pte; \
59416+ pte_t ___pte; \
59417 struct vm_area_struct *___vma = __vma; \
59418 unsigned long ___address = __address; \
59419- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59420+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59421 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59422- __pte; \
59423+ ___pte; \
59424 })
59425
59426 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
59427diff -urNp linux-2.6.32.45/include/linux/mmzone.h linux-2.6.32.45/include/linux/mmzone.h
59428--- linux-2.6.32.45/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
59429+++ linux-2.6.32.45/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
59430@@ -350,7 +350,7 @@ struct zone {
59431 unsigned long flags; /* zone flags, see below */
59432
59433 /* Zone statistics */
59434- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59435+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59436
59437 /*
59438 * prev_priority holds the scanning priority for this zone. It is
59439diff -urNp linux-2.6.32.45/include/linux/mod_devicetable.h linux-2.6.32.45/include/linux/mod_devicetable.h
59440--- linux-2.6.32.45/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
59441+++ linux-2.6.32.45/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
59442@@ -12,7 +12,7 @@
59443 typedef unsigned long kernel_ulong_t;
59444 #endif
59445
59446-#define PCI_ANY_ID (~0)
59447+#define PCI_ANY_ID ((__u16)~0)
59448
59449 struct pci_device_id {
59450 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59451@@ -131,7 +131,7 @@ struct usb_device_id {
59452 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59453 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59454
59455-#define HID_ANY_ID (~0)
59456+#define HID_ANY_ID (~0U)
59457
59458 struct hid_device_id {
59459 __u16 bus;
59460diff -urNp linux-2.6.32.45/include/linux/module.h linux-2.6.32.45/include/linux/module.h
59461--- linux-2.6.32.45/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
59462+++ linux-2.6.32.45/include/linux/module.h 2011-08-05 20:33:55.000000000 -0400
59463@@ -16,6 +16,7 @@
59464 #include <linux/kobject.h>
59465 #include <linux/moduleparam.h>
59466 #include <linux/tracepoint.h>
59467+#include <linux/fs.h>
59468
59469 #include <asm/local.h>
59470 #include <asm/module.h>
59471@@ -287,16 +288,16 @@ struct module
59472 int (*init)(void);
59473
59474 /* If this is non-NULL, vfree after init() returns */
59475- void *module_init;
59476+ void *module_init_rx, *module_init_rw;
59477
59478 /* Here is the actual code + data, vfree'd on unload. */
59479- void *module_core;
59480+ void *module_core_rx, *module_core_rw;
59481
59482 /* Here are the sizes of the init and core sections */
59483- unsigned int init_size, core_size;
59484+ unsigned int init_size_rw, core_size_rw;
59485
59486 /* The size of the executable code in each section. */
59487- unsigned int init_text_size, core_text_size;
59488+ unsigned int init_size_rx, core_size_rx;
59489
59490 /* Arch-specific module values */
59491 struct mod_arch_specific arch;
59492@@ -345,6 +346,10 @@ struct module
59493 #ifdef CONFIG_EVENT_TRACING
59494 struct ftrace_event_call *trace_events;
59495 unsigned int num_trace_events;
59496+ struct file_operations trace_id;
59497+ struct file_operations trace_enable;
59498+ struct file_operations trace_format;
59499+ struct file_operations trace_filter;
59500 #endif
59501 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59502 unsigned long *ftrace_callsites;
59503@@ -393,16 +398,46 @@ struct module *__module_address(unsigned
59504 bool is_module_address(unsigned long addr);
59505 bool is_module_text_address(unsigned long addr);
59506
59507+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59508+{
59509+
59510+#ifdef CONFIG_PAX_KERNEXEC
59511+ if (ktla_ktva(addr) >= (unsigned long)start &&
59512+ ktla_ktva(addr) < (unsigned long)start + size)
59513+ return 1;
59514+#endif
59515+
59516+ return ((void *)addr >= start && (void *)addr < start + size);
59517+}
59518+
59519+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59520+{
59521+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59522+}
59523+
59524+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59525+{
59526+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59527+}
59528+
59529+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59530+{
59531+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59532+}
59533+
59534+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59535+{
59536+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59537+}
59538+
59539 static inline int within_module_core(unsigned long addr, struct module *mod)
59540 {
59541- return (unsigned long)mod->module_core <= addr &&
59542- addr < (unsigned long)mod->module_core + mod->core_size;
59543+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59544 }
59545
59546 static inline int within_module_init(unsigned long addr, struct module *mod)
59547 {
59548- return (unsigned long)mod->module_init <= addr &&
59549- addr < (unsigned long)mod->module_init + mod->init_size;
59550+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59551 }
59552
59553 /* Search for module by name: must hold module_mutex. */
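
With module_core/module_init split into separate rx and rw allocations above, within_module_core()/within_module_init() become a membership test over two disjoint regions (plus a ktla_ktva() address translation under KERNEXEC). A rough userspace sketch of the two-region test, assuming invented names (fake_module, in_range, within_core) rather than the patch's API:

#include <stdio.h>
#include <stdlib.h>

/* A "module" now owns two separately allocated core regions instead of
 * one, so the membership test has to accept an address in either half. */
struct fake_module {
        void *core_rx;  unsigned long core_size_rx;
        void *core_rw;  unsigned long core_size_rw;
};

static int in_range(unsigned long addr, void *start, unsigned long size)
{
        return (void *)addr >= start && (void *)addr < (void *)((char *)start + size);
}

static int within_core(unsigned long addr, const struct fake_module *mod)
{
        return in_range(addr, mod->core_rx, mod->core_size_rx) ||
               in_range(addr, mod->core_rw, mod->core_size_rw);
}

int main(void)
{
        struct fake_module m = {
                .core_rx = malloc(64), .core_size_rx = 64,
                .core_rw = malloc(64), .core_size_rw = 64,
        };
        unsigned long text = (unsigned long)m.core_rx + 8;
        unsigned long data = (unsigned long)m.core_rw + 8;

        printf("text in core: %d\n", within_core(text, &m));   /* 1 */
        printf("data in core: %d\n", within_core(data, &m));   /* 1 */
        free(m.core_rx);
        free(m.core_rw);
        return 0;
}
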
59554diff -urNp linux-2.6.32.45/include/linux/moduleloader.h linux-2.6.32.45/include/linux/moduleloader.h
59555--- linux-2.6.32.45/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
59556+++ linux-2.6.32.45/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
59557@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59558 sections. Returns NULL on failure. */
59559 void *module_alloc(unsigned long size);
59560
59561+#ifdef CONFIG_PAX_KERNEXEC
59562+void *module_alloc_exec(unsigned long size);
59563+#else
59564+#define module_alloc_exec(x) module_alloc(x)
59565+#endif
59566+
59567 /* Free memory returned from module_alloc. */
59568 void module_free(struct module *mod, void *module_region);
59569
59570+#ifdef CONFIG_PAX_KERNEXEC
59571+void module_free_exec(struct module *mod, void *module_region);
59572+#else
59573+#define module_free_exec(x, y) module_free((x), (y))
59574+#endif
59575+
59576 /* Apply the given relocation to the (simplified) ELF. Return -error
59577 or 0. */
59578 int apply_relocate(Elf_Shdr *sechdrs,
59579diff -urNp linux-2.6.32.45/include/linux/moduleparam.h linux-2.6.32.45/include/linux/moduleparam.h
59580--- linux-2.6.32.45/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
59581+++ linux-2.6.32.45/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
59582@@ -132,7 +132,7 @@ struct kparam_array
59583
59584 /* Actually copy string: maxlen param is usually sizeof(string). */
59585 #define module_param_string(name, string, len, perm) \
59586- static const struct kparam_string __param_string_##name \
59587+ static const struct kparam_string __param_string_##name __used \
59588 = { len, string }; \
59589 __module_param_call(MODULE_PARAM_PREFIX, name, \
59590 param_set_copystring, param_get_string, \
59591@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
59592
59593 /* Comma-separated array: *nump is set to number they actually specified. */
59594 #define module_param_array_named(name, array, type, nump, perm) \
59595- static const struct kparam_array __param_arr_##name \
59596+ static const struct kparam_array __param_arr_##name __used \
59597 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
59598 sizeof(array[0]), array }; \
59599 __module_param_call(MODULE_PARAM_PREFIX, name, \
59600diff -urNp linux-2.6.32.45/include/linux/mutex.h linux-2.6.32.45/include/linux/mutex.h
59601--- linux-2.6.32.45/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
59602+++ linux-2.6.32.45/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
59603@@ -51,7 +51,7 @@ struct mutex {
59604 spinlock_t wait_lock;
59605 struct list_head wait_list;
59606 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
59607- struct thread_info *owner;
59608+ struct task_struct *owner;
59609 #endif
59610 #ifdef CONFIG_DEBUG_MUTEXES
59611 const char *name;
59612diff -urNp linux-2.6.32.45/include/linux/namei.h linux-2.6.32.45/include/linux/namei.h
59613--- linux-2.6.32.45/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
59614+++ linux-2.6.32.45/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
59615@@ -22,7 +22,7 @@ struct nameidata {
59616 unsigned int flags;
59617 int last_type;
59618 unsigned depth;
59619- char *saved_names[MAX_NESTED_LINKS + 1];
59620+ const char *saved_names[MAX_NESTED_LINKS + 1];
59621
59622 /* Intent data */
59623 union {
59624@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
59625 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59626 extern void unlock_rename(struct dentry *, struct dentry *);
59627
59628-static inline void nd_set_link(struct nameidata *nd, char *path)
59629+static inline void nd_set_link(struct nameidata *nd, const char *path)
59630 {
59631 nd->saved_names[nd->depth] = path;
59632 }
59633
59634-static inline char *nd_get_link(struct nameidata *nd)
59635+static inline const char *nd_get_link(const struct nameidata *nd)
59636 {
59637 return nd->saved_names[nd->depth];
59638 }
59639diff -urNp linux-2.6.32.45/include/linux/netdevice.h linux-2.6.32.45/include/linux/netdevice.h
59640--- linux-2.6.32.45/include/linux/netdevice.h 2011-08-09 18:35:30.000000000 -0400
59641+++ linux-2.6.32.45/include/linux/netdevice.h 2011-08-23 21:22:38.000000000 -0400
59642@@ -637,6 +637,7 @@ struct net_device_ops {
59643 u16 xid);
59644 #endif
59645 };
59646+typedef struct net_device_ops __no_const net_device_ops_no_const;
59647
59648 /*
59649 * The DEVICE structure.
59650diff -urNp linux-2.6.32.45/include/linux/netfilter/xt_gradm.h linux-2.6.32.45/include/linux/netfilter/xt_gradm.h
59651--- linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59652+++ linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
59653@@ -0,0 +1,9 @@
59654+#ifndef _LINUX_NETFILTER_XT_GRADM_H
59655+#define _LINUX_NETFILTER_XT_GRADM_H 1
59656+
59657+struct xt_gradm_mtinfo {
59658+ __u16 flags;
59659+ __u16 invflags;
59660+};
59661+
59662+#endif
59663diff -urNp linux-2.6.32.45/include/linux/nodemask.h linux-2.6.32.45/include/linux/nodemask.h
59664--- linux-2.6.32.45/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
59665+++ linux-2.6.32.45/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
59666@@ -464,11 +464,11 @@ static inline int num_node_state(enum no
59667
59668 #define any_online_node(mask) \
59669 ({ \
59670- int node; \
59671- for_each_node_mask(node, (mask)) \
59672- if (node_online(node)) \
59673+ int __node; \
59674+ for_each_node_mask(__node, (mask)) \
59675+ if (node_online(__node)) \
59676 break; \
59677- node; \
59678+ __node; \
59679 })
59680
59681 #define num_online_nodes() num_node_state(N_ONLINE)
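
The node -> __node rename in any_online_node() above (like the __pte -> ___pte rename in the mmu_notifier hunk) guards against name capture inside a GNU statement-expression macro: if the mask argument expands to code that itself mentions a variable of the same name, it would silently bind to the macro's local. A small userspace illustration of the hazard, with DOUBLE_BAD/DOUBLE_GOOD as invented names:

#include <stdio.h>

/* A temporary with a "likely" name can capture the caller's variable of
 * the same name once the macro argument is expanded inside the block. */
#define DOUBLE_BAD(x)   ({ int __tmp = (x); __tmp * 2; })

/* An intentionally unlikely name sidesteps the capture. */
#define DOUBLE_GOOD(x)  ({ int ___tmp = (x); ___tmp * 2; })

int main(void)
{
        int __tmp = 21;

        /* DOUBLE_BAD(__tmp) would expand to "int __tmp = (__tmp);" --
         * self-initialization from an indeterminate value -- which is
         * exactly the class of bug these renames prevent. */
        printf("%d\n", DOUBLE_GOOD(__tmp));     /* prints 42 */
        return 0;
}
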
59682diff -urNp linux-2.6.32.45/include/linux/oprofile.h linux-2.6.32.45/include/linux/oprofile.h
59683--- linux-2.6.32.45/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
59684+++ linux-2.6.32.45/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
59685@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
59686 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59687 char const * name, ulong * val);
59688
59689-/** Create a file for read-only access to an atomic_t. */
59690+/** Create a file for read-only access to an atomic_unchecked_t. */
59691 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59692- char const * name, atomic_t * val);
59693+ char const * name, atomic_unchecked_t * val);
59694
59695 /** create a directory */
59696 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59697diff -urNp linux-2.6.32.45/include/linux/pagemap.h linux-2.6.32.45/include/linux/pagemap.h
59698--- linux-2.6.32.45/include/linux/pagemap.h 2011-03-27 14:31:47.000000000 -0400
59699+++ linux-2.6.32.45/include/linux/pagemap.h 2011-08-17 19:36:28.000000000 -0400
59700@@ -425,6 +425,7 @@ static inline int fault_in_pages_readabl
59701 if (((unsigned long)uaddr & PAGE_MASK) !=
59702 ((unsigned long)end & PAGE_MASK))
59703 ret = __get_user(c, end);
59704+ (void)c;
59705 }
59706 return ret;
59707 }
59708diff -urNp linux-2.6.32.45/include/linux/perf_event.h linux-2.6.32.45/include/linux/perf_event.h
59709--- linux-2.6.32.45/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
59710+++ linux-2.6.32.45/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
59711@@ -476,7 +476,7 @@ struct hw_perf_event {
59712 struct hrtimer hrtimer;
59713 };
59714 };
59715- atomic64_t prev_count;
59716+ atomic64_unchecked_t prev_count;
59717 u64 sample_period;
59718 u64 last_period;
59719 atomic64_t period_left;
59720@@ -557,7 +557,7 @@ struct perf_event {
59721 const struct pmu *pmu;
59722
59723 enum perf_event_active_state state;
59724- atomic64_t count;
59725+ atomic64_unchecked_t count;
59726
59727 /*
59728 * These are the total time in nanoseconds that the event
59729@@ -595,8 +595,8 @@ struct perf_event {
59730 * These accumulate total time (in nanoseconds) that children
59731 * events have been enabled and running, respectively.
59732 */
59733- atomic64_t child_total_time_enabled;
59734- atomic64_t child_total_time_running;
59735+ atomic64_unchecked_t child_total_time_enabled;
59736+ atomic64_unchecked_t child_total_time_running;
59737
59738 /*
59739 * Protect attach/detach and child_list:
59740diff -urNp linux-2.6.32.45/include/linux/pipe_fs_i.h linux-2.6.32.45/include/linux/pipe_fs_i.h
59741--- linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
59742+++ linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
59743@@ -46,9 +46,9 @@ struct pipe_inode_info {
59744 wait_queue_head_t wait;
59745 unsigned int nrbufs, curbuf;
59746 struct page *tmp_page;
59747- unsigned int readers;
59748- unsigned int writers;
59749- unsigned int waiting_writers;
59750+ atomic_t readers;
59751+ atomic_t writers;
59752+ atomic_t waiting_writers;
59753 unsigned int r_counter;
59754 unsigned int w_counter;
59755 struct fasync_struct *fasync_readers;
59756diff -urNp linux-2.6.32.45/include/linux/poison.h linux-2.6.32.45/include/linux/poison.h
59757--- linux-2.6.32.45/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
59758+++ linux-2.6.32.45/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
59759@@ -19,8 +19,8 @@
59760 * under normal circumstances, used to verify that nobody uses
59761 * non-initialized list entries.
59762 */
59763-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59764-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59765+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59766+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59767
59768 /********** include/linux/timer.h **********/
59769 /*
59770diff -urNp linux-2.6.32.45/include/linux/posix-timers.h linux-2.6.32.45/include/linux/posix-timers.h
59771--- linux-2.6.32.45/include/linux/posix-timers.h 2011-03-27 14:31:47.000000000 -0400
59772+++ linux-2.6.32.45/include/linux/posix-timers.h 2011-08-05 20:33:55.000000000 -0400
59773@@ -67,7 +67,7 @@ struct k_itimer {
59774 };
59775
59776 struct k_clock {
59777- int res; /* in nanoseconds */
59778+ const int res; /* in nanoseconds */
59779 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
59780 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
59781 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
59782diff -urNp linux-2.6.32.45/include/linux/preempt.h linux-2.6.32.45/include/linux/preempt.h
59783--- linux-2.6.32.45/include/linux/preempt.h 2011-03-27 14:31:47.000000000 -0400
59784+++ linux-2.6.32.45/include/linux/preempt.h 2011-08-05 20:33:55.000000000 -0400
59785@@ -110,7 +110,7 @@ struct preempt_ops {
59786 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59787 void (*sched_out)(struct preempt_notifier *notifier,
59788 struct task_struct *next);
59789-};
59790+} __no_const;
59791
59792 /**
59793 * preempt_notifier - key for installing preemption notifiers
59794diff -urNp linux-2.6.32.45/include/linux/proc_fs.h linux-2.6.32.45/include/linux/proc_fs.h
59795--- linux-2.6.32.45/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
59796+++ linux-2.6.32.45/include/linux/proc_fs.h 2011-08-05 20:33:55.000000000 -0400
59797@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
59798 return proc_create_data(name, mode, parent, proc_fops, NULL);
59799 }
59800
59801+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
59802+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
59803+{
59804+#ifdef CONFIG_GRKERNSEC_PROC_USER
59805+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
59806+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59807+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
59808+#else
59809+ return proc_create_data(name, mode, parent, proc_fops, NULL);
59810+#endif
59811+}
59812+
59813+
59814 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
59815 mode_t mode, struct proc_dir_entry *base,
59816 read_proc_t *read_proc, void * data)
59817@@ -256,7 +269,7 @@ union proc_op {
59818 int (*proc_show)(struct seq_file *m,
59819 struct pid_namespace *ns, struct pid *pid,
59820 struct task_struct *task);
59821-};
59822+} __no_const;
59823
59824 struct ctl_table_header;
59825 struct ctl_table;
59826diff -urNp linux-2.6.32.45/include/linux/ptrace.h linux-2.6.32.45/include/linux/ptrace.h
59827--- linux-2.6.32.45/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
59828+++ linux-2.6.32.45/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
59829@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
59830 extern void exit_ptrace(struct task_struct *tracer);
59831 #define PTRACE_MODE_READ 1
59832 #define PTRACE_MODE_ATTACH 2
59833-/* Returns 0 on success, -errno on denial. */
59834-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
59835 /* Returns true on success, false on denial. */
59836 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
59837+/* Returns true on success, false on denial. */
59838+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
59839
59840 static inline int ptrace_reparented(struct task_struct *child)
59841 {
59842diff -urNp linux-2.6.32.45/include/linux/random.h linux-2.6.32.45/include/linux/random.h
59843--- linux-2.6.32.45/include/linux/random.h 2011-08-16 20:37:25.000000000 -0400
59844+++ linux-2.6.32.45/include/linux/random.h 2011-08-07 19:48:09.000000000 -0400
59845@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned l
59846 u32 random32(void);
59847 void srandom32(u32 seed);
59848
59849+static inline unsigned long pax_get_random_long(void)
59850+{
59851+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
59852+}
59853+
59854 #endif /* __KERNEL___ */
59855
59856 #endif /* _LINUX_RANDOM_H */
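
pax_get_random_long() above stitches two 32-bit random32() draws into one unsigned long, with the high half guarded by sizeof(long) > 4 so it drops out on 32-bit targets. A userspace sketch of the same composition, using rand() purely as a stand-in for random32() (not a cryptographic source, and get_random_long() here is an invented name):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static unsigned long get_random_long(void)
{
        unsigned long lo = (unsigned int)rand();
        /* Evaluated only when long is 64-bit, mirroring the patch. */
        unsigned long hi = sizeof(long) > 4 ?
                        (unsigned long)(unsigned int)rand() << 32 : 0;
        return lo + hi;
}

int main(void)
{
        srand((unsigned)time(NULL));
        printf("%#lx\n", get_random_long());
        return 0;
}
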
59857diff -urNp linux-2.6.32.45/include/linux/reboot.h linux-2.6.32.45/include/linux/reboot.h
59858--- linux-2.6.32.45/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
59859+++ linux-2.6.32.45/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
59860@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
59861 * Architecture-specific implementations of sys_reboot commands.
59862 */
59863
59864-extern void machine_restart(char *cmd);
59865-extern void machine_halt(void);
59866-extern void machine_power_off(void);
59867+extern void machine_restart(char *cmd) __noreturn;
59868+extern void machine_halt(void) __noreturn;
59869+extern void machine_power_off(void) __noreturn;
59870
59871 extern void machine_shutdown(void);
59872 struct pt_regs;
59873@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
59874 */
59875
59876 extern void kernel_restart_prepare(char *cmd);
59877-extern void kernel_restart(char *cmd);
59878-extern void kernel_halt(void);
59879-extern void kernel_power_off(void);
59880+extern void kernel_restart(char *cmd) __noreturn;
59881+extern void kernel_halt(void) __noreturn;
59882+extern void kernel_power_off(void) __noreturn;
59883
59884 void ctrl_alt_del(void);
59885
59886@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
59887 * Emergency restart, callable from an interrupt handler.
59888 */
59889
59890-extern void emergency_restart(void);
59891+extern void emergency_restart(void) __noreturn;
59892 #include <asm/emergency-restart.h>
59893
59894 #endif
59895diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs.h linux-2.6.32.45/include/linux/reiserfs_fs.h
59896--- linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
59897+++ linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
59898@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
59899 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
59900
59901 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
59902-#define get_generation(s) atomic_read (&fs_generation(s))
59903+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
59904 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
59905 #define __fs_changed(gen,s) (gen != get_generation (s))
59906 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
59907@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
59908 */
59909
59910 struct item_operations {
59911- int (*bytes_number) (struct item_head * ih, int block_size);
59912- void (*decrement_key) (struct cpu_key *);
59913- int (*is_left_mergeable) (struct reiserfs_key * ih,
59914+ int (* const bytes_number) (struct item_head * ih, int block_size);
59915+ void (* const decrement_key) (struct cpu_key *);
59916+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
59917 unsigned long bsize);
59918- void (*print_item) (struct item_head *, char *item);
59919- void (*check_item) (struct item_head *, char *item);
59920+ void (* const print_item) (struct item_head *, char *item);
59921+ void (* const check_item) (struct item_head *, char *item);
59922
59923- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
59924+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
59925 int is_affected, int insert_size);
59926- int (*check_left) (struct virtual_item * vi, int free,
59927+ int (* const check_left) (struct virtual_item * vi, int free,
59928 int start_skip, int end_skip);
59929- int (*check_right) (struct virtual_item * vi, int free);
59930- int (*part_size) (struct virtual_item * vi, int from, int to);
59931- int (*unit_num) (struct virtual_item * vi);
59932- void (*print_vi) (struct virtual_item * vi);
59933+ int (* const check_right) (struct virtual_item * vi, int free);
59934+ int (* const part_size) (struct virtual_item * vi, int from, int to);
59935+ int (* const unit_num) (struct virtual_item * vi);
59936+ void (* const print_vi) (struct virtual_item * vi);
59937 };
59938
59939-extern struct item_operations *item_ops[TYPE_ANY + 1];
59940+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
59941
59942 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
59943 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
59944diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs_sb.h linux-2.6.32.45/include/linux/reiserfs_fs_sb.h
59945--- linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
59946+++ linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
59947@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
59948 /* Comment? -Hans */
59949 wait_queue_head_t s_wait;
59950 /* To be obsoleted soon by per buffer seals.. -Hans */
59951- atomic_t s_generation_counter; // increased by one every time the
59952+ atomic_unchecked_t s_generation_counter; // increased by one every time the
59953 // tree gets re-balanced
59954 unsigned long s_properties; /* File system properties. Currently holds
59955 on-disk FS format */
59956diff -urNp linux-2.6.32.45/include/linux/relay.h linux-2.6.32.45/include/linux/relay.h
59957--- linux-2.6.32.45/include/linux/relay.h 2011-03-27 14:31:47.000000000 -0400
59958+++ linux-2.6.32.45/include/linux/relay.h 2011-08-05 20:33:55.000000000 -0400
59959@@ -159,7 +159,7 @@ struct rchan_callbacks
59960 * The callback should return 0 if successful, negative if not.
59961 */
59962 int (*remove_buf_file)(struct dentry *dentry);
59963-};
59964+} __no_const;
59965
59966 /*
59967 * CONFIG_RELAY kernel API, kernel/relay.c
59968diff -urNp linux-2.6.32.45/include/linux/rfkill.h linux-2.6.32.45/include/linux/rfkill.h
59969--- linux-2.6.32.45/include/linux/rfkill.h 2011-03-27 14:31:47.000000000 -0400
59970+++ linux-2.6.32.45/include/linux/rfkill.h 2011-08-23 21:22:38.000000000 -0400
59971@@ -144,6 +144,7 @@ struct rfkill_ops {
59972 void (*query)(struct rfkill *rfkill, void *data);
59973 int (*set_block)(void *data, bool blocked);
59974 };
59975+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
59976
59977 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
59978 /**
59979diff -urNp linux-2.6.32.45/include/linux/sched.h linux-2.6.32.45/include/linux/sched.h
59980--- linux-2.6.32.45/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
59981+++ linux-2.6.32.45/include/linux/sched.h 2011-08-11 19:48:55.000000000 -0400
59982@@ -101,6 +101,7 @@ struct bio;
59983 struct fs_struct;
59984 struct bts_context;
59985 struct perf_event_context;
59986+struct linux_binprm;
59987
59988 /*
59989 * List of flags we want to share for kernel threads,
59990@@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
59991 extern signed long schedule_timeout_uninterruptible(signed long timeout);
59992 asmlinkage void __schedule(void);
59993 asmlinkage void schedule(void);
59994-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
59995+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
59996
59997 struct nsproxy;
59998 struct user_namespace;
59999@@ -371,9 +372,12 @@ struct user_namespace;
60000 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60001
60002 extern int sysctl_max_map_count;
60003+extern unsigned long sysctl_heap_stack_gap;
60004
60005 #include <linux/aio.h>
60006
60007+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60008+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60009 extern unsigned long
60010 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60011 unsigned long, unsigned long);
60012@@ -666,6 +670,16 @@ struct signal_struct {
60013 struct tty_audit_buf *tty_audit_buf;
60014 #endif
60015
60016+#ifdef CONFIG_GRKERNSEC
60017+ u32 curr_ip;
60018+ u32 saved_ip;
60019+ u32 gr_saddr;
60020+ u32 gr_daddr;
60021+ u16 gr_sport;
60022+ u16 gr_dport;
60023+ u8 used_accept:1;
60024+#endif
60025+
60026 int oom_adj; /* OOM kill score adjustment (bit shift) */
60027 };
60028
60029@@ -723,6 +737,11 @@ struct user_struct {
60030 struct key *session_keyring; /* UID's default session keyring */
60031 #endif
60032
60033+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60034+ unsigned int banned;
60035+ unsigned long ban_expires;
60036+#endif
60037+
60038 /* Hash table maintenance information */
60039 struct hlist_node uidhash_node;
60040 uid_t uid;
60041@@ -1328,8 +1347,8 @@ struct task_struct {
60042 struct list_head thread_group;
60043
60044 struct completion *vfork_done; /* for vfork() */
60045- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60046- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60047+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60048+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60049
60050 cputime_t utime, stime, utimescaled, stimescaled;
60051 cputime_t gtime;
60052@@ -1343,16 +1362,6 @@ struct task_struct {
60053 struct task_cputime cputime_expires;
60054 struct list_head cpu_timers[3];
60055
60056-/* process credentials */
60057- const struct cred *real_cred; /* objective and real subjective task
60058- * credentials (COW) */
60059- const struct cred *cred; /* effective (overridable) subjective task
60060- * credentials (COW) */
60061- struct mutex cred_guard_mutex; /* guard against foreign influences on
60062- * credential calculations
60063- * (notably. ptrace) */
60064- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60065-
60066 char comm[TASK_COMM_LEN]; /* executable name excluding path
60067 - access with [gs]et_task_comm (which lock
60068 it with task_lock())
60069@@ -1369,6 +1378,10 @@ struct task_struct {
60070 #endif
60071 /* CPU-specific state of this task */
60072 struct thread_struct thread;
60073+/* thread_info moved to task_struct */
60074+#ifdef CONFIG_X86
60075+ struct thread_info tinfo;
60076+#endif
60077 /* filesystem information */
60078 struct fs_struct *fs;
60079 /* open file information */
60080@@ -1436,6 +1449,15 @@ struct task_struct {
60081 int hardirq_context;
60082 int softirq_context;
60083 #endif
60084+
60085+/* process credentials */
60086+ const struct cred *real_cred; /* objective and real subjective task
60087+ * credentials (COW) */
60088+ struct mutex cred_guard_mutex; /* guard against foreign influences on
60089+ * credential calculations
60090+ * (notably. ptrace) */
60091+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60092+
60093 #ifdef CONFIG_LOCKDEP
60094 # define MAX_LOCK_DEPTH 48UL
60095 u64 curr_chain_key;
60096@@ -1456,6 +1478,9 @@ struct task_struct {
60097
60098 struct backing_dev_info *backing_dev_info;
60099
60100+ const struct cred *cred; /* effective (overridable) subjective task
60101+ * credentials (COW) */
60102+
60103 struct io_context *io_context;
60104
60105 unsigned long ptrace_message;
60106@@ -1519,6 +1544,21 @@ struct task_struct {
60107 unsigned long default_timer_slack_ns;
60108
60109 struct list_head *scm_work_list;
60110+
60111+#ifdef CONFIG_GRKERNSEC
60112+ /* grsecurity */
60113+ struct dentry *gr_chroot_dentry;
60114+ struct acl_subject_label *acl;
60115+ struct acl_role_label *role;
60116+ struct file *exec_file;
60117+ u16 acl_role_id;
60118+ /* is this the task that authenticated to the special role */
60119+ u8 acl_sp_role;
60120+ u8 is_writable;
60121+ u8 brute;
60122+ u8 gr_is_chrooted;
60123+#endif
60124+
60125 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60126 /* Index of current stored adress in ret_stack */
60127 int curr_ret_stack;
60128@@ -1542,6 +1582,57 @@ struct task_struct {
60129 #endif /* CONFIG_TRACING */
60130 };
60131
60132+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60133+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60134+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60135+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60136+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60137+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60138+
60139+#ifdef CONFIG_PAX_SOFTMODE
60140+extern int pax_softmode;
60141+#endif
60142+
60143+extern int pax_check_flags(unsigned long *);
60144+
60145+/* if tsk != current then task_lock must be held on it */
60146+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60147+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60148+{
60149+ if (likely(tsk->mm))
60150+ return tsk->mm->pax_flags;
60151+ else
60152+ return 0UL;
60153+}
60154+
60155+/* if tsk != current then task_lock must be held on it */
60156+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60157+{
60158+ if (likely(tsk->mm)) {
60159+ tsk->mm->pax_flags = flags;
60160+ return 0;
60161+ }
60162+ return -EINVAL;
60163+}
60164+#endif
60165+
60166+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60167+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60168+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60169+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60170+#endif
60171+
60172+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60173+extern void pax_report_insns(void *pc, void *sp);
60174+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60175+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60176+
60177+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60178+extern void pax_track_stack(void);
60179+#else
60180+static inline void pax_track_stack(void) {}
60181+#endif
60182+
60183 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60184 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
60185
60186@@ -1740,7 +1831,7 @@ extern void thread_group_times(struct ta
60187 #define PF_DUMPCORE 0x00000200 /* dumped core */
60188 #define PF_SIGNALED 0x00000400 /* killed by a signal */
60189 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
60190-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
60191+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
60192 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
60193 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
60194 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
60195@@ -1978,7 +2069,9 @@ void yield(void);
60196 extern struct exec_domain default_exec_domain;
60197
60198 union thread_union {
60199+#ifndef CONFIG_X86
60200 struct thread_info thread_info;
60201+#endif
60202 unsigned long stack[THREAD_SIZE/sizeof(long)];
60203 };
60204
60205@@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
60206 */
60207
60208 extern struct task_struct *find_task_by_vpid(pid_t nr);
60209+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60210 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60211 struct pid_namespace *ns);
60212
60213@@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
60214 extern void exit_itimers(struct signal_struct *);
60215 extern void flush_itimer_signals(void);
60216
60217-extern NORET_TYPE void do_group_exit(int);
60218+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60219
60220 extern void daemonize(const char *, ...);
60221 extern int allow_signal(int);
60222@@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
60223
60224 #endif
60225
60226-static inline int object_is_on_stack(void *obj)
60227+static inline int object_starts_on_stack(void *obj)
60228 {
60229- void *stack = task_stack_page(current);
60230+ const void *stack = task_stack_page(current);
60231
60232 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60233 }
60234
60235+#ifdef CONFIG_PAX_USERCOPY
60236+extern int object_is_on_stack(const void *obj, unsigned long len);
60237+#endif
60238+
60239 extern void thread_info_cache_init(void);
60240
60241 #ifdef CONFIG_DEBUG_STACK_USAGE
60242diff -urNp linux-2.6.32.45/include/linux/screen_info.h linux-2.6.32.45/include/linux/screen_info.h
60243--- linux-2.6.32.45/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
60244+++ linux-2.6.32.45/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
60245@@ -42,7 +42,8 @@ struct screen_info {
60246 __u16 pages; /* 0x32 */
60247 __u16 vesa_attributes; /* 0x34 */
60248 __u32 capabilities; /* 0x36 */
60249- __u8 _reserved[6]; /* 0x3a */
60250+ __u16 vesapm_size; /* 0x3a */
60251+ __u8 _reserved[4]; /* 0x3c */
60252 } __attribute__((packed));
60253
60254 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60255diff -urNp linux-2.6.32.45/include/linux/security.h linux-2.6.32.45/include/linux/security.h
60256--- linux-2.6.32.45/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
60257+++ linux-2.6.32.45/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
60258@@ -34,6 +34,7 @@
60259 #include <linux/key.h>
60260 #include <linux/xfrm.h>
60261 #include <linux/gfp.h>
60262+#include <linux/grsecurity.h>
60263 #include <net/flow.h>
60264
60265 /* Maximum number of letters for an LSM name string */
60266diff -urNp linux-2.6.32.45/include/linux/seq_file.h linux-2.6.32.45/include/linux/seq_file.h
60267--- linux-2.6.32.45/include/linux/seq_file.h 2011-03-27 14:31:47.000000000 -0400
60268+++ linux-2.6.32.45/include/linux/seq_file.h 2011-08-23 21:22:38.000000000 -0400
60269@@ -32,6 +32,7 @@ struct seq_operations {
60270 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60271 int (*show) (struct seq_file *m, void *v);
60272 };
60273+typedef struct seq_operations __no_const seq_operations_no_const;
60274
60275 #define SEQ_SKIP 1
60276
60277diff -urNp linux-2.6.32.45/include/linux/shm.h linux-2.6.32.45/include/linux/shm.h
60278--- linux-2.6.32.45/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
60279+++ linux-2.6.32.45/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
60280@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60281 pid_t shm_cprid;
60282 pid_t shm_lprid;
60283 struct user_struct *mlock_user;
60284+#ifdef CONFIG_GRKERNSEC
60285+ time_t shm_createtime;
60286+ pid_t shm_lapid;
60287+#endif
60288 };
60289
60290 /* shm_mode upper byte flags */
60291diff -urNp linux-2.6.32.45/include/linux/skbuff.h linux-2.6.32.45/include/linux/skbuff.h
60292--- linux-2.6.32.45/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
60293+++ linux-2.6.32.45/include/linux/skbuff.h 2011-08-21 15:27:56.000000000 -0400
60294@@ -14,6 +14,7 @@
60295 #ifndef _LINUX_SKBUFF_H
60296 #define _LINUX_SKBUFF_H
60297
60298+#include <linux/const.h>
60299 #include <linux/kernel.h>
60300 #include <linux/kmemcheck.h>
60301 #include <linux/compiler.h>
60302@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_t
60303 */
60304 static inline int skb_queue_empty(const struct sk_buff_head *list)
60305 {
60306- return list->next == (struct sk_buff *)list;
60307+ return list->next == (const struct sk_buff *)list;
60308 }
60309
60310 /**
60311@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const
60312 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60313 const struct sk_buff *skb)
60314 {
60315- return (skb->next == (struct sk_buff *) list);
60316+ return (skb->next == (const struct sk_buff *) list);
60317 }
60318
60319 /**
60320@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(con
60321 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60322 const struct sk_buff *skb)
60323 {
60324- return (skb->prev == (struct sk_buff *) list);
60325+ return (skb->prev == (const struct sk_buff *) list);
60326 }
60327
60328 /**
60329@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(con
60330 * headroom, you should not reduce this.
60331 */
60332 #ifndef NET_SKB_PAD
60333-#define NET_SKB_PAD 32
60334+#define NET_SKB_PAD (_AC(32,UL))
60335 #endif
60336
60337 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60338diff -urNp linux-2.6.32.45/include/linux/slab_def.h linux-2.6.32.45/include/linux/slab_def.h
60339--- linux-2.6.32.45/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
60340+++ linux-2.6.32.45/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
60341@@ -69,10 +69,10 @@ struct kmem_cache {
60342 unsigned long node_allocs;
60343 unsigned long node_frees;
60344 unsigned long node_overflow;
60345- atomic_t allochit;
60346- atomic_t allocmiss;
60347- atomic_t freehit;
60348- atomic_t freemiss;
60349+ atomic_unchecked_t allochit;
60350+ atomic_unchecked_t allocmiss;
60351+ atomic_unchecked_t freehit;
60352+ atomic_unchecked_t freemiss;
60353
60354 /*
60355 * If debugging is enabled, then the allocator can add additional
60356diff -urNp linux-2.6.32.45/include/linux/slab.h linux-2.6.32.45/include/linux/slab.h
60357--- linux-2.6.32.45/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
60358+++ linux-2.6.32.45/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
60359@@ -11,12 +11,20 @@
60360
60361 #include <linux/gfp.h>
60362 #include <linux/types.h>
60363+#include <linux/err.h>
60364
60365 /*
60366 * Flags to pass to kmem_cache_create().
60367 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60368 */
60369 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60370+
60371+#ifdef CONFIG_PAX_USERCOPY
60372+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60373+#else
60374+#define SLAB_USERCOPY 0x00000000UL
60375+#endif
60376+
60377 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60378 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60379 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60380@@ -82,10 +90,13 @@
60381 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60382 * Both make kfree a no-op.
60383 */
60384-#define ZERO_SIZE_PTR ((void *)16)
60385+#define ZERO_SIZE_PTR \
60386+({ \
60387+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60388+ (void *)(-MAX_ERRNO-1L); \
60389+})
60390
60391-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60392- (unsigned long)ZERO_SIZE_PTR)
60393+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60394
60395 /*
60396 * struct kmem_cache related prototypes
60397@@ -138,6 +149,7 @@ void * __must_check krealloc(const void
60398 void kfree(const void *);
60399 void kzfree(const void *);
60400 size_t ksize(const void *);
60401+void check_object_size(const void *ptr, unsigned long n, bool to);
60402
60403 /*
60404 * Allocator specific definitions. These are mainly used to establish optimized
60405@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
60406
60407 void __init kmem_cache_init_late(void);
60408
60409+#define kmalloc(x, y) \
60410+({ \
60411+ void *___retval; \
60412+ intoverflow_t ___x = (intoverflow_t)x; \
60413+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
60414+ ___retval = NULL; \
60415+ else \
60416+ ___retval = kmalloc((size_t)___x, (y)); \
60417+ ___retval; \
60418+})
60419+
60420+#define kmalloc_node(x, y, z) \
60421+({ \
60422+ void *___retval; \
60423+ intoverflow_t ___x = (intoverflow_t)x; \
60424+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60425+ ___retval = NULL; \
60426+ else \
60427+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60428+ ___retval; \
60429+})
60430+
60431+#define kzalloc(x, y) \
60432+({ \
60433+ void *___retval; \
60434+ intoverflow_t ___x = (intoverflow_t)x; \
60435+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
60436+ ___retval = NULL; \
60437+ else \
60438+ ___retval = kzalloc((size_t)___x, (y)); \
60439+ ___retval; \
60440+})
60441+
60442 #endif /* _LINUX_SLAB_H */
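
The kmalloc/kzalloc wrapper macros above widen the requested size to intoverflow_t and refuse (WARN plus NULL) anything above ULONG_MAX, so a size computation that wrapped cannot silently under-allocate. A userspace sketch of the same pattern, with checked_malloc/WIDER_T as invented names and UINT_MAX standing in for the 32-bit ULONG_MAX limit so the check can actually fire in this demo:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned long long WIDER_T;     /* stand-in for intoverflow_t */

#define checked_malloc(x)                                               \
({                                                                      \
        void *___retval;                                                \
        WIDER_T ___x = (WIDER_T)(x);                                    \
        if (___x > UINT_MAX) {  /* 32-bit ULONG_MAX stand-in */         \
                fprintf(stderr, "alloc size overflow\n");               \
                ___retval = NULL;                                       \
        } else {                                                        \
                ___retval = malloc((size_t)___x);                       \
        }                                                               \
        ___retval;                                                      \
})

int main(void)
{
        /* A count * element-size product that no longer fits in 32 bits:
         * truncated to size_t on a 32-bit target it would under-allocate,
         * so the widened check rejects it outright. */
        WIDER_T n = (WIDER_T)65536 * 65537;
        char *p = checked_malloc(n);
        printf("p = %p\n", (void *)p);          /* NULL: rejected */

        char *q = checked_malloc(32);           /* normal path */
        if (q) { strcpy(q, "ok"); puts(q); free(q); }
        return 0;
}
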
60443diff -urNp linux-2.6.32.45/include/linux/slub_def.h linux-2.6.32.45/include/linux/slub_def.h
60444--- linux-2.6.32.45/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
60445+++ linux-2.6.32.45/include/linux/slub_def.h 2011-08-05 20:33:55.000000000 -0400
60446@@ -86,7 +86,7 @@ struct kmem_cache {
60447 struct kmem_cache_order_objects max;
60448 struct kmem_cache_order_objects min;
60449 gfp_t allocflags; /* gfp flags to use on each alloc */
60450- int refcount; /* Refcount for slab cache destroy */
60451+ atomic_t refcount; /* Refcount for slab cache destroy */
60452 void (*ctor)(void *);
60453 int inuse; /* Offset to metadata */
60454 int align; /* Alignment */
60455@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache
60456 #endif
60457
60458 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60459-void *__kmalloc(size_t size, gfp_t flags);
60460+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60461
60462 #ifdef CONFIG_KMEMTRACE
60463 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
60464diff -urNp linux-2.6.32.45/include/linux/sonet.h linux-2.6.32.45/include/linux/sonet.h
60465--- linux-2.6.32.45/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
60466+++ linux-2.6.32.45/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
60467@@ -61,7 +61,7 @@ struct sonet_stats {
60468 #include <asm/atomic.h>
60469
60470 struct k_sonet_stats {
60471-#define __HANDLE_ITEM(i) atomic_t i
60472+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60473 __SONET_ITEMS
60474 #undef __HANDLE_ITEM
60475 };
60476diff -urNp linux-2.6.32.45/include/linux/sunrpc/cache.h linux-2.6.32.45/include/linux/sunrpc/cache.h
60477--- linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-03-27 14:31:47.000000000 -0400
60478+++ linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-08-05 20:33:55.000000000 -0400
60479@@ -125,7 +125,7 @@ struct cache_detail {
60480 */
60481 struct cache_req {
60482 struct cache_deferred_req *(*defer)(struct cache_req *req);
60483-};
60484+} __no_const;
60485 /* this must be embedded in a deferred_request that is being
60486 * delayed awaiting cache-fill
60487 */
60488diff -urNp linux-2.6.32.45/include/linux/sunrpc/clnt.h linux-2.6.32.45/include/linux/sunrpc/clnt.h
60489--- linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
60490+++ linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
60491@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
60492 {
60493 switch (sap->sa_family) {
60494 case AF_INET:
60495- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60496+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60497 case AF_INET6:
60498- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60499+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60500 }
60501 return 0;
60502 }
60503@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
60504 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60505 const struct sockaddr *src)
60506 {
60507- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60508+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60509 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60510
60511 dsin->sin_family = ssin->sin_family;
60512@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
60513 if (sa->sa_family != AF_INET6)
60514 return 0;
60515
60516- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60517+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60518 }
60519
60520 #endif /* __KERNEL__ */
60521diff -urNp linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h
60522--- linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
60523+++ linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
60524@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60525 extern unsigned int svcrdma_max_requests;
60526 extern unsigned int svcrdma_max_req_size;
60527
60528-extern atomic_t rdma_stat_recv;
60529-extern atomic_t rdma_stat_read;
60530-extern atomic_t rdma_stat_write;
60531-extern atomic_t rdma_stat_sq_starve;
60532-extern atomic_t rdma_stat_rq_starve;
60533-extern atomic_t rdma_stat_rq_poll;
60534-extern atomic_t rdma_stat_rq_prod;
60535-extern atomic_t rdma_stat_sq_poll;
60536-extern atomic_t rdma_stat_sq_prod;
60537+extern atomic_unchecked_t rdma_stat_recv;
60538+extern atomic_unchecked_t rdma_stat_read;
60539+extern atomic_unchecked_t rdma_stat_write;
60540+extern atomic_unchecked_t rdma_stat_sq_starve;
60541+extern atomic_unchecked_t rdma_stat_rq_starve;
60542+extern atomic_unchecked_t rdma_stat_rq_poll;
60543+extern atomic_unchecked_t rdma_stat_rq_prod;
60544+extern atomic_unchecked_t rdma_stat_sq_poll;
60545+extern atomic_unchecked_t rdma_stat_sq_prod;
60546
60547 #define RPCRDMA_VERSION 1
60548
60549diff -urNp linux-2.6.32.45/include/linux/suspend.h linux-2.6.32.45/include/linux/suspend.h
60550--- linux-2.6.32.45/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
60551+++ linux-2.6.32.45/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
60552@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
60553 * which require special recovery actions in that situation.
60554 */
60555 struct platform_suspend_ops {
60556- int (*valid)(suspend_state_t state);
60557- int (*begin)(suspend_state_t state);
60558- int (*prepare)(void);
60559- int (*prepare_late)(void);
60560- int (*enter)(suspend_state_t state);
60561- void (*wake)(void);
60562- void (*finish)(void);
60563- void (*end)(void);
60564- void (*recover)(void);
60565+ int (* const valid)(suspend_state_t state);
60566+ int (* const begin)(suspend_state_t state);
60567+ int (* const prepare)(void);
60568+ int (* const prepare_late)(void);
60569+ int (* const enter)(suspend_state_t state);
60570+ void (* const wake)(void);
60571+ void (* const finish)(void);
60572+ void (* const end)(void);
60573+ void (* const recover)(void);
60574 };
60575
60576 #ifdef CONFIG_SUSPEND
60577@@ -120,7 +120,7 @@ struct platform_suspend_ops {
60578 * suspend_set_ops - set platform dependent suspend operations
60579 * @ops: The new suspend operations to set.
60580 */
60581-extern void suspend_set_ops(struct platform_suspend_ops *ops);
60582+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
60583 extern int suspend_valid_only_mem(suspend_state_t state);
60584
60585 /**
60586@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
60587 #else /* !CONFIG_SUSPEND */
60588 #define suspend_valid_only_mem NULL
60589
60590-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
60591+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
60592 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
60593 #endif /* !CONFIG_SUSPEND */
60594
60595@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
60596 * platforms which require special recovery actions in that situation.
60597 */
60598 struct platform_hibernation_ops {
60599- int (*begin)(void);
60600- void (*end)(void);
60601- int (*pre_snapshot)(void);
60602- void (*finish)(void);
60603- int (*prepare)(void);
60604- int (*enter)(void);
60605- void (*leave)(void);
60606- int (*pre_restore)(void);
60607- void (*restore_cleanup)(void);
60608- void (*recover)(void);
60609+ int (* const begin)(void);
60610+ void (* const end)(void);
60611+ int (* const pre_snapshot)(void);
60612+ void (* const finish)(void);
60613+ int (* const prepare)(void);
60614+ int (* const enter)(void);
60615+ void (* const leave)(void);
60616+ int (* const pre_restore)(void);
60617+ void (* const restore_cleanup)(void);
60618+ void (* const recover)(void);
60619 };
60620
60621 #ifdef CONFIG_HIBERNATION
60622@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
60623 extern void swsusp_unset_page_free(struct page *);
60624 extern unsigned long get_safe_page(gfp_t gfp_mask);
60625
60626-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
60627+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
60628 extern int hibernate(void);
60629 extern bool system_entering_hibernation(void);
60630 #else /* CONFIG_HIBERNATION */
60631@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
60632 static inline void swsusp_set_page_free(struct page *p) {}
60633 static inline void swsusp_unset_page_free(struct page *p) {}
60634
60635-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
60636+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
60637 static inline int hibernate(void) { return -ENOSYS; }
60638 static inline bool system_entering_hibernation(void) { return false; }
60639 #endif /* CONFIG_HIBERNATION */
60640diff -urNp linux-2.6.32.45/include/linux/sysctl.h linux-2.6.32.45/include/linux/sysctl.h
60641--- linux-2.6.32.45/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
60642+++ linux-2.6.32.45/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
60643@@ -164,7 +164,11 @@ enum
60644 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60645 };
60646
60647-
60648+#ifdef CONFIG_PAX_SOFTMODE
60649+enum {
60650+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60651+};
60652+#endif
60653
60654 /* CTL_VM names: */
60655 enum
60656@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
60657
60658 extern int proc_dostring(struct ctl_table *, int,
60659 void __user *, size_t *, loff_t *);
60660+extern int proc_dostring_modpriv(struct ctl_table *, int,
60661+ void __user *, size_t *, loff_t *);
60662 extern int proc_dointvec(struct ctl_table *, int,
60663 void __user *, size_t *, loff_t *);
60664 extern int proc_dointvec_minmax(struct ctl_table *, int,
60665@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
60666
60667 extern ctl_handler sysctl_data;
60668 extern ctl_handler sysctl_string;
60669+extern ctl_handler sysctl_string_modpriv;
60670 extern ctl_handler sysctl_intvec;
60671 extern ctl_handler sysctl_jiffies;
60672 extern ctl_handler sysctl_ms_jiffies;
60673diff -urNp linux-2.6.32.45/include/linux/sysfs.h linux-2.6.32.45/include/linux/sysfs.h
60674--- linux-2.6.32.45/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
60675+++ linux-2.6.32.45/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
60676@@ -75,8 +75,8 @@ struct bin_attribute {
60677 };
60678
60679 struct sysfs_ops {
60680- ssize_t (*show)(struct kobject *, struct attribute *,char *);
60681- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
60682+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
60683+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
60684 };
60685
60686 struct sysfs_dirent;
60687diff -urNp linux-2.6.32.45/include/linux/thread_info.h linux-2.6.32.45/include/linux/thread_info.h
60688--- linux-2.6.32.45/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
60689+++ linux-2.6.32.45/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
60690@@ -23,7 +23,7 @@ struct restart_block {
60691 };
60692 /* For futex_wait and futex_wait_requeue_pi */
60693 struct {
60694- u32 *uaddr;
60695+ u32 __user *uaddr;
60696 u32 val;
60697 u32 flags;
60698 u32 bitset;
60699diff -urNp linux-2.6.32.45/include/linux/tty.h linux-2.6.32.45/include/linux/tty.h
60700--- linux-2.6.32.45/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
60701+++ linux-2.6.32.45/include/linux/tty.h 2011-08-05 20:33:55.000000000 -0400
60702@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
60703 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
60704 extern void tty_ldisc_enable(struct tty_struct *tty);
60705
60706-
60707 /* n_tty.c */
60708 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
60709
60710diff -urNp linux-2.6.32.45/include/linux/tty_ldisc.h linux-2.6.32.45/include/linux/tty_ldisc.h
60711--- linux-2.6.32.45/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
60712+++ linux-2.6.32.45/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
60713@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
60714
60715 struct module *owner;
60716
60717- int refcount;
60718+ atomic_t refcount;
60719 };
60720
60721 struct tty_ldisc {
60722diff -urNp linux-2.6.32.45/include/linux/types.h linux-2.6.32.45/include/linux/types.h
60723--- linux-2.6.32.45/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
60724+++ linux-2.6.32.45/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
60725@@ -191,10 +191,26 @@ typedef struct {
60726 volatile int counter;
60727 } atomic_t;
60728
60729+#ifdef CONFIG_PAX_REFCOUNT
60730+typedef struct {
60731+ volatile int counter;
60732+} atomic_unchecked_t;
60733+#else
60734+typedef atomic_t atomic_unchecked_t;
60735+#endif
60736+
60737 #ifdef CONFIG_64BIT
60738 typedef struct {
60739 volatile long counter;
60740 } atomic64_t;
60741+
60742+#ifdef CONFIG_PAX_REFCOUNT
60743+typedef struct {
60744+ volatile long counter;
60745+} atomic64_unchecked_t;
60746+#else
60747+typedef atomic64_t atomic64_unchecked_t;
60748+#endif
60749 #endif
60750
60751 struct ustat {
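
For context (an illustrative aside, not part of the patch): under CONFIG_PAX_REFCOUNT the regular atomic_t operations are instrumented to detect and stop overflows, so counters that are allowed to wrap (statistics, generation counters and the like) are switched to atomic_unchecked_t throughout this patch. A rough userspace analogue of the distinction, with hypothetical helpers:

#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;           /* overflow-checked under PaX */
typedef struct { volatile int counter; } atomic_unchecked_t; /* wraparound tolerated */

static void atomic_inc(atomic_t *v)
{
	if (v->counter == INT_MAX) {          /* stand-in for the PaX overflow trap */
		fprintf(stderr, "refcount overflow detected\n");
		return;
	}
	v->counter++;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	v->counter++;                         /* e.g. a statistics counter that may wrap */
}
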
60752diff -urNp linux-2.6.32.45/include/linux/uaccess.h linux-2.6.32.45/include/linux/uaccess.h
60753--- linux-2.6.32.45/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
60754+++ linux-2.6.32.45/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
60755@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60756 long ret; \
60757 mm_segment_t old_fs = get_fs(); \
60758 \
60759- set_fs(KERNEL_DS); \
60760 pagefault_disable(); \
60761+ set_fs(KERNEL_DS); \
60762 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60763- pagefault_enable(); \
60764 set_fs(old_fs); \
60765+ pagefault_enable(); \
60766 ret; \
60767 })
60768
60769@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
60770 * Safely read from address @src to the buffer at @dst. If a kernel fault
60771 * happens, handle that and return -EFAULT.
60772 */
60773-extern long probe_kernel_read(void *dst, void *src, size_t size);
60774+extern long probe_kernel_read(void *dst, const void *src, size_t size);
60775
60776 /*
60777 * probe_kernel_write(): safely attempt to write to a location
60778@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
60779 * Safely write to address @dst from the buffer at @src. If a kernel fault
60780 * happens, handle that and return -EFAULT.
60781 */
60782-extern long probe_kernel_write(void *dst, void *src, size_t size);
60783+extern long probe_kernel_write(void *dst, const void *src, size_t size);
60784
60785 #endif /* __LINUX_UACCESS_H__ */
60786diff -urNp linux-2.6.32.45/include/linux/unaligned/access_ok.h linux-2.6.32.45/include/linux/unaligned/access_ok.h
60787--- linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
60788+++ linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
60789@@ -6,32 +6,32 @@
60790
60791 static inline u16 get_unaligned_le16(const void *p)
60792 {
60793- return le16_to_cpup((__le16 *)p);
60794+ return le16_to_cpup((const __le16 *)p);
60795 }
60796
60797 static inline u32 get_unaligned_le32(const void *p)
60798 {
60799- return le32_to_cpup((__le32 *)p);
60800+ return le32_to_cpup((const __le32 *)p);
60801 }
60802
60803 static inline u64 get_unaligned_le64(const void *p)
60804 {
60805- return le64_to_cpup((__le64 *)p);
60806+ return le64_to_cpup((const __le64 *)p);
60807 }
60808
60809 static inline u16 get_unaligned_be16(const void *p)
60810 {
60811- return be16_to_cpup((__be16 *)p);
60812+ return be16_to_cpup((const __be16 *)p);
60813 }
60814
60815 static inline u32 get_unaligned_be32(const void *p)
60816 {
60817- return be32_to_cpup((__be32 *)p);
60818+ return be32_to_cpup((const __be32 *)p);
60819 }
60820
60821 static inline u64 get_unaligned_be64(const void *p)
60822 {
60823- return be64_to_cpup((__be64 *)p);
60824+ return be64_to_cpup((const __be64 *)p);
60825 }
60826
60827 static inline void put_unaligned_le16(u16 val, void *p)
60828diff -urNp linux-2.6.32.45/include/linux/vmalloc.h linux-2.6.32.45/include/linux/vmalloc.h
60829--- linux-2.6.32.45/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
60830+++ linux-2.6.32.45/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
60831@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
60832 #define VM_MAP 0x00000004 /* vmap()ed pages */
60833 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60834 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60835+
60836+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60837+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
60838+#endif
60839+
60840 /* bits [20..32] reserved for arch specific ioremap internals */
60841
60842 /*
60843@@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
60844
60845 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
60846
60847+#define vmalloc(x) \
60848+({ \
60849+ void *___retval; \
60850+ intoverflow_t ___x = (intoverflow_t)x; \
60851+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60852+ ___retval = NULL; \
60853+ else \
60854+ ___retval = vmalloc((unsigned long)___x); \
60855+ ___retval; \
60856+})
60857+
60858+#define __vmalloc(x, y, z) \
60859+({ \
60860+ void *___retval; \
60861+ intoverflow_t ___x = (intoverflow_t)x; \
60862+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60863+ ___retval = NULL; \
60864+ else \
60865+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60866+ ___retval; \
60867+})
60868+
60869+#define vmalloc_user(x) \
60870+({ \
60871+ void *___retval; \
60872+ intoverflow_t ___x = (intoverflow_t)x; \
60873+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60874+ ___retval = NULL; \
60875+ else \
60876+ ___retval = vmalloc_user((unsigned long)___x); \
60877+ ___retval; \
60878+})
60879+
60880+#define vmalloc_exec(x) \
60881+({ \
60882+ void *___retval; \
60883+ intoverflow_t ___x = (intoverflow_t)x; \
60884+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60885+ ___retval = NULL; \
60886+ else \
60887+ ___retval = vmalloc_exec((unsigned long)___x); \
60888+ ___retval; \
60889+})
60890+
60891+#define vmalloc_node(x, y) \
60892+({ \
60893+ void *___retval; \
60894+ intoverflow_t ___x = (intoverflow_t)x; \
60895+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
60896+ ___retval = NULL; \
60897+ else \
60898+ ___retval = vmalloc_node((unsigned long)___x, (y));\
60899+ ___retval; \
60900+})
60901+
60902+#define vmalloc_32(x) \
60903+({ \
60904+ void *___retval; \
60905+ intoverflow_t ___x = (intoverflow_t)x; \
60906+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
60907+ ___retval = NULL; \
60908+ else \
60909+ ___retval = vmalloc_32((unsigned long)___x); \
60910+ ___retval; \
60911+})
60912+
60913+#define vmalloc_32_user(x) \
60914+({ \
60915+ void *___retval; \
60916+ intoverflow_t ___x = (intoverflow_t)x; \
60917+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
60918+ ___retval = NULL; \
60919+ else \
60920+ ___retval = vmalloc_32_user((unsigned long)___x);\
60921+ ___retval; \
60922+})
60923+
60924 #endif /* _LINUX_VMALLOC_H */
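
The wrappers above evaluate the requested size in a wider type (intoverflow_t, introduced elsewhere in the patch) inside a GCC statement expression and fail the allocation with a WARN instead of letting a truncated size reach the real allocator. The same shape in a self-contained userspace sketch (checked_array_alloc is a hypothetical name, not the kernel macro):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define checked_array_alloc(n, sz)                                    \
({                                                                    \
	void *___retval;                                              \
	if ((sz) != 0 && (n) > SIZE_MAX / (sz)) {                     \
		fprintf(stderr, "checked_array_alloc: size overflow\n"); \
		___retval = NULL;                                     \
	} else                                                        \
		___retval = malloc((size_t)(n) * (sz));              \
	___retval;                                                    \
})

Used as `buf = checked_array_alloc(nmemb, size);` it returns NULL when the multiplication would wrap, mirroring how the patched vmalloc() returns NULL when its size check trips.
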
60925diff -urNp linux-2.6.32.45/include/linux/vmstat.h linux-2.6.32.45/include/linux/vmstat.h
60926--- linux-2.6.32.45/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
60927+++ linux-2.6.32.45/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
60928@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
60929 /*
60930 * Zone based page accounting with per cpu differentials.
60931 */
60932-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60933+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60934
60935 static inline void zone_page_state_add(long x, struct zone *zone,
60936 enum zone_stat_item item)
60937 {
60938- atomic_long_add(x, &zone->vm_stat[item]);
60939- atomic_long_add(x, &vm_stat[item]);
60940+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
60941+ atomic_long_add_unchecked(x, &vm_stat[item]);
60942 }
60943
60944 static inline unsigned long global_page_state(enum zone_stat_item item)
60945 {
60946- long x = atomic_long_read(&vm_stat[item]);
60947+ long x = atomic_long_read_unchecked(&vm_stat[item]);
60948 #ifdef CONFIG_SMP
60949 if (x < 0)
60950 x = 0;
60951@@ -158,7 +158,7 @@ static inline unsigned long global_page_
60952 static inline unsigned long zone_page_state(struct zone *zone,
60953 enum zone_stat_item item)
60954 {
60955- long x = atomic_long_read(&zone->vm_stat[item]);
60956+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60957 #ifdef CONFIG_SMP
60958 if (x < 0)
60959 x = 0;
60960@@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
60961 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
60962 enum zone_stat_item item)
60963 {
60964- long x = atomic_long_read(&zone->vm_stat[item]);
60965+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60966
60967 #ifdef CONFIG_SMP
60968 int cpu;
60969@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
60970
60971 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
60972 {
60973- atomic_long_inc(&zone->vm_stat[item]);
60974- atomic_long_inc(&vm_stat[item]);
60975+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
60976+ atomic_long_inc_unchecked(&vm_stat[item]);
60977 }
60978
60979 static inline void __inc_zone_page_state(struct page *page,
60980@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
60981
60982 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
60983 {
60984- atomic_long_dec(&zone->vm_stat[item]);
60985- atomic_long_dec(&vm_stat[item]);
60986+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
60987+ atomic_long_dec_unchecked(&vm_stat[item]);
60988 }
60989
60990 static inline void __dec_zone_page_state(struct page *page,
60991diff -urNp linux-2.6.32.45/include/media/saa7146_vv.h linux-2.6.32.45/include/media/saa7146_vv.h
60992--- linux-2.6.32.45/include/media/saa7146_vv.h 2011-03-27 14:31:47.000000000 -0400
60993+++ linux-2.6.32.45/include/media/saa7146_vv.h 2011-08-23 21:22:38.000000000 -0400
60994@@ -167,7 +167,7 @@ struct saa7146_ext_vv
60995 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
60996
60997 /* the extension can override this */
60998- struct v4l2_ioctl_ops ops;
60999+ v4l2_ioctl_ops_no_const ops;
61000 /* pointer to the saa7146 core ops */
61001 const struct v4l2_ioctl_ops *core_ops;
61002
61003diff -urNp linux-2.6.32.45/include/media/v4l2-dev.h linux-2.6.32.45/include/media/v4l2-dev.h
61004--- linux-2.6.32.45/include/media/v4l2-dev.h 2011-03-27 14:31:47.000000000 -0400
61005+++ linux-2.6.32.45/include/media/v4l2-dev.h 2011-08-05 20:33:55.000000000 -0400
61006@@ -34,7 +34,7 @@ struct v4l2_device;
61007 #define V4L2_FL_UNREGISTERED (0)
61008
61009 struct v4l2_file_operations {
61010- struct module *owner;
61011+ struct module * const owner;
61012 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61013 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61014 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61015diff -urNp linux-2.6.32.45/include/media/v4l2-device.h linux-2.6.32.45/include/media/v4l2-device.h
61016--- linux-2.6.32.45/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
61017+++ linux-2.6.32.45/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
61018@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
61019 this function returns 0. If the name ends with a digit (e.g. cx18),
61020 then the name will be set to cx18-0 since cx180 looks really odd. */
61021 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
61022- atomic_t *instance);
61023+ atomic_unchecked_t *instance);
61024
61025 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
61026 Since the parent disappears this ensures that v4l2_dev doesn't have an
61027diff -urNp linux-2.6.32.45/include/media/v4l2-ioctl.h linux-2.6.32.45/include/media/v4l2-ioctl.h
61028--- linux-2.6.32.45/include/media/v4l2-ioctl.h 2011-03-27 14:31:47.000000000 -0400
61029+++ linux-2.6.32.45/include/media/v4l2-ioctl.h 2011-08-23 21:22:38.000000000 -0400
61030@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
61031 long (*vidioc_default) (struct file *file, void *fh,
61032 int cmd, void *arg);
61033 };
61034+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61035
61036
61037 /* v4l debugging and diagnostics */
61038diff -urNp linux-2.6.32.45/include/net/flow.h linux-2.6.32.45/include/net/flow.h
61039--- linux-2.6.32.45/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
61040+++ linux-2.6.32.45/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
61041@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
61042 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
61043 u8 dir, flow_resolve_t resolver);
61044 extern void flow_cache_flush(void);
61045-extern atomic_t flow_cache_genid;
61046+extern atomic_unchecked_t flow_cache_genid;
61047
61048 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
61049 {
61050diff -urNp linux-2.6.32.45/include/net/inetpeer.h linux-2.6.32.45/include/net/inetpeer.h
61051--- linux-2.6.32.45/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
61052+++ linux-2.6.32.45/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
61053@@ -24,7 +24,7 @@ struct inet_peer
61054 __u32 dtime; /* the time of last use of not
61055 * referenced entries */
61056 atomic_t refcnt;
61057- atomic_t rid; /* Frag reception counter */
61058+ atomic_unchecked_t rid; /* Frag reception counter */
61059 __u32 tcp_ts;
61060 unsigned long tcp_ts_stamp;
61061 };
61062diff -urNp linux-2.6.32.45/include/net/ip_vs.h linux-2.6.32.45/include/net/ip_vs.h
61063--- linux-2.6.32.45/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
61064+++ linux-2.6.32.45/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
61065@@ -365,7 +365,7 @@ struct ip_vs_conn {
61066 struct ip_vs_conn *control; /* Master control connection */
61067 atomic_t n_control; /* Number of controlled ones */
61068 struct ip_vs_dest *dest; /* real server */
61069- atomic_t in_pkts; /* incoming packet counter */
61070+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61071
61072 /* packet transmitter for different forwarding methods. If it
61073 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61074@@ -466,7 +466,7 @@ struct ip_vs_dest {
61075 union nf_inet_addr addr; /* IP address of the server */
61076 __be16 port; /* port number of the server */
61077 volatile unsigned flags; /* dest status flags */
61078- atomic_t conn_flags; /* flags to copy to conn */
61079+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61080 atomic_t weight; /* server weight */
61081
61082 atomic_t refcnt; /* reference counter */
61083diff -urNp linux-2.6.32.45/include/net/irda/ircomm_core.h linux-2.6.32.45/include/net/irda/ircomm_core.h
61084--- linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-03-27 14:31:47.000000000 -0400
61085+++ linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-08-05 20:33:55.000000000 -0400
61086@@ -51,7 +51,7 @@ typedef struct {
61087 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61088 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61089 struct ircomm_info *);
61090-} call_t;
61091+} __no_const call_t;
61092
61093 struct ircomm_cb {
61094 irda_queue_t queue;
61095diff -urNp linux-2.6.32.45/include/net/irda/ircomm_tty.h linux-2.6.32.45/include/net/irda/ircomm_tty.h
61096--- linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
61097+++ linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
61098@@ -35,6 +35,7 @@
61099 #include <linux/termios.h>
61100 #include <linux/timer.h>
61101 #include <linux/tty.h> /* struct tty_struct */
61102+#include <asm/local.h>
61103
61104 #include <net/irda/irias_object.h>
61105 #include <net/irda/ircomm_core.h>
61106@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61107 unsigned short close_delay;
61108 unsigned short closing_wait; /* time to wait before closing */
61109
61110- int open_count;
61111- int blocked_open; /* # of blocked opens */
61112+ local_t open_count;
61113+ local_t blocked_open; /* # of blocked opens */
61114
61115 /* Protect concurent access to :
61116 * o self->open_count
61117diff -urNp linux-2.6.32.45/include/net/iucv/af_iucv.h linux-2.6.32.45/include/net/iucv/af_iucv.h
61118--- linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
61119+++ linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
61120@@ -87,7 +87,7 @@ struct iucv_sock {
61121 struct iucv_sock_list {
61122 struct hlist_head head;
61123 rwlock_t lock;
61124- atomic_t autobind_name;
61125+ atomic_unchecked_t autobind_name;
61126 };
61127
61128 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61129diff -urNp linux-2.6.32.45/include/net/lapb.h linux-2.6.32.45/include/net/lapb.h
61130--- linux-2.6.32.45/include/net/lapb.h 2011-03-27 14:31:47.000000000 -0400
61131+++ linux-2.6.32.45/include/net/lapb.h 2011-08-05 20:33:55.000000000 -0400
61132@@ -95,7 +95,7 @@ struct lapb_cb {
61133 struct sk_buff_head write_queue;
61134 struct sk_buff_head ack_queue;
61135 unsigned char window;
61136- struct lapb_register_struct callbacks;
61137+ struct lapb_register_struct *callbacks;
61138
61139 /* FRMR control information */
61140 struct lapb_frame frmr_data;
61141diff -urNp linux-2.6.32.45/include/net/neighbour.h linux-2.6.32.45/include/net/neighbour.h
61142--- linux-2.6.32.45/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
61143+++ linux-2.6.32.45/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
61144@@ -125,12 +125,12 @@ struct neighbour
61145 struct neigh_ops
61146 {
61147 int family;
61148- void (*solicit)(struct neighbour *, struct sk_buff*);
61149- void (*error_report)(struct neighbour *, struct sk_buff*);
61150- int (*output)(struct sk_buff*);
61151- int (*connected_output)(struct sk_buff*);
61152- int (*hh_output)(struct sk_buff*);
61153- int (*queue_xmit)(struct sk_buff*);
61154+ void (* const solicit)(struct neighbour *, struct sk_buff*);
61155+ void (* const error_report)(struct neighbour *, struct sk_buff*);
61156+ int (* const output)(struct sk_buff*);
61157+ int (* const connected_output)(struct sk_buff*);
61158+ int (* const hh_output)(struct sk_buff*);
61159+ int (* const queue_xmit)(struct sk_buff*);
61160 };
61161
61162 struct pneigh_entry
61163diff -urNp linux-2.6.32.45/include/net/netlink.h linux-2.6.32.45/include/net/netlink.h
61164--- linux-2.6.32.45/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
61165+++ linux-2.6.32.45/include/net/netlink.h 2011-08-21 18:08:11.000000000 -0400
61166@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct
61167 {
61168 return (remaining >= (int) sizeof(struct nlmsghdr) &&
61169 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
61170- nlh->nlmsg_len <= remaining);
61171+ nlh->nlmsg_len <= (unsigned int)remaining);
61172 }
61173
61174 /**
61175@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
61176 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61177 {
61178 if (mark)
61179- skb_trim(skb, (unsigned char *) mark - skb->data);
61180+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61181 }
61182
61183 /**
61184diff -urNp linux-2.6.32.45/include/net/netns/ipv4.h linux-2.6.32.45/include/net/netns/ipv4.h
61185--- linux-2.6.32.45/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
61186+++ linux-2.6.32.45/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
61187@@ -54,7 +54,7 @@ struct netns_ipv4 {
61188 int current_rt_cache_rebuild_count;
61189
61190 struct timer_list rt_secret_timer;
61191- atomic_t rt_genid;
61192+ atomic_unchecked_t rt_genid;
61193
61194 #ifdef CONFIG_IP_MROUTE
61195 struct sock *mroute_sk;
61196diff -urNp linux-2.6.32.45/include/net/sctp/sctp.h linux-2.6.32.45/include/net/sctp/sctp.h
61197--- linux-2.6.32.45/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
61198+++ linux-2.6.32.45/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
61199@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
61200
61201 #else /* SCTP_DEBUG */
61202
61203-#define SCTP_DEBUG_PRINTK(whatever...)
61204-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61205+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61206+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61207 #define SCTP_ENABLE_DEBUG
61208 #define SCTP_DISABLE_DEBUG
61209 #define SCTP_ASSERT(expr, str, func)
61210diff -urNp linux-2.6.32.45/include/net/secure_seq.h linux-2.6.32.45/include/net/secure_seq.h
61211--- linux-2.6.32.45/include/net/secure_seq.h 2011-08-16 20:37:25.000000000 -0400
61212+++ linux-2.6.32.45/include/net/secure_seq.h 2011-08-07 19:48:09.000000000 -0400
61213@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
61214 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
61215 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
61216 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
61217- __be16 dport);
61218+ __be16 dport);
61219 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
61220 __be16 sport, __be16 dport);
61221 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61222- __be16 sport, __be16 dport);
61223+ __be16 sport, __be16 dport);
61224 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
61225- __be16 sport, __be16 dport);
61226+ __be16 sport, __be16 dport);
61227 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61228- __be16 sport, __be16 dport);
61229+ __be16 sport, __be16 dport);
61230
61231 #endif /* _NET_SECURE_SEQ */
61232diff -urNp linux-2.6.32.45/include/net/sock.h linux-2.6.32.45/include/net/sock.h
61233--- linux-2.6.32.45/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
61234+++ linux-2.6.32.45/include/net/sock.h 2011-08-21 17:24:37.000000000 -0400
61235@@ -272,7 +272,7 @@ struct sock {
61236 rwlock_t sk_callback_lock;
61237 int sk_err,
61238 sk_err_soft;
61239- atomic_t sk_drops;
61240+ atomic_unchecked_t sk_drops;
61241 unsigned short sk_ack_backlog;
61242 unsigned short sk_max_ack_backlog;
61243 __u32 sk_priority;
61244@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_relea
61245 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
61246 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
61247 #else
61248-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
61249+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
61250 int inc)
61251 {
61252 }
61253diff -urNp linux-2.6.32.45/include/net/tcp.h linux-2.6.32.45/include/net/tcp.h
61254--- linux-2.6.32.45/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
61255+++ linux-2.6.32.45/include/net/tcp.h 2011-08-23 21:29:10.000000000 -0400
61256@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
61257 struct tcp_seq_afinfo {
61258 char *name;
61259 sa_family_t family;
61260- struct file_operations seq_fops;
61261- struct seq_operations seq_ops;
61262+ file_operations_no_const seq_fops;
61263+ seq_operations_no_const seq_ops;
61264 };
61265
61266 struct tcp_iter_state {
61267diff -urNp linux-2.6.32.45/include/net/udp.h linux-2.6.32.45/include/net/udp.h
61268--- linux-2.6.32.45/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
61269+++ linux-2.6.32.45/include/net/udp.h 2011-08-23 21:29:34.000000000 -0400
61270@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
61271 char *name;
61272 sa_family_t family;
61273 struct udp_table *udp_table;
61274- struct file_operations seq_fops;
61275- struct seq_operations seq_ops;
61276+ file_operations_no_const seq_fops;
61277+ seq_operations_no_const seq_ops;
61278 };
61279
61280 struct udp_iter_state {
61281diff -urNp linux-2.6.32.45/include/rdma/iw_cm.h linux-2.6.32.45/include/rdma/iw_cm.h
61282--- linux-2.6.32.45/include/rdma/iw_cm.h 2011-03-27 14:31:47.000000000 -0400
61283+++ linux-2.6.32.45/include/rdma/iw_cm.h 2011-08-05 20:33:55.000000000 -0400
61284@@ -129,7 +129,7 @@ struct iw_cm_verbs {
61285 int backlog);
61286
61287 int (*destroy_listen)(struct iw_cm_id *cm_id);
61288-};
61289+} __no_const;
61290
61291 /**
61292 * iw_create_cm_id - Create an IW CM identifier.
61293diff -urNp linux-2.6.32.45/include/scsi/libfc.h linux-2.6.32.45/include/scsi/libfc.h
61294--- linux-2.6.32.45/include/scsi/libfc.h 2011-03-27 14:31:47.000000000 -0400
61295+++ linux-2.6.32.45/include/scsi/libfc.h 2011-08-23 21:22:38.000000000 -0400
61296@@ -675,6 +675,7 @@ struct libfc_function_template {
61297 */
61298 void (*disc_stop_final) (struct fc_lport *);
61299 };
61300+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61301
61302 /* information used by the discovery layer */
61303 struct fc_disc {
61304@@ -707,7 +708,7 @@ struct fc_lport {
61305 struct fc_disc disc;
61306
61307 /* Operational Information */
61308- struct libfc_function_template tt;
61309+ libfc_function_template_no_const tt;
61310 u8 link_up;
61311 u8 qfull;
61312 enum fc_lport_state state;
61313diff -urNp linux-2.6.32.45/include/scsi/scsi_device.h linux-2.6.32.45/include/scsi/scsi_device.h
61314--- linux-2.6.32.45/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
61315+++ linux-2.6.32.45/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
61316@@ -156,9 +156,9 @@ struct scsi_device {
61317 unsigned int max_device_blocked; /* what device_blocked counts down from */
61318 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61319
61320- atomic_t iorequest_cnt;
61321- atomic_t iodone_cnt;
61322- atomic_t ioerr_cnt;
61323+ atomic_unchecked_t iorequest_cnt;
61324+ atomic_unchecked_t iodone_cnt;
61325+ atomic_unchecked_t ioerr_cnt;
61326
61327 struct device sdev_gendev,
61328 sdev_dev;
61329diff -urNp linux-2.6.32.45/include/scsi/scsi_transport_fc.h linux-2.6.32.45/include/scsi/scsi_transport_fc.h
61330--- linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-03-27 14:31:47.000000000 -0400
61331+++ linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-08-05 20:33:55.000000000 -0400
61332@@ -663,9 +663,9 @@ struct fc_function_template {
61333 int (*bsg_timeout)(struct fc_bsg_job *);
61334
61335 /* allocation lengths for host-specific data */
61336- u32 dd_fcrport_size;
61337- u32 dd_fcvport_size;
61338- u32 dd_bsg_size;
61339+ const u32 dd_fcrport_size;
61340+ const u32 dd_fcvport_size;
61341+ const u32 dd_bsg_size;
61342
61343 /*
61344 * The driver sets these to tell the transport class it
61345@@ -675,39 +675,39 @@ struct fc_function_template {
61346 */
61347
61348 /* remote port fixed attributes */
61349- unsigned long show_rport_maxframe_size:1;
61350- unsigned long show_rport_supported_classes:1;
61351- unsigned long show_rport_dev_loss_tmo:1;
61352+ const unsigned long show_rport_maxframe_size:1;
61353+ const unsigned long show_rport_supported_classes:1;
61354+ const unsigned long show_rport_dev_loss_tmo:1;
61355
61356 /*
61357 * target dynamic attributes
61358 * These should all be "1" if the driver uses the remote port
61359 * add/delete functions (so attributes reflect rport values).
61360 */
61361- unsigned long show_starget_node_name:1;
61362- unsigned long show_starget_port_name:1;
61363- unsigned long show_starget_port_id:1;
61364+ const unsigned long show_starget_node_name:1;
61365+ const unsigned long show_starget_port_name:1;
61366+ const unsigned long show_starget_port_id:1;
61367
61368 /* host fixed attributes */
61369- unsigned long show_host_node_name:1;
61370- unsigned long show_host_port_name:1;
61371- unsigned long show_host_permanent_port_name:1;
61372- unsigned long show_host_supported_classes:1;
61373- unsigned long show_host_supported_fc4s:1;
61374- unsigned long show_host_supported_speeds:1;
61375- unsigned long show_host_maxframe_size:1;
61376- unsigned long show_host_serial_number:1;
61377+ const unsigned long show_host_node_name:1;
61378+ const unsigned long show_host_port_name:1;
61379+ const unsigned long show_host_permanent_port_name:1;
61380+ const unsigned long show_host_supported_classes:1;
61381+ const unsigned long show_host_supported_fc4s:1;
61382+ const unsigned long show_host_supported_speeds:1;
61383+ const unsigned long show_host_maxframe_size:1;
61384+ const unsigned long show_host_serial_number:1;
61385 /* host dynamic attributes */
61386- unsigned long show_host_port_id:1;
61387- unsigned long show_host_port_type:1;
61388- unsigned long show_host_port_state:1;
61389- unsigned long show_host_active_fc4s:1;
61390- unsigned long show_host_speed:1;
61391- unsigned long show_host_fabric_name:1;
61392- unsigned long show_host_symbolic_name:1;
61393- unsigned long show_host_system_hostname:1;
61394+ const unsigned long show_host_port_id:1;
61395+ const unsigned long show_host_port_type:1;
61396+ const unsigned long show_host_port_state:1;
61397+ const unsigned long show_host_active_fc4s:1;
61398+ const unsigned long show_host_speed:1;
61399+ const unsigned long show_host_fabric_name:1;
61400+ const unsigned long show_host_symbolic_name:1;
61401+ const unsigned long show_host_system_hostname:1;
61402
61403- unsigned long disable_target_scan:1;
61404+ const unsigned long disable_target_scan:1;
61405 };
61406
61407
61408diff -urNp linux-2.6.32.45/include/sound/ac97_codec.h linux-2.6.32.45/include/sound/ac97_codec.h
61409--- linux-2.6.32.45/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
61410+++ linux-2.6.32.45/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
61411@@ -419,15 +419,15 @@
61412 struct snd_ac97;
61413
61414 struct snd_ac97_build_ops {
61415- int (*build_3d) (struct snd_ac97 *ac97);
61416- int (*build_specific) (struct snd_ac97 *ac97);
61417- int (*build_spdif) (struct snd_ac97 *ac97);
61418- int (*build_post_spdif) (struct snd_ac97 *ac97);
61419+ int (* const build_3d) (struct snd_ac97 *ac97);
61420+ int (* const build_specific) (struct snd_ac97 *ac97);
61421+ int (* const build_spdif) (struct snd_ac97 *ac97);
61422+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
61423 #ifdef CONFIG_PM
61424- void (*suspend) (struct snd_ac97 *ac97);
61425- void (*resume) (struct snd_ac97 *ac97);
61426+ void (* const suspend) (struct snd_ac97 *ac97);
61427+ void (* const resume) (struct snd_ac97 *ac97);
61428 #endif
61429- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61430+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61431 };
61432
61433 struct snd_ac97_bus_ops {
61434@@ -477,7 +477,7 @@ struct snd_ac97_template {
61435
61436 struct snd_ac97 {
61437 /* -- lowlevel (hardware) driver specific -- */
61438- struct snd_ac97_build_ops * build_ops;
61439+ const struct snd_ac97_build_ops * build_ops;
61440 void *private_data;
61441 void (*private_free) (struct snd_ac97 *ac97);
61442 /* --- */
61443diff -urNp linux-2.6.32.45/include/sound/ak4xxx-adda.h linux-2.6.32.45/include/sound/ak4xxx-adda.h
61444--- linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-03-27 14:31:47.000000000 -0400
61445+++ linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-08-05 20:33:55.000000000 -0400
61446@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61447 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61448 unsigned char val);
61449 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61450-};
61451+} __no_const;
61452
61453 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61454
61455diff -urNp linux-2.6.32.45/include/sound/hwdep.h linux-2.6.32.45/include/sound/hwdep.h
61456--- linux-2.6.32.45/include/sound/hwdep.h 2011-03-27 14:31:47.000000000 -0400
61457+++ linux-2.6.32.45/include/sound/hwdep.h 2011-08-05 20:33:55.000000000 -0400
61458@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61459 struct snd_hwdep_dsp_status *status);
61460 int (*dsp_load)(struct snd_hwdep *hw,
61461 struct snd_hwdep_dsp_image *image);
61462-};
61463+} __no_const;
61464
61465 struct snd_hwdep {
61466 struct snd_card *card;
61467diff -urNp linux-2.6.32.45/include/sound/info.h linux-2.6.32.45/include/sound/info.h
61468--- linux-2.6.32.45/include/sound/info.h 2011-03-27 14:31:47.000000000 -0400
61469+++ linux-2.6.32.45/include/sound/info.h 2011-08-05 20:33:55.000000000 -0400
61470@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61471 struct snd_info_buffer *buffer);
61472 void (*write)(struct snd_info_entry *entry,
61473 struct snd_info_buffer *buffer);
61474-};
61475+} __no_const;
61476
61477 struct snd_info_entry_ops {
61478 int (*open)(struct snd_info_entry *entry,
61479diff -urNp linux-2.6.32.45/include/sound/pcm.h linux-2.6.32.45/include/sound/pcm.h
61480--- linux-2.6.32.45/include/sound/pcm.h 2011-03-27 14:31:47.000000000 -0400
61481+++ linux-2.6.32.45/include/sound/pcm.h 2011-08-23 21:22:38.000000000 -0400
61482@@ -80,6 +80,7 @@ struct snd_pcm_ops {
61483 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61484 int (*ack)(struct snd_pcm_substream *substream);
61485 };
61486+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61487
61488 /*
61489 *
61490diff -urNp linux-2.6.32.45/include/sound/sb16_csp.h linux-2.6.32.45/include/sound/sb16_csp.h
61491--- linux-2.6.32.45/include/sound/sb16_csp.h 2011-03-27 14:31:47.000000000 -0400
61492+++ linux-2.6.32.45/include/sound/sb16_csp.h 2011-08-05 20:33:55.000000000 -0400
61493@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
61494 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61495 int (*csp_stop) (struct snd_sb_csp * p);
61496 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61497-};
61498+} __no_const;
61499
61500 /*
61501 * CSP private data
61502diff -urNp linux-2.6.32.45/include/sound/ymfpci.h linux-2.6.32.45/include/sound/ymfpci.h
61503--- linux-2.6.32.45/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
61504+++ linux-2.6.32.45/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
61505@@ -358,7 +358,7 @@ struct snd_ymfpci {
61506 spinlock_t reg_lock;
61507 spinlock_t voice_lock;
61508 wait_queue_head_t interrupt_sleep;
61509- atomic_t interrupt_sleep_count;
61510+ atomic_unchecked_t interrupt_sleep_count;
61511 struct snd_info_entry *proc_entry;
61512 const struct firmware *dsp_microcode;
61513 const struct firmware *controller_microcode;
61514diff -urNp linux-2.6.32.45/include/trace/events/irq.h linux-2.6.32.45/include/trace/events/irq.h
61515--- linux-2.6.32.45/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
61516+++ linux-2.6.32.45/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
61517@@ -34,7 +34,7 @@
61518 */
61519 TRACE_EVENT(irq_handler_entry,
61520
61521- TP_PROTO(int irq, struct irqaction *action),
61522+ TP_PROTO(int irq, const struct irqaction *action),
61523
61524 TP_ARGS(irq, action),
61525
61526@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
61527 */
61528 TRACE_EVENT(irq_handler_exit,
61529
61530- TP_PROTO(int irq, struct irqaction *action, int ret),
61531+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61532
61533 TP_ARGS(irq, action, ret),
61534
61535@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
61536 */
61537 TRACE_EVENT(softirq_entry,
61538
61539- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61540+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61541
61542 TP_ARGS(h, vec),
61543
61544@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
61545 */
61546 TRACE_EVENT(softirq_exit,
61547
61548- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61549+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61550
61551 TP_ARGS(h, vec),
61552
61553diff -urNp linux-2.6.32.45/include/video/uvesafb.h linux-2.6.32.45/include/video/uvesafb.h
61554--- linux-2.6.32.45/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
61555+++ linux-2.6.32.45/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
61556@@ -177,6 +177,7 @@ struct uvesafb_par {
61557 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61558 u8 pmi_setpal; /* PMI for palette changes */
61559 u16 *pmi_base; /* protected mode interface location */
61560+ u8 *pmi_code; /* protected mode code location */
61561 void *pmi_start;
61562 void *pmi_pal;
61563 u8 *vbe_state_orig; /*
61564diff -urNp linux-2.6.32.45/init/do_mounts.c linux-2.6.32.45/init/do_mounts.c
61565--- linux-2.6.32.45/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
61566+++ linux-2.6.32.45/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
61567@@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
61568
61569 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61570 {
61571- int err = sys_mount(name, "/root", fs, flags, data);
61572+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
61573 if (err)
61574 return err;
61575
61576- sys_chdir("/root");
61577+ sys_chdir((__force const char __user *)"/root");
61578 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61579 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61580 current->fs->pwd.mnt->mnt_sb->s_type->name,
61581@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
61582 va_start(args, fmt);
61583 vsprintf(buf, fmt, args);
61584 va_end(args);
61585- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61586+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61587 if (fd >= 0) {
61588 sys_ioctl(fd, FDEJECT, 0);
61589 sys_close(fd);
61590 }
61591 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61592- fd = sys_open("/dev/console", O_RDWR, 0);
61593+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
61594 if (fd >= 0) {
61595 sys_ioctl(fd, TCGETS, (long)&termios);
61596 termios.c_lflag &= ~ICANON;
61597 sys_ioctl(fd, TCSETSF, (long)&termios);
61598- sys_read(fd, &c, 1);
61599+ sys_read(fd, (char __user *)&c, 1);
61600 termios.c_lflag |= ICANON;
61601 sys_ioctl(fd, TCSETSF, (long)&termios);
61602 sys_close(fd);
61603@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
61604 mount_root();
61605 out:
61606 devtmpfs_mount("dev");
61607- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61608- sys_chroot(".");
61609+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61610+ sys_chroot((__force char __user *)".");
61611 }
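
The `(__force ... __user *)` casts added in this and the following init/ files exist only for sparse: these early-boot helpers pass kernel pointers to syscall entry points whose prototypes are annotated `__user`, and __force tells the checker that the address-space change is intentional. For reference, the annotations are roughly defined like this in include/linux/compiler.h and expand to nothing in a normal compile:

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif
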
61612diff -urNp linux-2.6.32.45/init/do_mounts.h linux-2.6.32.45/init/do_mounts.h
61613--- linux-2.6.32.45/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
61614+++ linux-2.6.32.45/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
61615@@ -15,15 +15,15 @@ extern int root_mountflags;
61616
61617 static inline int create_dev(char *name, dev_t dev)
61618 {
61619- sys_unlink(name);
61620- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61621+ sys_unlink((__force char __user *)name);
61622+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
61623 }
61624
61625 #if BITS_PER_LONG == 32
61626 static inline u32 bstat(char *name)
61627 {
61628 struct stat64 stat;
61629- if (sys_stat64(name, &stat) != 0)
61630+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
61631 return 0;
61632 if (!S_ISBLK(stat.st_mode))
61633 return 0;
61634diff -urNp linux-2.6.32.45/init/do_mounts_initrd.c linux-2.6.32.45/init/do_mounts_initrd.c
61635--- linux-2.6.32.45/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
61636+++ linux-2.6.32.45/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
61637@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
61638 sys_close(old_fd);sys_close(root_fd);
61639 sys_close(0);sys_close(1);sys_close(2);
61640 sys_setsid();
61641- (void) sys_open("/dev/console",O_RDWR,0);
61642+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
61643 (void) sys_dup(0);
61644 (void) sys_dup(0);
61645 return kernel_execve(shell, argv, envp_init);
61646@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
61647 create_dev("/dev/root.old", Root_RAM0);
61648 /* mount initrd on rootfs' /root */
61649 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61650- sys_mkdir("/old", 0700);
61651- root_fd = sys_open("/", 0, 0);
61652- old_fd = sys_open("/old", 0, 0);
61653+ sys_mkdir((__force const char __user *)"/old", 0700);
61654+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
61655+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
61656 /* move initrd over / and chdir/chroot in initrd root */
61657- sys_chdir("/root");
61658- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61659- sys_chroot(".");
61660+ sys_chdir((__force const char __user *)"/root");
61661+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61662+ sys_chroot((__force const char __user *)".");
61663
61664 /*
61665 * In case that a resume from disk is carried out by linuxrc or one of
61666@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
61667
61668 /* move initrd to rootfs' /old */
61669 sys_fchdir(old_fd);
61670- sys_mount("/", ".", NULL, MS_MOVE, NULL);
61671+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
61672 /* switch root and cwd back to / of rootfs */
61673 sys_fchdir(root_fd);
61674- sys_chroot(".");
61675+ sys_chroot((__force const char __user *)".");
61676 sys_close(old_fd);
61677 sys_close(root_fd);
61678
61679 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61680- sys_chdir("/old");
61681+ sys_chdir((__force const char __user *)"/old");
61682 return;
61683 }
61684
61685@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
61686 mount_root();
61687
61688 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61689- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61690+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
61691 if (!error)
61692 printk("okay\n");
61693 else {
61694- int fd = sys_open("/dev/root.old", O_RDWR, 0);
61695+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
61696 if (error == -ENOENT)
61697 printk("/initrd does not exist. Ignored.\n");
61698 else
61699 printk("failed\n");
61700 printk(KERN_NOTICE "Unmounting old root\n");
61701- sys_umount("/old", MNT_DETACH);
61702+ sys_umount((__force char __user *)"/old", MNT_DETACH);
61703 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61704 if (fd < 0) {
61705 error = fd;
61706@@ -119,11 +119,11 @@ int __init initrd_load(void)
61707 * mounted in the normal path.
61708 */
61709 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61710- sys_unlink("/initrd.image");
61711+ sys_unlink((__force const char __user *)"/initrd.image");
61712 handle_initrd();
61713 return 1;
61714 }
61715 }
61716- sys_unlink("/initrd.image");
61717+ sys_unlink((__force const char __user *)"/initrd.image");
61718 return 0;
61719 }
61720diff -urNp linux-2.6.32.45/init/do_mounts_md.c linux-2.6.32.45/init/do_mounts_md.c
61721--- linux-2.6.32.45/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
61722+++ linux-2.6.32.45/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
61723@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61724 partitioned ? "_d" : "", minor,
61725 md_setup_args[ent].device_names);
61726
61727- fd = sys_open(name, 0, 0);
61728+ fd = sys_open((__force char __user *)name, 0, 0);
61729 if (fd < 0) {
61730 printk(KERN_ERR "md: open failed - cannot start "
61731 "array %s\n", name);
61732@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61733 * array without it
61734 */
61735 sys_close(fd);
61736- fd = sys_open(name, 0, 0);
61737+ fd = sys_open((__force char __user *)name, 0, 0);
61738 sys_ioctl(fd, BLKRRPART, 0);
61739 }
61740 sys_close(fd);
61741@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61742
61743 wait_for_device_probe();
61744
61745- fd = sys_open("/dev/md0", 0, 0);
61746+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
61747 if (fd >= 0) {
61748 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61749 sys_close(fd);
61750diff -urNp linux-2.6.32.45/init/initramfs.c linux-2.6.32.45/init/initramfs.c
61751--- linux-2.6.32.45/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
61752+++ linux-2.6.32.45/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
61753@@ -74,7 +74,7 @@ static void __init free_hash(void)
61754 }
61755 }
61756
61757-static long __init do_utime(char __user *filename, time_t mtime)
61758+static long __init do_utime(__force char __user *filename, time_t mtime)
61759 {
61760 struct timespec t[2];
61761
61762@@ -109,7 +109,7 @@ static void __init dir_utime(void)
61763 struct dir_entry *de, *tmp;
61764 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61765 list_del(&de->list);
61766- do_utime(de->name, de->mtime);
61767+ do_utime((__force char __user *)de->name, de->mtime);
61768 kfree(de->name);
61769 kfree(de);
61770 }
61771@@ -271,7 +271,7 @@ static int __init maybe_link(void)
61772 if (nlink >= 2) {
61773 char *old = find_link(major, minor, ino, mode, collected);
61774 if (old)
61775- return (sys_link(old, collected) < 0) ? -1 : 1;
61776+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
61777 }
61778 return 0;
61779 }
61780@@ -280,11 +280,11 @@ static void __init clean_path(char *path
61781 {
61782 struct stat st;
61783
61784- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61785+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
61786 if (S_ISDIR(st.st_mode))
61787- sys_rmdir(path);
61788+ sys_rmdir((__force char __user *)path);
61789 else
61790- sys_unlink(path);
61791+ sys_unlink((__force char __user *)path);
61792 }
61793 }
61794
61795@@ -305,7 +305,7 @@ static int __init do_name(void)
61796 int openflags = O_WRONLY|O_CREAT;
61797 if (ml != 1)
61798 openflags |= O_TRUNC;
61799- wfd = sys_open(collected, openflags, mode);
61800+ wfd = sys_open((__force char __user *)collected, openflags, mode);
61801
61802 if (wfd >= 0) {
61803 sys_fchown(wfd, uid, gid);
61804@@ -317,17 +317,17 @@ static int __init do_name(void)
61805 }
61806 }
61807 } else if (S_ISDIR(mode)) {
61808- sys_mkdir(collected, mode);
61809- sys_chown(collected, uid, gid);
61810- sys_chmod(collected, mode);
61811+ sys_mkdir((__force char __user *)collected, mode);
61812+ sys_chown((__force char __user *)collected, uid, gid);
61813+ sys_chmod((__force char __user *)collected, mode);
61814 dir_add(collected, mtime);
61815 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61816 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61817 if (maybe_link() == 0) {
61818- sys_mknod(collected, mode, rdev);
61819- sys_chown(collected, uid, gid);
61820- sys_chmod(collected, mode);
61821- do_utime(collected, mtime);
61822+ sys_mknod((__force char __user *)collected, mode, rdev);
61823+ sys_chown((__force char __user *)collected, uid, gid);
61824+ sys_chmod((__force char __user *)collected, mode);
61825+ do_utime((__force char __user *)collected, mtime);
61826 }
61827 }
61828 return 0;
61829@@ -336,15 +336,15 @@ static int __init do_name(void)
61830 static int __init do_copy(void)
61831 {
61832 if (count >= body_len) {
61833- sys_write(wfd, victim, body_len);
61834+ sys_write(wfd, (__force char __user *)victim, body_len);
61835 sys_close(wfd);
61836- do_utime(vcollected, mtime);
61837+ do_utime((__force char __user *)vcollected, mtime);
61838 kfree(vcollected);
61839 eat(body_len);
61840 state = SkipIt;
61841 return 0;
61842 } else {
61843- sys_write(wfd, victim, count);
61844+ sys_write(wfd, (__force char __user *)victim, count);
61845 body_len -= count;
61846 eat(count);
61847 return 1;
61848@@ -355,9 +355,9 @@ static int __init do_symlink(void)
61849 {
61850 collected[N_ALIGN(name_len) + body_len] = '\0';
61851 clean_path(collected, 0);
61852- sys_symlink(collected + N_ALIGN(name_len), collected);
61853- sys_lchown(collected, uid, gid);
61854- do_utime(collected, mtime);
61855+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
61856+ sys_lchown((__force char __user *)collected, uid, gid);
61857+ do_utime((__force char __user *)collected, mtime);
61858 state = SkipIt;
61859 next_state = Reset;
61860 return 0;
61861diff -urNp linux-2.6.32.45/init/Kconfig linux-2.6.32.45/init/Kconfig
61862--- linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
61863+++ linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
61864@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
61865
61866 config COMPAT_BRK
61867 bool "Disable heap randomization"
61868- default y
61869+ default n
61870 help
61871 Randomizing heap placement makes heap exploits harder, but it
61872 also breaks ancient binaries (including anything libc5 based).
61873diff -urNp linux-2.6.32.45/init/main.c linux-2.6.32.45/init/main.c
61874--- linux-2.6.32.45/init/main.c 2011-05-10 22:12:01.000000000 -0400
61875+++ linux-2.6.32.45/init/main.c 2011-08-05 20:33:55.000000000 -0400
61876@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
61877 #ifdef CONFIG_TC
61878 extern void tc_init(void);
61879 #endif
61880+extern void grsecurity_init(void);
61881
61882 enum system_states system_state __read_mostly;
61883 EXPORT_SYMBOL(system_state);
61884@@ -183,6 +184,49 @@ static int __init set_reset_devices(char
61885
61886 __setup("reset_devices", set_reset_devices);
61887
61888+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61889+extern char pax_enter_kernel_user[];
61890+extern char pax_exit_kernel_user[];
61891+extern pgdval_t clone_pgd_mask;
61892+#endif
61893+
61894+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61895+static int __init setup_pax_nouderef(char *str)
61896+{
61897+#ifdef CONFIG_X86_32
61898+ unsigned int cpu;
61899+ struct desc_struct *gdt;
61900+
61901+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
61902+ gdt = get_cpu_gdt_table(cpu);
61903+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61904+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61905+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61906+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61907+ }
61908+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61909+#else
61910+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61911+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61912+ clone_pgd_mask = ~(pgdval_t)0UL;
61913+#endif
61914+
61915+ return 0;
61916+}
61917+early_param("pax_nouderef", setup_pax_nouderef);
61918+#endif
61919+
61920+#ifdef CONFIG_PAX_SOFTMODE
61921+int pax_softmode;
61922+
61923+static int __init setup_pax_softmode(char *str)
61924+{
61925+ get_option(&str, &pax_softmode);
61926+ return 1;
61927+}
61928+__setup("pax_softmode=", setup_pax_softmode);
61929+#endif
61930+
61931 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61932 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
61933 static const char *panic_later, *panic_param;
61934@@ -705,52 +749,53 @@ int initcall_debug;
61935 core_param(initcall_debug, initcall_debug, bool, 0644);
61936
61937 static char msgbuf[64];
61938-static struct boot_trace_call call;
61939-static struct boot_trace_ret ret;
61940+static struct boot_trace_call trace_call;
61941+static struct boot_trace_ret trace_ret;
61942
61943 int do_one_initcall(initcall_t fn)
61944 {
61945 int count = preempt_count();
61946 ktime_t calltime, delta, rettime;
61947+ const char *msg1 = "", *msg2 = "";
61948
61949 if (initcall_debug) {
61950- call.caller = task_pid_nr(current);
61951- printk("calling %pF @ %i\n", fn, call.caller);
61952+ trace_call.caller = task_pid_nr(current);
61953+ printk("calling %pF @ %i\n", fn, trace_call.caller);
61954 calltime = ktime_get();
61955- trace_boot_call(&call, fn);
61956+ trace_boot_call(&trace_call, fn);
61957 enable_boot_trace();
61958 }
61959
61960- ret.result = fn();
61961+ trace_ret.result = fn();
61962
61963 if (initcall_debug) {
61964 disable_boot_trace();
61965 rettime = ktime_get();
61966 delta = ktime_sub(rettime, calltime);
61967- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
61968- trace_boot_ret(&ret, fn);
61969+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
61970+ trace_boot_ret(&trace_ret, fn);
61971 printk("initcall %pF returned %d after %Ld usecs\n", fn,
61972- ret.result, ret.duration);
61973+ trace_ret.result, trace_ret.duration);
61974 }
61975
61976 msgbuf[0] = 0;
61977
61978- if (ret.result && ret.result != -ENODEV && initcall_debug)
61979- sprintf(msgbuf, "error code %d ", ret.result);
61980+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
61981+ sprintf(msgbuf, "error code %d ", trace_ret.result);
61982
61983 if (preempt_count() != count) {
61984- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
61985+ msg1 = " preemption imbalance";
61986 preempt_count() = count;
61987 }
61988 if (irqs_disabled()) {
61989- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
61990+ msg2 = " disabled interrupts";
61991 local_irq_enable();
61992 }
61993- if (msgbuf[0]) {
61994- printk("initcall %pF returned with %s\n", fn, msgbuf);
61995+ if (msgbuf[0] || *msg1 || *msg2) {
61996+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
61997 }
61998
61999- return ret.result;
62000+ return trace_ret.result;
62001 }
62002
62003
62004@@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
62005 if (!ramdisk_execute_command)
62006 ramdisk_execute_command = "/init";
62007
62008- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62009+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
62010 ramdisk_execute_command = NULL;
62011 prepare_namespace();
62012 }
62013
62014+ grsecurity_init();
62015+
62016 /*
62017 * Ok, we have completed the initial bootup, and
62018 * we're essentially up and running. Get rid of the
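
setup_pax_softmode() above uses the standard __setup() hook for "name=value" command-line options: the handler receives the text after the '=', get_option() parses the integer, and returning 1 marks the option as consumed. The same pattern reduced to its skeleton (kernel context; example_mode is a hypothetical parameter, not part of the patch):

static int example_mode;

static int __init setup_example_mode(char *str)
{
	get_option(&str, &example_mode);   /* parses "example_mode=<int>" from the command line */
	return 1;                          /* non-zero: option handled */
}
__setup("example_mode=", setup_example_mode);
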
62019diff -urNp linux-2.6.32.45/init/noinitramfs.c linux-2.6.32.45/init/noinitramfs.c
62020--- linux-2.6.32.45/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
62021+++ linux-2.6.32.45/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
62022@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
62023 {
62024 int err;
62025
62026- err = sys_mkdir("/dev", 0755);
62027+ err = sys_mkdir((const char __user *)"/dev", 0755);
62028 if (err < 0)
62029 goto out;
62030
62031@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
62032 if (err < 0)
62033 goto out;
62034
62035- err = sys_mkdir("/root", 0700);
62036+ err = sys_mkdir((const char __user *)"/root", 0700);
62037 if (err < 0)
62038 goto out;
62039
62040diff -urNp linux-2.6.32.45/ipc/mqueue.c linux-2.6.32.45/ipc/mqueue.c
62041--- linux-2.6.32.45/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
62042+++ linux-2.6.32.45/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
62043@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
62044 mq_bytes = (mq_msg_tblsz +
62045 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62046
62047+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62048 spin_lock(&mq_lock);
62049 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62050 u->mq_bytes + mq_bytes >
62051diff -urNp linux-2.6.32.45/ipc/msg.c linux-2.6.32.45/ipc/msg.c
62052--- linux-2.6.32.45/ipc/msg.c 2011-03-27 14:31:47.000000000 -0400
62053+++ linux-2.6.32.45/ipc/msg.c 2011-08-05 20:33:55.000000000 -0400
62054@@ -310,18 +310,19 @@ static inline int msg_security(struct ke
62055 return security_msg_queue_associate(msq, msgflg);
62056 }
62057
62058+static struct ipc_ops msg_ops = {
62059+ .getnew = newque,
62060+ .associate = msg_security,
62061+ .more_checks = NULL
62062+};
62063+
62064 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62065 {
62066 struct ipc_namespace *ns;
62067- struct ipc_ops msg_ops;
62068 struct ipc_params msg_params;
62069
62070 ns = current->nsproxy->ipc_ns;
62071
62072- msg_ops.getnew = newque;
62073- msg_ops.associate = msg_security;
62074- msg_ops.more_checks = NULL;
62075-
62076 msg_params.key = key;
62077 msg_params.flg = msgflg;
62078
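
The hunk above (and the matching ones for ipc/sem.c and ipc/shm.c below) replaces an ipc_ops table that was filled in on the stack on every call with a single file-scope instance using designated initializers, so the table is built once and, together with the rest of the patch, can be kept out of writable memory. The pattern in isolation, with stub callbacks standing in for the real helpers:

#include <stddef.h>

struct ipc_ops {
	int (*getnew)(void);
	int (*associate)(int flg);
	int (*more_checks)(int flg);
};

static int newque_stub(void)          { return 0; }           /* stand-in for newque() */
static int msg_security_stub(int flg) { (void)flg; return 0; } /* stand-in for msg_security() */

/* one file-scope table instead of a per-call stack copy */
static struct ipc_ops msg_ops = {
	.getnew      = newque_stub,
	.associate   = msg_security_stub,
	.more_checks = NULL,
};
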
62079diff -urNp linux-2.6.32.45/ipc/sem.c linux-2.6.32.45/ipc/sem.c
62080--- linux-2.6.32.45/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
62081+++ linux-2.6.32.45/ipc/sem.c 2011-08-05 20:33:55.000000000 -0400
62082@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct
62083 return 0;
62084 }
62085
62086+static struct ipc_ops sem_ops = {
62087+ .getnew = newary,
62088+ .associate = sem_security,
62089+ .more_checks = sem_more_checks
62090+};
62091+
62092 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62093 {
62094 struct ipc_namespace *ns;
62095- struct ipc_ops sem_ops;
62096 struct ipc_params sem_params;
62097
62098 ns = current->nsproxy->ipc_ns;
62099@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62100 if (nsems < 0 || nsems > ns->sc_semmsl)
62101 return -EINVAL;
62102
62103- sem_ops.getnew = newary;
62104- sem_ops.associate = sem_security;
62105- sem_ops.more_checks = sem_more_checks;
62106-
62107 sem_params.key = key;
62108 sem_params.flg = semflg;
62109 sem_params.u.nsems = nsems;
62110@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namesp
62111 ushort* sem_io = fast_sem_io;
62112 int nsems;
62113
62114+ pax_track_stack();
62115+
62116 sma = sem_lock_check(ns, semid);
62117 if (IS_ERR(sma))
62118 return PTR_ERR(sma);
62119@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62120 unsigned long jiffies_left = 0;
62121 struct ipc_namespace *ns;
62122
62123+ pax_track_stack();
62124+
62125 ns = current->nsproxy->ipc_ns;
62126
62127 if (nsops < 1 || semid < 0)
62128diff -urNp linux-2.6.32.45/ipc/shm.c linux-2.6.32.45/ipc/shm.c
62129--- linux-2.6.32.45/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
62130+++ linux-2.6.32.45/ipc/shm.c 2011-08-05 20:33:55.000000000 -0400
62131@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
62132 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62133 #endif
62134
62135+#ifdef CONFIG_GRKERNSEC
62136+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62137+ const time_t shm_createtime, const uid_t cuid,
62138+ const int shmid);
62139+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62140+ const time_t shm_createtime);
62141+#endif
62142+
62143 void shm_init_ns(struct ipc_namespace *ns)
62144 {
62145 ns->shm_ctlmax = SHMMAX;
62146@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
62147 shp->shm_lprid = 0;
62148 shp->shm_atim = shp->shm_dtim = 0;
62149 shp->shm_ctim = get_seconds();
62150+#ifdef CONFIG_GRKERNSEC
62151+ {
62152+ struct timespec timeval;
62153+ do_posix_clock_monotonic_gettime(&timeval);
62154+
62155+ shp->shm_createtime = timeval.tv_sec;
62156+ }
62157+#endif
62158 shp->shm_segsz = size;
62159 shp->shm_nattch = 0;
62160 shp->shm_file = file;
62161@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct
62162 return 0;
62163 }
62164
62165+static struct ipc_ops shm_ops = {
62166+ .getnew = newseg,
62167+ .associate = shm_security,
62168+ .more_checks = shm_more_checks
62169+};
62170+
62171 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62172 {
62173 struct ipc_namespace *ns;
62174- struct ipc_ops shm_ops;
62175 struct ipc_params shm_params;
62176
62177 ns = current->nsproxy->ipc_ns;
62178
62179- shm_ops.getnew = newseg;
62180- shm_ops.associate = shm_security;
62181- shm_ops.more_checks = shm_more_checks;
62182-
62183 shm_params.key = key;
62184 shm_params.flg = shmflg;
62185 shm_params.u.size = size;
62186@@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *sh
62187 if (err)
62188 goto out_unlock;
62189
62190+#ifdef CONFIG_GRKERNSEC
62191+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62192+ shp->shm_perm.cuid, shmid) ||
62193+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62194+ err = -EACCES;
62195+ goto out_unlock;
62196+ }
62197+#endif
62198+
62199 path.dentry = dget(shp->shm_file->f_path.dentry);
62200 path.mnt = shp->shm_file->f_path.mnt;
62201 shp->shm_nattch++;
62202+#ifdef CONFIG_GRKERNSEC
62203+ shp->shm_lapid = current->pid;
62204+#endif
62205 size = i_size_read(path.dentry->d_inode);
62206 shm_unlock(shp);
62207
62208diff -urNp linux-2.6.32.45/kernel/acct.c linux-2.6.32.45/kernel/acct.c
62209--- linux-2.6.32.45/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
62210+++ linux-2.6.32.45/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
62211@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
62212 */
62213 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62214 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62215- file->f_op->write(file, (char *)&ac,
62216+ file->f_op->write(file, (__force char __user *)&ac,
62217 sizeof(acct_t), &file->f_pos);
62218 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62219 set_fs(fs);
62220diff -urNp linux-2.6.32.45/kernel/audit.c linux-2.6.32.45/kernel/audit.c
62221--- linux-2.6.32.45/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
62222+++ linux-2.6.32.45/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
62223@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
62224 3) suppressed due to audit_rate_limit
62225 4) suppressed due to audit_backlog_limit
62226 */
62227-static atomic_t audit_lost = ATOMIC_INIT(0);
62228+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62229
62230 /* The netlink socket. */
62231 static struct sock *audit_sock;
62232@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
62233 unsigned long now;
62234 int print;
62235
62236- atomic_inc(&audit_lost);
62237+ atomic_inc_unchecked(&audit_lost);
62238
62239 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62240
62241@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
62242 printk(KERN_WARNING
62243 "audit: audit_lost=%d audit_rate_limit=%d "
62244 "audit_backlog_limit=%d\n",
62245- atomic_read(&audit_lost),
62246+ atomic_read_unchecked(&audit_lost),
62247 audit_rate_limit,
62248 audit_backlog_limit);
62249 audit_panic(message);
62250@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
62251 status_set.pid = audit_pid;
62252 status_set.rate_limit = audit_rate_limit;
62253 status_set.backlog_limit = audit_backlog_limit;
62254- status_set.lost = atomic_read(&audit_lost);
62255+ status_set.lost = atomic_read_unchecked(&audit_lost);
62256 status_set.backlog = skb_queue_len(&audit_skb_queue);
62257 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62258 &status_set, sizeof(status_set));
62259@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
62260 spin_unlock_irq(&tsk->sighand->siglock);
62261 }
62262 read_unlock(&tasklist_lock);
62263- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
62264- &s, sizeof(s));
62265+
62266+ if (!err)
62267+ audit_send_reply(NETLINK_CB(skb).pid, seq,
62268+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
62269 break;
62270 }
62271 case AUDIT_TTY_SET: {
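
[editor's sketch] audit_lost is only a statistics counter, so the hunk above moves it to atomic_unchecked_t, the PaX/grsecurity type for counters deliberately exempt from REFCOUNT overflow protection. The userspace model below illustrates that split; the saturating behaviour of the protected variant is an assumption about the REFCOUNT feature, and none of these types or functions are kernel APIs.

#include <limits.h>
#include <stdio.h>

typedef struct { unsigned int counter; } atomic_model_t;           /* protected  */
typedef struct { unsigned int counter; } atomic_unchecked_model_t; /* statistics */

static void checked_inc(atomic_model_t *v)
{
	if (v->counter == UINT_MAX)	/* saturate rather than wrap */
		return;
	v->counter++;
}

static void unchecked_inc(atomic_unchecked_model_t *v)
{
	v->counter++;			/* wrap-around is harmless for a lost-record count */
}

int main(void)
{
	atomic_model_t a = { UINT_MAX };
	atomic_unchecked_model_t b = { UINT_MAX };

	checked_inc(&a);
	unchecked_inc(&b);
	printf("checked=%u unchecked=%u\n", a.counter, b.counter);
	return 0;
}
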
62272diff -urNp linux-2.6.32.45/kernel/auditsc.c linux-2.6.32.45/kernel/auditsc.c
62273--- linux-2.6.32.45/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
62274+++ linux-2.6.32.45/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
62275@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
62276 }
62277
62278 /* global counter which is incremented every time something logs in */
62279-static atomic_t session_id = ATOMIC_INIT(0);
62280+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62281
62282 /**
62283 * audit_set_loginuid - set a task's audit_context loginuid
62284@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
62285 */
62286 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62287 {
62288- unsigned int sessionid = atomic_inc_return(&session_id);
62289+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62290 struct audit_context *context = task->audit_context;
62291
62292 if (context && context->in_syscall) {
62293diff -urNp linux-2.6.32.45/kernel/capability.c linux-2.6.32.45/kernel/capability.c
62294--- linux-2.6.32.45/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
62295+++ linux-2.6.32.45/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
62296@@ -305,10 +305,26 @@ int capable(int cap)
62297 BUG();
62298 }
62299
62300- if (security_capable(cap) == 0) {
62301+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
62302 current->flags |= PF_SUPERPRIV;
62303 return 1;
62304 }
62305 return 0;
62306 }
62307+
62308+int capable_nolog(int cap)
62309+{
62310+ if (unlikely(!cap_valid(cap))) {
62311+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62312+ BUG();
62313+ }
62314+
62315+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
62316+ current->flags |= PF_SUPERPRIV;
62317+ return 1;
62318+ }
62319+ return 0;
62320+}
62321+
62322 EXPORT_SYMBOL(capable);
62323+EXPORT_SYMBOL(capable_nolog);
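
[editor's sketch] capable_nolog() added above mirrors capable() but consults gr_is_capable_nolog(), so a caller can probe a capability without the grsecurity side emitting an audit record; that intent is inferred from the function names, not stated in the patch. A userspace model of the logged/silent split, with every function invented for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool policy_allows(int cap) { return cap == 0; }

static bool capable_model(int cap)
{
	bool ok = policy_allows(cap);
	if (!ok)
		printf("audit: capability %d denied\n", cap);
	return ok;
}

static bool capable_nolog_model(int cap)
{
	return policy_allows(cap);	/* same decision, no audit record */
}

int main(void)
{
	capable_model(1);	/* denied and logged */
	capable_nolog_model(1);	/* denied, silently  */
	return 0;
}
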
62324diff -urNp linux-2.6.32.45/kernel/cgroup.c linux-2.6.32.45/kernel/cgroup.c
62325--- linux-2.6.32.45/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
62326+++ linux-2.6.32.45/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
62327@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
62328 struct hlist_head *hhead;
62329 struct cg_cgroup_link *link;
62330
62331+ pax_track_stack();
62332+
62333 /* First see if we already have a cgroup group that matches
62334 * the desired set */
62335 read_lock(&css_set_lock);
62336diff -urNp linux-2.6.32.45/kernel/configs.c linux-2.6.32.45/kernel/configs.c
62337--- linux-2.6.32.45/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
62338+++ linux-2.6.32.45/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
62339@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
62340 struct proc_dir_entry *entry;
62341
62342 /* create the current config file */
62343+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62344+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62345+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62346+ &ikconfig_file_ops);
62347+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62348+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62349+ &ikconfig_file_ops);
62350+#endif
62351+#else
62352 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62353 &ikconfig_file_ops);
62354+#endif
62355+
62356 if (!entry)
62357 return -ENOMEM;
62358
62359diff -urNp linux-2.6.32.45/kernel/cpu.c linux-2.6.32.45/kernel/cpu.c
62360--- linux-2.6.32.45/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
62361+++ linux-2.6.32.45/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
62362@@ -19,7 +19,7 @@
62363 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
62364 static DEFINE_MUTEX(cpu_add_remove_lock);
62365
62366-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
62367+static RAW_NOTIFIER_HEAD(cpu_chain);
62368
62369 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
62370 * Should always be manipulated under cpu_add_remove_lock
62371diff -urNp linux-2.6.32.45/kernel/cred.c linux-2.6.32.45/kernel/cred.c
62372--- linux-2.6.32.45/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
62373+++ linux-2.6.32.45/kernel/cred.c 2011-08-11 19:49:38.000000000 -0400
62374@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
62375 */
62376 void __put_cred(struct cred *cred)
62377 {
62378+ pax_track_stack();
62379+
62380 kdebug("__put_cred(%p{%d,%d})", cred,
62381 atomic_read(&cred->usage),
62382 read_cred_subscribers(cred));
62383@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
62384 {
62385 struct cred *cred;
62386
62387+ pax_track_stack();
62388+
62389 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62390 atomic_read(&tsk->cred->usage),
62391 read_cred_subscribers(tsk->cred));
62392@@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
62393 {
62394 const struct cred *cred;
62395
62396+ pax_track_stack();
62397+
62398 rcu_read_lock();
62399
62400 do {
62401@@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
62402 {
62403 struct cred *new;
62404
62405+ pax_track_stack();
62406+
62407 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62408 if (!new)
62409 return NULL;
62410@@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
62411 const struct cred *old;
62412 struct cred *new;
62413
62414+ pax_track_stack();
62415+
62416 validate_process_creds();
62417
62418 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62419@@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
62420 struct thread_group_cred *tgcred = NULL;
62421 struct cred *new;
62422
62423+ pax_track_stack();
62424+
62425 #ifdef CONFIG_KEYS
62426 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62427 if (!tgcred)
62428@@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
62429 struct cred *new;
62430 int ret;
62431
62432+ pax_track_stack();
62433+
62434 mutex_init(&p->cred_guard_mutex);
62435
62436 if (
62437@@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
62438 struct task_struct *task = current;
62439 const struct cred *old = task->real_cred;
62440
62441+ pax_track_stack();
62442+
62443 kdebug("commit_creds(%p{%d,%d})", new,
62444 atomic_read(&new->usage),
62445 read_cred_subscribers(new));
62446@@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
62447
62448 get_cred(new); /* we will require a ref for the subj creds too */
62449
62450+ gr_set_role_label(task, new->uid, new->gid);
62451+
62452 /* dumpability changes */
62453 if (old->euid != new->euid ||
62454 old->egid != new->egid ||
62455@@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
62456 key_fsgid_changed(task);
62457
62458 /* do it
62459- * - What if a process setreuid()'s and this brings the
62460- * new uid over his NPROC rlimit? We can check this now
62461- * cheaply with the new uid cache, so if it matters
62462- * we should be checking for it. -DaveM
62463+ * RLIMIT_NPROC limits on user->processes have already been checked
62464+ * in set_user().
62465 */
62466 alter_cred_subscribers(new, 2);
62467 if (new->user != old->user)
62468@@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
62469 */
62470 void abort_creds(struct cred *new)
62471 {
62472+ pax_track_stack();
62473+
62474 kdebug("abort_creds(%p{%d,%d})", new,
62475 atomic_read(&new->usage),
62476 read_cred_subscribers(new));
62477@@ -629,6 +647,8 @@ const struct cred *override_creds(const
62478 {
62479 const struct cred *old = current->cred;
62480
62481+ pax_track_stack();
62482+
62483 kdebug("override_creds(%p{%d,%d})", new,
62484 atomic_read(&new->usage),
62485 read_cred_subscribers(new));
62486@@ -658,6 +678,8 @@ void revert_creds(const struct cred *old
62487 {
62488 const struct cred *override = current->cred;
62489
62490+ pax_track_stack();
62491+
62492 kdebug("revert_creds(%p{%d,%d})", old,
62493 atomic_read(&old->usage),
62494 read_cred_subscribers(old));
62495@@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct
62496 const struct cred *old;
62497 struct cred *new;
62498
62499+ pax_track_stack();
62500+
62501 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62502 if (!new)
62503 return NULL;
62504@@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62505 */
62506 int set_security_override(struct cred *new, u32 secid)
62507 {
62508+ pax_track_stack();
62509+
62510 return security_kernel_act_as(new, secid);
62511 }
62512 EXPORT_SYMBOL(set_security_override);
62513@@ -777,6 +803,8 @@ int set_security_override_from_ctx(struc
62514 u32 secid;
62515 int ret;
62516
62517+ pax_track_stack();
62518+
62519 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62520 if (ret < 0)
62521 return ret;
62522diff -urNp linux-2.6.32.45/kernel/exit.c linux-2.6.32.45/kernel/exit.c
62523--- linux-2.6.32.45/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
62524+++ linux-2.6.32.45/kernel/exit.c 2011-08-17 19:19:50.000000000 -0400
62525@@ -55,6 +55,10 @@
62526 #include <asm/pgtable.h>
62527 #include <asm/mmu_context.h>
62528
62529+#ifdef CONFIG_GRKERNSEC
62530+extern rwlock_t grsec_exec_file_lock;
62531+#endif
62532+
62533 static void exit_mm(struct task_struct * tsk);
62534
62535 static void __unhash_process(struct task_struct *p)
62536@@ -174,6 +178,10 @@ void release_task(struct task_struct * p
62537 struct task_struct *leader;
62538 int zap_leader;
62539 repeat:
62540+#ifdef CONFIG_NET
62541+ gr_del_task_from_ip_table(p);
62542+#endif
62543+
62544 tracehook_prepare_release_task(p);
62545 /* don't need to get the RCU readlock here - the process is dead and
62546 * can't be modifying its own credentials */
62547@@ -341,11 +349,22 @@ static void reparent_to_kthreadd(void)
62548 {
62549 write_lock_irq(&tasklist_lock);
62550
62551+#ifdef CONFIG_GRKERNSEC
62552+ write_lock(&grsec_exec_file_lock);
62553+ if (current->exec_file) {
62554+ fput(current->exec_file);
62555+ current->exec_file = NULL;
62556+ }
62557+ write_unlock(&grsec_exec_file_lock);
62558+#endif
62559+
62560 ptrace_unlink(current);
62561 /* Reparent to init */
62562 current->real_parent = current->parent = kthreadd_task;
62563 list_move_tail(&current->sibling, &current->real_parent->children);
62564
62565+ gr_set_kernel_label(current);
62566+
62567 /* Set the exit signal to SIGCHLD so we signal init on exit */
62568 current->exit_signal = SIGCHLD;
62569
62570@@ -397,7 +416,7 @@ int allow_signal(int sig)
62571 * know it'll be handled, so that they don't get converted to
62572 * SIGKILL or just silently dropped.
62573 */
62574- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62575+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62576 recalc_sigpending();
62577 spin_unlock_irq(&current->sighand->siglock);
62578 return 0;
62579@@ -433,6 +452,17 @@ void daemonize(const char *name, ...)
62580 vsnprintf(current->comm, sizeof(current->comm), name, args);
62581 va_end(args);
62582
62583+#ifdef CONFIG_GRKERNSEC
62584+ write_lock(&grsec_exec_file_lock);
62585+ if (current->exec_file) {
62586+ fput(current->exec_file);
62587+ current->exec_file = NULL;
62588+ }
62589+ write_unlock(&grsec_exec_file_lock);
62590+#endif
62591+
62592+ gr_set_kernel_label(current);
62593+
62594 /*
62595 * If we were started as result of loading a module, close all of the
62596 * user space pages. We don't need them, and if we didn't close them
62597@@ -897,17 +927,17 @@ NORET_TYPE void do_exit(long code)
62598 struct task_struct *tsk = current;
62599 int group_dead;
62600
62601- profile_task_exit(tsk);
62602-
62603- WARN_ON(atomic_read(&tsk->fs_excl));
62604-
62605+ /*
62606+ * Check this first since set_fs() below depends on
62607+ * current_thread_info(), which we better not access when we're in
62608+ * interrupt context. Other than that, we want to do the set_fs()
62609+ * as early as possible.
62610+ */
62611 if (unlikely(in_interrupt()))
62612 panic("Aiee, killing interrupt handler!");
62613- if (unlikely(!tsk->pid))
62614- panic("Attempted to kill the idle task!");
62615
62616 /*
62617- * If do_exit is called because this processes oopsed, it's possible
62618+ * If do_exit is called because this processes Oops'ed, it's possible
62619 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
62620 * continuing. Amongst other possible reasons, this is to prevent
62621 * mm_release()->clear_child_tid() from writing to a user-controlled
62622@@ -915,6 +945,13 @@ NORET_TYPE void do_exit(long code)
62623 */
62624 set_fs(USER_DS);
62625
62626+ profile_task_exit(tsk);
62627+
62628+ WARN_ON(atomic_read(&tsk->fs_excl));
62629+
62630+ if (unlikely(!tsk->pid))
62631+ panic("Attempted to kill the idle task!");
62632+
62633 tracehook_report_exit(&code);
62634
62635 validate_creds_for_do_exit(tsk);
62636@@ -973,6 +1010,9 @@ NORET_TYPE void do_exit(long code)
62637 tsk->exit_code = code;
62638 taskstats_exit(tsk, group_dead);
62639
62640+ gr_acl_handle_psacct(tsk, code);
62641+ gr_acl_handle_exit();
62642+
62643 exit_mm(tsk);
62644
62645 if (group_dead)
62646@@ -1188,7 +1228,7 @@ static int wait_task_zombie(struct wait_
62647
62648 if (unlikely(wo->wo_flags & WNOWAIT)) {
62649 int exit_code = p->exit_code;
62650- int why, status;
62651+ int why;
62652
62653 get_task_struct(p);
62654 read_unlock(&tasklist_lock);
62655diff -urNp linux-2.6.32.45/kernel/fork.c linux-2.6.32.45/kernel/fork.c
62656--- linux-2.6.32.45/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
62657+++ linux-2.6.32.45/kernel/fork.c 2011-08-11 19:50:07.000000000 -0400
62658@@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
62659 *stackend = STACK_END_MAGIC; /* for overflow detection */
62660
62661 #ifdef CONFIG_CC_STACKPROTECTOR
62662- tsk->stack_canary = get_random_int();
62663+ tsk->stack_canary = pax_get_random_long();
62664 #endif
62665
62666 /* One for us, one for whoever does the "release_task()" (usually parent) */
62667@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
62668 mm->locked_vm = 0;
62669 mm->mmap = NULL;
62670 mm->mmap_cache = NULL;
62671- mm->free_area_cache = oldmm->mmap_base;
62672- mm->cached_hole_size = ~0UL;
62673+ mm->free_area_cache = oldmm->free_area_cache;
62674+ mm->cached_hole_size = oldmm->cached_hole_size;
62675 mm->map_count = 0;
62676 cpumask_clear(mm_cpumask(mm));
62677 mm->mm_rb = RB_ROOT;
62678@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
62679 tmp->vm_flags &= ~VM_LOCKED;
62680 tmp->vm_mm = mm;
62681 tmp->vm_next = tmp->vm_prev = NULL;
62682+ tmp->vm_mirror = NULL;
62683 anon_vma_link(tmp);
62684 file = tmp->vm_file;
62685 if (file) {
62686@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
62687 if (retval)
62688 goto out;
62689 }
62690+
62691+#ifdef CONFIG_PAX_SEGMEXEC
62692+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
62693+ struct vm_area_struct *mpnt_m;
62694+
62695+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
62696+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
62697+
62698+ if (!mpnt->vm_mirror)
62699+ continue;
62700+
62701+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
62702+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
62703+ mpnt->vm_mirror = mpnt_m;
62704+ } else {
62705+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
62706+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
62707+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
62708+ mpnt->vm_mirror->vm_mirror = mpnt;
62709+ }
62710+ }
62711+ BUG_ON(mpnt_m);
62712+ }
62713+#endif
62714+
62715 /* a new mm has just been created */
62716 arch_dup_mmap(oldmm, mm);
62717 retval = 0;
62718@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
62719 write_unlock(&fs->lock);
62720 return -EAGAIN;
62721 }
62722- fs->users++;
62723+ atomic_inc(&fs->users);
62724 write_unlock(&fs->lock);
62725 return 0;
62726 }
62727 tsk->fs = copy_fs_struct(fs);
62728 if (!tsk->fs)
62729 return -ENOMEM;
62730+ gr_set_chroot_entries(tsk, &tsk->fs->root);
62731 return 0;
62732 }
62733
62734@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(
62735 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
62736 #endif
62737 retval = -EAGAIN;
62738+
62739+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
62740+
62741 if (atomic_read(&p->real_cred->user->processes) >=
62742 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
62743- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
62744- p->real_cred->user != INIT_USER)
62745+ if (p->real_cred->user != INIT_USER &&
62746+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
62747 goto bad_fork_free;
62748 }
62749+ current->flags &= ~PF_NPROC_EXCEEDED;
62750
62751 retval = copy_creds(p, clone_flags);
62752 if (retval < 0)
62753@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(
62754 goto bad_fork_free_pid;
62755 }
62756
62757+ gr_copy_label(p);
62758+
62759 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
62760 /*
62761 * Clear TID on mm_release()?
62762@@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
62763 bad_fork_free:
62764 free_task(p);
62765 fork_out:
62766+ gr_log_forkfail(retval);
62767+
62768 return ERR_PTR(retval);
62769 }
62770
62771@@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
62772 if (clone_flags & CLONE_PARENT_SETTID)
62773 put_user(nr, parent_tidptr);
62774
62775+ gr_handle_brute_check();
62776+
62777 if (clone_flags & CLONE_VFORK) {
62778 p->vfork_done = &vfork;
62779 init_completion(&vfork);
62780@@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unsh
62781 return 0;
62782
62783 /* don't need lock here; in the worst case we'll do useless copy */
62784- if (fs->users == 1)
62785+ if (atomic_read(&fs->users) == 1)
62786 return 0;
62787
62788 *new_fsp = copy_fs_struct(fs);
62789@@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
62790 fs = current->fs;
62791 write_lock(&fs->lock);
62792 current->fs = new_fs;
62793- if (--fs->users)
62794+ gr_set_chroot_entries(current, &current->fs->root);
62795+ if (atomic_dec_return(&fs->users))
62796 new_fs = NULL;
62797 else
62798 new_fs = fs;
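
[editor's sketch] copy_fs() and unshare() above move fs->users from a plain integer to an atomic count, bumped with atomic_inc() and released with atomic_dec_return(). A stand-alone model of that inc/dec-and-test pattern using C11 atomics; the struct and numbers are illustrative only.

#include <stdatomic.h>
#include <stdio.h>

struct fs_struct_model {
	atomic_int users;
};

int main(void)
{
	struct fs_struct_model fs;

	atomic_init(&fs.users, 1);			/* one owner to begin with        */
	atomic_fetch_add(&fs.users, 1);			/* copy_fs(): share the fs_struct */

	int remaining = atomic_fetch_sub(&fs.users, 1) - 1; /* unshare(): drop a reference */
	if (remaining == 0)
		printf("last user gone, fs_struct may be freed\n");
	else
		printf("still shared by %d user(s)\n", remaining);
	return 0;
}
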
62799diff -urNp linux-2.6.32.45/kernel/futex.c linux-2.6.32.45/kernel/futex.c
62800--- linux-2.6.32.45/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
62801+++ linux-2.6.32.45/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
62802@@ -54,6 +54,7 @@
62803 #include <linux/mount.h>
62804 #include <linux/pagemap.h>
62805 #include <linux/syscalls.h>
62806+#include <linux/ptrace.h>
62807 #include <linux/signal.h>
62808 #include <linux/module.h>
62809 #include <linux/magic.h>
62810@@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
62811 struct page *page;
62812 int err;
62813
62814+#ifdef CONFIG_PAX_SEGMEXEC
62815+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
62816+ return -EFAULT;
62817+#endif
62818+
62819 /*
62820 * The futex address must be "naturally" aligned.
62821 */
62822@@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
62823 struct futex_q q;
62824 int ret;
62825
62826+ pax_track_stack();
62827+
62828 if (!bitset)
62829 return -EINVAL;
62830
62831@@ -1841,7 +1849,7 @@ retry:
62832
62833 restart = &current_thread_info()->restart_block;
62834 restart->fn = futex_wait_restart;
62835- restart->futex.uaddr = (u32 *)uaddr;
62836+ restart->futex.uaddr = uaddr;
62837 restart->futex.val = val;
62838 restart->futex.time = abs_time->tv64;
62839 restart->futex.bitset = bitset;
62840@@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
62841 struct futex_q q;
62842 int res, ret;
62843
62844+ pax_track_stack();
62845+
62846 if (!bitset)
62847 return -EINVAL;
62848
62849@@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62850 {
62851 struct robust_list_head __user *head;
62852 unsigned long ret;
62853+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62854 const struct cred *cred = current_cred(), *pcred;
62855+#endif
62856
62857 if (!futex_cmpxchg_enabled)
62858 return -ENOSYS;
62859@@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62860 if (!p)
62861 goto err_unlock;
62862 ret = -EPERM;
62863+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62864+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
62865+ goto err_unlock;
62866+#else
62867 pcred = __task_cred(p);
62868 if (cred->euid != pcred->euid &&
62869 cred->euid != pcred->uid &&
62870 !capable(CAP_SYS_PTRACE))
62871 goto err_unlock;
62872+#endif
62873 head = p->robust_list;
62874 rcu_read_unlock();
62875 }
62876@@ -2459,7 +2476,7 @@ retry:
62877 */
62878 static inline int fetch_robust_entry(struct robust_list __user **entry,
62879 struct robust_list __user * __user *head,
62880- int *pi)
62881+ unsigned int *pi)
62882 {
62883 unsigned long uentry;
62884
62885@@ -2640,6 +2657,7 @@ static int __init futex_init(void)
62886 {
62887 u32 curval;
62888 int i;
62889+ mm_segment_t oldfs;
62890
62891 /*
62892 * This will fail and we want it. Some arch implementations do
62893@@ -2651,7 +2669,10 @@ static int __init futex_init(void)
62894 * implementation, the non functional ones will return
62895 * -ENOSYS.
62896 */
62897+ oldfs = get_fs();
62898+ set_fs(USER_DS);
62899 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
62900+ set_fs(oldfs);
62901 if (curval == -EFAULT)
62902 futex_cmpxchg_enabled = 1;
62903
62904diff -urNp linux-2.6.32.45/kernel/futex_compat.c linux-2.6.32.45/kernel/futex_compat.c
62905--- linux-2.6.32.45/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
62906+++ linux-2.6.32.45/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
62907@@ -10,6 +10,7 @@
62908 #include <linux/compat.h>
62909 #include <linux/nsproxy.h>
62910 #include <linux/futex.h>
62911+#include <linux/ptrace.h>
62912
62913 #include <asm/uaccess.h>
62914
62915@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
62916 {
62917 struct compat_robust_list_head __user *head;
62918 unsigned long ret;
62919- const struct cred *cred = current_cred(), *pcred;
62920+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62921+ const struct cred *cred = current_cred();
62922+ const struct cred *pcred;
62923+#endif
62924
62925 if (!futex_cmpxchg_enabled)
62926 return -ENOSYS;
62927@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
62928 if (!p)
62929 goto err_unlock;
62930 ret = -EPERM;
62931+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62932+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
62933+ goto err_unlock;
62934+#else
62935 pcred = __task_cred(p);
62936 if (cred->euid != pcred->euid &&
62937 cred->euid != pcred->uid &&
62938 !capable(CAP_SYS_PTRACE))
62939 goto err_unlock;
62940+#endif
62941 head = p->compat_robust_list;
62942 read_unlock(&tasklist_lock);
62943 }
62944diff -urNp linux-2.6.32.45/kernel/gcov/base.c linux-2.6.32.45/kernel/gcov/base.c
62945--- linux-2.6.32.45/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
62946+++ linux-2.6.32.45/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
62947@@ -102,11 +102,6 @@ void gcov_enable_events(void)
62948 }
62949
62950 #ifdef CONFIG_MODULES
62951-static inline int within(void *addr, void *start, unsigned long size)
62952-{
62953- return ((addr >= start) && (addr < start + size));
62954-}
62955-
62956 /* Update list and generate events when modules are unloaded. */
62957 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
62958 void *data)
62959@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
62960 prev = NULL;
62961 /* Remove entries located in module from linked list. */
62962 for (info = gcov_info_head; info; info = info->next) {
62963- if (within(info, mod->module_core, mod->core_size)) {
62964+ if (within_module_core_rw((unsigned long)info, mod)) {
62965 if (prev)
62966 prev->next = info->next;
62967 else
62968diff -urNp linux-2.6.32.45/kernel/hrtimer.c linux-2.6.32.45/kernel/hrtimer.c
62969--- linux-2.6.32.45/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
62970+++ linux-2.6.32.45/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
62971@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
62972 local_irq_restore(flags);
62973 }
62974
62975-static void run_hrtimer_softirq(struct softirq_action *h)
62976+static void run_hrtimer_softirq(void)
62977 {
62978 hrtimer_peek_ahead_timers();
62979 }
62980diff -urNp linux-2.6.32.45/kernel/kallsyms.c linux-2.6.32.45/kernel/kallsyms.c
62981--- linux-2.6.32.45/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
62982+++ linux-2.6.32.45/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
62983@@ -11,6 +11,9 @@
62984 * Changed the compression method from stem compression to "table lookup"
62985 * compression (see scripts/kallsyms.c for a more complete description)
62986 */
62987+#ifdef CONFIG_GRKERNSEC_HIDESYM
62988+#define __INCLUDED_BY_HIDESYM 1
62989+#endif
62990 #include <linux/kallsyms.h>
62991 #include <linux/module.h>
62992 #include <linux/init.h>
62993@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
62994
62995 static inline int is_kernel_inittext(unsigned long addr)
62996 {
62997+ if (system_state != SYSTEM_BOOTING)
62998+ return 0;
62999+
63000 if (addr >= (unsigned long)_sinittext
63001 && addr <= (unsigned long)_einittext)
63002 return 1;
63003 return 0;
63004 }
63005
63006+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63007+#ifdef CONFIG_MODULES
63008+static inline int is_module_text(unsigned long addr)
63009+{
63010+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63011+ return 1;
63012+
63013+ addr = ktla_ktva(addr);
63014+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63015+}
63016+#else
63017+static inline int is_module_text(unsigned long addr)
63018+{
63019+ return 0;
63020+}
63021+#endif
63022+#endif
63023+
63024 static inline int is_kernel_text(unsigned long addr)
63025 {
63026 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63027@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
63028
63029 static inline int is_kernel(unsigned long addr)
63030 {
63031+
63032+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63033+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63034+ return 1;
63035+
63036+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63037+#else
63038 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63039+#endif
63040+
63041 return 1;
63042 return in_gate_area_no_task(addr);
63043 }
63044
63045 static int is_ksym_addr(unsigned long addr)
63046 {
63047+
63048+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63049+ if (is_module_text(addr))
63050+ return 0;
63051+#endif
63052+
63053 if (all_var)
63054 return is_kernel(addr);
63055
63056@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
63057
63058 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63059 {
63060- iter->name[0] = '\0';
63061 iter->nameoff = get_symbol_offset(new_pos);
63062 iter->pos = new_pos;
63063 }
63064@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
63065 {
63066 struct kallsym_iter *iter = m->private;
63067
63068+#ifdef CONFIG_GRKERNSEC_HIDESYM
63069+ if (current_uid())
63070+ return 0;
63071+#endif
63072+
63073 /* Some debugging symbols have no name. Ignore them. */
63074 if (!iter->name[0])
63075 return 0;
63076@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
63077 struct kallsym_iter *iter;
63078 int ret;
63079
63080- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63081+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63082 if (!iter)
63083 return -ENOMEM;
63084 reset_iter(iter, 0);
63085diff -urNp linux-2.6.32.45/kernel/kgdb.c linux-2.6.32.45/kernel/kgdb.c
63086--- linux-2.6.32.45/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
63087+++ linux-2.6.32.45/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
63088@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
63089 /* Guard for recursive entry */
63090 static int exception_level;
63091
63092-static struct kgdb_io *kgdb_io_ops;
63093+static const struct kgdb_io *kgdb_io_ops;
63094 static DEFINE_SPINLOCK(kgdb_registration_lock);
63095
63096 /* kgdb console driver is loaded */
63097@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
63098 */
63099 static atomic_t passive_cpu_wait[NR_CPUS];
63100 static atomic_t cpu_in_kgdb[NR_CPUS];
63101-atomic_t kgdb_setting_breakpoint;
63102+atomic_unchecked_t kgdb_setting_breakpoint;
63103
63104 struct task_struct *kgdb_usethread;
63105 struct task_struct *kgdb_contthread;
63106@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
63107 sizeof(unsigned long)];
63108
63109 /* to keep track of the CPU which is doing the single stepping*/
63110-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63111+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63112
63113 /*
63114 * If you are debugging a problem where roundup (the collection of
63115@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
63116 return 0;
63117 if (kgdb_connected)
63118 return 1;
63119- if (atomic_read(&kgdb_setting_breakpoint))
63120+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
63121 return 1;
63122 if (print_wait)
63123 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
63124@@ -1426,8 +1426,8 @@ acquirelock:
63125 * instance of the exception handler wanted to come into the
63126 * debugger on a different CPU via a single step
63127 */
63128- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63129- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
63130+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63131+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
63132
63133 atomic_set(&kgdb_active, -1);
63134 touch_softlockup_watchdog();
63135@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
63136 *
63137 * Register it with the KGDB core.
63138 */
63139-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
63140+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
63141 {
63142 int err;
63143
63144@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
63145 *
63146 * Unregister it with the KGDB core.
63147 */
63148-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
63149+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
63150 {
63151 BUG_ON(kgdb_connected);
63152
63153@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
63154 */
63155 void kgdb_breakpoint(void)
63156 {
63157- atomic_set(&kgdb_setting_breakpoint, 1);
63158+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
63159 wmb(); /* Sync point before breakpoint */
63160 arch_kgdb_breakpoint();
63161 wmb(); /* Sync point after breakpoint */
63162- atomic_set(&kgdb_setting_breakpoint, 0);
63163+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
63164 }
63165 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
63166
63167diff -urNp linux-2.6.32.45/kernel/kmod.c linux-2.6.32.45/kernel/kmod.c
63168--- linux-2.6.32.45/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
63169+++ linux-2.6.32.45/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
63170@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63171 * If module auto-loading support is disabled then this function
63172 * becomes a no-operation.
63173 */
63174-int __request_module(bool wait, const char *fmt, ...)
63175+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63176 {
63177- va_list args;
63178 char module_name[MODULE_NAME_LEN];
63179 unsigned int max_modprobes;
63180 int ret;
63181- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63182+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63183 static char *envp[] = { "HOME=/",
63184 "TERM=linux",
63185 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63186@@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
63187 if (ret)
63188 return ret;
63189
63190- va_start(args, fmt);
63191- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63192- va_end(args);
63193+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63194 if (ret >= MODULE_NAME_LEN)
63195 return -ENAMETOOLONG;
63196
63197+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63198+ if (!current_uid()) {
63199+ /* hack to workaround consolekit/udisks stupidity */
63200+ read_lock(&tasklist_lock);
63201+ if (!strcmp(current->comm, "mount") &&
63202+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63203+ read_unlock(&tasklist_lock);
63204+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63205+ return -EPERM;
63206+ }
63207+ read_unlock(&tasklist_lock);
63208+ }
63209+#endif
63210+
63211 /* If modprobe needs a service that is in a module, we get a recursive
63212 * loop. Limit the number of running kmod threads to max_threads/2 or
63213 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63214@@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
63215 atomic_dec(&kmod_concurrent);
63216 return ret;
63217 }
63218+
63219+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63220+{
63221+ va_list args;
63222+ int ret;
63223+
63224+ va_start(args, fmt);
63225+ ret = ____request_module(wait, module_param, fmt, args);
63226+ va_end(args);
63227+
63228+ return ret;
63229+}
63230+
63231+int __request_module(bool wait, const char *fmt, ...)
63232+{
63233+ va_list args;
63234+ int ret;
63235+
63236+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63237+ if (current_uid()) {
63238+ char module_param[MODULE_NAME_LEN];
63239+
63240+ memset(module_param, 0, sizeof(module_param));
63241+
63242+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63243+
63244+ va_start(args, fmt);
63245+ ret = ____request_module(wait, module_param, fmt, args);
63246+ va_end(args);
63247+
63248+ return ret;
63249+ }
63250+#endif
63251+
63252+ va_start(args, fmt);
63253+ ret = ____request_module(wait, NULL, fmt, args);
63254+ va_end(args);
63255+
63256+ return ret;
63257+}
63258+
63259+
63260 EXPORT_SYMBOL(__request_module);
63261 #endif /* CONFIG_MODULES */
63262
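
[editor's sketch] With CONFIG_GRKERNSEC_MODHARDEN, the rewritten __request_module() above appends a marker argument of the form "grsec_modharden_normal<uid>_" to the modprobe command line when the request originates from an unprivileged user (root-owned "mount via udisks" requests are refused outright). The small userspace sketch below only prints the argv that ____request_module() ends up building; the uid, module name and modprobe path are placeholders.

#include <stdio.h>

int main(void)
{
	unsigned int uid = 1000;		/* placeholder unprivileged uid */
	const char *module_name = "dummy";	/* placeholder module name      */
	char module_param[64];

	snprintf(module_param, sizeof(module_param),
		 "grsec_modharden_normal%u_", uid);

	const char *argv[] = { "/sbin/modprobe", "-q", "--",
			       module_name, module_param, NULL };

	for (int i = 0; argv[i]; i++)
		printf("argv[%d] = %s\n", i, argv[i]);
	return 0;
}
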
63263diff -urNp linux-2.6.32.45/kernel/kprobes.c linux-2.6.32.45/kernel/kprobes.c
63264--- linux-2.6.32.45/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
63265+++ linux-2.6.32.45/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
63266@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
63267 * kernel image and loaded module images reside. This is required
63268 * so x86_64 can correctly handle the %rip-relative fixups.
63269 */
63270- kip->insns = module_alloc(PAGE_SIZE);
63271+ kip->insns = module_alloc_exec(PAGE_SIZE);
63272 if (!kip->insns) {
63273 kfree(kip);
63274 return NULL;
63275@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
63276 */
63277 if (!list_is_singular(&kprobe_insn_pages)) {
63278 list_del(&kip->list);
63279- module_free(NULL, kip->insns);
63280+ module_free_exec(NULL, kip->insns);
63281 kfree(kip);
63282 }
63283 return 1;
63284@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
63285 {
63286 int i, err = 0;
63287 unsigned long offset = 0, size = 0;
63288- char *modname, namebuf[128];
63289+ char *modname, namebuf[KSYM_NAME_LEN];
63290 const char *symbol_name;
63291 void *addr;
63292 struct kprobe_blackpoint *kb;
63293@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
63294 const char *sym = NULL;
63295 unsigned int i = *(loff_t *) v;
63296 unsigned long offset = 0;
63297- char *modname, namebuf[128];
63298+ char *modname, namebuf[KSYM_NAME_LEN];
63299
63300 head = &kprobe_table[i];
63301 preempt_disable();
63302diff -urNp linux-2.6.32.45/kernel/lockdep.c linux-2.6.32.45/kernel/lockdep.c
63303--- linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
63304+++ linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
63305@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
63306 /*
63307 * Various lockdep statistics:
63308 */
63309-atomic_t chain_lookup_hits;
63310-atomic_t chain_lookup_misses;
63311-atomic_t hardirqs_on_events;
63312-atomic_t hardirqs_off_events;
63313-atomic_t redundant_hardirqs_on;
63314-atomic_t redundant_hardirqs_off;
63315-atomic_t softirqs_on_events;
63316-atomic_t softirqs_off_events;
63317-atomic_t redundant_softirqs_on;
63318-atomic_t redundant_softirqs_off;
63319-atomic_t nr_unused_locks;
63320-atomic_t nr_cyclic_checks;
63321-atomic_t nr_find_usage_forwards_checks;
63322-atomic_t nr_find_usage_backwards_checks;
63323+atomic_unchecked_t chain_lookup_hits;
63324+atomic_unchecked_t chain_lookup_misses;
63325+atomic_unchecked_t hardirqs_on_events;
63326+atomic_unchecked_t hardirqs_off_events;
63327+atomic_unchecked_t redundant_hardirqs_on;
63328+atomic_unchecked_t redundant_hardirqs_off;
63329+atomic_unchecked_t softirqs_on_events;
63330+atomic_unchecked_t softirqs_off_events;
63331+atomic_unchecked_t redundant_softirqs_on;
63332+atomic_unchecked_t redundant_softirqs_off;
63333+atomic_unchecked_t nr_unused_locks;
63334+atomic_unchecked_t nr_cyclic_checks;
63335+atomic_unchecked_t nr_find_usage_forwards_checks;
63336+atomic_unchecked_t nr_find_usage_backwards_checks;
63337 #endif
63338
63339 /*
63340@@ -577,6 +577,10 @@ static int static_obj(void *obj)
63341 int i;
63342 #endif
63343
63344+#ifdef CONFIG_PAX_KERNEXEC
63345+ start = ktla_ktva(start);
63346+#endif
63347+
63348 /*
63349 * static variable?
63350 */
63351@@ -592,8 +596,7 @@ static int static_obj(void *obj)
63352 */
63353 for_each_possible_cpu(i) {
63354 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
63355- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
63356- + per_cpu_offset(i);
63357+ end = start + PERCPU_ENOUGH_ROOM;
63358
63359 if ((addr >= start) && (addr < end))
63360 return 1;
63361@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
63362 if (!static_obj(lock->key)) {
63363 debug_locks_off();
63364 printk("INFO: trying to register non-static key.\n");
63365+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63366 printk("the code is fine but needs lockdep annotation.\n");
63367 printk("turning off the locking correctness validator.\n");
63368 dump_stack();
63369@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
63370 if (!class)
63371 return 0;
63372 }
63373- debug_atomic_inc((atomic_t *)&class->ops);
63374+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
63375 if (very_verbose(class)) {
63376 printk("\nacquire class [%p] %s", class->key, class->name);
63377 if (class->name_version > 1)
63378diff -urNp linux-2.6.32.45/kernel/lockdep_internals.h linux-2.6.32.45/kernel/lockdep_internals.h
63379--- linux-2.6.32.45/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
63380+++ linux-2.6.32.45/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
63381@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
63382 /*
63383 * Various lockdep statistics:
63384 */
63385-extern atomic_t chain_lookup_hits;
63386-extern atomic_t chain_lookup_misses;
63387-extern atomic_t hardirqs_on_events;
63388-extern atomic_t hardirqs_off_events;
63389-extern atomic_t redundant_hardirqs_on;
63390-extern atomic_t redundant_hardirqs_off;
63391-extern atomic_t softirqs_on_events;
63392-extern atomic_t softirqs_off_events;
63393-extern atomic_t redundant_softirqs_on;
63394-extern atomic_t redundant_softirqs_off;
63395-extern atomic_t nr_unused_locks;
63396-extern atomic_t nr_cyclic_checks;
63397-extern atomic_t nr_cyclic_check_recursions;
63398-extern atomic_t nr_find_usage_forwards_checks;
63399-extern atomic_t nr_find_usage_forwards_recursions;
63400-extern atomic_t nr_find_usage_backwards_checks;
63401-extern atomic_t nr_find_usage_backwards_recursions;
63402-# define debug_atomic_inc(ptr) atomic_inc(ptr)
63403-# define debug_atomic_dec(ptr) atomic_dec(ptr)
63404-# define debug_atomic_read(ptr) atomic_read(ptr)
63405+extern atomic_unchecked_t chain_lookup_hits;
63406+extern atomic_unchecked_t chain_lookup_misses;
63407+extern atomic_unchecked_t hardirqs_on_events;
63408+extern atomic_unchecked_t hardirqs_off_events;
63409+extern atomic_unchecked_t redundant_hardirqs_on;
63410+extern atomic_unchecked_t redundant_hardirqs_off;
63411+extern atomic_unchecked_t softirqs_on_events;
63412+extern atomic_unchecked_t softirqs_off_events;
63413+extern atomic_unchecked_t redundant_softirqs_on;
63414+extern atomic_unchecked_t redundant_softirqs_off;
63415+extern atomic_unchecked_t nr_unused_locks;
63416+extern atomic_unchecked_t nr_cyclic_checks;
63417+extern atomic_unchecked_t nr_cyclic_check_recursions;
63418+extern atomic_unchecked_t nr_find_usage_forwards_checks;
63419+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
63420+extern atomic_unchecked_t nr_find_usage_backwards_checks;
63421+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
63422+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
63423+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
63424+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
63425 #else
63426 # define debug_atomic_inc(ptr) do { } while (0)
63427 # define debug_atomic_dec(ptr) do { } while (0)
63428diff -urNp linux-2.6.32.45/kernel/lockdep_proc.c linux-2.6.32.45/kernel/lockdep_proc.c
63429--- linux-2.6.32.45/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
63430+++ linux-2.6.32.45/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
63431@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63432
63433 static void print_name(struct seq_file *m, struct lock_class *class)
63434 {
63435- char str[128];
63436+ char str[KSYM_NAME_LEN];
63437 const char *name = class->name;
63438
63439 if (!name) {
63440diff -urNp linux-2.6.32.45/kernel/module.c linux-2.6.32.45/kernel/module.c
63441--- linux-2.6.32.45/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
63442+++ linux-2.6.32.45/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
63443@@ -55,6 +55,7 @@
63444 #include <linux/async.h>
63445 #include <linux/percpu.h>
63446 #include <linux/kmemleak.h>
63447+#include <linux/grsecurity.h>
63448
63449 #define CREATE_TRACE_POINTS
63450 #include <trace/events/module.h>
63451@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
63452 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
63453
63454 /* Bounds of module allocation, for speeding __module_address */
63455-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63456+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63457+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63458
63459 int register_module_notifier(struct notifier_block * nb)
63460 {
63461@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
63462 return true;
63463
63464 list_for_each_entry_rcu(mod, &modules, list) {
63465- struct symsearch arr[] = {
63466+ struct symsearch modarr[] = {
63467 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63468 NOT_GPL_ONLY, false },
63469 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63470@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
63471 #endif
63472 };
63473
63474- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63475+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63476 return true;
63477 }
63478 return false;
63479@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
63480 void *ptr;
63481 int cpu;
63482
63483- if (align > PAGE_SIZE) {
63484+ if (align-1 >= PAGE_SIZE) {
63485 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63486 name, align, PAGE_SIZE);
63487 align = PAGE_SIZE;
63488@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
63489 * /sys/module/foo/sections stuff
63490 * J. Corbet <corbet@lwn.net>
63491 */
63492-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
63493+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63494
63495 static inline bool sect_empty(const Elf_Shdr *sect)
63496 {
63497@@ -1545,7 +1547,8 @@ static void free_module(struct module *m
63498 destroy_params(mod->kp, mod->num_kp);
63499
63500 /* This may be NULL, but that's OK */
63501- module_free(mod, mod->module_init);
63502+ module_free(mod, mod->module_init_rw);
63503+ module_free_exec(mod, mod->module_init_rx);
63504 kfree(mod->args);
63505 if (mod->percpu)
63506 percpu_modfree(mod->percpu);
63507@@ -1554,10 +1557,12 @@ static void free_module(struct module *m
63508 percpu_modfree(mod->refptr);
63509 #endif
63510 /* Free lock-classes: */
63511- lockdep_free_key_range(mod->module_core, mod->core_size);
63512+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63513+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63514
63515 /* Finally, free the core (containing the module structure) */
63516- module_free(mod, mod->module_core);
63517+ module_free_exec(mod, mod->module_core_rx);
63518+ module_free(mod, mod->module_core_rw);
63519
63520 #ifdef CONFIG_MPU
63521 update_protections(current->mm);
63522@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
63523 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63524 int ret = 0;
63525 const struct kernel_symbol *ksym;
63526+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63527+ int is_fs_load = 0;
63528+ int register_filesystem_found = 0;
63529+ char *p;
63530+
63531+ p = strstr(mod->args, "grsec_modharden_fs");
63532+
63533+ if (p) {
63534+ char *endptr = p + strlen("grsec_modharden_fs");
63535+ /* copy \0 as well */
63536+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63537+ is_fs_load = 1;
63538+ }
63539+#endif
63540+
63541
63542 for (i = 1; i < n; i++) {
63543+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63544+ const char *name = strtab + sym[i].st_name;
63545+
63546+ /* it's a real shame this will never get ripped and copied
63547+ upstream! ;(
63548+ */
63549+ if (is_fs_load && !strcmp(name, "register_filesystem"))
63550+ register_filesystem_found = 1;
63551+#endif
63552 switch (sym[i].st_shndx) {
63553 case SHN_COMMON:
63554 /* We compiled with -fno-common. These are not
63555@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
63556 strtab + sym[i].st_name, mod);
63557 /* Ok if resolved. */
63558 if (ksym) {
63559+ pax_open_kernel();
63560 sym[i].st_value = ksym->value;
63561+ pax_close_kernel();
63562 break;
63563 }
63564
63565@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
63566 secbase = (unsigned long)mod->percpu;
63567 else
63568 secbase = sechdrs[sym[i].st_shndx].sh_addr;
63569+ pax_open_kernel();
63570 sym[i].st_value += secbase;
63571+ pax_close_kernel();
63572 break;
63573 }
63574 }
63575
63576+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63577+ if (is_fs_load && !register_filesystem_found) {
63578+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63579+ ret = -EPERM;
63580+ }
63581+#endif
63582+
63583 return ret;
63584 }
63585
63586@@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
63587 || s->sh_entsize != ~0UL
63588 || strstarts(secstrings + s->sh_name, ".init"))
63589 continue;
63590- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63591+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63592+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63593+ else
63594+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63595 DEBUGP("\t%s\n", secstrings + s->sh_name);
63596 }
63597- if (m == 0)
63598- mod->core_text_size = mod->core_size;
63599 }
63600
63601 DEBUGP("Init section allocation order:\n");
63602@@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
63603 || s->sh_entsize != ~0UL
63604 || !strstarts(secstrings + s->sh_name, ".init"))
63605 continue;
63606- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63607- | INIT_OFFSET_MASK);
63608+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63609+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63610+ else
63611+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63612+ s->sh_entsize |= INIT_OFFSET_MASK;
63613 DEBUGP("\t%s\n", secstrings + s->sh_name);
63614 }
63615- if (m == 0)
63616- mod->init_text_size = mod->init_size;
63617 }
63618 }
63619
63620@@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
63621
63622 /* As per nm */
63623 static char elf_type(const Elf_Sym *sym,
63624- Elf_Shdr *sechdrs,
63625- const char *secstrings,
63626- struct module *mod)
63627+ const Elf_Shdr *sechdrs,
63628+ const char *secstrings)
63629 {
63630 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
63631 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
63632@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
63633
63634 /* Put symbol section at end of init part of module. */
63635 symsect->sh_flags |= SHF_ALLOC;
63636- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63637+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63638 symindex) | INIT_OFFSET_MASK;
63639 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
63640
63641@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
63642 }
63643
63644 /* Append room for core symbols at end of core part. */
63645- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63646- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
63647+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63648+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
63649
63650 /* Put string table section at end of init part of module. */
63651 strsect->sh_flags |= SHF_ALLOC;
63652- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63653+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63654 strindex) | INIT_OFFSET_MASK;
63655 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
63656
63657 /* Append room for core symbols' strings at end of core part. */
63658- *pstroffs = mod->core_size;
63659+ *pstroffs = mod->core_size_rx;
63660 __set_bit(0, strmap);
63661- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
63662+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
63663
63664 return symoffs;
63665 }
63666@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
63667 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63668 mod->strtab = (void *)sechdrs[strindex].sh_addr;
63669
63670+ pax_open_kernel();
63671+
63672 /* Set types up while we still have access to sections. */
63673 for (i = 0; i < mod->num_symtab; i++)
63674 mod->symtab[i].st_info
63675- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
63676+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
63677
63678- mod->core_symtab = dst = mod->module_core + symoffs;
63679+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
63680 src = mod->symtab;
63681 *dst = *src;
63682 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63683@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
63684 }
63685 mod->core_num_syms = ndst;
63686
63687- mod->core_strtab = s = mod->module_core + stroffs;
63688+ mod->core_strtab = s = mod->module_core_rx + stroffs;
63689 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
63690 if (test_bit(i, strmap))
63691 *++s = mod->strtab[i];
63692+
63693+ pax_close_kernel();
63694 }
63695 #else
63696 static inline unsigned long layout_symtab(struct module *mod,
63697@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
63698 #endif
63699 }
63700
63701-static void *module_alloc_update_bounds(unsigned long size)
63702+static void *module_alloc_update_bounds_rw(unsigned long size)
63703 {
63704 void *ret = module_alloc(size);
63705
63706 if (ret) {
63707 /* Update module bounds. */
63708- if ((unsigned long)ret < module_addr_min)
63709- module_addr_min = (unsigned long)ret;
63710- if ((unsigned long)ret + size > module_addr_max)
63711- module_addr_max = (unsigned long)ret + size;
63712+ if ((unsigned long)ret < module_addr_min_rw)
63713+ module_addr_min_rw = (unsigned long)ret;
63714+ if ((unsigned long)ret + size > module_addr_max_rw)
63715+ module_addr_max_rw = (unsigned long)ret + size;
63716+ }
63717+ return ret;
63718+}
63719+
63720+static void *module_alloc_update_bounds_rx(unsigned long size)
63721+{
63722+ void *ret = module_alloc_exec(size);
63723+
63724+ if (ret) {
63725+ /* Update module bounds. */
63726+ if ((unsigned long)ret < module_addr_min_rx)
63727+ module_addr_min_rx = (unsigned long)ret;
63728+ if ((unsigned long)ret + size > module_addr_max_rx)
63729+ module_addr_max_rx = (unsigned long)ret + size;
63730 }
63731 return ret;
63732 }
63733@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
63734 unsigned int i;
63735
63736 /* only scan the sections containing data */
63737- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
63738- (unsigned long)mod->module_core,
63739+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
63740+ (unsigned long)mod->module_core_rw,
63741 sizeof(struct module), GFP_KERNEL);
63742
63743 for (i = 1; i < hdr->e_shnum; i++) {
63744@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
63745 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
63746 continue;
63747
63748- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
63749- (unsigned long)mod->module_core,
63750+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
63751+ (unsigned long)mod->module_core_rw,
63752 sechdrs[i].sh_size, GFP_KERNEL);
63753 }
63754 }
63755@@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
63756 secstrings, &stroffs, strmap);
63757
63758 /* Do the allocs. */
63759- ptr = module_alloc_update_bounds(mod->core_size);
63760+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
63761 /*
63762 * The pointer to this block is stored in the module structure
63763 * which is inside the block. Just mark it as not being a
63764@@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
63765 err = -ENOMEM;
63766 goto free_percpu;
63767 }
63768- memset(ptr, 0, mod->core_size);
63769- mod->module_core = ptr;
63770+ memset(ptr, 0, mod->core_size_rw);
63771+ mod->module_core_rw = ptr;
63772
63773- ptr = module_alloc_update_bounds(mod->init_size);
63774+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
63775 /*
63776 * The pointer to this block is stored in the module structure
63777 * which is inside the block. This block doesn't need to be
63778 * scanned as it contains data and code that will be freed
63779 * after the module is initialized.
63780 */
63781- kmemleak_ignore(ptr);
63782- if (!ptr && mod->init_size) {
63783+ kmemleak_not_leak(ptr);
63784+ if (!ptr && mod->init_size_rw) {
63785+ err = -ENOMEM;
63786+ goto free_core_rw;
63787+ }
63788+ memset(ptr, 0, mod->init_size_rw);
63789+ mod->module_init_rw = ptr;
63790+
63791+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
63792+ kmemleak_not_leak(ptr);
63793+ if (!ptr) {
63794 err = -ENOMEM;
63795- goto free_core;
63796+ goto free_init_rw;
63797 }
63798- memset(ptr, 0, mod->init_size);
63799- mod->module_init = ptr;
63800+
63801+ pax_open_kernel();
63802+ memset(ptr, 0, mod->core_size_rx);
63803+ pax_close_kernel();
63804+ mod->module_core_rx = ptr;
63805+
63806+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
63807+ kmemleak_not_leak(ptr);
63808+ if (!ptr && mod->init_size_rx) {
63809+ err = -ENOMEM;
63810+ goto free_core_rx;
63811+ }
63812+
63813+ pax_open_kernel();
63814+ memset(ptr, 0, mod->init_size_rx);
63815+ pax_close_kernel();
63816+ mod->module_init_rx = ptr;
63817
63818 /* Transfer each section which specifies SHF_ALLOC */
63819 DEBUGP("final section addresses:\n");
63820@@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
63821 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
63822 continue;
63823
63824- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
63825- dest = mod->module_init
63826- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63827- else
63828- dest = mod->module_core + sechdrs[i].sh_entsize;
63829+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
63830+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63831+ dest = mod->module_init_rw
63832+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63833+ else
63834+ dest = mod->module_init_rx
63835+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63836+ } else {
63837+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63838+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
63839+ else
63840+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
63841+ }
63842+
63843+ if (sechdrs[i].sh_type != SHT_NOBITS) {
63844
63845- if (sechdrs[i].sh_type != SHT_NOBITS)
63846- memcpy(dest, (void *)sechdrs[i].sh_addr,
63847- sechdrs[i].sh_size);
63848+#ifdef CONFIG_PAX_KERNEXEC
63849+#ifdef CONFIG_X86_64
63850+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
63851+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
63852+#endif
63853+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
63854+ pax_open_kernel();
63855+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63856+ pax_close_kernel();
63857+ } else
63858+#endif
63859+
63860+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63861+ }
63862 /* Update sh_addr to point to copy in image. */
63863- sechdrs[i].sh_addr = (unsigned long)dest;
63864+
63865+#ifdef CONFIG_PAX_KERNEXEC
63866+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
63867+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
63868+ else
63869+#endif
63870+
63871+ sechdrs[i].sh_addr = (unsigned long)dest;
63872 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
63873 }
63874 /* Module has been moved. */
63875@@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
63876 mod->name);
63877 if (!mod->refptr) {
63878 err = -ENOMEM;
63879- goto free_init;
63880+ goto free_init_rx;
63881 }
63882 #endif
63883 /* Now we've moved module, initialize linked lists, etc. */
63884@@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
63885 /* Set up MODINFO_ATTR fields */
63886 setup_modinfo(mod, sechdrs, infoindex);
63887
63888+ mod->args = args;
63889+
63890+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63891+ {
63892+ char *p, *p2;
63893+
63894+ if (strstr(mod->args, "grsec_modharden_netdev")) {
63895+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
63896+ err = -EPERM;
63897+ goto cleanup;
63898+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
63899+ p += strlen("grsec_modharden_normal");
63900+ p2 = strstr(p, "_");
63901+ if (p2) {
63902+ *p2 = '\0';
63903+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
63904+ *p2 = '_';
63905+ }
63906+ err = -EPERM;
63907+ goto cleanup;
63908+ }
63909+ }
63910+#endif
63911+
63912+
63913 /* Fix up syms, so that st_value is a pointer to location. */
63914 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
63915 mod);
63916@@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
63917
63918 /* Now do relocations. */
63919 for (i = 1; i < hdr->e_shnum; i++) {
63920- const char *strtab = (char *)sechdrs[strindex].sh_addr;
63921 unsigned int info = sechdrs[i].sh_info;
63922+ strtab = (char *)sechdrs[strindex].sh_addr;
63923
63924 /* Not a valid relocation section? */
63925 if (info >= hdr->e_shnum)
63926@@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
63927 * Do it before processing of module parameters, so the module
63928 * can provide parameter accessor functions of its own.
63929 */
63930- if (mod->module_init)
63931- flush_icache_range((unsigned long)mod->module_init,
63932- (unsigned long)mod->module_init
63933- + mod->init_size);
63934- flush_icache_range((unsigned long)mod->module_core,
63935- (unsigned long)mod->module_core + mod->core_size);
63936+ if (mod->module_init_rx)
63937+ flush_icache_range((unsigned long)mod->module_init_rx,
63938+ (unsigned long)mod->module_init_rx
63939+ + mod->init_size_rx);
63940+ flush_icache_range((unsigned long)mod->module_core_rx,
63941+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
63942
63943 set_fs(old_fs);
63944
63945- mod->args = args;
63946 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
63947 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
63948 mod->name);
63949@@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
63950 free_unload:
63951 module_unload_free(mod);
63952 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
63953+ free_init_rx:
63954 percpu_modfree(mod->refptr);
63955- free_init:
63956 #endif
63957- module_free(mod, mod->module_init);
63958- free_core:
63959- module_free(mod, mod->module_core);
63960+ module_free_exec(mod, mod->module_init_rx);
63961+ free_core_rx:
63962+ module_free_exec(mod, mod->module_core_rx);
63963+ free_init_rw:
63964+ module_free(mod, mod->module_init_rw);
63965+ free_core_rw:
63966+ module_free(mod, mod->module_core_rw);
63967 /* mod will be freed with core. Don't access it beyond this line! */
63968 free_percpu:
63969 if (percpu)
63970@@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
63971 mod->symtab = mod->core_symtab;
63972 mod->strtab = mod->core_strtab;
63973 #endif
63974- module_free(mod, mod->module_init);
63975- mod->module_init = NULL;
63976- mod->init_size = 0;
63977- mod->init_text_size = 0;
63978+ module_free(mod, mod->module_init_rw);
63979+ module_free_exec(mod, mod->module_init_rx);
63980+ mod->module_init_rw = NULL;
63981+ mod->module_init_rx = NULL;
63982+ mod->init_size_rw = 0;
63983+ mod->init_size_rx = 0;
63984 mutex_unlock(&module_mutex);
63985
63986 return 0;
63987@@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
63988 unsigned long nextval;
63989
63990 /* At worse, next value is at end of module */
63991- if (within_module_init(addr, mod))
63992- nextval = (unsigned long)mod->module_init+mod->init_text_size;
63993+ if (within_module_init_rx(addr, mod))
63994+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
63995+ else if (within_module_init_rw(addr, mod))
63996+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
63997+ else if (within_module_core_rx(addr, mod))
63998+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
63999+ else if (within_module_core_rw(addr, mod))
64000+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64001 else
64002- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64003+ return NULL;
64004
64005 /* Scan for closest preceeding symbol, and next symbol. (ELF
64006 starts real symbols at 1). */
64007@@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
64008 char buf[8];
64009
64010 seq_printf(m, "%s %u",
64011- mod->name, mod->init_size + mod->core_size);
64012+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64013 print_unload_info(m, mod);
64014
64015 /* Informative for users. */
64016@@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
64017 mod->state == MODULE_STATE_COMING ? "Loading":
64018 "Live");
64019 /* Used by oprofile and other similar tools. */
64020- seq_printf(m, " 0x%p", mod->module_core);
64021+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64022
64023 /* Taints info */
64024 if (mod->taints)
64025@@ -2981,7 +3128,17 @@ static const struct file_operations proc
64026
64027 static int __init proc_modules_init(void)
64028 {
64029+#ifndef CONFIG_GRKERNSEC_HIDESYM
64030+#ifdef CONFIG_GRKERNSEC_PROC_USER
64031+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64032+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64033+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64034+#else
64035 proc_create("modules", 0, NULL, &proc_modules_operations);
64036+#endif
64037+#else
64038+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64039+#endif
64040 return 0;
64041 }
64042 module_init(proc_modules_init);
64043@@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
64044 {
64045 struct module *mod;
64046
64047- if (addr < module_addr_min || addr > module_addr_max)
64048+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64049+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64050 return NULL;
64051
64052 list_for_each_entry_rcu(mod, &modules, list)
64053- if (within_module_core(addr, mod)
64054- || within_module_init(addr, mod))
64055+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64056 return mod;
64057 return NULL;
64058 }
64059@@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
64060 */
64061 struct module *__module_text_address(unsigned long addr)
64062 {
64063- struct module *mod = __module_address(addr);
64064+ struct module *mod;
64065+
64066+#ifdef CONFIG_X86_32
64067+ addr = ktla_ktva(addr);
64068+#endif
64069+
64070+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64071+ return NULL;
64072+
64073+ mod = __module_address(addr);
64074+
64075 if (mod) {
64076 /* Make sure it's within the text section. */
64077- if (!within(addr, mod->module_init, mod->init_text_size)
64078- && !within(addr, mod->module_core, mod->core_text_size))
64079+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64080 mod = NULL;
64081 }
64082 return mod;
64083diff -urNp linux-2.6.32.45/kernel/mutex.c linux-2.6.32.45/kernel/mutex.c
64084--- linux-2.6.32.45/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
64085+++ linux-2.6.32.45/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
64086@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
64087 */
64088
64089 for (;;) {
64090- struct thread_info *owner;
64091+ struct task_struct *owner;
64092
64093 /*
64094 * If we own the BKL, then don't spin. The owner of
64095@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
64096 spin_lock_mutex(&lock->wait_lock, flags);
64097
64098 debug_mutex_lock_common(lock, &waiter);
64099- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64100+ debug_mutex_add_waiter(lock, &waiter, task);
64101
64102 /* add waiting tasks to the end of the waitqueue (FIFO): */
64103 list_add_tail(&waiter.list, &lock->wait_list);
64104@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
64105 * TASK_UNINTERRUPTIBLE case.)
64106 */
64107 if (unlikely(signal_pending_state(state, task))) {
64108- mutex_remove_waiter(lock, &waiter,
64109- task_thread_info(task));
64110+ mutex_remove_waiter(lock, &waiter, task);
64111 mutex_release(&lock->dep_map, 1, ip);
64112 spin_unlock_mutex(&lock->wait_lock, flags);
64113
64114@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
64115 done:
64116 lock_acquired(&lock->dep_map, ip);
64117 /* got the lock - rejoice! */
64118- mutex_remove_waiter(lock, &waiter, current_thread_info());
64119+ mutex_remove_waiter(lock, &waiter, task);
64120 mutex_set_owner(lock);
64121
64122 /* set it to 0 if there are no waiters left: */
64123diff -urNp linux-2.6.32.45/kernel/mutex-debug.c linux-2.6.32.45/kernel/mutex-debug.c
64124--- linux-2.6.32.45/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
64125+++ linux-2.6.32.45/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
64126@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64127 }
64128
64129 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64130- struct thread_info *ti)
64131+ struct task_struct *task)
64132 {
64133 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64134
64135 /* Mark the current thread as blocked on the lock: */
64136- ti->task->blocked_on = waiter;
64137+ task->blocked_on = waiter;
64138 }
64139
64140 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64141- struct thread_info *ti)
64142+ struct task_struct *task)
64143 {
64144 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64145- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64146- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64147- ti->task->blocked_on = NULL;
64148+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64149+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64150+ task->blocked_on = NULL;
64151
64152 list_del_init(&waiter->list);
64153 waiter->task = NULL;
64154@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
64155 return;
64156
64157 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
64158- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
64159+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
64160 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
64161 mutex_clear_owner(lock);
64162 }
64163diff -urNp linux-2.6.32.45/kernel/mutex-debug.h linux-2.6.32.45/kernel/mutex-debug.h
64164--- linux-2.6.32.45/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
64165+++ linux-2.6.32.45/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
64166@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
64167 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64168 extern void debug_mutex_add_waiter(struct mutex *lock,
64169 struct mutex_waiter *waiter,
64170- struct thread_info *ti);
64171+ struct task_struct *task);
64172 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64173- struct thread_info *ti);
64174+ struct task_struct *task);
64175 extern void debug_mutex_unlock(struct mutex *lock);
64176 extern void debug_mutex_init(struct mutex *lock, const char *name,
64177 struct lock_class_key *key);
64178
64179 static inline void mutex_set_owner(struct mutex *lock)
64180 {
64181- lock->owner = current_thread_info();
64182+ lock->owner = current;
64183 }
64184
64185 static inline void mutex_clear_owner(struct mutex *lock)
64186diff -urNp linux-2.6.32.45/kernel/mutex.h linux-2.6.32.45/kernel/mutex.h
64187--- linux-2.6.32.45/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
64188+++ linux-2.6.32.45/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
64189@@ -19,7 +19,7 @@
64190 #ifdef CONFIG_SMP
64191 static inline void mutex_set_owner(struct mutex *lock)
64192 {
64193- lock->owner = current_thread_info();
64194+ lock->owner = current;
64195 }
64196
64197 static inline void mutex_clear_owner(struct mutex *lock)
64198diff -urNp linux-2.6.32.45/kernel/panic.c linux-2.6.32.45/kernel/panic.c
64199--- linux-2.6.32.45/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
64200+++ linux-2.6.32.45/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
64201@@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
64202 const char *board;
64203
64204 printk(KERN_WARNING "------------[ cut here ]------------\n");
64205- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64206+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64207 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64208 if (board)
64209 printk(KERN_WARNING "Hardware name: %s\n", board);
64210@@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64211 */
64212 void __stack_chk_fail(void)
64213 {
64214- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64215+ dump_stack();
64216+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64217 __builtin_return_address(0));
64218 }
64219 EXPORT_SYMBOL(__stack_chk_fail);
64220diff -urNp linux-2.6.32.45/kernel/params.c linux-2.6.32.45/kernel/params.c
64221--- linux-2.6.32.45/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
64222+++ linux-2.6.32.45/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
64223@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
64224 return ret;
64225 }
64226
64227-static struct sysfs_ops module_sysfs_ops = {
64228+static const struct sysfs_ops module_sysfs_ops = {
64229 .show = module_attr_show,
64230 .store = module_attr_store,
64231 };
64232@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
64233 return 0;
64234 }
64235
64236-static struct kset_uevent_ops module_uevent_ops = {
64237+static const struct kset_uevent_ops module_uevent_ops = {
64238 .filter = uevent_filter,
64239 };
64240
64241diff -urNp linux-2.6.32.45/kernel/perf_event.c linux-2.6.32.45/kernel/perf_event.c
64242--- linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:35:30.000000000 -0400
64243+++ linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:34:01.000000000 -0400
64244@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
64245 */
64246 int sysctl_perf_event_sample_rate __read_mostly = 100000;
64247
64248-static atomic64_t perf_event_id;
64249+static atomic64_unchecked_t perf_event_id;
64250
64251 /*
64252 * Lock for (sysadmin-configurable) event reservations:
64253@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
64254 * In order to keep per-task stats reliable we need to flip the event
64255 * values when we flip the contexts.
64256 */
64257- value = atomic64_read(&next_event->count);
64258- value = atomic64_xchg(&event->count, value);
64259- atomic64_set(&next_event->count, value);
64260+ value = atomic64_read_unchecked(&next_event->count);
64261+ value = atomic64_xchg_unchecked(&event->count, value);
64262+ atomic64_set_unchecked(&next_event->count, value);
64263
64264 swap(event->total_time_enabled, next_event->total_time_enabled);
64265 swap(event->total_time_running, next_event->total_time_running);
64266@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
64267 update_event_times(event);
64268 }
64269
64270- return atomic64_read(&event->count);
64271+ return atomic64_read_unchecked(&event->count);
64272 }
64273
64274 /*
64275@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
64276 values[n++] = 1 + leader->nr_siblings;
64277 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64278 values[n++] = leader->total_time_enabled +
64279- atomic64_read(&leader->child_total_time_enabled);
64280+ atomic64_read_unchecked(&leader->child_total_time_enabled);
64281 }
64282 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64283 values[n++] = leader->total_time_running +
64284- atomic64_read(&leader->child_total_time_running);
64285+ atomic64_read_unchecked(&leader->child_total_time_running);
64286 }
64287
64288 size = n * sizeof(u64);
64289@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
64290 values[n++] = perf_event_read_value(event);
64291 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64292 values[n++] = event->total_time_enabled +
64293- atomic64_read(&event->child_total_time_enabled);
64294+ atomic64_read_unchecked(&event->child_total_time_enabled);
64295 }
64296 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64297 values[n++] = event->total_time_running +
64298- atomic64_read(&event->child_total_time_running);
64299+ atomic64_read_unchecked(&event->child_total_time_running);
64300 }
64301 if (read_format & PERF_FORMAT_ID)
64302 values[n++] = primary_event_id(event);
64303@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
64304 static void perf_event_reset(struct perf_event *event)
64305 {
64306 (void)perf_event_read(event);
64307- atomic64_set(&event->count, 0);
64308+ atomic64_set_unchecked(&event->count, 0);
64309 perf_event_update_userpage(event);
64310 }
64311
64312@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
64313 ++userpg->lock;
64314 barrier();
64315 userpg->index = perf_event_index(event);
64316- userpg->offset = atomic64_read(&event->count);
64317+ userpg->offset = atomic64_read_unchecked(&event->count);
64318 if (event->state == PERF_EVENT_STATE_ACTIVE)
64319- userpg->offset -= atomic64_read(&event->hw.prev_count);
64320+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
64321
64322 userpg->time_enabled = event->total_time_enabled +
64323- atomic64_read(&event->child_total_time_enabled);
64324+ atomic64_read_unchecked(&event->child_total_time_enabled);
64325
64326 userpg->time_running = event->total_time_running +
64327- atomic64_read(&event->child_total_time_running);
64328+ atomic64_read_unchecked(&event->child_total_time_running);
64329
64330 barrier();
64331 ++userpg->lock;
64332@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
64333 u64 values[4];
64334 int n = 0;
64335
64336- values[n++] = atomic64_read(&event->count);
64337+ values[n++] = atomic64_read_unchecked(&event->count);
64338 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64339 values[n++] = event->total_time_enabled +
64340- atomic64_read(&event->child_total_time_enabled);
64341+ atomic64_read_unchecked(&event->child_total_time_enabled);
64342 }
64343 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64344 values[n++] = event->total_time_running +
64345- atomic64_read(&event->child_total_time_running);
64346+ atomic64_read_unchecked(&event->child_total_time_running);
64347 }
64348 if (read_format & PERF_FORMAT_ID)
64349 values[n++] = primary_event_id(event);
64350@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
64351 if (leader != event)
64352 leader->pmu->read(leader);
64353
64354- values[n++] = atomic64_read(&leader->count);
64355+ values[n++] = atomic64_read_unchecked(&leader->count);
64356 if (read_format & PERF_FORMAT_ID)
64357 values[n++] = primary_event_id(leader);
64358
64359@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
64360 if (sub != event)
64361 sub->pmu->read(sub);
64362
64363- values[n++] = atomic64_read(&sub->count);
64364+ values[n++] = atomic64_read_unchecked(&sub->count);
64365 if (read_format & PERF_FORMAT_ID)
64366 values[n++] = primary_event_id(sub);
64367
64368@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf
64369 {
64370 struct hw_perf_event *hwc = &event->hw;
64371
64372- atomic64_add(nr, &event->count);
64373+ atomic64_add_unchecked(nr, &event->count);
64374
64375 if (!hwc->sample_period)
64376 return;
64377@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(
64378 u64 now;
64379
64380 now = cpu_clock(cpu);
64381- prev = atomic64_read(&event->hw.prev_count);
64382- atomic64_set(&event->hw.prev_count, now);
64383- atomic64_add(now - prev, &event->count);
64384+ prev = atomic64_read_unchecked(&event->hw.prev_count);
64385+ atomic64_set_unchecked(&event->hw.prev_count, now);
64386+ atomic64_add_unchecked(now - prev, &event->count);
64387 }
64388
64389 static int cpu_clock_perf_event_enable(struct perf_event *event)
64390@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(s
64391 struct hw_perf_event *hwc = &event->hw;
64392 int cpu = raw_smp_processor_id();
64393
64394- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
64395+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
64396 perf_swevent_start_hrtimer(event);
64397
64398 return 0;
64399@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update
64400 u64 prev;
64401 s64 delta;
64402
64403- prev = atomic64_xchg(&event->hw.prev_count, now);
64404+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
64405 delta = now - prev;
64406- atomic64_add(delta, &event->count);
64407+ atomic64_add_unchecked(delta, &event->count);
64408 }
64409
64410 static int task_clock_perf_event_enable(struct perf_event *event)
64411@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(
64412
64413 now = event->ctx->time;
64414
64415- atomic64_set(&hwc->prev_count, now);
64416+ atomic64_set_unchecked(&hwc->prev_count, now);
64417
64418 perf_swevent_start_hrtimer(event);
64419
64420@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr
64421 event->parent = parent_event;
64422
64423 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64424- event->id = atomic64_inc_return(&perf_event_id);
64425+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
64426
64427 event->state = PERF_EVENT_STATE_INACTIVE;
64428
64429@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf
64430 if (child_event->attr.inherit_stat)
64431 perf_event_read_event(child_event, child);
64432
64433- child_val = atomic64_read(&child_event->count);
64434+ child_val = atomic64_read_unchecked(&child_event->count);
64435
64436 /*
64437 * Add back the child's count to the parent's count:
64438 */
64439- atomic64_add(child_val, &parent_event->count);
64440- atomic64_add(child_event->total_time_enabled,
64441+ atomic64_add_unchecked(child_val, &parent_event->count);
64442+ atomic64_add_unchecked(child_event->total_time_enabled,
64443 &parent_event->child_total_time_enabled);
64444- atomic64_add(child_event->total_time_running,
64445+ atomic64_add_unchecked(child_event->total_time_running,
64446 &parent_event->child_total_time_running);
64447
64448 /*
64449diff -urNp linux-2.6.32.45/kernel/pid.c linux-2.6.32.45/kernel/pid.c
64450--- linux-2.6.32.45/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
64451+++ linux-2.6.32.45/kernel/pid.c 2011-08-21 19:11:29.000000000 -0400
64452@@ -33,6 +33,7 @@
64453 #include <linux/rculist.h>
64454 #include <linux/bootmem.h>
64455 #include <linux/hash.h>
64456+#include <linux/security.h>
64457 #include <linux/pid_namespace.h>
64458 #include <linux/init_task.h>
64459 #include <linux/syscalls.h>
64460@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64461
64462 int pid_max = PID_MAX_DEFAULT;
64463
64464-#define RESERVED_PIDS 300
64465+#define RESERVED_PIDS 500
64466
64467 int pid_max_min = RESERVED_PIDS + 1;
64468 int pid_max_max = PID_MAX_LIMIT;
64469@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
64470 */
64471 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64472 {
64473- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64474+ struct task_struct *task;
64475+
64476+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64477+
64478+ if (gr_pid_is_chrooted(task))
64479+ return NULL;
64480+
64481+ return task;
64482 }
64483
64484 struct task_struct *find_task_by_vpid(pid_t vnr)
64485@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pi
64486 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64487 }
64488
64489+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64490+{
64491+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64492+}
64493+
64494 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64495 {
64496 struct pid *pid;
64497diff -urNp linux-2.6.32.45/kernel/posix-cpu-timers.c linux-2.6.32.45/kernel/posix-cpu-timers.c
64498--- linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
64499+++ linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-08-06 09:33:44.000000000 -0400
64500@@ -6,6 +6,7 @@
64501 #include <linux/posix-timers.h>
64502 #include <linux/errno.h>
64503 #include <linux/math64.h>
64504+#include <linux/security.h>
64505 #include <asm/uaccess.h>
64506 #include <linux/kernel_stat.h>
64507 #include <trace/events/timer.h>
64508@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(st
64509
64510 static __init int init_posix_cpu_timers(void)
64511 {
64512- struct k_clock process = {
64513+ static struct k_clock process = {
64514 .clock_getres = process_cpu_clock_getres,
64515 .clock_get = process_cpu_clock_get,
64516 .clock_set = do_posix_clock_nosettime,
64517@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(
64518 .nsleep = process_cpu_nsleep,
64519 .nsleep_restart = process_cpu_nsleep_restart,
64520 };
64521- struct k_clock thread = {
64522+ static struct k_clock thread = {
64523 .clock_getres = thread_cpu_clock_getres,
64524 .clock_get = thread_cpu_clock_get,
64525 .clock_set = do_posix_clock_nosettime,
64526diff -urNp linux-2.6.32.45/kernel/posix-timers.c linux-2.6.32.45/kernel/posix-timers.c
64527--- linux-2.6.32.45/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
64528+++ linux-2.6.32.45/kernel/posix-timers.c 2011-08-23 20:22:38.000000000 -0400
64529@@ -42,6 +42,7 @@
64530 #include <linux/compiler.h>
64531 #include <linux/idr.h>
64532 #include <linux/posix-timers.h>
64533+#include <linux/grsecurity.h>
64534 #include <linux/syscalls.h>
64535 #include <linux/wait.h>
64536 #include <linux/workqueue.h>
64537@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
64538 * which we beg off on and pass to do_sys_settimeofday().
64539 */
64540
64541-static struct k_clock posix_clocks[MAX_CLOCKS];
64542+static struct k_clock *posix_clocks[MAX_CLOCKS];
64543
64544 /*
64545 * These ones are defined below.
64546@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k
64547 */
64548 #define CLOCK_DISPATCH(clock, call, arglist) \
64549 ((clock) < 0 ? posix_cpu_##call arglist : \
64550- (posix_clocks[clock].call != NULL \
64551- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
64552+ (posix_clocks[clock]->call != NULL \
64553+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
64554
64555 /*
64556 * Default clock hook functions when the struct k_clock passed
64557@@ -172,7 +173,7 @@ static inline int common_clock_getres(co
64558 struct timespec *tp)
64559 {
64560 tp->tv_sec = 0;
64561- tp->tv_nsec = posix_clocks[which_clock].res;
64562+ tp->tv_nsec = posix_clocks[which_clock]->res;
64563 return 0;
64564 }
64565
64566@@ -217,9 +218,11 @@ static inline int invalid_clockid(const
64567 return 0;
64568 if ((unsigned) which_clock >= MAX_CLOCKS)
64569 return 1;
64570- if (posix_clocks[which_clock].clock_getres != NULL)
64571+ if (posix_clocks[which_clock] == NULL)
64572 return 0;
64573- if (posix_clocks[which_clock].res != 0)
64574+ if (posix_clocks[which_clock]->clock_getres != NULL)
64575+ return 0;
64576+ if (posix_clocks[which_clock]->res != 0)
64577 return 0;
64578 return 1;
64579 }
64580@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t
64581 */
64582 static __init int init_posix_timers(void)
64583 {
64584- struct k_clock clock_realtime = {
64585+ static struct k_clock clock_realtime = {
64586 .clock_getres = hrtimer_get_res,
64587 };
64588- struct k_clock clock_monotonic = {
64589+ static struct k_clock clock_monotonic = {
64590 .clock_getres = hrtimer_get_res,
64591 .clock_get = posix_ktime_get_ts,
64592 .clock_set = do_posix_clock_nosettime,
64593 };
64594- struct k_clock clock_monotonic_raw = {
64595+ static struct k_clock clock_monotonic_raw = {
64596 .clock_getres = hrtimer_get_res,
64597 .clock_get = posix_get_monotonic_raw,
64598 .clock_set = do_posix_clock_nosettime,
64599 .timer_create = no_timer_create,
64600 .nsleep = no_nsleep,
64601 };
64602- struct k_clock clock_realtime_coarse = {
64603+ static struct k_clock clock_realtime_coarse = {
64604 .clock_getres = posix_get_coarse_res,
64605 .clock_get = posix_get_realtime_coarse,
64606 .clock_set = do_posix_clock_nosettime,
64607 .timer_create = no_timer_create,
64608 .nsleep = no_nsleep,
64609 };
64610- struct k_clock clock_monotonic_coarse = {
64611+ static struct k_clock clock_monotonic_coarse = {
64612 .clock_getres = posix_get_coarse_res,
64613 .clock_get = posix_get_monotonic_coarse,
64614 .clock_set = do_posix_clock_nosettime,
64615@@ -296,6 +299,8 @@ static __init int init_posix_timers(void
64616 .nsleep = no_nsleep,
64617 };
64618
64619+ pax_track_stack();
64620+
64621 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
64622 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
64623 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64624@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_
64625 return;
64626 }
64627
64628- posix_clocks[clock_id] = *new_clock;
64629+ posix_clocks[clock_id] = new_clock;
64630 }
64631 EXPORT_SYMBOL_GPL(register_posix_clock);
64632
64633@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64634 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64635 return -EFAULT;
64636
64637+ /* only the CLOCK_REALTIME clock can be set, all other clocks
64638+ have their clock_set fptr set to a nosettime dummy function
64639+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
64640+ call common_clock_set, which calls do_sys_settimeofday, which
64641+ we hook
64642+ */
64643+
64644 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
64645 }
64646
64647diff -urNp linux-2.6.32.45/kernel/power/hibernate.c linux-2.6.32.45/kernel/power/hibernate.c
64648--- linux-2.6.32.45/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
64649+++ linux-2.6.32.45/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
64650@@ -48,14 +48,14 @@ enum {
64651
64652 static int hibernation_mode = HIBERNATION_SHUTDOWN;
64653
64654-static struct platform_hibernation_ops *hibernation_ops;
64655+static const struct platform_hibernation_ops *hibernation_ops;
64656
64657 /**
64658 * hibernation_set_ops - set the global hibernate operations
64659 * @ops: the hibernation operations to use in subsequent hibernation transitions
64660 */
64661
64662-void hibernation_set_ops(struct platform_hibernation_ops *ops)
64663+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
64664 {
64665 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
64666 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
64667diff -urNp linux-2.6.32.45/kernel/power/poweroff.c linux-2.6.32.45/kernel/power/poweroff.c
64668--- linux-2.6.32.45/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
64669+++ linux-2.6.32.45/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
64670@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64671 .enable_mask = SYSRQ_ENABLE_BOOT,
64672 };
64673
64674-static int pm_sysrq_init(void)
64675+static int __init pm_sysrq_init(void)
64676 {
64677 register_sysrq_key('o', &sysrq_poweroff_op);
64678 return 0;
64679diff -urNp linux-2.6.32.45/kernel/power/process.c linux-2.6.32.45/kernel/power/process.c
64680--- linux-2.6.32.45/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
64681+++ linux-2.6.32.45/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
64682@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
64683 struct timeval start, end;
64684 u64 elapsed_csecs64;
64685 unsigned int elapsed_csecs;
64686+ bool timedout = false;
64687
64688 do_gettimeofday(&start);
64689
64690 end_time = jiffies + TIMEOUT;
64691 do {
64692 todo = 0;
64693+ if (time_after(jiffies, end_time))
64694+ timedout = true;
64695 read_lock(&tasklist_lock);
64696 do_each_thread(g, p) {
64697 if (frozen(p) || !freezeable(p))
64698@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
64699 * It is "frozen enough". If the task does wake
64700 * up, it will immediately call try_to_freeze.
64701 */
64702- if (!task_is_stopped_or_traced(p) &&
64703- !freezer_should_skip(p))
64704+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64705 todo++;
64706+ if (timedout) {
64707+ printk(KERN_ERR "Task refusing to freeze:\n");
64708+ sched_show_task(p);
64709+ }
64710+ }
64711 } while_each_thread(g, p);
64712 read_unlock(&tasklist_lock);
64713 yield(); /* Yield is okay here */
64714- if (time_after(jiffies, end_time))
64715- break;
64716- } while (todo);
64717+ } while (todo && !timedout);
64718
64719 do_gettimeofday(&end);
64720 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
64721diff -urNp linux-2.6.32.45/kernel/power/suspend.c linux-2.6.32.45/kernel/power/suspend.c
64722--- linux-2.6.32.45/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
64723+++ linux-2.6.32.45/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
64724@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
64725 [PM_SUSPEND_MEM] = "mem",
64726 };
64727
64728-static struct platform_suspend_ops *suspend_ops;
64729+static const struct platform_suspend_ops *suspend_ops;
64730
64731 /**
64732 * suspend_set_ops - Set the global suspend method table.
64733 * @ops: Pointer to ops structure.
64734 */
64735-void suspend_set_ops(struct platform_suspend_ops *ops)
64736+void suspend_set_ops(const struct platform_suspend_ops *ops)
64737 {
64738 mutex_lock(&pm_mutex);
64739 suspend_ops = ops;
64740diff -urNp linux-2.6.32.45/kernel/printk.c linux-2.6.32.45/kernel/printk.c
64741--- linux-2.6.32.45/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
64742+++ linux-2.6.32.45/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
64743@@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
64744 char c;
64745 int error = 0;
64746
64747+#ifdef CONFIG_GRKERNSEC_DMESG
64748+ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
64749+ return -EPERM;
64750+#endif
64751+
64752 error = security_syslog(type);
64753 if (error)
64754 return error;
64755diff -urNp linux-2.6.32.45/kernel/profile.c linux-2.6.32.45/kernel/profile.c
64756--- linux-2.6.32.45/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
64757+++ linux-2.6.32.45/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
64758@@ -39,7 +39,7 @@ struct profile_hit {
64759 /* Oprofile timer tick hook */
64760 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64761
64762-static atomic_t *prof_buffer;
64763+static atomic_unchecked_t *prof_buffer;
64764 static unsigned long prof_len, prof_shift;
64765
64766 int prof_on __read_mostly;
64767@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
64768 hits[i].pc = 0;
64769 continue;
64770 }
64771- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64772+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64773 hits[i].hits = hits[i].pc = 0;
64774 }
64775 }
64776@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
64777 * Add the current hit(s) and flush the write-queue out
64778 * to the global buffer:
64779 */
64780- atomic_add(nr_hits, &prof_buffer[pc]);
64781+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64782 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64783- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64784+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64785 hits[i].pc = hits[i].hits = 0;
64786 }
64787 out:
64788@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
64789 if (prof_on != type || !prof_buffer)
64790 return;
64791 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64792- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64793+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64794 }
64795 #endif /* !CONFIG_SMP */
64796 EXPORT_SYMBOL_GPL(profile_hits);
64797@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64798 return -EFAULT;
64799 buf++; p++; count--; read++;
64800 }
64801- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64802+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64803 if (copy_to_user(buf, (void *)pnt, count))
64804 return -EFAULT;
64805 read += count;
64806@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64807 }
64808 #endif
64809 profile_discard_flip_buffers();
64810- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64811+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64812 return count;
64813 }
64814
64815diff -urNp linux-2.6.32.45/kernel/ptrace.c linux-2.6.32.45/kernel/ptrace.c
64816--- linux-2.6.32.45/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
64817+++ linux-2.6.32.45/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
64818@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
64819 return ret;
64820 }
64821
64822-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64823+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64824+ unsigned int log)
64825 {
64826 const struct cred *cred = current_cred(), *tcred;
64827
64828@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
64829 cred->gid != tcred->egid ||
64830 cred->gid != tcred->sgid ||
64831 cred->gid != tcred->gid) &&
64832- !capable(CAP_SYS_PTRACE)) {
64833+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64834+ (log && !capable(CAP_SYS_PTRACE)))
64835+ ) {
64836 rcu_read_unlock();
64837 return -EPERM;
64838 }
64839@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
64840 smp_rmb();
64841 if (task->mm)
64842 dumpable = get_dumpable(task->mm);
64843- if (!dumpable && !capable(CAP_SYS_PTRACE))
64844+ if (!dumpable &&
64845+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64846+ (log && !capable(CAP_SYS_PTRACE))))
64847 return -EPERM;
64848
64849 return security_ptrace_access_check(task, mode);
64850@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
64851 {
64852 int err;
64853 task_lock(task);
64854- err = __ptrace_may_access(task, mode);
64855+ err = __ptrace_may_access(task, mode, 0);
64856+ task_unlock(task);
64857+ return !err;
64858+}
64859+
64860+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64861+{
64862+ int err;
64863+ task_lock(task);
64864+ err = __ptrace_may_access(task, mode, 1);
64865 task_unlock(task);
64866 return !err;
64867 }
64868@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
64869 goto out;
64870
64871 task_lock(task);
64872- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64873+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64874 task_unlock(task);
64875 if (retval)
64876 goto unlock_creds;
64877@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
64878 goto unlock_tasklist;
64879
64880 task->ptrace = PT_PTRACED;
64881- if (capable(CAP_SYS_PTRACE))
64882+ if (capable_nolog(CAP_SYS_PTRACE))
64883 task->ptrace |= PT_PTRACE_CAP;
64884
64885 __ptrace_link(task, current);
64886@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
64887 {
64888 int copied = 0;
64889
64890+ pax_track_stack();
64891+
64892 while (len > 0) {
64893 char buf[128];
64894 int this_len, retval;
64895@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
64896 {
64897 int copied = 0;
64898
64899+ pax_track_stack();
64900+
64901 while (len > 0) {
64902 char buf[128];
64903 int this_len, retval;
64904@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
64905 int ret = -EIO;
64906 siginfo_t siginfo;
64907
64908+ pax_track_stack();
64909+
64910 switch (request) {
64911 case PTRACE_PEEKTEXT:
64912 case PTRACE_PEEKDATA:
64913@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
64914 ret = ptrace_setoptions(child, data);
64915 break;
64916 case PTRACE_GETEVENTMSG:
64917- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
64918+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
64919 break;
64920
64921 case PTRACE_GETSIGINFO:
64922 ret = ptrace_getsiginfo(child, &siginfo);
64923 if (!ret)
64924- ret = copy_siginfo_to_user((siginfo_t __user *) data,
64925+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
64926 &siginfo);
64927 break;
64928
64929 case PTRACE_SETSIGINFO:
64930- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
64931+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
64932 sizeof siginfo))
64933 ret = -EFAULT;
64934 else
64935@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
64936 goto out;
64937 }
64938
64939+ if (gr_handle_ptrace(child, request)) {
64940+ ret = -EPERM;
64941+ goto out_put_task_struct;
64942+ }
64943+
64944 if (request == PTRACE_ATTACH) {
64945 ret = ptrace_attach(child);
64946 /*
64947 * Some architectures need to do book-keeping after
64948 * a ptrace attach.
64949 */
64950- if (!ret)
64951+ if (!ret) {
64952 arch_ptrace_attach(child);
64953+ gr_audit_ptrace(child);
64954+ }
64955 goto out_put_task_struct;
64956 }
64957
64958@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
64959 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
64960 if (copied != sizeof(tmp))
64961 return -EIO;
64962- return put_user(tmp, (unsigned long __user *)data);
64963+ return put_user(tmp, (__force unsigned long __user *)data);
64964 }
64965
64966 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
64967@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
64968 siginfo_t siginfo;
64969 int ret;
64970
64971+ pax_track_stack();
64972+
64973 switch (request) {
64974 case PTRACE_PEEKTEXT:
64975 case PTRACE_PEEKDATA:
64976@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
64977 goto out;
64978 }
64979
64980+ if (gr_handle_ptrace(child, request)) {
64981+ ret = -EPERM;
64982+ goto out_put_task_struct;
64983+ }
64984+
64985 if (request == PTRACE_ATTACH) {
64986 ret = ptrace_attach(child);
64987 /*
64988 * Some architectures need to do book-keeping after
64989 * a ptrace attach.
64990 */
64991- if (!ret)
64992+ if (!ret) {
64993 arch_ptrace_attach(child);
64994+ gr_audit_ptrace(child);
64995+ }
64996 goto out_put_task_struct;
64997 }
64998
64999diff -urNp linux-2.6.32.45/kernel/rcutorture.c linux-2.6.32.45/kernel/rcutorture.c
65000--- linux-2.6.32.45/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
65001+++ linux-2.6.32.45/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
65002@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
65003 { 0 };
65004 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65005 { 0 };
65006-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65007-static atomic_t n_rcu_torture_alloc;
65008-static atomic_t n_rcu_torture_alloc_fail;
65009-static atomic_t n_rcu_torture_free;
65010-static atomic_t n_rcu_torture_mberror;
65011-static atomic_t n_rcu_torture_error;
65012+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65013+static atomic_unchecked_t n_rcu_torture_alloc;
65014+static atomic_unchecked_t n_rcu_torture_alloc_fail;
65015+static atomic_unchecked_t n_rcu_torture_free;
65016+static atomic_unchecked_t n_rcu_torture_mberror;
65017+static atomic_unchecked_t n_rcu_torture_error;
65018 static long n_rcu_torture_timers;
65019 static struct list_head rcu_torture_removed;
65020 static cpumask_var_t shuffle_tmp_mask;
65021@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
65022
65023 spin_lock_bh(&rcu_torture_lock);
65024 if (list_empty(&rcu_torture_freelist)) {
65025- atomic_inc(&n_rcu_torture_alloc_fail);
65026+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65027 spin_unlock_bh(&rcu_torture_lock);
65028 return NULL;
65029 }
65030- atomic_inc(&n_rcu_torture_alloc);
65031+ atomic_inc_unchecked(&n_rcu_torture_alloc);
65032 p = rcu_torture_freelist.next;
65033 list_del_init(p);
65034 spin_unlock_bh(&rcu_torture_lock);
65035@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
65036 static void
65037 rcu_torture_free(struct rcu_torture *p)
65038 {
65039- atomic_inc(&n_rcu_torture_free);
65040+ atomic_inc_unchecked(&n_rcu_torture_free);
65041 spin_lock_bh(&rcu_torture_lock);
65042 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65043 spin_unlock_bh(&rcu_torture_lock);
65044@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
65045 i = rp->rtort_pipe_count;
65046 if (i > RCU_TORTURE_PIPE_LEN)
65047 i = RCU_TORTURE_PIPE_LEN;
65048- atomic_inc(&rcu_torture_wcount[i]);
65049+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65050 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65051 rp->rtort_mbtest = 0;
65052 rcu_torture_free(rp);
65053@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
65054 i = rp->rtort_pipe_count;
65055 if (i > RCU_TORTURE_PIPE_LEN)
65056 i = RCU_TORTURE_PIPE_LEN;
65057- atomic_inc(&rcu_torture_wcount[i]);
65058+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65059 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65060 rp->rtort_mbtest = 0;
65061 list_del(&rp->rtort_free);
65062@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
65063 i = old_rp->rtort_pipe_count;
65064 if (i > RCU_TORTURE_PIPE_LEN)
65065 i = RCU_TORTURE_PIPE_LEN;
65066- atomic_inc(&rcu_torture_wcount[i]);
65067+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65068 old_rp->rtort_pipe_count++;
65069 cur_ops->deferred_free(old_rp);
65070 }
65071@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
65072 return;
65073 }
65074 if (p->rtort_mbtest == 0)
65075- atomic_inc(&n_rcu_torture_mberror);
65076+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65077 spin_lock(&rand_lock);
65078 cur_ops->read_delay(&rand);
65079 n_rcu_torture_timers++;
65080@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
65081 continue;
65082 }
65083 if (p->rtort_mbtest == 0)
65084- atomic_inc(&n_rcu_torture_mberror);
65085+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65086 cur_ops->read_delay(&rand);
65087 preempt_disable();
65088 pipe_count = p->rtort_pipe_count;
65089@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
65090 rcu_torture_current,
65091 rcu_torture_current_version,
65092 list_empty(&rcu_torture_freelist),
65093- atomic_read(&n_rcu_torture_alloc),
65094- atomic_read(&n_rcu_torture_alloc_fail),
65095- atomic_read(&n_rcu_torture_free),
65096- atomic_read(&n_rcu_torture_mberror),
65097+ atomic_read_unchecked(&n_rcu_torture_alloc),
65098+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65099+ atomic_read_unchecked(&n_rcu_torture_free),
65100+ atomic_read_unchecked(&n_rcu_torture_mberror),
65101 n_rcu_torture_timers);
65102- if (atomic_read(&n_rcu_torture_mberror) != 0)
65103+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
65104 cnt += sprintf(&page[cnt], " !!!");
65105 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65106 if (i > 1) {
65107 cnt += sprintf(&page[cnt], "!!! ");
65108- atomic_inc(&n_rcu_torture_error);
65109+ atomic_inc_unchecked(&n_rcu_torture_error);
65110 WARN_ON_ONCE(1);
65111 }
65112 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65113@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
65114 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65115 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65116 cnt += sprintf(&page[cnt], " %d",
65117- atomic_read(&rcu_torture_wcount[i]));
65118+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65119 }
65120 cnt += sprintf(&page[cnt], "\n");
65121 if (cur_ops->stats)
65122@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
65123
65124 if (cur_ops->cleanup)
65125 cur_ops->cleanup();
65126- if (atomic_read(&n_rcu_torture_error))
65127+ if (atomic_read_unchecked(&n_rcu_torture_error))
65128 rcu_torture_print_module_parms("End of test: FAILURE");
65129 else
65130 rcu_torture_print_module_parms("End of test: SUCCESS");
65131@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
65132
65133 rcu_torture_current = NULL;
65134 rcu_torture_current_version = 0;
65135- atomic_set(&n_rcu_torture_alloc, 0);
65136- atomic_set(&n_rcu_torture_alloc_fail, 0);
65137- atomic_set(&n_rcu_torture_free, 0);
65138- atomic_set(&n_rcu_torture_mberror, 0);
65139- atomic_set(&n_rcu_torture_error, 0);
65140+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65141+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65142+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65143+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65144+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65145 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65146- atomic_set(&rcu_torture_wcount[i], 0);
65147+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65148 for_each_possible_cpu(cpu) {
65149 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65150 per_cpu(rcu_torture_count, cpu)[i] = 0;
65151diff -urNp linux-2.6.32.45/kernel/rcutree.c linux-2.6.32.45/kernel/rcutree.c
65152--- linux-2.6.32.45/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
65153+++ linux-2.6.32.45/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
65154@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
65155 /*
65156 * Do softirq processing for the current CPU.
65157 */
65158-static void rcu_process_callbacks(struct softirq_action *unused)
65159+static void rcu_process_callbacks(void)
65160 {
65161 /*
65162 * Memory references from any prior RCU read-side critical sections
65163diff -urNp linux-2.6.32.45/kernel/rcutree_plugin.h linux-2.6.32.45/kernel/rcutree_plugin.h
65164--- linux-2.6.32.45/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
65165+++ linux-2.6.32.45/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
65166@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
65167 */
65168 void __rcu_read_lock(void)
65169 {
65170- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
65171+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
65172 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
65173 }
65174 EXPORT_SYMBOL_GPL(__rcu_read_lock);
65175@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
65176 struct task_struct *t = current;
65177
65178 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
65179- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
65180+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
65181 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
65182 rcu_read_unlock_special(t);
65183 }
65184diff -urNp linux-2.6.32.45/kernel/relay.c linux-2.6.32.45/kernel/relay.c
65185--- linux-2.6.32.45/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
65186+++ linux-2.6.32.45/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
65187@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
65188 unsigned int flags,
65189 int *nonpad_ret)
65190 {
65191- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
65192+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
65193 struct rchan_buf *rbuf = in->private_data;
65194 unsigned int subbuf_size = rbuf->chan->subbuf_size;
65195 uint64_t pos = (uint64_t) *ppos;
65196@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
65197 .ops = &relay_pipe_buf_ops,
65198 .spd_release = relay_page_release,
65199 };
65200+ ssize_t ret;
65201+
65202+ pax_track_stack();
65203
65204 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65205 return 0;
65206diff -urNp linux-2.6.32.45/kernel/resource.c linux-2.6.32.45/kernel/resource.c
65207--- linux-2.6.32.45/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
65208+++ linux-2.6.32.45/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
65209@@ -132,8 +132,18 @@ static const struct file_operations proc
65210
65211 static int __init ioresources_init(void)
65212 {
65213+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65214+#ifdef CONFIG_GRKERNSEC_PROC_USER
65215+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65216+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65217+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65218+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65219+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65220+#endif
65221+#else
65222 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65223 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65224+#endif
65225 return 0;
65226 }
65227 __initcall(ioresources_init);
65228diff -urNp linux-2.6.32.45/kernel/rtmutex.c linux-2.6.32.45/kernel/rtmutex.c
65229--- linux-2.6.32.45/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
65230+++ linux-2.6.32.45/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
65231@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
65232 */
65233 spin_lock_irqsave(&pendowner->pi_lock, flags);
65234
65235- WARN_ON(!pendowner->pi_blocked_on);
65236+ BUG_ON(!pendowner->pi_blocked_on);
65237 WARN_ON(pendowner->pi_blocked_on != waiter);
65238 WARN_ON(pendowner->pi_blocked_on->lock != lock);
65239
65240diff -urNp linux-2.6.32.45/kernel/rtmutex-tester.c linux-2.6.32.45/kernel/rtmutex-tester.c
65241--- linux-2.6.32.45/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
65242+++ linux-2.6.32.45/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
65243@@ -21,7 +21,7 @@
65244 #define MAX_RT_TEST_MUTEXES 8
65245
65246 static spinlock_t rttest_lock;
65247-static atomic_t rttest_event;
65248+static atomic_unchecked_t rttest_event;
65249
65250 struct test_thread_data {
65251 int opcode;
65252@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
65253
65254 case RTTEST_LOCKCONT:
65255 td->mutexes[td->opdata] = 1;
65256- td->event = atomic_add_return(1, &rttest_event);
65257+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65258 return 0;
65259
65260 case RTTEST_RESET:
65261@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
65262 return 0;
65263
65264 case RTTEST_RESETEVENT:
65265- atomic_set(&rttest_event, 0);
65266+ atomic_set_unchecked(&rttest_event, 0);
65267 return 0;
65268
65269 default:
65270@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
65271 return ret;
65272
65273 td->mutexes[id] = 1;
65274- td->event = atomic_add_return(1, &rttest_event);
65275+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65276 rt_mutex_lock(&mutexes[id]);
65277- td->event = atomic_add_return(1, &rttest_event);
65278+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65279 td->mutexes[id] = 4;
65280 return 0;
65281
65282@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
65283 return ret;
65284
65285 td->mutexes[id] = 1;
65286- td->event = atomic_add_return(1, &rttest_event);
65287+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65288 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65289- td->event = atomic_add_return(1, &rttest_event);
65290+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65291 td->mutexes[id] = ret ? 0 : 4;
65292 return ret ? -EINTR : 0;
65293
65294@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
65295 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65296 return ret;
65297
65298- td->event = atomic_add_return(1, &rttest_event);
65299+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65300 rt_mutex_unlock(&mutexes[id]);
65301- td->event = atomic_add_return(1, &rttest_event);
65302+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65303 td->mutexes[id] = 0;
65304 return 0;
65305
65306@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
65307 break;
65308
65309 td->mutexes[dat] = 2;
65310- td->event = atomic_add_return(1, &rttest_event);
65311+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65312 break;
65313
65314 case RTTEST_LOCKBKL:
65315@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
65316 return;
65317
65318 td->mutexes[dat] = 3;
65319- td->event = atomic_add_return(1, &rttest_event);
65320+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65321 break;
65322
65323 case RTTEST_LOCKNOWAIT:
65324@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
65325 return;
65326
65327 td->mutexes[dat] = 1;
65328- td->event = atomic_add_return(1, &rttest_event);
65329+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65330 return;
65331
65332 case RTTEST_LOCKBKL:
65333diff -urNp linux-2.6.32.45/kernel/sched.c linux-2.6.32.45/kernel/sched.c
65334--- linux-2.6.32.45/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
65335+++ linux-2.6.32.45/kernel/sched.c 2011-08-21 19:29:25.000000000 -0400
65336@@ -2764,9 +2764,10 @@ void wake_up_new_task(struct task_struct
65337 {
65338 unsigned long flags;
65339 struct rq *rq;
65340- int cpu = get_cpu();
65341
65342 #ifdef CONFIG_SMP
65343+ int cpu = get_cpu();
65344+
65345 rq = task_rq_lock(p, &flags);
65346 p->state = TASK_WAKING;
65347
65348@@ -5043,7 +5044,7 @@ out:
65349 * In CONFIG_NO_HZ case, the idle load balance owner will do the
65350 * rebalancing for all the cpus for whom scheduler ticks are stopped.
65351 */
65352-static void run_rebalance_domains(struct softirq_action *h)
65353+static void run_rebalance_domains(void)
65354 {
65355 int this_cpu = smp_processor_id();
65356 struct rq *this_rq = cpu_rq(this_cpu);
65357@@ -5700,6 +5701,8 @@ asmlinkage void __sched schedule(void)
65358 struct rq *rq;
65359 int cpu;
65360
65361+ pax_track_stack();
65362+
65363 need_resched:
65364 preempt_disable();
65365 cpu = smp_processor_id();
65366@@ -5770,7 +5773,7 @@ EXPORT_SYMBOL(schedule);
65367 * Look out! "owner" is an entirely speculative pointer
65368 * access and not reliable.
65369 */
65370-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
65371+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
65372 {
65373 unsigned int cpu;
65374 struct rq *rq;
65375@@ -5784,10 +5787,10 @@ int mutex_spin_on_owner(struct mutex *lo
65376 * DEBUG_PAGEALLOC could have unmapped it if
65377 * the mutex owner just released it and exited.
65378 */
65379- if (probe_kernel_address(&owner->cpu, cpu))
65380+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
65381 return 0;
65382 #else
65383- cpu = owner->cpu;
65384+ cpu = task_thread_info(owner)->cpu;
65385 #endif
65386
65387 /*
65388@@ -5816,7 +5819,7 @@ int mutex_spin_on_owner(struct mutex *lo
65389 /*
65390 * Is that owner really running on that cpu?
65391 */
65392- if (task_thread_info(rq->curr) != owner || need_resched())
65393+ if (rq->curr != owner || need_resched())
65394 return 0;
65395
65396 cpu_relax();
65397@@ -6359,6 +6362,8 @@ int can_nice(const struct task_struct *p
65398 /* convert nice value [19,-20] to rlimit style value [1,40] */
65399 int nice_rlim = 20 - nice;
65400
65401+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65402+
65403 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
65404 capable(CAP_SYS_NICE));
65405 }
65406@@ -6392,7 +6397,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65407 if (nice > 19)
65408 nice = 19;
65409
65410- if (increment < 0 && !can_nice(current, nice))
65411+ if (increment < 0 && (!can_nice(current, nice) ||
65412+ gr_handle_chroot_nice()))
65413 return -EPERM;
65414
65415 retval = security_task_setnice(current, nice);
65416@@ -8774,7 +8780,7 @@ static void init_sched_groups_power(int
65417 long power;
65418 int weight;
65419
65420- WARN_ON(!sd || !sd->groups);
65421+ BUG_ON(!sd || !sd->groups);
65422
65423 if (cpu != group_first_cpu(sd->groups))
65424 return;
65425diff -urNp linux-2.6.32.45/kernel/signal.c linux-2.6.32.45/kernel/signal.c
65426--- linux-2.6.32.45/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
65427+++ linux-2.6.32.45/kernel/signal.c 2011-08-16 21:15:58.000000000 -0400
65428@@ -41,12 +41,12 @@
65429
65430 static struct kmem_cache *sigqueue_cachep;
65431
65432-static void __user *sig_handler(struct task_struct *t, int sig)
65433+static __sighandler_t sig_handler(struct task_struct *t, int sig)
65434 {
65435 return t->sighand->action[sig - 1].sa.sa_handler;
65436 }
65437
65438-static int sig_handler_ignored(void __user *handler, int sig)
65439+static int sig_handler_ignored(__sighandler_t handler, int sig)
65440 {
65441 /* Is it explicitly or implicitly ignored? */
65442 return handler == SIG_IGN ||
65443@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
65444 static int sig_task_ignored(struct task_struct *t, int sig,
65445 int from_ancestor_ns)
65446 {
65447- void __user *handler;
65448+ __sighandler_t handler;
65449
65450 handler = sig_handler(t, sig);
65451
65452@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
65453 */
65454 user = get_uid(__task_cred(t)->user);
65455 atomic_inc(&user->sigpending);
65456+
65457+ if (!override_rlimit)
65458+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65459 if (override_rlimit ||
65460 atomic_read(&user->sigpending) <=
65461 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
65462@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
65463
65464 int unhandled_signal(struct task_struct *tsk, int sig)
65465 {
65466- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65467+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65468 if (is_global_init(tsk))
65469 return 1;
65470 if (handler != SIG_IGN && handler != SIG_DFL)
65471@@ -627,6 +630,13 @@ static int check_kill_permission(int sig
65472 }
65473 }
65474
65475+ /* allow glibc communication via tgkill to other threads in our
65476+ thread group */
65477+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65478+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65479+ && gr_handle_signal(t, sig))
65480+ return -EPERM;
65481+
65482 return security_task_kill(t, info, sig, 0);
65483 }
65484
65485@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct si
65486 return send_signal(sig, info, p, 1);
65487 }
65488
65489-static int
65490+int
65491 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65492 {
65493 return send_signal(sig, info, t, 0);
65494@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *
65495 unsigned long int flags;
65496 int ret, blocked, ignored;
65497 struct k_sigaction *action;
65498+ int is_unhandled = 0;
65499
65500 spin_lock_irqsave(&t->sighand->siglock, flags);
65501 action = &t->sighand->action[sig-1];
65502@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *
65503 }
65504 if (action->sa.sa_handler == SIG_DFL)
65505 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65506+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65507+ is_unhandled = 1;
65508 ret = specific_send_sig_info(sig, info, t);
65509 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65510
65511+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
65512+ normal operation */
65513+ if (is_unhandled) {
65514+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65515+ gr_handle_crash(t, sig);
65516+ }
65517+
65518 return ret;
65519 }
65520
65521@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct
65522 {
65523 int ret = check_kill_permission(sig, info, p);
65524
65525- if (!ret && sig)
65526+ if (!ret && sig) {
65527 ret = do_send_sig_info(sig, info, p, true);
65528+ if (!ret)
65529+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65530+ }
65531
65532 return ret;
65533 }
65534@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
65535 {
65536 siginfo_t info;
65537
65538+ pax_track_stack();
65539+
65540 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65541
65542 memset(&info, 0, sizeof info);
65543@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65544 int error = -ESRCH;
65545
65546 rcu_read_lock();
65547- p = find_task_by_vpid(pid);
65548+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65549+ /* allow glibc communication via tgkill to other threads in our
65550+ thread group */
65551+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65552+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
65553+ p = find_task_by_vpid_unrestricted(pid);
65554+ else
65555+#endif
65556+ p = find_task_by_vpid(pid);
65557 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65558 error = check_kill_permission(sig, info, p);
65559 /*
65560diff -urNp linux-2.6.32.45/kernel/smp.c linux-2.6.32.45/kernel/smp.c
65561--- linux-2.6.32.45/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
65562+++ linux-2.6.32.45/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
65563@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
65564 }
65565 EXPORT_SYMBOL(smp_call_function);
65566
65567-void ipi_call_lock(void)
65568+void ipi_call_lock(void) __acquires(call_function.lock)
65569 {
65570 spin_lock(&call_function.lock);
65571 }
65572
65573-void ipi_call_unlock(void)
65574+void ipi_call_unlock(void) __releases(call_function.lock)
65575 {
65576 spin_unlock(&call_function.lock);
65577 }
65578
65579-void ipi_call_lock_irq(void)
65580+void ipi_call_lock_irq(void) __acquires(call_function.lock)
65581 {
65582 spin_lock_irq(&call_function.lock);
65583 }
65584
65585-void ipi_call_unlock_irq(void)
65586+void ipi_call_unlock_irq(void) __releases(call_function.lock)
65587 {
65588 spin_unlock_irq(&call_function.lock);
65589 }
65590diff -urNp linux-2.6.32.45/kernel/softirq.c linux-2.6.32.45/kernel/softirq.c
65591--- linux-2.6.32.45/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
65592+++ linux-2.6.32.45/kernel/softirq.c 2011-08-05 20:33:55.000000000 -0400
65593@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65594
65595 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65596
65597-char *softirq_to_name[NR_SOFTIRQS] = {
65598+const char * const softirq_to_name[NR_SOFTIRQS] = {
65599 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65600 "TASKLET", "SCHED", "HRTIMER", "RCU"
65601 };
65602@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
65603
65604 asmlinkage void __do_softirq(void)
65605 {
65606- struct softirq_action *h;
65607+ const struct softirq_action *h;
65608 __u32 pending;
65609 int max_restart = MAX_SOFTIRQ_RESTART;
65610 int cpu;
65611@@ -233,7 +233,7 @@ restart:
65612 kstat_incr_softirqs_this_cpu(h - softirq_vec);
65613
65614 trace_softirq_entry(h, softirq_vec);
65615- h->action(h);
65616+ h->action();
65617 trace_softirq_exit(h, softirq_vec);
65618 if (unlikely(prev_count != preempt_count())) {
65619 printk(KERN_ERR "huh, entered softirq %td %s %p"
65620@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
65621 local_irq_restore(flags);
65622 }
65623
65624-void open_softirq(int nr, void (*action)(struct softirq_action *))
65625+void open_softirq(int nr, void (*action)(void))
65626 {
65627- softirq_vec[nr].action = action;
65628+ pax_open_kernel();
65629+ *(void **)&softirq_vec[nr].action = action;
65630+ pax_close_kernel();
65631 }
65632
65633 /*
65634@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct
65635
65636 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65637
65638-static void tasklet_action(struct softirq_action *a)
65639+static void tasklet_action(void)
65640 {
65641 struct tasklet_struct *list;
65642
65643@@ -454,7 +456,7 @@ static void tasklet_action(struct softir
65644 }
65645 }
65646
65647-static void tasklet_hi_action(struct softirq_action *a)
65648+static void tasklet_hi_action(void)
65649 {
65650 struct tasklet_struct *list;
65651
65652diff -urNp linux-2.6.32.45/kernel/sys.c linux-2.6.32.45/kernel/sys.c
65653--- linux-2.6.32.45/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
65654+++ linux-2.6.32.45/kernel/sys.c 2011-08-11 19:51:54.000000000 -0400
65655@@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
65656 error = -EACCES;
65657 goto out;
65658 }
65659+
65660+ if (gr_handle_chroot_setpriority(p, niceval)) {
65661+ error = -EACCES;
65662+ goto out;
65663+ }
65664+
65665 no_nice = security_task_setnice(p, niceval);
65666 if (no_nice) {
65667 error = no_nice;
65668@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
65669 !(user = find_user(who)))
65670 goto out_unlock; /* No processes for this user */
65671
65672- do_each_thread(g, p)
65673+ do_each_thread(g, p) {
65674 if (__task_cred(p)->uid == who)
65675 error = set_one_prio(p, niceval, error);
65676- while_each_thread(g, p);
65677+ } while_each_thread(g, p);
65678 if (who != cred->uid)
65679 free_uid(user); /* For find_user() */
65680 break;
65681@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
65682 !(user = find_user(who)))
65683 goto out_unlock; /* No processes for this user */
65684
65685- do_each_thread(g, p)
65686+ do_each_thread(g, p) {
65687 if (__task_cred(p)->uid == who) {
65688 niceval = 20 - task_nice(p);
65689 if (niceval > retval)
65690 retval = niceval;
65691 }
65692- while_each_thread(g, p);
65693+ } while_each_thread(g, p);
65694 if (who != cred->uid)
65695 free_uid(user); /* for find_user() */
65696 break;
65697@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65698 goto error;
65699 }
65700
65701+ if (gr_check_group_change(new->gid, new->egid, -1))
65702+ goto error;
65703+
65704 if (rgid != (gid_t) -1 ||
65705 (egid != (gid_t) -1 && egid != old->gid))
65706 new->sgid = new->egid;
65707@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65708 goto error;
65709
65710 retval = -EPERM;
65711+
65712+ if (gr_check_group_change(gid, gid, gid))
65713+ goto error;
65714+
65715 if (capable(CAP_SETGID))
65716 new->gid = new->egid = new->sgid = new->fsgid = gid;
65717 else if (gid == old->gid || gid == old->sgid)
65718@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
65719 if (!new_user)
65720 return -EAGAIN;
65721
65722+ /*
65723+ * We don't fail in case of NPROC limit excess here because too many
65724+ * poorly written programs don't check set*uid() return code, assuming
65725+ * it never fails if called by root. We may still enforce NPROC limit
65726+ * for programs doing set*uid()+execve() by harmlessly deferring the
65727+ * failure to the execve() stage.
65728+ */
65729 if (atomic_read(&new_user->processes) >=
65730 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
65731- new_user != INIT_USER) {
65732- free_uid(new_user);
65733- return -EAGAIN;
65734- }
65735+ new_user != INIT_USER)
65736+ current->flags |= PF_NPROC_EXCEEDED;
65737+ else
65738+ current->flags &= ~PF_NPROC_EXCEEDED;
65739
65740 free_uid(new->user);
65741 new->user = new_user;
65742@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65743 goto error;
65744 }
65745
65746+ if (gr_check_user_change(new->uid, new->euid, -1))
65747+ goto error;
65748+
65749 if (new->uid != old->uid) {
65750 retval = set_user(new);
65751 if (retval < 0)
65752@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65753 goto error;
65754
65755 retval = -EPERM;
65756+
65757+ if (gr_check_crash_uid(uid))
65758+ goto error;
65759+ if (gr_check_user_change(uid, uid, uid))
65760+ goto error;
65761+
65762 if (capable(CAP_SETUID)) {
65763 new->suid = new->uid = uid;
65764 if (uid != old->uid) {
65765@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65766 goto error;
65767 }
65768
65769+ if (gr_check_user_change(ruid, euid, -1))
65770+ goto error;
65771+
65772 if (ruid != (uid_t) -1) {
65773 new->uid = ruid;
65774 if (ruid != old->uid) {
65775@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65776 goto error;
65777 }
65778
65779+ if (gr_check_group_change(rgid, egid, -1))
65780+ goto error;
65781+
65782 if (rgid != (gid_t) -1)
65783 new->gid = rgid;
65784 if (egid != (gid_t) -1)
65785@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65786 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
65787 goto error;
65788
65789+ if (gr_check_user_change(-1, -1, uid))
65790+ goto error;
65791+
65792 if (uid == old->uid || uid == old->euid ||
65793 uid == old->suid || uid == old->fsuid ||
65794 capable(CAP_SETUID)) {
65795@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65796 if (gid == old->gid || gid == old->egid ||
65797 gid == old->sgid || gid == old->fsgid ||
65798 capable(CAP_SETGID)) {
65799+ if (gr_check_group_change(-1, -1, gid))
65800+ goto error;
65801+
65802 if (gid != old_fsgid) {
65803 new->fsgid = gid;
65804 goto change_okay;
65805@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65806 error = get_dumpable(me->mm);
65807 break;
65808 case PR_SET_DUMPABLE:
65809- if (arg2 < 0 || arg2 > 1) {
65810+ if (arg2 > 1) {
65811 error = -EINVAL;
65812 break;
65813 }
65814diff -urNp linux-2.6.32.45/kernel/sysctl.c linux-2.6.32.45/kernel/sysctl.c
65815--- linux-2.6.32.45/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
65816+++ linux-2.6.32.45/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
65817@@ -63,6 +63,13 @@
65818 static int deprecated_sysctl_warning(struct __sysctl_args *args);
65819
65820 #if defined(CONFIG_SYSCTL)
65821+#include <linux/grsecurity.h>
65822+#include <linux/grinternal.h>
65823+
65824+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65825+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65826+ const int op);
65827+extern int gr_handle_chroot_sysctl(const int op);
65828
65829 /* External variables not in a header file. */
65830 extern int C_A_D;
65831@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
65832 static int proc_taint(struct ctl_table *table, int write,
65833 void __user *buffer, size_t *lenp, loff_t *ppos);
65834 #endif
65835+extern ctl_table grsecurity_table[];
65836
65837 static struct ctl_table root_table[];
65838 static struct ctl_table_root sysctl_table_root;
65839@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
65840 int sysctl_legacy_va_layout;
65841 #endif
65842
65843+#ifdef CONFIG_PAX_SOFTMODE
65844+static ctl_table pax_table[] = {
65845+ {
65846+ .ctl_name = CTL_UNNUMBERED,
65847+ .procname = "softmode",
65848+ .data = &pax_softmode,
65849+ .maxlen = sizeof(unsigned int),
65850+ .mode = 0600,
65851+ .proc_handler = &proc_dointvec,
65852+ },
65853+
65854+ { .ctl_name = 0 }
65855+};
65856+#endif
65857+
65858 extern int prove_locking;
65859 extern int lock_stat;
65860
65861@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
65862 #endif
65863
65864 static struct ctl_table kern_table[] = {
65865+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65866+ {
65867+ .ctl_name = CTL_UNNUMBERED,
65868+ .procname = "grsecurity",
65869+ .mode = 0500,
65870+ .child = grsecurity_table,
65871+ },
65872+#endif
65873+
65874+#ifdef CONFIG_PAX_SOFTMODE
65875+ {
65876+ .ctl_name = CTL_UNNUMBERED,
65877+ .procname = "pax",
65878+ .mode = 0500,
65879+ .child = pax_table,
65880+ },
65881+#endif
65882+
65883 {
65884 .ctl_name = CTL_UNNUMBERED,
65885 .procname = "sched_child_runs_first",
65886@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
65887 .data = &modprobe_path,
65888 .maxlen = KMOD_PATH_LEN,
65889 .mode = 0644,
65890- .proc_handler = &proc_dostring,
65891- .strategy = &sysctl_string,
65892+ .proc_handler = &proc_dostring_modpriv,
65893+ .strategy = &sysctl_string_modpriv,
65894 },
65895 {
65896 .ctl_name = CTL_UNNUMBERED,
65897@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
65898 .mode = 0644,
65899 .proc_handler = &proc_dointvec
65900 },
65901+ {
65902+ .procname = "heap_stack_gap",
65903+ .data = &sysctl_heap_stack_gap,
65904+ .maxlen = sizeof(sysctl_heap_stack_gap),
65905+ .mode = 0644,
65906+ .proc_handler = proc_doulongvec_minmax,
65907+ },
65908 #else
65909 {
65910 .ctl_name = CTL_UNNUMBERED,
65911@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
65912 return 0;
65913 }
65914
65915+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
65916+
65917 static int parse_table(int __user *name, int nlen,
65918 void __user *oldval, size_t __user *oldlenp,
65919 void __user *newval, size_t newlen,
65920@@ -1821,7 +1871,7 @@ repeat:
65921 if (n == table->ctl_name) {
65922 int error;
65923 if (table->child) {
65924- if (sysctl_perm(root, table, MAY_EXEC))
65925+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
65926 return -EPERM;
65927 name++;
65928 nlen--;
65929@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
65930 int error;
65931 int mode;
65932
65933+ if (table->parent != NULL && table->parent->procname != NULL &&
65934+ table->procname != NULL &&
65935+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
65936+ return -EACCES;
65937+ if (gr_handle_chroot_sysctl(op))
65938+ return -EACCES;
65939+ error = gr_handle_sysctl(table, op);
65940+ if (error)
65941+ return error;
65942+
65943+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
65944+ if (error)
65945+ return error;
65946+
65947+ if (root->permissions)
65948+ mode = root->permissions(root, current->nsproxy, table);
65949+ else
65950+ mode = table->mode;
65951+
65952+ return test_perm(mode, op);
65953+}
65954+
65955+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
65956+{
65957+ int error;
65958+ int mode;
65959+
65960 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
65961 if (error)
65962 return error;
65963@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
65964 buffer, lenp, ppos);
65965 }
65966
65967+int proc_dostring_modpriv(struct ctl_table *table, int write,
65968+ void __user *buffer, size_t *lenp, loff_t *ppos)
65969+{
65970+ if (write && !capable(CAP_SYS_MODULE))
65971+ return -EPERM;
65972+
65973+ return _proc_do_string(table->data, table->maxlen, write,
65974+ buffer, lenp, ppos);
65975+}
65976+
65977
65978 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
65979 int *valp,
65980@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
65981 vleft = table->maxlen / sizeof(unsigned long);
65982 left = *lenp;
65983
65984- for (; left && vleft--; i++, min++, max++, first=0) {
65985+ for (; left && vleft--; i++, first=0) {
65986 if (write) {
65987 while (left) {
65988 char c;
65989@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
65990 return -ENOSYS;
65991 }
65992
65993+int proc_dostring_modpriv(struct ctl_table *table, int write,
65994+ void __user *buffer, size_t *lenp, loff_t *ppos)
65995+{
65996+ return -ENOSYS;
65997+}
65998+
65999 int proc_dointvec(struct ctl_table *table, int write,
66000 void __user *buffer, size_t *lenp, loff_t *ppos)
66001 {
66002@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
66003 return 1;
66004 }
66005
66006+int sysctl_string_modpriv(struct ctl_table *table,
66007+ void __user *oldval, size_t __user *oldlenp,
66008+ void __user *newval, size_t newlen)
66009+{
66010+ if (newval && newlen && !capable(CAP_SYS_MODULE))
66011+ return -EPERM;
66012+
66013+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
66014+}
66015+
66016 /*
66017 * This function makes sure that all of the integers in the vector
66018 * are between the minimum and maximum values given in the arrays
66019@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
66020 return -ENOSYS;
66021 }
66022
66023+int sysctl_string_modpriv(struct ctl_table *table,
66024+ void __user *oldval, size_t __user *oldlenp,
66025+ void __user *newval, size_t newlen)
66026+{
66027+ return -ENOSYS;
66028+}
66029+
66030 int sysctl_intvec(struct ctl_table *table,
66031 void __user *oldval, size_t __user *oldlenp,
66032 void __user *newval, size_t newlen)
66033@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66034 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66035 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66036 EXPORT_SYMBOL(proc_dostring);
66037+EXPORT_SYMBOL(proc_dostring_modpriv);
66038 EXPORT_SYMBOL(proc_doulongvec_minmax);
66039 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66040 EXPORT_SYMBOL(register_sysctl_table);
66041@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
66042 EXPORT_SYMBOL(sysctl_jiffies);
66043 EXPORT_SYMBOL(sysctl_ms_jiffies);
66044 EXPORT_SYMBOL(sysctl_string);
66045+EXPORT_SYMBOL(sysctl_string_modpriv);
66046 EXPORT_SYMBOL(sysctl_data);
66047 EXPORT_SYMBOL(unregister_sysctl_table);
66048diff -urNp linux-2.6.32.45/kernel/sysctl_check.c linux-2.6.32.45/kernel/sysctl_check.c
66049--- linux-2.6.32.45/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
66050+++ linux-2.6.32.45/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
66051@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
66052 } else {
66053 if ((table->strategy == sysctl_data) ||
66054 (table->strategy == sysctl_string) ||
66055+ (table->strategy == sysctl_string_modpriv) ||
66056 (table->strategy == sysctl_intvec) ||
66057 (table->strategy == sysctl_jiffies) ||
66058 (table->strategy == sysctl_ms_jiffies) ||
66059 (table->proc_handler == proc_dostring) ||
66060+ (table->proc_handler == proc_dostring_modpriv) ||
66061 (table->proc_handler == proc_dointvec) ||
66062 (table->proc_handler == proc_dointvec_minmax) ||
66063 (table->proc_handler == proc_dointvec_jiffies) ||
66064diff -urNp linux-2.6.32.45/kernel/taskstats.c linux-2.6.32.45/kernel/taskstats.c
66065--- linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
66066+++ linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
66067@@ -26,9 +26,12 @@
66068 #include <linux/cgroup.h>
66069 #include <linux/fs.h>
66070 #include <linux/file.h>
66071+#include <linux/grsecurity.h>
66072 #include <net/genetlink.h>
66073 #include <asm/atomic.h>
66074
66075+extern int gr_is_taskstats_denied(int pid);
66076+
66077 /*
66078 * Maximum length of a cpumask that can be specified in
66079 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66080@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
66081 size_t size;
66082 cpumask_var_t mask;
66083
66084+ if (gr_is_taskstats_denied(current->pid))
66085+ return -EACCES;
66086+
66087 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
66088 return -ENOMEM;
66089
66090diff -urNp linux-2.6.32.45/kernel/time/tick-broadcast.c linux-2.6.32.45/kernel/time/tick-broadcast.c
66091--- linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
66092+++ linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
66093@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
66094 * then clear the broadcast bit.
66095 */
66096 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66097- int cpu = smp_processor_id();
66098+ cpu = smp_processor_id();
66099
66100 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66101 tick_broadcast_clear_oneshot(cpu);
66102diff -urNp linux-2.6.32.45/kernel/time/timekeeping.c linux-2.6.32.45/kernel/time/timekeeping.c
66103--- linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
66104+++ linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
66105@@ -14,6 +14,7 @@
66106 #include <linux/init.h>
66107 #include <linux/mm.h>
66108 #include <linux/sched.h>
66109+#include <linux/grsecurity.h>
66110 #include <linux/sysdev.h>
66111 #include <linux/clocksource.h>
66112 #include <linux/jiffies.h>
66113@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
66114 */
66115 struct timespec ts = xtime;
66116 timespec_add_ns(&ts, nsec);
66117- ACCESS_ONCE(xtime_cache) = ts;
66118+ ACCESS_ONCE_RW(xtime_cache) = ts;
66119 }
66120
66121 /* must hold xtime_lock */
66122@@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
66123 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66124 return -EINVAL;
66125
66126+ gr_log_timechange();
66127+
66128 write_seqlock_irqsave(&xtime_lock, flags);
66129
66130 timekeeping_forward_now();
66131diff -urNp linux-2.6.32.45/kernel/time/timer_list.c linux-2.6.32.45/kernel/time/timer_list.c
66132--- linux-2.6.32.45/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
66133+++ linux-2.6.32.45/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
66134@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66135
66136 static void print_name_offset(struct seq_file *m, void *sym)
66137 {
66138+#ifdef CONFIG_GRKERNSEC_HIDESYM
66139+ SEQ_printf(m, "<%p>", NULL);
66140+#else
66141 char symname[KSYM_NAME_LEN];
66142
66143 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66144 SEQ_printf(m, "<%p>", sym);
66145 else
66146 SEQ_printf(m, "%s", symname);
66147+#endif
66148 }
66149
66150 static void
66151@@ -112,7 +116,11 @@ next_one:
66152 static void
66153 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66154 {
66155+#ifdef CONFIG_GRKERNSEC_HIDESYM
66156+ SEQ_printf(m, " .base: %p\n", NULL);
66157+#else
66158 SEQ_printf(m, " .base: %p\n", base);
66159+#endif
66160 SEQ_printf(m, " .index: %d\n",
66161 base->index);
66162 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66163@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
66164 {
66165 struct proc_dir_entry *pe;
66166
66167+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66168+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66169+#else
66170 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66171+#endif
66172 if (!pe)
66173 return -ENOMEM;
66174 return 0;
66175diff -urNp linux-2.6.32.45/kernel/time/timer_stats.c linux-2.6.32.45/kernel/time/timer_stats.c
66176--- linux-2.6.32.45/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
66177+++ linux-2.6.32.45/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
66178@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66179 static unsigned long nr_entries;
66180 static struct entry entries[MAX_ENTRIES];
66181
66182-static atomic_t overflow_count;
66183+static atomic_unchecked_t overflow_count;
66184
66185 /*
66186 * The entries are in a hash-table, for fast lookup:
66187@@ -140,7 +140,7 @@ static void reset_entries(void)
66188 nr_entries = 0;
66189 memset(entries, 0, sizeof(entries));
66190 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66191- atomic_set(&overflow_count, 0);
66192+ atomic_set_unchecked(&overflow_count, 0);
66193 }
66194
66195 static struct entry *alloc_entry(void)
66196@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66197 if (likely(entry))
66198 entry->count++;
66199 else
66200- atomic_inc(&overflow_count);
66201+ atomic_inc_unchecked(&overflow_count);
66202
66203 out_unlock:
66204 spin_unlock_irqrestore(lock, flags);
66205@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66206
66207 static void print_name_offset(struct seq_file *m, unsigned long addr)
66208 {
66209+#ifdef CONFIG_GRKERNSEC_HIDESYM
66210+ seq_printf(m, "<%p>", NULL);
66211+#else
66212 char symname[KSYM_NAME_LEN];
66213
66214 if (lookup_symbol_name(addr, symname) < 0)
66215 seq_printf(m, "<%p>", (void *)addr);
66216 else
66217 seq_printf(m, "%s", symname);
66218+#endif
66219 }
66220
66221 static int tstats_show(struct seq_file *m, void *v)
66222@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66223
66224 seq_puts(m, "Timer Stats Version: v0.2\n");
66225 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66226- if (atomic_read(&overflow_count))
66227+ if (atomic_read_unchecked(&overflow_count))
66228 seq_printf(m, "Overflow: %d entries\n",
66229- atomic_read(&overflow_count));
66230+ atomic_read_unchecked(&overflow_count));
66231
66232 for (i = 0; i < nr_entries; i++) {
66233 entry = entries + i;
66234@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
66235 {
66236 struct proc_dir_entry *pe;
66237
66238+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66239+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66240+#else
66241 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66242+#endif
66243 if (!pe)
66244 return -ENOMEM;
66245 return 0;
66246diff -urNp linux-2.6.32.45/kernel/time.c linux-2.6.32.45/kernel/time.c
66247--- linux-2.6.32.45/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
66248+++ linux-2.6.32.45/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
66249@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
66250 return error;
66251
66252 if (tz) {
66253+ /* we log in do_settimeofday called below, so don't log twice
66254+ */
66255+ if (!tv)
66256+ gr_log_timechange();
66257+
66258 /* SMP safe, global irq locking makes it work. */
66259 sys_tz = *tz;
66260 update_vsyscall_tz();
66261@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
66262 * Avoid unnecessary multiplications/divisions in the
66263 * two most common HZ cases:
66264 */
66265-unsigned int inline jiffies_to_msecs(const unsigned long j)
66266+inline unsigned int jiffies_to_msecs(const unsigned long j)
66267 {
66268 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
66269 return (MSEC_PER_SEC / HZ) * j;
66270@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
66271 }
66272 EXPORT_SYMBOL(jiffies_to_msecs);
66273
66274-unsigned int inline jiffies_to_usecs(const unsigned long j)
66275+inline unsigned int jiffies_to_usecs(const unsigned long j)
66276 {
66277 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
66278 return (USEC_PER_SEC / HZ) * j;
66279diff -urNp linux-2.6.32.45/kernel/timer.c linux-2.6.32.45/kernel/timer.c
66280--- linux-2.6.32.45/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
66281+++ linux-2.6.32.45/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
66282@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
66283 /*
66284 * This function runs timers and the timer-tq in bottom half context.
66285 */
66286-static void run_timer_softirq(struct softirq_action *h)
66287+static void run_timer_softirq(void)
66288 {
66289 struct tvec_base *base = __get_cpu_var(tvec_bases);
66290
66291diff -urNp linux-2.6.32.45/kernel/trace/blktrace.c linux-2.6.32.45/kernel/trace/blktrace.c
66292--- linux-2.6.32.45/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
66293+++ linux-2.6.32.45/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
66294@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
66295 struct blk_trace *bt = filp->private_data;
66296 char buf[16];
66297
66298- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66299+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66300
66301 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66302 }
66303@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
66304 return 1;
66305
66306 bt = buf->chan->private_data;
66307- atomic_inc(&bt->dropped);
66308+ atomic_inc_unchecked(&bt->dropped);
66309 return 0;
66310 }
66311
66312@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
66313
66314 bt->dir = dir;
66315 bt->dev = dev;
66316- atomic_set(&bt->dropped, 0);
66317+ atomic_set_unchecked(&bt->dropped, 0);
66318
66319 ret = -EIO;
66320 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66321diff -urNp linux-2.6.32.45/kernel/trace/ftrace.c linux-2.6.32.45/kernel/trace/ftrace.c
66322--- linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
66323+++ linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
66324@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
66325
66326 ip = rec->ip;
66327
66328+ ret = ftrace_arch_code_modify_prepare();
66329+ FTRACE_WARN_ON(ret);
66330+ if (ret)
66331+ return 0;
66332+
66333 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66334+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66335 if (ret) {
66336 ftrace_bug(ret, ip);
66337 rec->flags |= FTRACE_FL_FAILED;
66338- return 0;
66339 }
66340- return 1;
66341+ return ret ? 0 : 1;
66342 }
66343
66344 /*
66345diff -urNp linux-2.6.32.45/kernel/trace/ring_buffer.c linux-2.6.32.45/kernel/trace/ring_buffer.c
66346--- linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
66347+++ linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
66348@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
66349 * the reader page). But if the next page is a header page,
66350 * its flags will be non zero.
66351 */
66352-static int inline
66353+static inline int
66354 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
66355 struct buffer_page *page, struct list_head *list)
66356 {
66357diff -urNp linux-2.6.32.45/kernel/trace/trace.c linux-2.6.32.45/kernel/trace/trace.c
66358--- linux-2.6.32.45/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
66359+++ linux-2.6.32.45/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
66360@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
66361 size_t rem;
66362 unsigned int i;
66363
66364+ pax_track_stack();
66365+
66366 /* copy the tracer to avoid using a global lock all around */
66367 mutex_lock(&trace_types_lock);
66368 if (unlikely(old_tracer != current_trace && current_trace)) {
66369@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
66370 int entries, size, i;
66371 size_t ret;
66372
66373+ pax_track_stack();
66374+
66375 if (*ppos & (PAGE_SIZE - 1)) {
66376 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
66377 return -EINVAL;
66378@@ -3816,10 +3820,9 @@ static const struct file_operations trac
66379 };
66380 #endif
66381
66382-static struct dentry *d_tracer;
66383-
66384 struct dentry *tracing_init_dentry(void)
66385 {
66386+ static struct dentry *d_tracer;
66387 static int once;
66388
66389 if (d_tracer)
66390@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
66391 return d_tracer;
66392 }
66393
66394-static struct dentry *d_percpu;
66395-
66396 struct dentry *tracing_dentry_percpu(void)
66397 {
66398+ static struct dentry *d_percpu;
66399 static int once;
66400 struct dentry *d_tracer;
66401
66402diff -urNp linux-2.6.32.45/kernel/trace/trace_events.c linux-2.6.32.45/kernel/trace/trace_events.c
66403--- linux-2.6.32.45/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
66404+++ linux-2.6.32.45/kernel/trace/trace_events.c 2011-08-05 20:33:55.000000000 -0400
66405@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list
66406 * Modules must own their file_operations to keep up with
66407 * reference counting.
66408 */
66409+
66410 struct ftrace_module_file_ops {
66411 struct list_head list;
66412 struct module *mod;
66413- struct file_operations id;
66414- struct file_operations enable;
66415- struct file_operations format;
66416- struct file_operations filter;
66417 };
66418
66419 static void remove_subsystem_dir(const char *name)
66420@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod
66421
66422 file_ops->mod = mod;
66423
66424- file_ops->id = ftrace_event_id_fops;
66425- file_ops->id.owner = mod;
66426-
66427- file_ops->enable = ftrace_enable_fops;
66428- file_ops->enable.owner = mod;
66429-
66430- file_ops->filter = ftrace_event_filter_fops;
66431- file_ops->filter.owner = mod;
66432-
66433- file_ops->format = ftrace_event_format_fops;
66434- file_ops->format.owner = mod;
66435+ pax_open_kernel();
66436+ *(void **)&mod->trace_id.owner = mod;
66437+ *(void **)&mod->trace_enable.owner = mod;
66438+ *(void **)&mod->trace_filter.owner = mod;
66439+ *(void **)&mod->trace_format.owner = mod;
66440+ pax_close_kernel();
66441
66442 list_add(&file_ops->list, &ftrace_module_file_list);
66443
66444@@ -1063,8 +1055,8 @@ static void trace_module_add_events(stru
66445 call->mod = mod;
66446 list_add(&call->list, &ftrace_events);
66447 event_create_dir(call, d_events,
66448- &file_ops->id, &file_ops->enable,
66449- &file_ops->filter, &file_ops->format);
66450+ &mod->trace_id, &mod->trace_enable,
66451+ &mod->trace_filter, &mod->trace_format);
66452 }
66453 }
66454
66455diff -urNp linux-2.6.32.45/kernel/trace/trace_mmiotrace.c linux-2.6.32.45/kernel/trace/trace_mmiotrace.c
66456--- linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
66457+++ linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
66458@@ -23,7 +23,7 @@ struct header_iter {
66459 static struct trace_array *mmio_trace_array;
66460 static bool overrun_detected;
66461 static unsigned long prev_overruns;
66462-static atomic_t dropped_count;
66463+static atomic_unchecked_t dropped_count;
66464
66465 static void mmio_reset_data(struct trace_array *tr)
66466 {
66467@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
66468
66469 static unsigned long count_overruns(struct trace_iterator *iter)
66470 {
66471- unsigned long cnt = atomic_xchg(&dropped_count, 0);
66472+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66473 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66474
66475 if (over > prev_overruns)
66476@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
66477 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66478 sizeof(*entry), 0, pc);
66479 if (!event) {
66480- atomic_inc(&dropped_count);
66481+ atomic_inc_unchecked(&dropped_count);
66482 return;
66483 }
66484 entry = ring_buffer_event_data(event);
66485@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
66486 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66487 sizeof(*entry), 0, pc);
66488 if (!event) {
66489- atomic_inc(&dropped_count);
66490+ atomic_inc_unchecked(&dropped_count);
66491 return;
66492 }
66493 entry = ring_buffer_event_data(event);
66494diff -urNp linux-2.6.32.45/kernel/trace/trace_output.c linux-2.6.32.45/kernel/trace/trace_output.c
66495--- linux-2.6.32.45/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
66496+++ linux-2.6.32.45/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
66497@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
66498 return 0;
66499 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66500 if (!IS_ERR(p)) {
66501- p = mangle_path(s->buffer + s->len, p, "\n");
66502+ p = mangle_path(s->buffer + s->len, p, "\n\\");
66503 if (p) {
66504 s->len = p - s->buffer;
66505 return 1;
66506diff -urNp linux-2.6.32.45/kernel/trace/trace_stack.c linux-2.6.32.45/kernel/trace/trace_stack.c
66507--- linux-2.6.32.45/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
66508+++ linux-2.6.32.45/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
66509@@ -50,7 +50,7 @@ static inline void check_stack(void)
66510 return;
66511
66512 /* we do not handle interrupt stacks yet */
66513- if (!object_is_on_stack(&this_size))
66514+ if (!object_starts_on_stack(&this_size))
66515 return;
66516
66517 local_irq_save(flags);
66518diff -urNp linux-2.6.32.45/kernel/trace/trace_workqueue.c linux-2.6.32.45/kernel/trace/trace_workqueue.c
66519--- linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
66520+++ linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
66521@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
66522 int cpu;
66523 pid_t pid;
66524 /* Can be inserted from interrupt or user context, need to be atomic */
66525- atomic_t inserted;
66526+ atomic_unchecked_t inserted;
66527 /*
66528 * Don't need to be atomic, works are serialized in a single workqueue thread
66529 * on a single CPU.
66530@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
66531 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66532 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66533 if (node->pid == wq_thread->pid) {
66534- atomic_inc(&node->inserted);
66535+ atomic_inc_unchecked(&node->inserted);
66536 goto found;
66537 }
66538 }
66539@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
66540 tsk = get_pid_task(pid, PIDTYPE_PID);
66541 if (tsk) {
66542 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66543- atomic_read(&cws->inserted), cws->executed,
66544+ atomic_read_unchecked(&cws->inserted), cws->executed,
66545 tsk->comm);
66546 put_task_struct(tsk);
66547 }
66548diff -urNp linux-2.6.32.45/kernel/user.c linux-2.6.32.45/kernel/user.c
66549--- linux-2.6.32.45/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
66550+++ linux-2.6.32.45/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
66551@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
66552 spin_lock_irq(&uidhash_lock);
66553 up = uid_hash_find(uid, hashent);
66554 if (up) {
66555+ put_user_ns(ns);
66556 key_put(new->uid_keyring);
66557 key_put(new->session_keyring);
66558 kmem_cache_free(uid_cachep, new);
66559diff -urNp linux-2.6.32.45/lib/bug.c linux-2.6.32.45/lib/bug.c
66560--- linux-2.6.32.45/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
66561+++ linux-2.6.32.45/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
66562@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
66563 return BUG_TRAP_TYPE_NONE;
66564
66565 bug = find_bug(bugaddr);
66566+ if (!bug)
66567+ return BUG_TRAP_TYPE_NONE;
66568
66569 printk(KERN_EMERG "------------[ cut here ]------------\n");
66570
66571diff -urNp linux-2.6.32.45/lib/debugobjects.c linux-2.6.32.45/lib/debugobjects.c
66572--- linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
66573+++ linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
66574@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
66575 if (limit > 4)
66576 return;
66577
66578- is_on_stack = object_is_on_stack(addr);
66579+ is_on_stack = object_starts_on_stack(addr);
66580 if (is_on_stack == onstack)
66581 return;
66582
66583diff -urNp linux-2.6.32.45/lib/dma-debug.c linux-2.6.32.45/lib/dma-debug.c
66584--- linux-2.6.32.45/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
66585+++ linux-2.6.32.45/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
66586@@ -861,7 +861,7 @@ out:
66587
66588 static void check_for_stack(struct device *dev, void *addr)
66589 {
66590- if (object_is_on_stack(addr))
66591+ if (object_starts_on_stack(addr))
66592 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66593 "stack [addr=%p]\n", addr);
66594 }
66595diff -urNp linux-2.6.32.45/lib/idr.c linux-2.6.32.45/lib/idr.c
66596--- linux-2.6.32.45/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
66597+++ linux-2.6.32.45/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
66598@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
66599 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
66600
66601 /* if already at the top layer, we need to grow */
66602- if (id >= 1 << (idp->layers * IDR_BITS)) {
66603+ if (id >= (1 << (idp->layers * IDR_BITS))) {
66604 *starting_id = id;
66605 return IDR_NEED_TO_GROW;
66606 }
66607diff -urNp linux-2.6.32.45/lib/inflate.c linux-2.6.32.45/lib/inflate.c
66608--- linux-2.6.32.45/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
66609+++ linux-2.6.32.45/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
66610@@ -266,7 +266,7 @@ static void free(void *where)
66611 malloc_ptr = free_mem_ptr;
66612 }
66613 #else
66614-#define malloc(a) kmalloc(a, GFP_KERNEL)
66615+#define malloc(a) kmalloc((a), GFP_KERNEL)
66616 #define free(a) kfree(a)
66617 #endif
66618
66619diff -urNp linux-2.6.32.45/lib/Kconfig.debug linux-2.6.32.45/lib/Kconfig.debug
66620--- linux-2.6.32.45/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
66621+++ linux-2.6.32.45/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
66622@@ -905,7 +905,7 @@ config LATENCYTOP
66623 select STACKTRACE
66624 select SCHEDSTATS
66625 select SCHED_DEBUG
66626- depends on HAVE_LATENCYTOP_SUPPORT
66627+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
66628 help
66629 Enable this option if you want to use the LatencyTOP tool
66630 to find out which userspace is blocking on what kernel operations.
66631diff -urNp linux-2.6.32.45/lib/kobject.c linux-2.6.32.45/lib/kobject.c
66632--- linux-2.6.32.45/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
66633+++ linux-2.6.32.45/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
66634@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
66635 return ret;
66636 }
66637
66638-struct sysfs_ops kobj_sysfs_ops = {
66639+const struct sysfs_ops kobj_sysfs_ops = {
66640 .show = kobj_attr_show,
66641 .store = kobj_attr_store,
66642 };
66643@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
66644 * If the kset was not able to be created, NULL will be returned.
66645 */
66646 static struct kset *kset_create(const char *name,
66647- struct kset_uevent_ops *uevent_ops,
66648+ const struct kset_uevent_ops *uevent_ops,
66649 struct kobject *parent_kobj)
66650 {
66651 struct kset *kset;
66652@@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
66653 * If the kset was not able to be created, NULL will be returned.
66654 */
66655 struct kset *kset_create_and_add(const char *name,
66656- struct kset_uevent_ops *uevent_ops,
66657+ const struct kset_uevent_ops *uevent_ops,
66658 struct kobject *parent_kobj)
66659 {
66660 struct kset *kset;
66661diff -urNp linux-2.6.32.45/lib/kobject_uevent.c linux-2.6.32.45/lib/kobject_uevent.c
66662--- linux-2.6.32.45/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
66663+++ linux-2.6.32.45/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
66664@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
66665 const char *subsystem;
66666 struct kobject *top_kobj;
66667 struct kset *kset;
66668- struct kset_uevent_ops *uevent_ops;
66669+ const struct kset_uevent_ops *uevent_ops;
66670 u64 seq;
66671 int i = 0;
66672 int retval = 0;
66673diff -urNp linux-2.6.32.45/lib/kref.c linux-2.6.32.45/lib/kref.c
66674--- linux-2.6.32.45/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
66675+++ linux-2.6.32.45/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
66676@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
66677 */
66678 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66679 {
66680- WARN_ON(release == NULL);
66681+ BUG_ON(release == NULL);
66682 WARN_ON(release == (void (*)(struct kref *))kfree);
66683
66684 if (atomic_dec_and_test(&kref->refcount)) {
66685diff -urNp linux-2.6.32.45/lib/parser.c linux-2.6.32.45/lib/parser.c
66686--- linux-2.6.32.45/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
66687+++ linux-2.6.32.45/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
66688@@ -126,7 +126,7 @@ static int match_number(substring_t *s,
66689 char *buf;
66690 int ret;
66691
66692- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
66693+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
66694 if (!buf)
66695 return -ENOMEM;
66696 memcpy(buf, s->from, s->to - s->from);
66697diff -urNp linux-2.6.32.45/lib/radix-tree.c linux-2.6.32.45/lib/radix-tree.c
66698--- linux-2.6.32.45/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
66699+++ linux-2.6.32.45/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
66700@@ -81,7 +81,7 @@ struct radix_tree_preload {
66701 int nr;
66702 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66703 };
66704-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66705+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66706
66707 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
66708 {
66709diff -urNp linux-2.6.32.45/lib/random32.c linux-2.6.32.45/lib/random32.c
66710--- linux-2.6.32.45/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
66711+++ linux-2.6.32.45/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
66712@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
66713 */
66714 static inline u32 __seed(u32 x, u32 m)
66715 {
66716- return (x < m) ? x + m : x;
66717+ return (x <= m) ? x + m + 1 : x;
66718 }
66719
66720 /**
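
The __seed() change above matters because the Tausworthe generator in lib/random32.c needs each of its state words to stay strictly above a small per-word minimum m; the old clamp could return exactly m (a raw seed of 0 became m), while the new form always lands above it. A sketch of the arithmetic (illustrative only, not part of the patch):

#include <linux/types.h>

static inline u32 seed_clamp_new(u32 x, u32 m)
{
	return (x <= m) ? x + m + 1 : x;	/* result is always > m */
}

/*
 * For the smallest state word, m == 1, with a raw seed x == 0:
 *   old clamp: (0 < 1)  -> 0 + 1     == 1  (equal to m, below the required minimum)
 *   new clamp: (0 <= 1) -> 0 + 1 + 1 == 2  (strictly greater than m, always valid)
 */
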
66721diff -urNp linux-2.6.32.45/lib/vsprintf.c linux-2.6.32.45/lib/vsprintf.c
66722--- linux-2.6.32.45/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
66723+++ linux-2.6.32.45/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
66724@@ -16,6 +16,9 @@
66725 * - scnprintf and vscnprintf
66726 */
66727
66728+#ifdef CONFIG_GRKERNSEC_HIDESYM
66729+#define __INCLUDED_BY_HIDESYM 1
66730+#endif
66731 #include <stdarg.h>
66732 #include <linux/module.h>
66733 #include <linux/types.h>
66734@@ -546,12 +549,12 @@ static char *number(char *buf, char *end
66735 return buf;
66736 }
66737
66738-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
66739+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
66740 {
66741 int len, i;
66742
66743 if ((unsigned long)s < PAGE_SIZE)
66744- s = "<NULL>";
66745+ s = "(null)";
66746
66747 len = strnlen(s, spec.precision);
66748
66749@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
66750 unsigned long value = (unsigned long) ptr;
66751 #ifdef CONFIG_KALLSYMS
66752 char sym[KSYM_SYMBOL_LEN];
66753- if (ext != 'f' && ext != 's')
66754+ if (ext != 'f' && ext != 's' && ext != 'a')
66755 sprint_symbol(sym, value);
66756 else
66757 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66758@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
66759 * - 'f' For simple symbolic function names without offset
66760 * - 'S' For symbolic direct pointers with offset
66761 * - 's' For symbolic direct pointers without offset
66762+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66763+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66764 * - 'R' For a struct resource pointer, it prints the range of
66765 * addresses (not the name nor the flags)
66766 * - 'M' For a 6-byte MAC address, it prints the address in the
66767@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
66768 struct printf_spec spec)
66769 {
66770 if (!ptr)
66771- return string(buf, end, "(null)", spec);
66772+ return string(buf, end, "(nil)", spec);
66773
66774 switch (*fmt) {
66775 case 'F':
66776@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
66777 case 's':
66778 /* Fallthrough */
66779 case 'S':
66780+#ifdef CONFIG_GRKERNSEC_HIDESYM
66781+ break;
66782+#else
66783+ return symbol_string(buf, end, ptr, spec, *fmt);
66784+#endif
66785+ case 'a':
66786+ /* Fallthrough */
66787+ case 'A':
66788 return symbol_string(buf, end, ptr, spec, *fmt);
66789 case 'R':
66790 return resource_string(buf, end, ptr, spec);
66791@@ -1445,7 +1458,7 @@ do { \
66792 size_t len;
66793 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
66794 || (unsigned long)save_str < PAGE_SIZE)
66795- save_str = "<NULL>";
66796+ save_str = "(null)";
66797 len = strlen(save_str);
66798 if (str + len + 1 < end)
66799 memcpy(str, save_str, len + 1);
66800@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
66801 typeof(type) value; \
66802 if (sizeof(type) == 8) { \
66803 args = PTR_ALIGN(args, sizeof(u32)); \
66804- *(u32 *)&value = *(u32 *)args; \
66805- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66806+ *(u32 *)&value = *(const u32 *)args; \
66807+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66808 } else { \
66809 args = PTR_ALIGN(args, sizeof(type)); \
66810- value = *(typeof(type) *)args; \
66811+ value = *(const typeof(type) *)args; \
66812 } \
66813 args += sizeof(type); \
66814 value; \
66815@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
66816 const char *str_arg = args;
66817 size_t len = strlen(str_arg);
66818 args += len + 1;
66819- str = string(str, end, (char *)str_arg, spec);
66820+ str = string(str, end, str_arg, spec);
66821 break;
66822 }
66823
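
The lib/vsprintf.c hunks above add '%pA'/'%pa' as variants of '%pS'/'%ps' that stay approved for symbol resolution under GRKERNSEC_HIDESYM, while plain '%pS'/'%ps' fall through to the default pointer output once HIDESYM is enabled (the mm/kmemleak.c hunk further down switches its backtrace printing to '%pA' for exactly this reason). A sketch of a call site (illustrative only, not part of the patch; report_callback is a hypothetical name):

#include <linux/kernel.h>

static void report_callback(const void *callback)
{
	/* resolved symbol+offset normally; default pointer output under HIDESYM */
	printk(KERN_INFO "callback at %pS\n", callback);

	/* resolved symbol+offset even with CONFIG_GRKERNSEC_HIDESYM enabled */
	printk(KERN_INFO "callback at %pA\n", callback);
}
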
66824diff -urNp linux-2.6.32.45/localversion-grsec linux-2.6.32.45/localversion-grsec
66825--- linux-2.6.32.45/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66826+++ linux-2.6.32.45/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
66827@@ -0,0 +1 @@
66828+-grsec
66829diff -urNp linux-2.6.32.45/Makefile linux-2.6.32.45/Makefile
66830--- linux-2.6.32.45/Makefile 2011-08-16 20:37:25.000000000 -0400
66831+++ linux-2.6.32.45/Makefile 2011-08-24 18:35:52.000000000 -0400
66832@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66833
66834 HOSTCC = gcc
66835 HOSTCXX = g++
66836-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66837-HOSTCXXFLAGS = -O2
66838+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66839+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66840+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
66841
66842 # Decide whether to build built-in, modular, or both.
66843 # Normally, just do built-in.
66844@@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
66845 KBUILD_CPPFLAGS := -D__KERNEL__
66846
66847 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
66848+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
66849 -fno-strict-aliasing -fno-common \
66850 -Werror-implicit-function-declaration \
66851 -Wno-format-security \
66852 -fno-delete-null-pointer-checks
66853+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
66854 KBUILD_AFLAGS := -D__ASSEMBLY__
66855
66856 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
66857@@ -376,9 +379,10 @@ export RCS_TAR_IGNORE := --exclude SCCS
66858 # Rules shared between *config targets and build targets
66859
66860 # Basic helpers built in scripts/
66861-PHONY += scripts_basic
66862-scripts_basic:
66863+PHONY += scripts_basic0 scripts_basic gcc-plugins
66864+scripts_basic0:
66865 $(Q)$(MAKE) $(build)=scripts/basic
66866+scripts_basic: scripts_basic0 gcc-plugins
66867
66868 # To avoid any implicit rule to kick in, define an empty command.
66869 scripts/basic/%: scripts_basic ;
66870@@ -403,7 +407,7 @@ endif
66871 # of make so .config is not included in this case either (for *config).
66872
66873 no-dot-config-targets := clean mrproper distclean \
66874- cscope TAGS tags help %docs check% \
66875+ cscope gtags TAGS tags help %docs check% \
66876 include/linux/version.h headers_% \
66877 kernelrelease kernelversion
66878
66879@@ -526,6 +530,24 @@ else
66880 KBUILD_CFLAGS += -O2
66881 endif
66882
66883+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
66884+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
66885+ifdef CONFIG_PAX_MEMORY_STACKLEAK
66886+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
66887+endif
66888+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
66889+gcc-plugins:
66890+ $(Q)$(MAKE) $(build)=tools/gcc
66891+else
66892+gcc-plugins:
66893+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
66894+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
66895+else
66896+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
66897+endif
66898+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
66899+endif
66900+
66901 include $(srctree)/arch/$(SRCARCH)/Makefile
66902
66903 ifneq ($(CONFIG_FRAME_WARN),0)
66904@@ -644,7 +666,7 @@ export mod_strip_cmd
66905
66906
66907 ifeq ($(KBUILD_EXTMOD),)
66908-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
66909+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
66910
66911 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
66912 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
66913@@ -840,6 +862,7 @@ define rule_vmlinux-modpost
66914 endef
66915
66916 # vmlinux image - including updated kernel symbols
66917+vmlinux: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
66918 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
66919 ifdef CONFIG_HEADERS_CHECK
66920 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
66921@@ -970,7 +993,7 @@ ifneq ($(KBUILD_SRC),)
66922 endif
66923
66924 # prepare2 creates a makefile if using a separate output directory
66925-prepare2: prepare3 outputmakefile
66926+prepare2: prepare3 outputmakefile gcc-plugins
66927
66928 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
66929 include/asm include/config/auto.conf
66930@@ -1124,6 +1147,7 @@ all: modules
66931 # using awk while concatenating to the final file.
66932
66933 PHONY += modules
66934+modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
66935 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
66936 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
66937 @$(kecho) ' Building modules, stage 2.';
66938@@ -1198,7 +1222,7 @@ MRPROPER_FILES += .config .config.old in
66939 include/linux/autoconf.h include/linux/version.h \
66940 include/linux/utsrelease.h \
66941 include/linux/bounds.h include/asm*/asm-offsets.h \
66942- Module.symvers Module.markers tags TAGS cscope*
66943+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
66944
66945 # clean - Delete most, but leave enough to build external modules
66946 #
66947@@ -1242,7 +1266,7 @@ distclean: mrproper
66948 @find $(srctree) $(RCS_FIND_IGNORE) \
66949 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
66950 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
66951- -o -name '.*.rej' -o -size 0 \
66952+ -o -name '.*.rej' -o -size 0 -o -name '*.so' \
66953 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
66954 -type f -print | xargs rm -f
66955
66956@@ -1289,6 +1313,7 @@ help:
66957 @echo ' modules_prepare - Set up for building external modules'
66958 @echo ' tags/TAGS - Generate tags file for editors'
66959 @echo ' cscope - Generate cscope index'
66960+ @echo ' gtags - Generate GNU GLOBAL index'
66961 @echo ' kernelrelease - Output the release version string'
66962 @echo ' kernelversion - Output the version stored in Makefile'
66963 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
66964@@ -1421,7 +1446,7 @@ clean: $(clean-dirs)
66965 $(call cmd,rmdirs)
66966 $(call cmd,rmfiles)
66967 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
66968- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
66969+ \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
66970 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
66971 -o -name '*.gcno' \) -type f -print | xargs rm -f
66972
66973@@ -1445,7 +1470,7 @@ endif # KBUILD_EXTMOD
66974 quiet_cmd_tags = GEN $@
66975 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
66976
66977-tags TAGS cscope: FORCE
66978+tags TAGS cscope gtags: FORCE
66979 $(call cmd,tags)
66980
66981 # Scripts to check various things for consistency
66982diff -urNp linux-2.6.32.45/mm/backing-dev.c linux-2.6.32.45/mm/backing-dev.c
66983--- linux-2.6.32.45/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
66984+++ linux-2.6.32.45/mm/backing-dev.c 2011-08-11 19:48:17.000000000 -0400
66985@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing
66986 list_add_tail_rcu(&wb->list, &bdi->wb_list);
66987 spin_unlock(&bdi->wb_lock);
66988
66989- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
66990+ tsk->flags |= PF_SWAPWRITE;
66991 set_freezable();
66992
66993 /*
66994@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
66995 * Add the default flusher task that gets created for any bdi
66996 * that has dirty data pending writeout
66997 */
66998-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
66999+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67000 {
67001 if (!bdi_cap_writeback_dirty(bdi))
67002 return;
67003diff -urNp linux-2.6.32.45/mm/filemap.c linux-2.6.32.45/mm/filemap.c
67004--- linux-2.6.32.45/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
67005+++ linux-2.6.32.45/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
67006@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
67007 struct address_space *mapping = file->f_mapping;
67008
67009 if (!mapping->a_ops->readpage)
67010- return -ENOEXEC;
67011+ return -ENODEV;
67012 file_accessed(file);
67013 vma->vm_ops = &generic_file_vm_ops;
67014 vma->vm_flags |= VM_CAN_NONLINEAR;
67015@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
67016 *pos = i_size_read(inode);
67017
67018 if (limit != RLIM_INFINITY) {
67019+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67020 if (*pos >= limit) {
67021 send_sig(SIGXFSZ, current, 0);
67022 return -EFBIG;
67023diff -urNp linux-2.6.32.45/mm/fremap.c linux-2.6.32.45/mm/fremap.c
67024--- linux-2.6.32.45/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
67025+++ linux-2.6.32.45/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
67026@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67027 retry:
67028 vma = find_vma(mm, start);
67029
67030+#ifdef CONFIG_PAX_SEGMEXEC
67031+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67032+ goto out;
67033+#endif
67034+
67035 /*
67036 * Make sure the vma is shared, that it supports prefaulting,
67037 * and that the remapped range is valid and fully within
67038@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67039 /*
67040 * drop PG_Mlocked flag for over-mapped range
67041 */
67042- unsigned int saved_flags = vma->vm_flags;
67043+ unsigned long saved_flags = vma->vm_flags;
67044 munlock_vma_pages_range(vma, start, start + size);
67045 vma->vm_flags = saved_flags;
67046 }
67047diff -urNp linux-2.6.32.45/mm/highmem.c linux-2.6.32.45/mm/highmem.c
67048--- linux-2.6.32.45/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
67049+++ linux-2.6.32.45/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
67050@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
67051 * So no dangers, even with speculative execution.
67052 */
67053 page = pte_page(pkmap_page_table[i]);
67054+ pax_open_kernel();
67055 pte_clear(&init_mm, (unsigned long)page_address(page),
67056 &pkmap_page_table[i]);
67057-
67058+ pax_close_kernel();
67059 set_page_address(page, NULL);
67060 need_flush = 1;
67061 }
67062@@ -177,9 +178,11 @@ start:
67063 }
67064 }
67065 vaddr = PKMAP_ADDR(last_pkmap_nr);
67066+
67067+ pax_open_kernel();
67068 set_pte_at(&init_mm, vaddr,
67069 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67070-
67071+ pax_close_kernel();
67072 pkmap_count[last_pkmap_nr] = 1;
67073 set_page_address(page, (void *)vaddr);
67074
67075diff -urNp linux-2.6.32.45/mm/hugetlb.c linux-2.6.32.45/mm/hugetlb.c
67076--- linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
67077+++ linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
67078@@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
67079 return 1;
67080 }
67081
67082+#ifdef CONFIG_PAX_SEGMEXEC
67083+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67084+{
67085+ struct mm_struct *mm = vma->vm_mm;
67086+ struct vm_area_struct *vma_m;
67087+ unsigned long address_m;
67088+ pte_t *ptep_m;
67089+
67090+ vma_m = pax_find_mirror_vma(vma);
67091+ if (!vma_m)
67092+ return;
67093+
67094+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67095+ address_m = address + SEGMEXEC_TASK_SIZE;
67096+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67097+ get_page(page_m);
67098+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67099+}
67100+#endif
67101+
67102 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
67103 unsigned long address, pte_t *ptep, pte_t pte,
67104 struct page *pagecache_page)
67105@@ -2004,6 +2024,11 @@ retry_avoidcopy:
67106 huge_ptep_clear_flush(vma, address, ptep);
67107 set_huge_pte_at(mm, address, ptep,
67108 make_huge_pte(vma, new_page, 1));
67109+
67110+#ifdef CONFIG_PAX_SEGMEXEC
67111+ pax_mirror_huge_pte(vma, address, new_page);
67112+#endif
67113+
67114 /* Make the old page be freed below */
67115 new_page = old_page;
67116 }
67117@@ -2135,6 +2160,10 @@ retry:
67118 && (vma->vm_flags & VM_SHARED)));
67119 set_huge_pte_at(mm, address, ptep, new_pte);
67120
67121+#ifdef CONFIG_PAX_SEGMEXEC
67122+ pax_mirror_huge_pte(vma, address, page);
67123+#endif
67124+
67125 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67126 /* Optimization, do the COW without a second fault */
67127 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67128@@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
67129 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67130 struct hstate *h = hstate_vma(vma);
67131
67132+#ifdef CONFIG_PAX_SEGMEXEC
67133+ struct vm_area_struct *vma_m;
67134+
67135+ vma_m = pax_find_mirror_vma(vma);
67136+ if (vma_m) {
67137+ unsigned long address_m;
67138+
67139+ if (vma->vm_start > vma_m->vm_start) {
67140+ address_m = address;
67141+ address -= SEGMEXEC_TASK_SIZE;
67142+ vma = vma_m;
67143+ h = hstate_vma(vma);
67144+ } else
67145+ address_m = address + SEGMEXEC_TASK_SIZE;
67146+
67147+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67148+ return VM_FAULT_OOM;
67149+ address_m &= HPAGE_MASK;
67150+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67151+ }
67152+#endif
67153+
67154 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67155 if (!ptep)
67156 return VM_FAULT_OOM;
67157diff -urNp linux-2.6.32.45/mm/internal.h linux-2.6.32.45/mm/internal.h
67158--- linux-2.6.32.45/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
67159+++ linux-2.6.32.45/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
67160@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
67161 * in mm/page_alloc.c
67162 */
67163 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67164+extern void free_compound_page(struct page *page);
67165 extern void prep_compound_page(struct page *page, unsigned long order);
67166
67167
67168diff -urNp linux-2.6.32.45/mm/Kconfig linux-2.6.32.45/mm/Kconfig
67169--- linux-2.6.32.45/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
67170+++ linux-2.6.32.45/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
67171@@ -228,7 +228,7 @@ config KSM
67172 config DEFAULT_MMAP_MIN_ADDR
67173 int "Low address space to protect from user allocation"
67174 depends on MMU
67175- default 4096
67176+ default 65536
67177 help
67178 This is the portion of low virtual memory which should be protected
67179 from userspace allocation. Keeping a user from writing to low pages
67180diff -urNp linux-2.6.32.45/mm/kmemleak.c linux-2.6.32.45/mm/kmemleak.c
67181--- linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
67182+++ linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
67183@@ -358,7 +358,7 @@ static void print_unreferenced(struct se
67184
67185 for (i = 0; i < object->trace_len; i++) {
67186 void *ptr = (void *)object->trace[i];
67187- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67188+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67189 }
67190 }
67191
67192diff -urNp linux-2.6.32.45/mm/maccess.c linux-2.6.32.45/mm/maccess.c
67193--- linux-2.6.32.45/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
67194+++ linux-2.6.32.45/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
67195@@ -14,7 +14,7 @@
67196 * Safely read from address @src to the buffer at @dst. If a kernel fault
67197 * happens, handle that and return -EFAULT.
67198 */
67199-long probe_kernel_read(void *dst, void *src, size_t size)
67200+long probe_kernel_read(void *dst, const void *src, size_t size)
67201 {
67202 long ret;
67203 mm_segment_t old_fs = get_fs();
67204@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
67205 * Safely write to address @dst from the buffer at @src. If a kernel fault
67206 * happens, handle that and return -EFAULT.
67207 */
67208-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
67209+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
67210 {
67211 long ret;
67212 mm_segment_t old_fs = get_fs();
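
The mm/maccess.c hunks only constify the src parameter; the calling convention is unchanged: probe_kernel_read() copies from a kernel address that may be unmapped and returns -EFAULT instead of faulting. A minimal sketch (illustrative only, not part of the patch; peek_kernel_long is a hypothetical helper and relies on the constified prototype above):

#include <linux/errno.h>
#include <linux/uaccess.h>

static int peek_kernel_long(const void *addr, long *out)
{
	long val;

	if (probe_kernel_read(&val, addr, sizeof(val)))
		return -EFAULT;		/* address was not readable */
	*out = val;
	return 0;
}
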
67213diff -urNp linux-2.6.32.45/mm/madvise.c linux-2.6.32.45/mm/madvise.c
67214--- linux-2.6.32.45/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
67215+++ linux-2.6.32.45/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
67216@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
67217 pgoff_t pgoff;
67218 unsigned long new_flags = vma->vm_flags;
67219
67220+#ifdef CONFIG_PAX_SEGMEXEC
67221+ struct vm_area_struct *vma_m;
67222+#endif
67223+
67224 switch (behavior) {
67225 case MADV_NORMAL:
67226 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67227@@ -103,6 +107,13 @@ success:
67228 /*
67229 * vm_flags is protected by the mmap_sem held in write mode.
67230 */
67231+
67232+#ifdef CONFIG_PAX_SEGMEXEC
67233+ vma_m = pax_find_mirror_vma(vma);
67234+ if (vma_m)
67235+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67236+#endif
67237+
67238 vma->vm_flags = new_flags;
67239
67240 out:
67241@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
67242 struct vm_area_struct ** prev,
67243 unsigned long start, unsigned long end)
67244 {
67245+
67246+#ifdef CONFIG_PAX_SEGMEXEC
67247+ struct vm_area_struct *vma_m;
67248+#endif
67249+
67250 *prev = vma;
67251 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67252 return -EINVAL;
67253@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
67254 zap_page_range(vma, start, end - start, &details);
67255 } else
67256 zap_page_range(vma, start, end - start, NULL);
67257+
67258+#ifdef CONFIG_PAX_SEGMEXEC
67259+ vma_m = pax_find_mirror_vma(vma);
67260+ if (vma_m) {
67261+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67262+ struct zap_details details = {
67263+ .nonlinear_vma = vma_m,
67264+ .last_index = ULONG_MAX,
67265+ };
67266+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67267+ } else
67268+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67269+ }
67270+#endif
67271+
67272 return 0;
67273 }
67274
67275@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67276 if (end < start)
67277 goto out;
67278
67279+#ifdef CONFIG_PAX_SEGMEXEC
67280+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67281+ if (end > SEGMEXEC_TASK_SIZE)
67282+ goto out;
67283+ } else
67284+#endif
67285+
67286+ if (end > TASK_SIZE)
67287+ goto out;
67288+
67289 error = 0;
67290 if (end == start)
67291 goto out;
67292diff -urNp linux-2.6.32.45/mm/memory.c linux-2.6.32.45/mm/memory.c
67293--- linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
67294+++ linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
67295@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
67296 return;
67297
67298 pmd = pmd_offset(pud, start);
67299+
67300+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67301 pud_clear(pud);
67302 pmd_free_tlb(tlb, pmd, start);
67303+#endif
67304+
67305 }
67306
67307 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67308@@ -219,9 +223,12 @@ static inline void free_pud_range(struct
67309 if (end - 1 > ceiling - 1)
67310 return;
67311
67312+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67313 pud = pud_offset(pgd, start);
67314 pgd_clear(pgd);
67315 pud_free_tlb(tlb, pud, start);
67316+#endif
67317+
67318 }
67319
67320 /*
67321@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
67322 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67323 i = 0;
67324
67325- do {
67326+ while (nr_pages) {
67327 struct vm_area_struct *vma;
67328
67329- vma = find_extend_vma(mm, start);
67330+ vma = find_vma(mm, start);
67331 if (!vma && in_gate_area(tsk, start)) {
67332 unsigned long pg = start & PAGE_MASK;
67333 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
67334@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
67335 continue;
67336 }
67337
67338- if (!vma ||
67339+ if (!vma || start < vma->vm_start ||
67340 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67341 !(vm_flags & vma->vm_flags))
67342 return i ? : -EFAULT;
67343@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
67344 start += PAGE_SIZE;
67345 nr_pages--;
67346 } while (nr_pages && start < vma->vm_end);
67347- } while (nr_pages);
67348+ }
67349 return i;
67350 }
67351
67352@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
67353 page_add_file_rmap(page);
67354 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67355
67356+#ifdef CONFIG_PAX_SEGMEXEC
67357+ pax_mirror_file_pte(vma, addr, page, ptl);
67358+#endif
67359+
67360 retval = 0;
67361 pte_unmap_unlock(pte, ptl);
67362 return retval;
67363@@ -1560,10 +1571,22 @@ out:
67364 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67365 struct page *page)
67366 {
67367+
67368+#ifdef CONFIG_PAX_SEGMEXEC
67369+ struct vm_area_struct *vma_m;
67370+#endif
67371+
67372 if (addr < vma->vm_start || addr >= vma->vm_end)
67373 return -EFAULT;
67374 if (!page_count(page))
67375 return -EINVAL;
67376+
67377+#ifdef CONFIG_PAX_SEGMEXEC
67378+ vma_m = pax_find_mirror_vma(vma);
67379+ if (vma_m)
67380+ vma_m->vm_flags |= VM_INSERTPAGE;
67381+#endif
67382+
67383 vma->vm_flags |= VM_INSERTPAGE;
67384 return insert_page(vma, addr, page, vma->vm_page_prot);
67385 }
67386@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
67387 unsigned long pfn)
67388 {
67389 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67390+ BUG_ON(vma->vm_mirror);
67391
67392 if (addr < vma->vm_start || addr >= vma->vm_end)
67393 return -EFAULT;
67394@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
67395 copy_user_highpage(dst, src, va, vma);
67396 }
67397
67398+#ifdef CONFIG_PAX_SEGMEXEC
67399+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67400+{
67401+ struct mm_struct *mm = vma->vm_mm;
67402+ spinlock_t *ptl;
67403+ pte_t *pte, entry;
67404+
67405+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67406+ entry = *pte;
67407+ if (!pte_present(entry)) {
67408+ if (!pte_none(entry)) {
67409+ BUG_ON(pte_file(entry));
67410+ free_swap_and_cache(pte_to_swp_entry(entry));
67411+ pte_clear_not_present_full(mm, address, pte, 0);
67412+ }
67413+ } else {
67414+ struct page *page;
67415+
67416+ flush_cache_page(vma, address, pte_pfn(entry));
67417+ entry = ptep_clear_flush(vma, address, pte);
67418+ BUG_ON(pte_dirty(entry));
67419+ page = vm_normal_page(vma, address, entry);
67420+ if (page) {
67421+ update_hiwater_rss(mm);
67422+ if (PageAnon(page))
67423+ dec_mm_counter(mm, anon_rss);
67424+ else
67425+ dec_mm_counter(mm, file_rss);
67426+ page_remove_rmap(page);
67427+ page_cache_release(page);
67428+ }
67429+ }
67430+ pte_unmap_unlock(pte, ptl);
67431+}
67432+
67433+/* PaX: if vma is mirrored, synchronize the mirror's PTE
67434+ *
67435+ * the ptl of the lower mapped page is held on entry and is not released on exit
67436+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67437+ */
67438+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67439+{
67440+ struct mm_struct *mm = vma->vm_mm;
67441+ unsigned long address_m;
67442+ spinlock_t *ptl_m;
67443+ struct vm_area_struct *vma_m;
67444+ pmd_t *pmd_m;
67445+ pte_t *pte_m, entry_m;
67446+
67447+ BUG_ON(!page_m || !PageAnon(page_m));
67448+
67449+ vma_m = pax_find_mirror_vma(vma);
67450+ if (!vma_m)
67451+ return;
67452+
67453+ BUG_ON(!PageLocked(page_m));
67454+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67455+ address_m = address + SEGMEXEC_TASK_SIZE;
67456+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67457+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67458+ ptl_m = pte_lockptr(mm, pmd_m);
67459+ if (ptl != ptl_m) {
67460+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67461+ if (!pte_none(*pte_m))
67462+ goto out;
67463+ }
67464+
67465+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67466+ page_cache_get(page_m);
67467+ page_add_anon_rmap(page_m, vma_m, address_m);
67468+ inc_mm_counter(mm, anon_rss);
67469+ set_pte_at(mm, address_m, pte_m, entry_m);
67470+ update_mmu_cache(vma_m, address_m, entry_m);
67471+out:
67472+ if (ptl != ptl_m)
67473+ spin_unlock(ptl_m);
67474+ pte_unmap_nested(pte_m);
67475+ unlock_page(page_m);
67476+}
67477+
67478+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67479+{
67480+ struct mm_struct *mm = vma->vm_mm;
67481+ unsigned long address_m;
67482+ spinlock_t *ptl_m;
67483+ struct vm_area_struct *vma_m;
67484+ pmd_t *pmd_m;
67485+ pte_t *pte_m, entry_m;
67486+
67487+ BUG_ON(!page_m || PageAnon(page_m));
67488+
67489+ vma_m = pax_find_mirror_vma(vma);
67490+ if (!vma_m)
67491+ return;
67492+
67493+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67494+ address_m = address + SEGMEXEC_TASK_SIZE;
67495+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67496+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67497+ ptl_m = pte_lockptr(mm, pmd_m);
67498+ if (ptl != ptl_m) {
67499+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67500+ if (!pte_none(*pte_m))
67501+ goto out;
67502+ }
67503+
67504+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67505+ page_cache_get(page_m);
67506+ page_add_file_rmap(page_m);
67507+ inc_mm_counter(mm, file_rss);
67508+ set_pte_at(mm, address_m, pte_m, entry_m);
67509+ update_mmu_cache(vma_m, address_m, entry_m);
67510+out:
67511+ if (ptl != ptl_m)
67512+ spin_unlock(ptl_m);
67513+ pte_unmap_nested(pte_m);
67514+}
67515+
67516+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67517+{
67518+ struct mm_struct *mm = vma->vm_mm;
67519+ unsigned long address_m;
67520+ spinlock_t *ptl_m;
67521+ struct vm_area_struct *vma_m;
67522+ pmd_t *pmd_m;
67523+ pte_t *pte_m, entry_m;
67524+
67525+ vma_m = pax_find_mirror_vma(vma);
67526+ if (!vma_m)
67527+ return;
67528+
67529+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67530+ address_m = address + SEGMEXEC_TASK_SIZE;
67531+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67532+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67533+ ptl_m = pte_lockptr(mm, pmd_m);
67534+ if (ptl != ptl_m) {
67535+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67536+ if (!pte_none(*pte_m))
67537+ goto out;
67538+ }
67539+
67540+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67541+ set_pte_at(mm, address_m, pte_m, entry_m);
67542+out:
67543+ if (ptl != ptl_m)
67544+ spin_unlock(ptl_m);
67545+ pte_unmap_nested(pte_m);
67546+}
67547+
67548+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67549+{
67550+ struct page *page_m;
67551+ pte_t entry;
67552+
67553+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67554+ goto out;
67555+
67556+ entry = *pte;
67557+ page_m = vm_normal_page(vma, address, entry);
67558+ if (!page_m)
67559+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67560+ else if (PageAnon(page_m)) {
67561+ if (pax_find_mirror_vma(vma)) {
67562+ pte_unmap_unlock(pte, ptl);
67563+ lock_page(page_m);
67564+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67565+ if (pte_same(entry, *pte))
67566+ pax_mirror_anon_pte(vma, address, page_m, ptl);
67567+ else
67568+ unlock_page(page_m);
67569+ }
67570+ } else
67571+ pax_mirror_file_pte(vma, address, page_m, ptl);
67572+
67573+out:
67574+ pte_unmap_unlock(pte, ptl);
67575+}
67576+#endif
67577+
67578 /*
67579 * This routine handles present pages, when users try to write
67580 * to a shared page. It is done by copying the page to a new address
67581@@ -2156,6 +2360,12 @@ gotten:
67582 */
67583 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67584 if (likely(pte_same(*page_table, orig_pte))) {
67585+
67586+#ifdef CONFIG_PAX_SEGMEXEC
67587+ if (pax_find_mirror_vma(vma))
67588+ BUG_ON(!trylock_page(new_page));
67589+#endif
67590+
67591 if (old_page) {
67592 if (!PageAnon(old_page)) {
67593 dec_mm_counter(mm, file_rss);
67594@@ -2207,6 +2417,10 @@ gotten:
67595 page_remove_rmap(old_page);
67596 }
67597
67598+#ifdef CONFIG_PAX_SEGMEXEC
67599+ pax_mirror_anon_pte(vma, address, new_page, ptl);
67600+#endif
67601+
67602 /* Free the old page.. */
67603 new_page = old_page;
67604 ret |= VM_FAULT_WRITE;
67605@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
67606 swap_free(entry);
67607 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67608 try_to_free_swap(page);
67609+
67610+#ifdef CONFIG_PAX_SEGMEXEC
67611+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67612+#endif
67613+
67614 unlock_page(page);
67615
67616 if (flags & FAULT_FLAG_WRITE) {
67617@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
67618
67619 /* No need to invalidate - it was non-present before */
67620 update_mmu_cache(vma, address, pte);
67621+
67622+#ifdef CONFIG_PAX_SEGMEXEC
67623+ pax_mirror_anon_pte(vma, address, page, ptl);
67624+#endif
67625+
67626 unlock:
67627 pte_unmap_unlock(page_table, ptl);
67628 out:
67629@@ -2632,40 +2856,6 @@ out_release:
67630 }
67631
67632 /*
67633- * This is like a special single-page "expand_{down|up}wards()",
67634- * except we must first make sure that 'address{-|+}PAGE_SIZE'
67635- * doesn't hit another vma.
67636- */
67637-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67638-{
67639- address &= PAGE_MASK;
67640- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67641- struct vm_area_struct *prev = vma->vm_prev;
67642-
67643- /*
67644- * Is there a mapping abutting this one below?
67645- *
67646- * That's only ok if it's the same stack mapping
67647- * that has gotten split..
67648- */
67649- if (prev && prev->vm_end == address)
67650- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67651-
67652- expand_stack(vma, address - PAGE_SIZE);
67653- }
67654- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67655- struct vm_area_struct *next = vma->vm_next;
67656-
67657- /* As VM_GROWSDOWN but s/below/above/ */
67658- if (next && next->vm_start == address + PAGE_SIZE)
67659- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67660-
67661- expand_upwards(vma, address + PAGE_SIZE);
67662- }
67663- return 0;
67664-}
67665-
67666-/*
67667 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67668 * but allow concurrent faults), and pte mapped but not yet locked.
67669 * We return with mmap_sem still held, but pte unmapped and unlocked.
67670@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
67671 unsigned long address, pte_t *page_table, pmd_t *pmd,
67672 unsigned int flags)
67673 {
67674- struct page *page;
67675+ struct page *page = NULL;
67676 spinlock_t *ptl;
67677 pte_t entry;
67678
67679- pte_unmap(page_table);
67680-
67681- /* Check if we need to add a guard page to the stack */
67682- if (check_stack_guard_page(vma, address) < 0)
67683- return VM_FAULT_SIGBUS;
67684-
67685- /* Use the zero-page for reads */
67686 if (!(flags & FAULT_FLAG_WRITE)) {
67687 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67688 vma->vm_page_prot));
67689- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67690+ ptl = pte_lockptr(mm, pmd);
67691+ spin_lock(ptl);
67692 if (!pte_none(*page_table))
67693 goto unlock;
67694 goto setpte;
67695 }
67696
67697 /* Allocate our own private page. */
67698+ pte_unmap(page_table);
67699+
67700 if (unlikely(anon_vma_prepare(vma)))
67701 goto oom;
67702 page = alloc_zeroed_user_highpage_movable(vma, address);
67703@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
67704 if (!pte_none(*page_table))
67705 goto release;
67706
67707+#ifdef CONFIG_PAX_SEGMEXEC
67708+ if (pax_find_mirror_vma(vma))
67709+ BUG_ON(!trylock_page(page));
67710+#endif
67711+
67712 inc_mm_counter(mm, anon_rss);
67713 page_add_new_anon_rmap(page, vma, address);
67714 setpte:
67715@@ -2720,6 +2911,12 @@ setpte:
67716
67717 /* No need to invalidate - it was non-present before */
67718 update_mmu_cache(vma, address, entry);
67719+
67720+#ifdef CONFIG_PAX_SEGMEXEC
67721+ if (page)
67722+ pax_mirror_anon_pte(vma, address, page, ptl);
67723+#endif
67724+
67725 unlock:
67726 pte_unmap_unlock(page_table, ptl);
67727 return 0;
67728@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
67729 */
67730 /* Only go through if we didn't race with anybody else... */
67731 if (likely(pte_same(*page_table, orig_pte))) {
67732+
67733+#ifdef CONFIG_PAX_SEGMEXEC
67734+ if (anon && pax_find_mirror_vma(vma))
67735+ BUG_ON(!trylock_page(page));
67736+#endif
67737+
67738 flush_icache_page(vma, page);
67739 entry = mk_pte(page, vma->vm_page_prot);
67740 if (flags & FAULT_FLAG_WRITE)
67741@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
67742
67743 /* no need to invalidate: a not-present page won't be cached */
67744 update_mmu_cache(vma, address, entry);
67745+
67746+#ifdef CONFIG_PAX_SEGMEXEC
67747+ if (anon)
67748+ pax_mirror_anon_pte(vma, address, page, ptl);
67749+ else
67750+ pax_mirror_file_pte(vma, address, page, ptl);
67751+#endif
67752+
67753 } else {
67754 if (charged)
67755 mem_cgroup_uncharge_page(page);
67756@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
67757 if (flags & FAULT_FLAG_WRITE)
67758 flush_tlb_page(vma, address);
67759 }
67760+
67761+#ifdef CONFIG_PAX_SEGMEXEC
67762+ pax_mirror_pte(vma, address, pte, pmd, ptl);
67763+ return 0;
67764+#endif
67765+
67766 unlock:
67767 pte_unmap_unlock(pte, ptl);
67768 return 0;
67769@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
67770 pmd_t *pmd;
67771 pte_t *pte;
67772
67773+#ifdef CONFIG_PAX_SEGMEXEC
67774+ struct vm_area_struct *vma_m;
67775+#endif
67776+
67777 __set_current_state(TASK_RUNNING);
67778
67779 count_vm_event(PGFAULT);
67780@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
67781 if (unlikely(is_vm_hugetlb_page(vma)))
67782 return hugetlb_fault(mm, vma, address, flags);
67783
67784+#ifdef CONFIG_PAX_SEGMEXEC
67785+ vma_m = pax_find_mirror_vma(vma);
67786+ if (vma_m) {
67787+ unsigned long address_m;
67788+ pgd_t *pgd_m;
67789+ pud_t *pud_m;
67790+ pmd_t *pmd_m;
67791+
67792+ if (vma->vm_start > vma_m->vm_start) {
67793+ address_m = address;
67794+ address -= SEGMEXEC_TASK_SIZE;
67795+ vma = vma_m;
67796+ } else
67797+ address_m = address + SEGMEXEC_TASK_SIZE;
67798+
67799+ pgd_m = pgd_offset(mm, address_m);
67800+ pud_m = pud_alloc(mm, pgd_m, address_m);
67801+ if (!pud_m)
67802+ return VM_FAULT_OOM;
67803+ pmd_m = pmd_alloc(mm, pud_m, address_m);
67804+ if (!pmd_m)
67805+ return VM_FAULT_OOM;
67806+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
67807+ return VM_FAULT_OOM;
67808+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67809+ }
67810+#endif
67811+
67812 pgd = pgd_offset(mm, address);
67813 pud = pud_alloc(mm, pgd, address);
67814 if (!pud)
67815@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
67816 gate_vma.vm_start = FIXADDR_USER_START;
67817 gate_vma.vm_end = FIXADDR_USER_END;
67818 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67819- gate_vma.vm_page_prot = __P101;
67820+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67821 /*
67822 * Make sure the vDSO gets into every core dump.
67823 * Dumping its contents makes post-mortem fully interpretable later
67824diff -urNp linux-2.6.32.45/mm/memory-failure.c linux-2.6.32.45/mm/memory-failure.c
67825--- linux-2.6.32.45/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
67826+++ linux-2.6.32.45/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
67827@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
67828
67829 int sysctl_memory_failure_recovery __read_mostly = 1;
67830
67831-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67832+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67833
67834 /*
67835 * Send all the processes who have the page mapped an ``action optional''
67836@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
67837 return 0;
67838 }
67839
67840- atomic_long_add(1, &mce_bad_pages);
67841+ atomic_long_add_unchecked(1, &mce_bad_pages);
67842
67843 /*
67844 * We need/can do nothing about count=0 pages.
67845diff -urNp linux-2.6.32.45/mm/mempolicy.c linux-2.6.32.45/mm/mempolicy.c
67846--- linux-2.6.32.45/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
67847+++ linux-2.6.32.45/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
67848@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
67849 struct vm_area_struct *next;
67850 int err;
67851
67852+#ifdef CONFIG_PAX_SEGMEXEC
67853+ struct vm_area_struct *vma_m;
67854+#endif
67855+
67856 err = 0;
67857 for (; vma && vma->vm_start < end; vma = next) {
67858 next = vma->vm_next;
67859@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
67860 err = policy_vma(vma, new);
67861 if (err)
67862 break;
67863+
67864+#ifdef CONFIG_PAX_SEGMEXEC
67865+ vma_m = pax_find_mirror_vma(vma);
67866+ if (vma_m) {
67867+ err = policy_vma(vma_m, new);
67868+ if (err)
67869+ break;
67870+ }
67871+#endif
67872+
67873 }
67874 return err;
67875 }
67876@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
67877
67878 if (end < start)
67879 return -EINVAL;
67880+
67881+#ifdef CONFIG_PAX_SEGMEXEC
67882+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67883+ if (end > SEGMEXEC_TASK_SIZE)
67884+ return -EINVAL;
67885+ } else
67886+#endif
67887+
67888+ if (end > TASK_SIZE)
67889+ return -EINVAL;
67890+
67891 if (end == start)
67892 return 0;
67893
67894@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67895 if (!mm)
67896 return -EINVAL;
67897
67898+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67899+ if (mm != current->mm &&
67900+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67901+ err = -EPERM;
67902+ goto out;
67903+ }
67904+#endif
67905+
67906 /*
67907 * Check if this process has the right to modify the specified
67908 * process. The right exists if the process has administrative
67909@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67910 rcu_read_lock();
67911 tcred = __task_cred(task);
67912 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67913- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67914- !capable(CAP_SYS_NICE)) {
67915+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67916 rcu_read_unlock();
67917 err = -EPERM;
67918 goto out;
67919@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
67920
67921 if (file) {
67922 seq_printf(m, " file=");
67923- seq_path(m, &file->f_path, "\n\t= ");
67924+ seq_path(m, &file->f_path, "\n\t\\= ");
67925 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67926 seq_printf(m, " heap");
67927 } else if (vma->vm_start <= mm->start_stack &&
67928diff -urNp linux-2.6.32.45/mm/migrate.c linux-2.6.32.45/mm/migrate.c
67929--- linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
67930+++ linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
67931@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
67932 unsigned long chunk_start;
67933 int err;
67934
67935+ pax_track_stack();
67936+
67937 task_nodes = cpuset_mems_allowed(task);
67938
67939 err = -ENOMEM;
67940@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67941 if (!mm)
67942 return -EINVAL;
67943
67944+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67945+ if (mm != current->mm &&
67946+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67947+ err = -EPERM;
67948+ goto out;
67949+ }
67950+#endif
67951+
67952 /*
67953 * Check if this process has the right to modify the specified
67954 * process. The right exists if the process has administrative
67955@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67956 rcu_read_lock();
67957 tcred = __task_cred(task);
67958 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67959- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67960- !capable(CAP_SYS_NICE)) {
67961+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67962 rcu_read_unlock();
67963 err = -EPERM;
67964 goto out;
67965diff -urNp linux-2.6.32.45/mm/mlock.c linux-2.6.32.45/mm/mlock.c
67966--- linux-2.6.32.45/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
67967+++ linux-2.6.32.45/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
67968@@ -13,6 +13,7 @@
67969 #include <linux/pagemap.h>
67970 #include <linux/mempolicy.h>
67971 #include <linux/syscalls.h>
67972+#include <linux/security.h>
67973 #include <linux/sched.h>
67974 #include <linux/module.h>
67975 #include <linux/rmap.h>
67976@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
67977 }
67978 }
67979
67980-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67981-{
67982- return (vma->vm_flags & VM_GROWSDOWN) &&
67983- (vma->vm_start == addr) &&
67984- !vma_stack_continue(vma->vm_prev, addr);
67985-}
67986-
67987 /**
67988 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
67989 * @vma: target vma
67990@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
67991 if (vma->vm_flags & VM_WRITE)
67992 gup_flags |= FOLL_WRITE;
67993
67994- /* We don't try to access the guard page of a stack vma */
67995- if (stack_guard_page(vma, start)) {
67996- addr += PAGE_SIZE;
67997- nr_pages--;
67998- }
67999-
68000 while (nr_pages > 0) {
68001 int i;
68002
68003@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
68004 {
68005 unsigned long nstart, end, tmp;
68006 struct vm_area_struct * vma, * prev;
68007- int error;
68008+ int error = -EINVAL;
68009
68010 len = PAGE_ALIGN(len);
68011 end = start + len;
68012@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
68013 return -EINVAL;
68014 if (end == start)
68015 return 0;
68016+ if (end > TASK_SIZE)
68017+ return -EINVAL;
68018+
68019 vma = find_vma_prev(current->mm, start, &prev);
68020 if (!vma || vma->vm_start > start)
68021 return -ENOMEM;
68022@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
68023 for (nstart = start ; ; ) {
68024 unsigned int newflags;
68025
68026+#ifdef CONFIG_PAX_SEGMEXEC
68027+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68028+ break;
68029+#endif
68030+
68031 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68032
68033 newflags = vma->vm_flags | VM_LOCKED;
68034@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
68035 lock_limit >>= PAGE_SHIFT;
68036
68037 /* check against resource limits */
68038+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68039 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68040 error = do_mlock(start, len, 1);
68041 up_write(&current->mm->mmap_sem);
68042@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
68043 static int do_mlockall(int flags)
68044 {
68045 struct vm_area_struct * vma, * prev = NULL;
68046- unsigned int def_flags = 0;
68047
68048 if (flags & MCL_FUTURE)
68049- def_flags = VM_LOCKED;
68050- current->mm->def_flags = def_flags;
68051+ current->mm->def_flags |= VM_LOCKED;
68052+ else
68053+ current->mm->def_flags &= ~VM_LOCKED;
68054 if (flags == MCL_FUTURE)
68055 goto out;
68056
68057 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68058- unsigned int newflags;
68059+ unsigned long newflags;
68060+
68061+#ifdef CONFIG_PAX_SEGMEXEC
68062+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68063+ break;
68064+#endif
68065
68066+ BUG_ON(vma->vm_end > TASK_SIZE);
68067 newflags = vma->vm_flags | VM_LOCKED;
68068 if (!(flags & MCL_CURRENT))
68069 newflags &= ~VM_LOCKED;
68070@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68071 lock_limit >>= PAGE_SHIFT;
68072
68073 ret = -ENOMEM;
68074+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68075 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68076 capable(CAP_IPC_LOCK))
68077 ret = do_mlockall(flags);
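
The mm/mlock.c changes above add an explicit end > TASK_SIZE rejection in do_mlock(), make do_mlockall() preserve the other bits of mm->def_flags while toggling only VM_LOCKED, and feed the attempted lock size to gr_learn_resource() ahead of the RLIMIT_MEMLOCK test. The basic contract visible from userspace stays the same: without CAP_IPC_LOCK, mlock() fails once the locked total would exceed RLIMIT_MEMLOCK. A small userspace sketch (illustrative only, not part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;
	size_t len = 1 << 20;			/* 1 MiB */
	char *p = malloc(len);

	if (!p)
		return 1;
	memset(p, 0, len);			/* fault the pages in */

	getrlimit(RLIMIT_MEMLOCK, &rl);
	printf("RLIMIT_MEMLOCK: %llu bytes\n", (unsigned long long)rl.rlim_cur);

	if (mlock(p, len) != 0)			/* ENOMEM/EPERM past the limit */
		perror("mlock");
	else
		munlock(p, len);
	free(p);
	return 0;
}
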
68078diff -urNp linux-2.6.32.45/mm/mmap.c linux-2.6.32.45/mm/mmap.c
68079--- linux-2.6.32.45/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
68080+++ linux-2.6.32.45/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
68081@@ -45,6 +45,16 @@
68082 #define arch_rebalance_pgtables(addr, len) (addr)
68083 #endif
68084
68085+static inline void verify_mm_writelocked(struct mm_struct *mm)
68086+{
68087+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68088+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68089+ up_read(&mm->mmap_sem);
68090+ BUG();
68091+ }
68092+#endif
68093+}
68094+
68095 static void unmap_region(struct mm_struct *mm,
68096 struct vm_area_struct *vma, struct vm_area_struct *prev,
68097 unsigned long start, unsigned long end);
68098@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
68099 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68100 *
68101 */
68102-pgprot_t protection_map[16] = {
68103+pgprot_t protection_map[16] __read_only = {
68104 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68105 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68106 };
68107
68108 pgprot_t vm_get_page_prot(unsigned long vm_flags)
68109 {
68110- return __pgprot(pgprot_val(protection_map[vm_flags &
68111+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68112 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68113 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68114+
68115+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68116+ if (!nx_enabled &&
68117+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68118+ (vm_flags & (VM_READ | VM_WRITE)))
68119+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68120+#endif
68121+
68122+ return prot;
68123 }
68124 EXPORT_SYMBOL(vm_get_page_prot);
68125
68126 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
68127 int sysctl_overcommit_ratio = 50; /* default is 50% */
68128 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68129+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68130 struct percpu_counter vm_committed_as;
68131
68132 /*
68133@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
68134 struct vm_area_struct *next = vma->vm_next;
68135
68136 might_sleep();
68137+ BUG_ON(vma->vm_mirror);
68138 if (vma->vm_ops && vma->vm_ops->close)
68139 vma->vm_ops->close(vma);
68140 if (vma->vm_file) {
68141@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68142 * not page aligned -Ram Gupta
68143 */
68144 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
68145+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68146 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68147 (mm->end_data - mm->start_data) > rlim)
68148 goto out;
68149@@ -704,6 +726,12 @@ static int
68150 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68151 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68152 {
68153+
68154+#ifdef CONFIG_PAX_SEGMEXEC
68155+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68156+ return 0;
68157+#endif
68158+
68159 if (is_mergeable_vma(vma, file, vm_flags) &&
68160 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68161 if (vma->vm_pgoff == vm_pgoff)
68162@@ -723,6 +751,12 @@ static int
68163 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68164 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68165 {
68166+
68167+#ifdef CONFIG_PAX_SEGMEXEC
68168+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68169+ return 0;
68170+#endif
68171+
68172 if (is_mergeable_vma(vma, file, vm_flags) &&
68173 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68174 pgoff_t vm_pglen;
68175@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
68176 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68177 struct vm_area_struct *prev, unsigned long addr,
68178 unsigned long end, unsigned long vm_flags,
68179- struct anon_vma *anon_vma, struct file *file,
68180+ struct anon_vma *anon_vma, struct file *file,
68181 pgoff_t pgoff, struct mempolicy *policy)
68182 {
68183 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68184 struct vm_area_struct *area, *next;
68185
68186+#ifdef CONFIG_PAX_SEGMEXEC
68187+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68188+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68189+
68190+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68191+#endif
68192+
68193 /*
68194 * We later require that vma->vm_flags == vm_flags,
68195 * so this tests vma->vm_flags & VM_SPECIAL, too.
68196@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
68197 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68198 next = next->vm_next;
68199
68200+#ifdef CONFIG_PAX_SEGMEXEC
68201+ if (prev)
68202+ prev_m = pax_find_mirror_vma(prev);
68203+ if (area)
68204+ area_m = pax_find_mirror_vma(area);
68205+ if (next)
68206+ next_m = pax_find_mirror_vma(next);
68207+#endif
68208+
68209 /*
68210 * Can it merge with the predecessor?
68211 */
68212@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
68213 /* cases 1, 6 */
68214 vma_adjust(prev, prev->vm_start,
68215 next->vm_end, prev->vm_pgoff, NULL);
68216- } else /* cases 2, 5, 7 */
68217+
68218+#ifdef CONFIG_PAX_SEGMEXEC
68219+ if (prev_m)
68220+ vma_adjust(prev_m, prev_m->vm_start,
68221+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68222+#endif
68223+
68224+ } else { /* cases 2, 5, 7 */
68225 vma_adjust(prev, prev->vm_start,
68226 end, prev->vm_pgoff, NULL);
68227+
68228+#ifdef CONFIG_PAX_SEGMEXEC
68229+ if (prev_m)
68230+ vma_adjust(prev_m, prev_m->vm_start,
68231+ end_m, prev_m->vm_pgoff, NULL);
68232+#endif
68233+
68234+ }
68235 return prev;
68236 }
68237
68238@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
68239 mpol_equal(policy, vma_policy(next)) &&
68240 can_vma_merge_before(next, vm_flags,
68241 anon_vma, file, pgoff+pglen)) {
68242- if (prev && addr < prev->vm_end) /* case 4 */
68243+ if (prev && addr < prev->vm_end) { /* case 4 */
68244 vma_adjust(prev, prev->vm_start,
68245 addr, prev->vm_pgoff, NULL);
68246- else /* cases 3, 8 */
68247+
68248+#ifdef CONFIG_PAX_SEGMEXEC
68249+ if (prev_m)
68250+ vma_adjust(prev_m, prev_m->vm_start,
68251+ addr_m, prev_m->vm_pgoff, NULL);
68252+#endif
68253+
68254+ } else { /* cases 3, 8 */
68255 vma_adjust(area, addr, next->vm_end,
68256 next->vm_pgoff - pglen, NULL);
68257+
68258+#ifdef CONFIG_PAX_SEGMEXEC
68259+ if (area_m)
68260+ vma_adjust(area_m, addr_m, next_m->vm_end,
68261+ next_m->vm_pgoff - pglen, NULL);
68262+#endif
68263+
68264+ }
68265 return area;
68266 }
68267
68268@@ -898,14 +978,11 @@ none:
68269 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68270 struct file *file, long pages)
68271 {
68272- const unsigned long stack_flags
68273- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68274-
68275 if (file) {
68276 mm->shared_vm += pages;
68277 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68278 mm->exec_vm += pages;
68279- } else if (flags & stack_flags)
68280+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68281 mm->stack_vm += pages;
68282 if (flags & (VM_RESERVED|VM_IO))
68283 mm->reserved_vm += pages;
68284@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
68285 * (the exception is when the underlying filesystem is noexec
68286 * mounted, in which case we dont add PROT_EXEC.)
68287 */
68288- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68289+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68290 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68291 prot |= PROT_EXEC;
68292
68293@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
68294 /* Obtain the address to map to. we verify (or select) it and ensure
68295 * that it represents a valid section of the address space.
68296 */
68297- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68298+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68299 if (addr & ~PAGE_MASK)
68300 return addr;
68301
68302@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
68303 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68304 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68305
68306+#ifdef CONFIG_PAX_MPROTECT
68307+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68308+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68309+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68310+ gr_log_rwxmmap(file);
68311+
68312+#ifdef CONFIG_PAX_EMUPLT
68313+ vm_flags &= ~VM_EXEC;
68314+#else
68315+ return -EPERM;
68316+#endif
68317+
68318+ }
68319+
68320+ if (!(vm_flags & VM_EXEC))
68321+ vm_flags &= ~VM_MAYEXEC;
68322+#else
68323+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68324+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68325+#endif
68326+ else
68327+ vm_flags &= ~VM_MAYWRITE;
68328+ }
68329+#endif
68330+
68331+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68332+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68333+ vm_flags &= ~VM_PAGEEXEC;
68334+#endif
68335+
68336 if (flags & MAP_LOCKED)
68337 if (!can_do_mlock())
68338 return -EPERM;
68339@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
68340 locked += mm->locked_vm;
68341 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
68342 lock_limit >>= PAGE_SHIFT;
68343+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68344 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68345 return -EAGAIN;
68346 }
68347@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
68348 if (error)
68349 return error;
68350
68351+ if (!gr_acl_handle_mmap(file, prot))
68352+ return -EACCES;
68353+
68354 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68355 }
68356 EXPORT_SYMBOL(do_mmap_pgoff);
68357@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
68358 */
68359 int vma_wants_writenotify(struct vm_area_struct *vma)
68360 {
68361- unsigned int vm_flags = vma->vm_flags;
68362+ unsigned long vm_flags = vma->vm_flags;
68363
68364 /* If it was private or non-writable, the write bit is already clear */
68365- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68366+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68367 return 0;
68368
68369 /* The backer wishes to know when pages are first written to? */
68370@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
68371 unsigned long charged = 0;
68372 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68373
68374+#ifdef CONFIG_PAX_SEGMEXEC
68375+ struct vm_area_struct *vma_m = NULL;
68376+#endif
68377+
68378+ /*
68379+ * mm->mmap_sem is required to protect against another thread
68380+ * changing the mappings in case we sleep.
68381+ */
68382+ verify_mm_writelocked(mm);
68383+
68384 /* Clear old maps */
68385 error = -ENOMEM;
68386-munmap_back:
68387 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68388 if (vma && vma->vm_start < addr + len) {
68389 if (do_munmap(mm, addr, len))
68390 return -ENOMEM;
68391- goto munmap_back;
68392+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68393+ BUG_ON(vma && vma->vm_start < addr + len);
68394 }
68395
68396 /* Check against address space limit. */
68397@@ -1173,6 +1294,16 @@ munmap_back:
68398 goto unacct_error;
68399 }
68400
68401+#ifdef CONFIG_PAX_SEGMEXEC
68402+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68403+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68404+ if (!vma_m) {
68405+ error = -ENOMEM;
68406+ goto free_vma;
68407+ }
68408+ }
68409+#endif
68410+
68411 vma->vm_mm = mm;
68412 vma->vm_start = addr;
68413 vma->vm_end = addr + len;
68414@@ -1195,6 +1326,19 @@ munmap_back:
68415 error = file->f_op->mmap(file, vma);
68416 if (error)
68417 goto unmap_and_free_vma;
68418+
68419+#ifdef CONFIG_PAX_SEGMEXEC
68420+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68421+ added_exe_file_vma(mm);
68422+#endif
68423+
68424+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68425+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68426+ vma->vm_flags |= VM_PAGEEXEC;
68427+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68428+ }
68429+#endif
68430+
68431 if (vm_flags & VM_EXECUTABLE)
68432 added_exe_file_vma(mm);
68433
68434@@ -1218,6 +1362,11 @@ munmap_back:
68435 vma_link(mm, vma, prev, rb_link, rb_parent);
68436 file = vma->vm_file;
68437
68438+#ifdef CONFIG_PAX_SEGMEXEC
68439+ if (vma_m)
68440+ pax_mirror_vma(vma_m, vma);
68441+#endif
68442+
68443 /* Once vma denies write, undo our temporary denial count */
68444 if (correct_wcount)
68445 atomic_inc(&inode->i_writecount);
68446@@ -1226,6 +1375,7 @@ out:
68447
68448 mm->total_vm += len >> PAGE_SHIFT;
68449 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68450+ track_exec_limit(mm, addr, addr + len, vm_flags);
68451 if (vm_flags & VM_LOCKED) {
68452 /*
68453 * makes pages present; downgrades, drops, reacquires mmap_sem
68454@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
68455 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68456 charged = 0;
68457 free_vma:
68458+
68459+#ifdef CONFIG_PAX_SEGMEXEC
68460+ if (vma_m)
68461+ kmem_cache_free(vm_area_cachep, vma_m);
68462+#endif
68463+
68464 kmem_cache_free(vm_area_cachep, vma);
68465 unacct_error:
68466 if (charged)
68467@@ -1255,6 +1411,44 @@ unacct_error:
68468 return error;
68469 }
68470
68471+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68472+{
68473+ if (!vma) {
68474+#ifdef CONFIG_STACK_GROWSUP
68475+ if (addr > sysctl_heap_stack_gap)
68476+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68477+ else
68478+ vma = find_vma(current->mm, 0);
68479+ if (vma && (vma->vm_flags & VM_GROWSUP))
68480+ return false;
68481+#endif
68482+ return true;
68483+ }
68484+
68485+ if (addr + len > vma->vm_start)
68486+ return false;
68487+
68488+ if (vma->vm_flags & VM_GROWSDOWN)
68489+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68490+#ifdef CONFIG_STACK_GROWSUP
68491+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68492+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68493+#endif
68494+
68495+ return true;
68496+}
68497+
68498+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68499+{
68500+ if (vma->vm_start < len)
68501+ return -ENOMEM;
68502+ if (!(vma->vm_flags & VM_GROWSDOWN))
68503+ return vma->vm_start - len;
68504+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
68505+ return vma->vm_start - len - sysctl_heap_stack_gap;
68506+ return -ENOMEM;
68507+}
68508+
68509 /* Get an address range which is currently unmapped.
68510 * For shmat() with addr=0.
68511 *
68512@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
68513 if (flags & MAP_FIXED)
68514 return addr;
68515
68516+#ifdef CONFIG_PAX_RANDMMAP
68517+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68518+#endif
68519+
68520 if (addr) {
68521 addr = PAGE_ALIGN(addr);
68522- vma = find_vma(mm, addr);
68523- if (TASK_SIZE - len >= addr &&
68524- (!vma || addr + len <= vma->vm_start))
68525- return addr;
68526+ if (TASK_SIZE - len >= addr) {
68527+ vma = find_vma(mm, addr);
68528+ if (check_heap_stack_gap(vma, addr, len))
68529+ return addr;
68530+ }
68531 }
68532 if (len > mm->cached_hole_size) {
68533- start_addr = addr = mm->free_area_cache;
68534+ start_addr = addr = mm->free_area_cache;
68535 } else {
68536- start_addr = addr = TASK_UNMAPPED_BASE;
68537- mm->cached_hole_size = 0;
68538+ start_addr = addr = mm->mmap_base;
68539+ mm->cached_hole_size = 0;
68540 }
68541
68542 full_search:
68543@@ -1303,34 +1502,40 @@ full_search:
68544 * Start a new search - just in case we missed
68545 * some holes.
68546 */
68547- if (start_addr != TASK_UNMAPPED_BASE) {
68548- addr = TASK_UNMAPPED_BASE;
68549- start_addr = addr;
68550+ if (start_addr != mm->mmap_base) {
68551+ start_addr = addr = mm->mmap_base;
68552 mm->cached_hole_size = 0;
68553 goto full_search;
68554 }
68555 return -ENOMEM;
68556 }
68557- if (!vma || addr + len <= vma->vm_start) {
68558- /*
68559- * Remember the place where we stopped the search:
68560- */
68561- mm->free_area_cache = addr + len;
68562- return addr;
68563- }
68564+ if (check_heap_stack_gap(vma, addr, len))
68565+ break;
68566 if (addr + mm->cached_hole_size < vma->vm_start)
68567 mm->cached_hole_size = vma->vm_start - addr;
68568 addr = vma->vm_end;
68569 }
68570+
68571+ /*
68572+ * Remember the place where we stopped the search:
68573+ */
68574+ mm->free_area_cache = addr + len;
68575+ return addr;
68576 }
68577 #endif
68578
68579 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68580 {
68581+
68582+#ifdef CONFIG_PAX_SEGMEXEC
68583+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68584+ return;
68585+#endif
68586+
68587 /*
68588 * Is this a new hole at the lowest possible address?
68589 */
68590- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68591+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68592 mm->free_area_cache = addr;
68593 mm->cached_hole_size = ~0UL;
68594 }
68595@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
68596 {
68597 struct vm_area_struct *vma;
68598 struct mm_struct *mm = current->mm;
68599- unsigned long addr = addr0;
68600+ unsigned long base = mm->mmap_base, addr = addr0;
68601
68602 /* requested length too big for entire address space */
68603 if (len > TASK_SIZE)
68604@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
68605 if (flags & MAP_FIXED)
68606 return addr;
68607
68608+#ifdef CONFIG_PAX_RANDMMAP
68609+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68610+#endif
68611+
68612 /* requesting a specific address */
68613 if (addr) {
68614 addr = PAGE_ALIGN(addr);
68615- vma = find_vma(mm, addr);
68616- if (TASK_SIZE - len >= addr &&
68617- (!vma || addr + len <= vma->vm_start))
68618- return addr;
68619+ if (TASK_SIZE - len >= addr) {
68620+ vma = find_vma(mm, addr);
68621+ if (check_heap_stack_gap(vma, addr, len))
68622+ return addr;
68623+ }
68624 }
68625
68626 /* check if free_area_cache is useful for us */
68627@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
68628 /* make sure it can fit in the remaining address space */
68629 if (addr > len) {
68630 vma = find_vma(mm, addr-len);
68631- if (!vma || addr <= vma->vm_start)
68632+ if (check_heap_stack_gap(vma, addr - len, len))
68633 /* remember the address as a hint for next time */
68634 return (mm->free_area_cache = addr-len);
68635 }
68636@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
68637 * return with success:
68638 */
68639 vma = find_vma(mm, addr);
68640- if (!vma || addr+len <= vma->vm_start)
68641+ if (check_heap_stack_gap(vma, addr, len))
68642 /* remember the address as a hint for next time */
68643 return (mm->free_area_cache = addr);
68644
68645@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
68646 mm->cached_hole_size = vma->vm_start - addr;
68647
68648 /* try just below the current vma->vm_start */
68649- addr = vma->vm_start-len;
68650- } while (len < vma->vm_start);
68651+ addr = skip_heap_stack_gap(vma, len);
68652+ } while (!IS_ERR_VALUE(addr));
68653
68654 bottomup:
68655 /*
68656@@ -1414,13 +1624,21 @@ bottomup:
68657 * can happen with large stack limits and large mmap()
68658 * allocations.
68659 */
68660+ mm->mmap_base = TASK_UNMAPPED_BASE;
68661+
68662+#ifdef CONFIG_PAX_RANDMMAP
68663+ if (mm->pax_flags & MF_PAX_RANDMMAP)
68664+ mm->mmap_base += mm->delta_mmap;
68665+#endif
68666+
68667+ mm->free_area_cache = mm->mmap_base;
68668 mm->cached_hole_size = ~0UL;
68669- mm->free_area_cache = TASK_UNMAPPED_BASE;
68670 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68671 /*
68672 * Restore the topdown base:
68673 */
68674- mm->free_area_cache = mm->mmap_base;
68675+ mm->mmap_base = base;
68676+ mm->free_area_cache = base;
68677 mm->cached_hole_size = ~0UL;
68678
68679 return addr;
68680@@ -1429,6 +1647,12 @@ bottomup:
68681
68682 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68683 {
68684+
68685+#ifdef CONFIG_PAX_SEGMEXEC
68686+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68687+ return;
68688+#endif
68689+
68690 /*
68691 * Is this a new hole at the highest possible address?
68692 */
68693@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
68694 mm->free_area_cache = addr;
68695
68696 /* don't allow allocations above current base */
68697- if (mm->free_area_cache > mm->mmap_base)
68698+ if (mm->free_area_cache > mm->mmap_base) {
68699 mm->free_area_cache = mm->mmap_base;
68700+ mm->cached_hole_size = ~0UL;
68701+ }
68702 }
68703
68704 unsigned long
68705@@ -1545,6 +1771,27 @@ out:
68706 return prev ? prev->vm_next : vma;
68707 }
68708
68709+#ifdef CONFIG_PAX_SEGMEXEC
68710+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68711+{
68712+ struct vm_area_struct *vma_m;
68713+
68714+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68715+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68716+ BUG_ON(vma->vm_mirror);
68717+ return NULL;
68718+ }
68719+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68720+ vma_m = vma->vm_mirror;
68721+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68722+ BUG_ON(vma->vm_file != vma_m->vm_file);
68723+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68724+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
68725+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68726+ return vma_m;
68727+}
68728+#endif
68729+
68730 /*
68731 * Verify that the stack growth is acceptable and
68732 * update accounting. This is shared with both the
68733@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
68734 return -ENOMEM;
68735
68736 /* Stack limit test */
68737+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
68738 if (size > rlim[RLIMIT_STACK].rlim_cur)
68739 return -ENOMEM;
68740
68741@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
68742 unsigned long limit;
68743 locked = mm->locked_vm + grow;
68744 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
68745+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68746 if (locked > limit && !capable(CAP_IPC_LOCK))
68747 return -ENOMEM;
68748 }
68749@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
68750 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68751 * vma is the last one with address > vma->vm_end. Have to extend vma.
68752 */
68753+#ifndef CONFIG_IA64
68754+static
68755+#endif
68756 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68757 {
68758 int error;
68759+ bool locknext;
68760
68761 if (!(vma->vm_flags & VM_GROWSUP))
68762 return -EFAULT;
68763
68764+ /* Also guard against wrapping around to address 0. */
68765+ if (address < PAGE_ALIGN(address+1))
68766+ address = PAGE_ALIGN(address+1);
68767+ else
68768+ return -ENOMEM;
68769+
68770 /*
68771 * We must make sure the anon_vma is allocated
68772 * so that the anon_vma locking is not a noop.
68773 */
68774 if (unlikely(anon_vma_prepare(vma)))
68775 return -ENOMEM;
68776+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68777+ if (locknext && anon_vma_prepare(vma->vm_next))
68778+ return -ENOMEM;
68779 anon_vma_lock(vma);
68780+ if (locknext)
68781+ anon_vma_lock(vma->vm_next);
68782
68783 /*
68784 * vma->vm_start/vm_end cannot change under us because the caller
68785 * is required to hold the mmap_sem in read mode. We need the
68786- * anon_vma lock to serialize against concurrent expand_stacks.
68787- * Also guard against wrapping around to address 0.
68788+ * anon_vma locks to serialize against concurrent expand_stacks
68789+ * and expand_upwards.
68790 */
68791- if (address < PAGE_ALIGN(address+4))
68792- address = PAGE_ALIGN(address+4);
68793- else {
68794- anon_vma_unlock(vma);
68795- return -ENOMEM;
68796- }
68797 error = 0;
68798
68799 /* Somebody else might have raced and expanded it already */
68800- if (address > vma->vm_end) {
68801+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68802+ error = -ENOMEM;
68803+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68804 unsigned long size, grow;
68805
68806 size = address - vma->vm_start;
68807@@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
68808 if (!error)
68809 vma->vm_end = address;
68810 }
68811+ if (locknext)
68812+ anon_vma_unlock(vma->vm_next);
68813 anon_vma_unlock(vma);
68814 return error;
68815 }
68816@@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
68817 unsigned long address)
68818 {
68819 int error;
68820+ bool lockprev = false;
68821+ struct vm_area_struct *prev;
68822
68823 /*
68824 * We must make sure the anon_vma is allocated
68825@@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
68826 if (error)
68827 return error;
68828
68829+ prev = vma->vm_prev;
68830+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68831+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68832+#endif
68833+ if (lockprev && anon_vma_prepare(prev))
68834+ return -ENOMEM;
68835+ if (lockprev)
68836+ anon_vma_lock(prev);
68837+
68838 anon_vma_lock(vma);
68839
68840 /*
68841@@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
68842 */
68843
68844 /* Somebody else might have raced and expanded it already */
68845- if (address < vma->vm_start) {
68846+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68847+ error = -ENOMEM;
68848+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68849 unsigned long size, grow;
68850
68851+#ifdef CONFIG_PAX_SEGMEXEC
68852+ struct vm_area_struct *vma_m;
68853+
68854+ vma_m = pax_find_mirror_vma(vma);
68855+#endif
68856+
68857 size = vma->vm_end - address;
68858 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68859
68860@@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
68861 if (!error) {
68862 vma->vm_start = address;
68863 vma->vm_pgoff -= grow;
68864+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68865+
68866+#ifdef CONFIG_PAX_SEGMEXEC
68867+ if (vma_m) {
68868+ vma_m->vm_start -= grow << PAGE_SHIFT;
68869+ vma_m->vm_pgoff -= grow;
68870+ }
68871+#endif
68872+
68873 }
68874 }
68875 anon_vma_unlock(vma);
68876+ if (lockprev)
68877+ anon_vma_unlock(prev);
68878 return error;
68879 }
68880
68881@@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
68882 do {
68883 long nrpages = vma_pages(vma);
68884
68885+#ifdef CONFIG_PAX_SEGMEXEC
68886+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
68887+ vma = remove_vma(vma);
68888+ continue;
68889+ }
68890+#endif
68891+
68892 mm->total_vm -= nrpages;
68893 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
68894 vma = remove_vma(vma);
68895@@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
68896 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
68897 vma->vm_prev = NULL;
68898 do {
68899+
68900+#ifdef CONFIG_PAX_SEGMEXEC
68901+ if (vma->vm_mirror) {
68902+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
68903+ vma->vm_mirror->vm_mirror = NULL;
68904+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
68905+ vma->vm_mirror = NULL;
68906+ }
68907+#endif
68908+
68909 rb_erase(&vma->vm_rb, &mm->mm_rb);
68910 mm->map_count--;
68911 tail_vma = vma;
68912@@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
68913 struct mempolicy *pol;
68914 struct vm_area_struct *new;
68915
68916+#ifdef CONFIG_PAX_SEGMEXEC
68917+ struct vm_area_struct *vma_m, *new_m = NULL;
68918+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
68919+#endif
68920+
68921 if (is_vm_hugetlb_page(vma) && (addr &
68922 ~(huge_page_mask(hstate_vma(vma)))))
68923 return -EINVAL;
68924
68925+#ifdef CONFIG_PAX_SEGMEXEC
68926+ vma_m = pax_find_mirror_vma(vma);
68927+
68928+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68929+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
68930+ if (mm->map_count >= sysctl_max_map_count-1)
68931+ return -ENOMEM;
68932+ } else
68933+#endif
68934+
68935 if (mm->map_count >= sysctl_max_map_count)
68936 return -ENOMEM;
68937
68938@@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
68939 if (!new)
68940 return -ENOMEM;
68941
68942+#ifdef CONFIG_PAX_SEGMEXEC
68943+ if (vma_m) {
68944+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68945+ if (!new_m) {
68946+ kmem_cache_free(vm_area_cachep, new);
68947+ return -ENOMEM;
68948+ }
68949+ }
68950+#endif
68951+
68952 /* most fields are the same, copy all, and then fixup */
68953 *new = *vma;
68954
68955@@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
68956 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
68957 }
68958
68959+#ifdef CONFIG_PAX_SEGMEXEC
68960+ if (vma_m) {
68961+ *new_m = *vma_m;
68962+ new_m->vm_mirror = new;
68963+ new->vm_mirror = new_m;
68964+
68965+ if (new_below)
68966+ new_m->vm_end = addr_m;
68967+ else {
68968+ new_m->vm_start = addr_m;
68969+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
68970+ }
68971+ }
68972+#endif
68973+
68974 pol = mpol_dup(vma_policy(vma));
68975 if (IS_ERR(pol)) {
68976+
68977+#ifdef CONFIG_PAX_SEGMEXEC
68978+ if (new_m)
68979+ kmem_cache_free(vm_area_cachep, new_m);
68980+#endif
68981+
68982 kmem_cache_free(vm_area_cachep, new);
68983 return PTR_ERR(pol);
68984 }
68985@@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
68986 else
68987 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
68988
68989+#ifdef CONFIG_PAX_SEGMEXEC
68990+ if (vma_m) {
68991+ mpol_get(pol);
68992+ vma_set_policy(new_m, pol);
68993+
68994+ if (new_m->vm_file) {
68995+ get_file(new_m->vm_file);
68996+ if (vma_m->vm_flags & VM_EXECUTABLE)
68997+ added_exe_file_vma(mm);
68998+ }
68999+
69000+ if (new_m->vm_ops && new_m->vm_ops->open)
69001+ new_m->vm_ops->open(new_m);
69002+
69003+ if (new_below)
69004+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69005+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69006+ else
69007+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69008+ }
69009+#endif
69010+
69011 return 0;
69012 }
69013
69014@@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
69015 * work. This now handles partial unmappings.
69016 * Jeremy Fitzhardinge <jeremy@goop.org>
69017 */
69018+#ifdef CONFIG_PAX_SEGMEXEC
69019+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69020+{
69021+ int ret = __do_munmap(mm, start, len);
69022+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69023+ return ret;
69024+
69025+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69026+}
69027+
69028+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69029+#else
69030 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69031+#endif
69032 {
69033 unsigned long end;
69034 struct vm_area_struct *vma, *prev, *last;
69035
69036+ /*
69037+ * mm->mmap_sem is required to protect against another thread
69038+ * changing the mappings in case we sleep.
69039+ */
69040+ verify_mm_writelocked(mm);
69041+
69042 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69043 return -EINVAL;
69044
69045@@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
69046 /* Fix up all other VM information */
69047 remove_vma_list(mm, vma);
69048
69049+ track_exec_limit(mm, start, end, 0UL);
69050+
69051 return 0;
69052 }
69053
69054@@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
69055
69056 profile_munmap(addr);
69057
69058+#ifdef CONFIG_PAX_SEGMEXEC
69059+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69060+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69061+ return -EINVAL;
69062+#endif
69063+
69064 down_write(&mm->mmap_sem);
69065 ret = do_munmap(mm, addr, len);
69066 up_write(&mm->mmap_sem);
69067 return ret;
69068 }
69069
69070-static inline void verify_mm_writelocked(struct mm_struct *mm)
69071-{
69072-#ifdef CONFIG_DEBUG_VM
69073- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69074- WARN_ON(1);
69075- up_read(&mm->mmap_sem);
69076- }
69077-#endif
69078-}
69079-
69080 /*
69081 * this is really a simplified "do_mmap". it only handles
69082 * anonymous maps. eventually we may be able to do some
69083@@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
69084 struct rb_node ** rb_link, * rb_parent;
69085 pgoff_t pgoff = addr >> PAGE_SHIFT;
69086 int error;
69087+ unsigned long charged;
69088
69089 len = PAGE_ALIGN(len);
69090 if (!len)
69091@@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
69092
69093 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69094
69095+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69096+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69097+ flags &= ~VM_EXEC;
69098+
69099+#ifdef CONFIG_PAX_MPROTECT
69100+ if (mm->pax_flags & MF_PAX_MPROTECT)
69101+ flags &= ~VM_MAYEXEC;
69102+#endif
69103+
69104+ }
69105+#endif
69106+
69107 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69108 if (error & ~PAGE_MASK)
69109 return error;
69110
69111+ charged = len >> PAGE_SHIFT;
69112+
69113 /*
69114 * mlock MCL_FUTURE?
69115 */
69116 if (mm->def_flags & VM_LOCKED) {
69117 unsigned long locked, lock_limit;
69118- locked = len >> PAGE_SHIFT;
69119+ locked = charged;
69120 locked += mm->locked_vm;
69121 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
69122 lock_limit >>= PAGE_SHIFT;
69123@@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
69124 /*
69125 * Clear old maps. this also does some error checking for us
69126 */
69127- munmap_back:
69128 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69129 if (vma && vma->vm_start < addr + len) {
69130 if (do_munmap(mm, addr, len))
69131 return -ENOMEM;
69132- goto munmap_back;
69133+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69134+ BUG_ON(vma && vma->vm_start < addr + len);
69135 }
69136
69137 /* Check against address space limits *after* clearing old maps... */
69138- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69139+ if (!may_expand_vm(mm, charged))
69140 return -ENOMEM;
69141
69142 if (mm->map_count > sysctl_max_map_count)
69143 return -ENOMEM;
69144
69145- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69146+ if (security_vm_enough_memory(charged))
69147 return -ENOMEM;
69148
69149 /* Can we just expand an old private anonymous mapping? */
69150@@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
69151 */
69152 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69153 if (!vma) {
69154- vm_unacct_memory(len >> PAGE_SHIFT);
69155+ vm_unacct_memory(charged);
69156 return -ENOMEM;
69157 }
69158
69159@@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
69160 vma->vm_page_prot = vm_get_page_prot(flags);
69161 vma_link(mm, vma, prev, rb_link, rb_parent);
69162 out:
69163- mm->total_vm += len >> PAGE_SHIFT;
69164+ mm->total_vm += charged;
69165 if (flags & VM_LOCKED) {
69166 if (!mlock_vma_pages_range(vma, addr, addr + len))
69167- mm->locked_vm += (len >> PAGE_SHIFT);
69168+ mm->locked_vm += charged;
69169 }
69170+ track_exec_limit(mm, addr, addr + len, flags);
69171 return addr;
69172 }
69173
69174@@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
69175 * Walk the list again, actually closing and freeing it,
69176 * with preemption enabled, without holding any MM locks.
69177 */
69178- while (vma)
69179+ while (vma) {
69180+ vma->vm_mirror = NULL;
69181 vma = remove_vma(vma);
69182+ }
69183
69184 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69185 }
69186@@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
69187 struct vm_area_struct * __vma, * prev;
69188 struct rb_node ** rb_link, * rb_parent;
69189
69190+#ifdef CONFIG_PAX_SEGMEXEC
69191+ struct vm_area_struct *vma_m = NULL;
69192+#endif
69193+
69194 /*
69195 * The vm_pgoff of a purely anonymous vma should be irrelevant
69196 * until its first write fault, when page's anon_vma and index
69197@@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
69198 if ((vma->vm_flags & VM_ACCOUNT) &&
69199 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69200 return -ENOMEM;
69201+
69202+#ifdef CONFIG_PAX_SEGMEXEC
69203+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69204+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69205+ if (!vma_m)
69206+ return -ENOMEM;
69207+ }
69208+#endif
69209+
69210 vma_link(mm, vma, prev, rb_link, rb_parent);
69211+
69212+#ifdef CONFIG_PAX_SEGMEXEC
69213+ if (vma_m)
69214+ pax_mirror_vma(vma_m, vma);
69215+#endif
69216+
69217 return 0;
69218 }
69219
69220@@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
69221 struct rb_node **rb_link, *rb_parent;
69222 struct mempolicy *pol;
69223
69224+ BUG_ON(vma->vm_mirror);
69225+
69226 /*
69227 * If anonymous vma has not yet been faulted, update new pgoff
69228 * to match new location, to increase its chance of merging.
69229@@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
69230 return new_vma;
69231 }
69232
69233+#ifdef CONFIG_PAX_SEGMEXEC
69234+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69235+{
69236+ struct vm_area_struct *prev_m;
69237+ struct rb_node **rb_link_m, *rb_parent_m;
69238+ struct mempolicy *pol_m;
69239+
69240+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69241+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69242+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69243+ *vma_m = *vma;
69244+ pol_m = vma_policy(vma_m);
69245+ mpol_get(pol_m);
69246+ vma_set_policy(vma_m, pol_m);
69247+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69248+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69249+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69250+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69251+ if (vma_m->vm_file)
69252+ get_file(vma_m->vm_file);
69253+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69254+ vma_m->vm_ops->open(vma_m);
69255+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69256+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69257+ vma_m->vm_mirror = vma;
69258+ vma->vm_mirror = vma_m;
69259+}
69260+#endif
69261+
69262 /*
69263 * Return true if the calling process may expand its vm space by the passed
69264 * number of pages
69265@@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
69266 unsigned long lim;
69267
69268 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
69269-
69270+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69271 if (cur + npages > lim)
69272 return 0;
69273 return 1;
69274@@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
69275 vma->vm_start = addr;
69276 vma->vm_end = addr + len;
69277
69278+#ifdef CONFIG_PAX_MPROTECT
69279+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69280+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69281+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69282+ return -EPERM;
69283+ if (!(vm_flags & VM_EXEC))
69284+ vm_flags &= ~VM_MAYEXEC;
69285+#else
69286+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69287+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69288+#endif
69289+ else
69290+ vm_flags &= ~VM_MAYWRITE;
69291+ }
69292+#endif
69293+
69294 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69295 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69296
69297diff -urNp linux-2.6.32.45/mm/mprotect.c linux-2.6.32.45/mm/mprotect.c
69298--- linux-2.6.32.45/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
69299+++ linux-2.6.32.45/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
69300@@ -24,10 +24,16 @@
69301 #include <linux/mmu_notifier.h>
69302 #include <linux/migrate.h>
69303 #include <linux/perf_event.h>
69304+
69305+#ifdef CONFIG_PAX_MPROTECT
69306+#include <linux/elf.h>
69307+#endif
69308+
69309 #include <asm/uaccess.h>
69310 #include <asm/pgtable.h>
69311 #include <asm/cacheflush.h>
69312 #include <asm/tlbflush.h>
69313+#include <asm/mmu_context.h>
69314
69315 #ifndef pgprot_modify
69316 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69317@@ -132,6 +138,48 @@ static void change_protection(struct vm_
69318 flush_tlb_range(vma, start, end);
69319 }
69320
69321+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69322+/* called while holding the mmap semaphore for writing, except during stack expansion */
69323+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69324+{
69325+ unsigned long oldlimit, newlimit = 0UL;
69326+
69327+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
69328+ return;
69329+
69330+ spin_lock(&mm->page_table_lock);
69331+ oldlimit = mm->context.user_cs_limit;
69332+ if ((prot & VM_EXEC) && oldlimit < end)
69333+ /* USER_CS limit moved up */
69334+ newlimit = end;
69335+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69336+ /* USER_CS limit moved down */
69337+ newlimit = start;
69338+
69339+ if (newlimit) {
69340+ mm->context.user_cs_limit = newlimit;
69341+
69342+#ifdef CONFIG_SMP
69343+ wmb();
69344+ cpus_clear(mm->context.cpu_user_cs_mask);
69345+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69346+#endif
69347+
69348+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69349+ }
69350+ spin_unlock(&mm->page_table_lock);
69351+ if (newlimit == end) {
69352+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69353+
69354+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69355+ if (is_vm_hugetlb_page(vma))
69356+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69357+ else
69358+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69359+ }
69360+}
69361+#endif
69362+
69363 int
69364 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69365 unsigned long start, unsigned long end, unsigned long newflags)
69366@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
69367 int error;
69368 int dirty_accountable = 0;
69369
69370+#ifdef CONFIG_PAX_SEGMEXEC
69371+ struct vm_area_struct *vma_m = NULL;
69372+ unsigned long start_m, end_m;
69373+
69374+ start_m = start + SEGMEXEC_TASK_SIZE;
69375+ end_m = end + SEGMEXEC_TASK_SIZE;
69376+#endif
69377+
69378 if (newflags == oldflags) {
69379 *pprev = vma;
69380 return 0;
69381 }
69382
69383+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69384+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69385+
69386+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69387+ return -ENOMEM;
69388+
69389+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69390+ return -ENOMEM;
69391+ }
69392+
69393 /*
69394 * If we make a private mapping writable we increase our commit;
69395 * but (without finer accounting) cannot reduce our commit if we
69396@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
69397 }
69398 }
69399
69400+#ifdef CONFIG_PAX_SEGMEXEC
69401+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69402+ if (start != vma->vm_start) {
69403+ error = split_vma(mm, vma, start, 1);
69404+ if (error)
69405+ goto fail;
69406+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69407+ *pprev = (*pprev)->vm_next;
69408+ }
69409+
69410+ if (end != vma->vm_end) {
69411+ error = split_vma(mm, vma, end, 0);
69412+ if (error)
69413+ goto fail;
69414+ }
69415+
69416+ if (pax_find_mirror_vma(vma)) {
69417+ error = __do_munmap(mm, start_m, end_m - start_m);
69418+ if (error)
69419+ goto fail;
69420+ } else {
69421+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69422+ if (!vma_m) {
69423+ error = -ENOMEM;
69424+ goto fail;
69425+ }
69426+ vma->vm_flags = newflags;
69427+ pax_mirror_vma(vma_m, vma);
69428+ }
69429+ }
69430+#endif
69431+
69432 /*
69433 * First try to merge with previous and/or next vma.
69434 */
69435@@ -195,9 +293,21 @@ success:
69436 * vm_flags and vm_page_prot are protected by the mmap_sem
69437 * held in write mode.
69438 */
69439+
69440+#ifdef CONFIG_PAX_SEGMEXEC
69441+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69442+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69443+#endif
69444+
69445 vma->vm_flags = newflags;
69446+
69447+#ifdef CONFIG_PAX_MPROTECT
69448+ if (mm->binfmt && mm->binfmt->handle_mprotect)
69449+ mm->binfmt->handle_mprotect(vma, newflags);
69450+#endif
69451+
69452 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69453- vm_get_page_prot(newflags));
69454+ vm_get_page_prot(vma->vm_flags));
69455
69456 if (vma_wants_writenotify(vma)) {
69457 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69458@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69459 end = start + len;
69460 if (end <= start)
69461 return -ENOMEM;
69462+
69463+#ifdef CONFIG_PAX_SEGMEXEC
69464+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69465+ if (end > SEGMEXEC_TASK_SIZE)
69466+ return -EINVAL;
69467+ } else
69468+#endif
69469+
69470+ if (end > TASK_SIZE)
69471+ return -EINVAL;
69472+
69473 if (!arch_validate_prot(prot))
69474 return -EINVAL;
69475
69476@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69477 /*
69478 * Does the application expect PROT_READ to imply PROT_EXEC:
69479 */
69480- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69481+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69482 prot |= PROT_EXEC;
69483
69484 vm_flags = calc_vm_prot_bits(prot);
69485@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69486 if (start > vma->vm_start)
69487 prev = vma;
69488
69489+#ifdef CONFIG_PAX_MPROTECT
69490+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69491+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
69492+#endif
69493+
69494 for (nstart = start ; ; ) {
69495 unsigned long newflags;
69496
69497@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69498
69499 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69500 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69501+ if (prot & (PROT_WRITE | PROT_EXEC))
69502+ gr_log_rwxmprotect(vma->vm_file);
69503+
69504+ error = -EACCES;
69505+ goto out;
69506+ }
69507+
69508+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69509 error = -EACCES;
69510 goto out;
69511 }
69512@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69513 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69514 if (error)
69515 goto out;
69516+
69517+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
69518+
69519 nstart = tmp;
69520
69521 if (nstart < prev->vm_end)
69522diff -urNp linux-2.6.32.45/mm/mremap.c linux-2.6.32.45/mm/mremap.c
69523--- linux-2.6.32.45/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
69524+++ linux-2.6.32.45/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
69525@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
69526 continue;
69527 pte = ptep_clear_flush(vma, old_addr, old_pte);
69528 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69529+
69530+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69531+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69532+ pte = pte_exprotect(pte);
69533+#endif
69534+
69535 set_pte_at(mm, new_addr, new_pte, pte);
69536 }
69537
69538@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
69539 if (is_vm_hugetlb_page(vma))
69540 goto Einval;
69541
69542+#ifdef CONFIG_PAX_SEGMEXEC
69543+ if (pax_find_mirror_vma(vma))
69544+ goto Einval;
69545+#endif
69546+
69547 /* We can't remap across vm area boundaries */
69548 if (old_len > vma->vm_end - addr)
69549 goto Efault;
69550@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
69551 unsigned long ret = -EINVAL;
69552 unsigned long charged = 0;
69553 unsigned long map_flags;
69554+ unsigned long pax_task_size = TASK_SIZE;
69555
69556 if (new_addr & ~PAGE_MASK)
69557 goto out;
69558
69559- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69560+#ifdef CONFIG_PAX_SEGMEXEC
69561+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69562+ pax_task_size = SEGMEXEC_TASK_SIZE;
69563+#endif
69564+
69565+ pax_task_size -= PAGE_SIZE;
69566+
69567+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69568 goto out;
69569
69570 /* Check if the location we're moving into overlaps the
69571 * old location at all, and fail if it does.
69572 */
69573- if ((new_addr <= addr) && (new_addr+new_len) > addr)
69574- goto out;
69575-
69576- if ((addr <= new_addr) && (addr+old_len) > new_addr)
69577+ if (addr + old_len > new_addr && new_addr + new_len > addr)
69578 goto out;
69579
69580 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69581@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
69582 struct vm_area_struct *vma;
69583 unsigned long ret = -EINVAL;
69584 unsigned long charged = 0;
69585+ unsigned long pax_task_size = TASK_SIZE;
69586
69587 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69588 goto out;
69589@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
69590 if (!new_len)
69591 goto out;
69592
69593+#ifdef CONFIG_PAX_SEGMEXEC
69594+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69595+ pax_task_size = SEGMEXEC_TASK_SIZE;
69596+#endif
69597+
69598+ pax_task_size -= PAGE_SIZE;
69599+
69600+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69601+ old_len > pax_task_size || addr > pax_task_size-old_len)
69602+ goto out;
69603+
69604 if (flags & MREMAP_FIXED) {
69605 if (flags & MREMAP_MAYMOVE)
69606 ret = mremap_to(addr, old_len, new_addr, new_len);
69607@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
69608 addr + new_len);
69609 }
69610 ret = addr;
69611+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69612 goto out;
69613 }
69614 }
69615@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
69616 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69617 if (ret)
69618 goto out;
69619+
69620+ map_flags = vma->vm_flags;
69621 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69622+ if (!(ret & ~PAGE_MASK)) {
69623+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69624+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69625+ }
69626 }
69627 out:
69628 if (ret & ~PAGE_MASK)
69629diff -urNp linux-2.6.32.45/mm/nommu.c linux-2.6.32.45/mm/nommu.c
69630--- linux-2.6.32.45/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
69631+++ linux-2.6.32.45/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
69632@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69633 int sysctl_overcommit_ratio = 50; /* default is 50% */
69634 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69635 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69636-int heap_stack_gap = 0;
69637
69638 atomic_long_t mmap_pages_allocated;
69639
69640@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
69641 EXPORT_SYMBOL(find_vma);
69642
69643 /*
69644- * find a VMA
69645- * - we don't extend stack VMAs under NOMMU conditions
69646- */
69647-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69648-{
69649- return find_vma(mm, addr);
69650-}
69651-
69652-/*
69653 * expand a stack to a given address
69654 * - not supported under NOMMU conditions
69655 */
69656diff -urNp linux-2.6.32.45/mm/page_alloc.c linux-2.6.32.45/mm/page_alloc.c
69657--- linux-2.6.32.45/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
69658+++ linux-2.6.32.45/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
69659@@ -289,7 +289,7 @@ out:
69660 * This usage means that zero-order pages may not be compound.
69661 */
69662
69663-static void free_compound_page(struct page *page)
69664+void free_compound_page(struct page *page)
69665 {
69666 __free_pages_ok(page, compound_order(page));
69667 }
69668@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
69669 int bad = 0;
69670 int wasMlocked = __TestClearPageMlocked(page);
69671
69672+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69673+ unsigned long index = 1UL << order;
69674+#endif
69675+
69676 kmemcheck_free_shadow(page, order);
69677
69678 for (i = 0 ; i < (1 << order) ; ++i)
69679@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
69680 debug_check_no_obj_freed(page_address(page),
69681 PAGE_SIZE << order);
69682 }
69683+
69684+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69685+ for (; index; --index)
69686+ sanitize_highpage(page + index - 1);
69687+#endif
69688+
69689 arch_free_page(page, order);
69690 kernel_map_pages(page, 1 << order, 0);
69691
69692@@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
69693 arch_alloc_page(page, order);
69694 kernel_map_pages(page, 1 << order, 1);
69695
69696+#ifndef CONFIG_PAX_MEMORY_SANITIZE
69697 if (gfp_flags & __GFP_ZERO)
69698 prep_zero_page(page, order, gfp_flags);
69699+#endif
69700
69701 if (order && (gfp_flags & __GFP_COMP))
69702 prep_compound_page(page, order);
69703@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
69704 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
69705 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
69706 }
69707+
69708+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69709+ sanitize_highpage(page);
69710+#endif
69711+
69712 arch_free_page(page, 0);
69713 kernel_map_pages(page, 1, 0);
69714
69715@@ -2179,6 +2196,8 @@ void show_free_areas(void)
69716 int cpu;
69717 struct zone *zone;
69718
69719+ pax_track_stack();
69720+
69721 for_each_populated_zone(zone) {
69722 show_node(zone);
69723 printk("%s per-cpu:\n", zone->name);
69724@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
69725 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
69726 }
69727 #else
69728-static void inline setup_usemap(struct pglist_data *pgdat,
69729+static inline void setup_usemap(struct pglist_data *pgdat,
69730 struct zone *zone, unsigned long zonesize) {}
69731 #endif /* CONFIG_SPARSEMEM */
69732
69733diff -urNp linux-2.6.32.45/mm/percpu.c linux-2.6.32.45/mm/percpu.c
69734--- linux-2.6.32.45/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
69735+++ linux-2.6.32.45/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
69736@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
69737 static unsigned int pcpu_last_unit_cpu __read_mostly;
69738
69739 /* the address of the first chunk which starts with the kernel static area */
69740-void *pcpu_base_addr __read_mostly;
69741+void *pcpu_base_addr __read_only;
69742 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69743
69744 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69745diff -urNp linux-2.6.32.45/mm/rmap.c linux-2.6.32.45/mm/rmap.c
69746--- linux-2.6.32.45/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
69747+++ linux-2.6.32.45/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
69748@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
69749 /* page_table_lock to protect against threads */
69750 spin_lock(&mm->page_table_lock);
69751 if (likely(!vma->anon_vma)) {
69752+
69753+#ifdef CONFIG_PAX_SEGMEXEC
69754+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
69755+
69756+ if (vma_m) {
69757+ BUG_ON(vma_m->anon_vma);
69758+ vma_m->anon_vma = anon_vma;
69759+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
69760+ }
69761+#endif
69762+
69763 vma->anon_vma = anon_vma;
69764 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
69765 allocated = NULL;
69766diff -urNp linux-2.6.32.45/mm/shmem.c linux-2.6.32.45/mm/shmem.c
69767--- linux-2.6.32.45/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
69768+++ linux-2.6.32.45/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
69769@@ -31,7 +31,7 @@
69770 #include <linux/swap.h>
69771 #include <linux/ima.h>
69772
69773-static struct vfsmount *shm_mnt;
69774+struct vfsmount *shm_mnt;
69775
69776 #ifdef CONFIG_SHMEM
69777 /*
69778@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
69779 goto unlock;
69780 }
69781 entry = shmem_swp_entry(info, index, NULL);
69782+ if (!entry)
69783+ goto unlock;
69784 if (entry->val) {
69785 /*
69786 * The more uptodate page coming down from a stacked
69787@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
69788 struct vm_area_struct pvma;
69789 struct page *page;
69790
69791+ pax_track_stack();
69792+
69793 spol = mpol_cond_copy(&mpol,
69794 mpol_shared_policy_lookup(&info->policy, idx));
69795
69796@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
69797
69798 info = SHMEM_I(inode);
69799 inode->i_size = len-1;
69800- if (len <= (char *)inode - (char *)info) {
69801+ if (len <= (char *)inode - (char *)info && len <= 64) {
69802 /* do it inline */
69803 memcpy(info, symname, len);
69804 inode->i_op = &shmem_symlink_inline_operations;
69805@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
69806 int err = -ENOMEM;
69807
69808 /* Round up to L1_CACHE_BYTES to resist false sharing */
69809- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
69810- L1_CACHE_BYTES), GFP_KERNEL);
69811+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
69812 if (!sbinfo)
69813 return -ENOMEM;
69814
69815diff -urNp linux-2.6.32.45/mm/slab.c linux-2.6.32.45/mm/slab.c
69816--- linux-2.6.32.45/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
69817+++ linux-2.6.32.45/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
69818@@ -174,7 +174,7 @@
69819
69820 /* Legal flag mask for kmem_cache_create(). */
69821 #if DEBUG
69822-# define CREATE_MASK (SLAB_RED_ZONE | \
69823+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
69824 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
69825 SLAB_CACHE_DMA | \
69826 SLAB_STORE_USER | \
69827@@ -182,7 +182,7 @@
69828 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69829 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
69830 #else
69831-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
69832+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
69833 SLAB_CACHE_DMA | \
69834 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
69835 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69836@@ -308,7 +308,7 @@ struct kmem_list3 {
69837 * Need this for bootstrapping a per node allocator.
69838 */
69839 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
69840-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
69841+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
69842 #define CACHE_CACHE 0
69843 #define SIZE_AC MAX_NUMNODES
69844 #define SIZE_L3 (2 * MAX_NUMNODES)
69845@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
69846 if ((x)->max_freeable < i) \
69847 (x)->max_freeable = i; \
69848 } while (0)
69849-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
69850-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
69851-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
69852-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
69853+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
69854+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
69855+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
69856+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
69857 #else
69858 #define STATS_INC_ACTIVE(x) do { } while (0)
69859 #define STATS_DEC_ACTIVE(x) do { } while (0)
69860@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
69861 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
69862 */
69863 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
69864- const struct slab *slab, void *obj)
69865+ const struct slab *slab, const void *obj)
69866 {
69867 u32 offset = (obj - slab->s_mem);
69868 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
69869@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
69870 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
69871 sizes[INDEX_AC].cs_size,
69872 ARCH_KMALLOC_MINALIGN,
69873- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69874+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69875 NULL);
69876
69877 if (INDEX_AC != INDEX_L3) {
69878@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
69879 kmem_cache_create(names[INDEX_L3].name,
69880 sizes[INDEX_L3].cs_size,
69881 ARCH_KMALLOC_MINALIGN,
69882- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69883+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69884 NULL);
69885 }
69886
69887@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
69888 sizes->cs_cachep = kmem_cache_create(names->name,
69889 sizes->cs_size,
69890 ARCH_KMALLOC_MINALIGN,
69891- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69892+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69893 NULL);
69894 }
69895 #ifdef CONFIG_ZONE_DMA
69896@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
69897 }
69898 /* cpu stats */
69899 {
69900- unsigned long allochit = atomic_read(&cachep->allochit);
69901- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
69902- unsigned long freehit = atomic_read(&cachep->freehit);
69903- unsigned long freemiss = atomic_read(&cachep->freemiss);
69904+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
69905+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
69906+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
69907+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
69908
69909 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
69910 allochit, allocmiss, freehit, freemiss);
69911@@ -4471,15 +4471,66 @@ static const struct file_operations proc
69912
69913 static int __init slab_proc_init(void)
69914 {
69915- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
69916+ mode_t gr_mode = S_IRUGO;
69917+
69918+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69919+ gr_mode = S_IRUSR;
69920+#endif
69921+
69922+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
69923 #ifdef CONFIG_DEBUG_SLAB_LEAK
69924- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
69925+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
69926 #endif
69927 return 0;
69928 }
69929 module_init(slab_proc_init);
69930 #endif
69931
69932+void check_object_size(const void *ptr, unsigned long n, bool to)
69933+{
69934+
69935+#ifdef CONFIG_PAX_USERCOPY
69936+ struct page *page;
69937+ struct kmem_cache *cachep = NULL;
69938+ struct slab *slabp;
69939+ unsigned int objnr;
69940+ unsigned long offset;
69941+
69942+ if (!n)
69943+ return;
69944+
69945+ if (ZERO_OR_NULL_PTR(ptr))
69946+ goto report;
69947+
69948+ if (!virt_addr_valid(ptr))
69949+ return;
69950+
69951+ page = virt_to_head_page(ptr);
69952+
69953+ if (!PageSlab(page)) {
69954+ if (object_is_on_stack(ptr, n) == -1)
69955+ goto report;
69956+ return;
69957+ }
69958+
69959+ cachep = page_get_cache(page);
69960+ if (!(cachep->flags & SLAB_USERCOPY))
69961+ goto report;
69962+
69963+ slabp = page_get_slab(page);
69964+ objnr = obj_to_index(cachep, slabp, ptr);
69965+ BUG_ON(objnr >= cachep->num);
69966+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
69967+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
69968+ return;
69969+
69970+report:
69971+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
69972+#endif
69973+
69974+}
69975+EXPORT_SYMBOL(check_object_size);
69976+
69977 /**
69978 * ksize - get the actual amount of memory allocated for a given object
69979 * @objp: Pointer to the object
69980diff -urNp linux-2.6.32.45/mm/slob.c linux-2.6.32.45/mm/slob.c
69981--- linux-2.6.32.45/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
69982+++ linux-2.6.32.45/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
69983@@ -29,7 +29,7 @@
69984 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
69985 * alloc_pages() directly, allocating compound pages so the page order
69986 * does not have to be separately tracked, and also stores the exact
69987- * allocation size in page->private so that it can be used to accurately
69988+ * allocation size in slob_page->size so that it can be used to accurately
69989 * provide ksize(). These objects are detected in kfree() because slob_page()
69990 * is false for them.
69991 *
69992@@ -58,6 +58,7 @@
69993 */
69994
69995 #include <linux/kernel.h>
69996+#include <linux/sched.h>
69997 #include <linux/slab.h>
69998 #include <linux/mm.h>
69999 #include <linux/swap.h> /* struct reclaim_state */
70000@@ -100,7 +101,8 @@ struct slob_page {
70001 unsigned long flags; /* mandatory */
70002 atomic_t _count; /* mandatory */
70003 slobidx_t units; /* free units left in page */
70004- unsigned long pad[2];
70005+ unsigned long pad[1];
70006+ unsigned long size; /* size when >=PAGE_SIZE */
70007 slob_t *free; /* first free slob_t in page */
70008 struct list_head list; /* linked list of free pages */
70009 };
70010@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
70011 */
70012 static inline int is_slob_page(struct slob_page *sp)
70013 {
70014- return PageSlab((struct page *)sp);
70015+ return PageSlab((struct page *)sp) && !sp->size;
70016 }
70017
70018 static inline void set_slob_page(struct slob_page *sp)
70019@@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
70020
70021 static inline struct slob_page *slob_page(const void *addr)
70022 {
70023- return (struct slob_page *)virt_to_page(addr);
70024+ return (struct slob_page *)virt_to_head_page(addr);
70025 }
70026
70027 /*
70028@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
70029 /*
70030 * Return the size of a slob block.
70031 */
70032-static slobidx_t slob_units(slob_t *s)
70033+static slobidx_t slob_units(const slob_t *s)
70034 {
70035 if (s->units > 0)
70036 return s->units;
70037@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
70038 /*
70039 * Return the next free slob block pointer after this one.
70040 */
70041-static slob_t *slob_next(slob_t *s)
70042+static slob_t *slob_next(const slob_t *s)
70043 {
70044 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70045 slobidx_t next;
70046@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
70047 /*
70048 * Returns true if s is the last free block in its page.
70049 */
70050-static int slob_last(slob_t *s)
70051+static int slob_last(const slob_t *s)
70052 {
70053 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70054 }
70055@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
70056 if (!page)
70057 return NULL;
70058
70059+ set_slob_page(page);
70060 return page_address(page);
70061 }
70062
70063@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
70064 if (!b)
70065 return NULL;
70066 sp = slob_page(b);
70067- set_slob_page(sp);
70068
70069 spin_lock_irqsave(&slob_lock, flags);
70070 sp->units = SLOB_UNITS(PAGE_SIZE);
70071 sp->free = b;
70072+ sp->size = 0;
70073 INIT_LIST_HEAD(&sp->list);
70074 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70075 set_slob_page_free(sp, slob_list);
70076@@ -475,10 +478,9 @@ out:
70077 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
70078 #endif
70079
70080-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70081+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70082 {
70083- unsigned int *m;
70084- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70085+ slob_t *m;
70086 void *ret;
70087
70088 lockdep_trace_alloc(gfp);
70089@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
70090
70091 if (!m)
70092 return NULL;
70093- *m = size;
70094+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70095+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70096+ m[0].units = size;
70097+ m[1].units = align;
70098 ret = (void *)m + align;
70099
70100 trace_kmalloc_node(_RET_IP_, ret,
70101@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
70102
70103 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
70104 if (ret) {
70105- struct page *page;
70106- page = virt_to_page(ret);
70107- page->private = size;
70108+ struct slob_page *sp;
70109+ sp = slob_page(ret);
70110+ sp->size = size;
70111 }
70112
70113 trace_kmalloc_node(_RET_IP_, ret,
70114 size, PAGE_SIZE << order, gfp, node);
70115 }
70116
70117- kmemleak_alloc(ret, size, 1, gfp);
70118+ return ret;
70119+}
70120+
70121+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70122+{
70123+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70124+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70125+
70126+ if (!ZERO_OR_NULL_PTR(ret))
70127+ kmemleak_alloc(ret, size, 1, gfp);
70128 return ret;
70129 }
70130 EXPORT_SYMBOL(__kmalloc_node);
70131@@ -528,13 +542,88 @@ void kfree(const void *block)
70132 sp = slob_page(block);
70133 if (is_slob_page(sp)) {
70134 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70135- unsigned int *m = (unsigned int *)(block - align);
70136- slob_free(m, *m + align);
70137- } else
70138+ slob_t *m = (slob_t *)(block - align);
70139+ slob_free(m, m[0].units + align);
70140+ } else {
70141+ clear_slob_page(sp);
70142+ free_slob_page(sp);
70143+ sp->size = 0;
70144 put_page(&sp->page);
70145+ }
70146 }
70147 EXPORT_SYMBOL(kfree);
70148
70149+void check_object_size(const void *ptr, unsigned long n, bool to)
70150+{
70151+
70152+#ifdef CONFIG_PAX_USERCOPY
70153+ struct slob_page *sp;
70154+ const slob_t *free;
70155+ const void *base;
70156+ unsigned long flags;
70157+
70158+ if (!n)
70159+ return;
70160+
70161+ if (ZERO_OR_NULL_PTR(ptr))
70162+ goto report;
70163+
70164+ if (!virt_addr_valid(ptr))
70165+ return;
70166+
70167+ sp = slob_page(ptr);
70168+ if (!PageSlab((struct page*)sp)) {
70169+ if (object_is_on_stack(ptr, n) == -1)
70170+ goto report;
70171+ return;
70172+ }
70173+
70174+ if (sp->size) {
70175+ base = page_address(&sp->page);
70176+ if (base <= ptr && n <= sp->size - (ptr - base))
70177+ return;
70178+ goto report;
70179+ }
70180+
70181+ /* some tricky double walking to find the chunk */
70182+ spin_lock_irqsave(&slob_lock, flags);
70183+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70184+ free = sp->free;
70185+
70186+ while (!slob_last(free) && (void *)free <= ptr) {
70187+ base = free + slob_units(free);
70188+ free = slob_next(free);
70189+ }
70190+
70191+ while (base < (void *)free) {
70192+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70193+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70194+ int offset;
70195+
70196+ if (ptr < base + align)
70197+ break;
70198+
70199+ offset = ptr - base - align;
70200+ if (offset >= m) {
70201+ base += size;
70202+ continue;
70203+ }
70204+
70205+ if (n > m - offset)
70206+ break;
70207+
70208+ spin_unlock_irqrestore(&slob_lock, flags);
70209+ return;
70210+ }
70211+
70212+ spin_unlock_irqrestore(&slob_lock, flags);
70213+report:
70214+ pax_report_usercopy(ptr, n, to, NULL);
70215+#endif
70216+
70217+}
70218+EXPORT_SYMBOL(check_object_size);
70219+
70220 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70221 size_t ksize(const void *block)
70222 {
70223@@ -547,10 +636,10 @@ size_t ksize(const void *block)
70224 sp = slob_page(block);
70225 if (is_slob_page(sp)) {
70226 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70227- unsigned int *m = (unsigned int *)(block - align);
70228- return SLOB_UNITS(*m) * SLOB_UNIT;
70229+ slob_t *m = (slob_t *)(block - align);
70230+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70231 } else
70232- return sp->page.private;
70233+ return sp->size;
70234 }
70235 EXPORT_SYMBOL(ksize);
70236
70237@@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
70238 {
70239 struct kmem_cache *c;
70240
70241+#ifdef CONFIG_PAX_USERCOPY
70242+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70243+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70244+#else
70245 c = slob_alloc(sizeof(struct kmem_cache),
70246 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70247+#endif
70248
70249 if (c) {
70250 c->name = name;
70251@@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
70252 {
70253 void *b;
70254
70255+#ifdef CONFIG_PAX_USERCOPY
70256+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70257+#else
70258 if (c->size < PAGE_SIZE) {
70259 b = slob_alloc(c->size, flags, c->align, node);
70260 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70261 SLOB_UNITS(c->size) * SLOB_UNIT,
70262 flags, node);
70263 } else {
70264+ struct slob_page *sp;
70265+
70266 b = slob_new_pages(flags, get_order(c->size), node);
70267+ sp = slob_page(b);
70268+ sp->size = c->size;
70269 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70270 PAGE_SIZE << get_order(c->size),
70271 flags, node);
70272 }
70273+#endif
70274
70275 if (c->ctor)
70276 c->ctor(b);
70277@@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70278
70279 static void __kmem_cache_free(void *b, int size)
70280 {
70281- if (size < PAGE_SIZE)
70282+ struct slob_page *sp = slob_page(b);
70283+
70284+ if (is_slob_page(sp))
70285 slob_free(b, size);
70286- else
70287+ else {
70288+ clear_slob_page(sp);
70289+ free_slob_page(sp);
70290+ sp->size = 0;
70291 slob_free_pages(b, get_order(size));
70292+ }
70293 }
70294
70295 static void kmem_rcu_free(struct rcu_head *head)
70296@@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
70297
70298 void kmem_cache_free(struct kmem_cache *c, void *b)
70299 {
70300+ int size = c->size;
70301+
70302+#ifdef CONFIG_PAX_USERCOPY
70303+ if (size + c->align < PAGE_SIZE) {
70304+ size += c->align;
70305+ b -= c->align;
70306+ }
70307+#endif
70308+
70309 kmemleak_free_recursive(b, c->flags);
70310 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70311 struct slob_rcu *slob_rcu;
70312- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70313+ slob_rcu = b + (size - sizeof(struct slob_rcu));
70314 INIT_RCU_HEAD(&slob_rcu->head);
70315- slob_rcu->size = c->size;
70316+ slob_rcu->size = size;
70317 call_rcu(&slob_rcu->head, kmem_rcu_free);
70318 } else {
70319- __kmem_cache_free(b, c->size);
70320+ __kmem_cache_free(b, size);
70321 }
70322
70323+#ifdef CONFIG_PAX_USERCOPY
70324+ trace_kfree(_RET_IP_, b);
70325+#else
70326 trace_kmem_cache_free(_RET_IP_, b);
70327+#endif
70328+
70329 }
70330 EXPORT_SYMBOL(kmem_cache_free);
70331
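The slob.c changes above keep the requested size and the alignment in the two slob_t words in front of every large kmalloc block (m[0].units = size; m[1].units = align;), so kfree(), ksize() and the usercopy walker can recover them later. Below is a minimal user-space sketch of that prefix bookkeeping, assuming the alignment leaves room for two machine words (the patch enforces the analogous condition with BUILD_BUG_ON); tiny_alloc, tiny_usable_size and tiny_free are invented names.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* word 0 in front of the payload: requested size, word 1: alignment used */
static void *tiny_alloc(size_t size, size_t align)
{
	size_t *m = malloc(align + size);	/* room for the prefix plus the payload */

	if (!m)
		return NULL;
	m[0] = size;
	m[1] = align;
	return (char *)m + align;		/* the caller only ever sees the payload */
}

static size_t tiny_usable_size(const void *ptr, size_t align)
{
	const size_t *m = (const size_t *)((const char *)ptr - align);

	return m[0];				/* size recorded at allocation time */
}

static void tiny_free(void *ptr, size_t align)
{
	free((char *)ptr - align);
}

int main(void)
{
	size_t align = 2 * sizeof(size_t);	/* must leave room for the two prefix words */
	char *p = tiny_alloc(100, align);

	if (!p)
		return 1;
	strcpy(p, "slob-style prefix");
	printf("usable size: %zu\n", tiny_usable_size(p, align));
	tiny_free(p, align);
	return 0;
}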
70332diff -urNp linux-2.6.32.45/mm/slub.c linux-2.6.32.45/mm/slub.c
70333--- linux-2.6.32.45/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
70334+++ linux-2.6.32.45/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
70335@@ -410,7 +410,7 @@ static void print_track(const char *s, s
70336 if (!t->addr)
70337 return;
70338
70339- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70340+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70341 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70342 }
70343
70344@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
70345
70346 page = virt_to_head_page(x);
70347
70348+ BUG_ON(!PageSlab(page));
70349+
70350 slab_free(s, page, x, _RET_IP_);
70351
70352 trace_kmem_cache_free(_RET_IP_, x);
70353@@ -1937,7 +1939,7 @@ static int slub_min_objects;
70354 * Merge control. If this is set then no merging of slab caches will occur.
70355 * (Could be removed. This was introduced to pacify the merge skeptics.)
70356 */
70357-static int slub_nomerge;
70358+static int slub_nomerge = 1;
70359
70360 /*
70361 * Calculate the order of allocation given an slab object size.
70362@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
70363 * list to avoid pounding the page allocator excessively.
70364 */
70365 set_min_partial(s, ilog2(s->size));
70366- s->refcount = 1;
70367+ atomic_set(&s->refcount, 1);
70368 #ifdef CONFIG_NUMA
70369 s->remote_node_defrag_ratio = 1000;
70370 #endif
70371@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
70372 void kmem_cache_destroy(struct kmem_cache *s)
70373 {
70374 down_write(&slub_lock);
70375- s->refcount--;
70376- if (!s->refcount) {
70377+ if (atomic_dec_and_test(&s->refcount)) {
70378 list_del(&s->list);
70379 up_write(&slub_lock);
70380 if (kmem_cache_close(s)) {
70381@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
70382 __setup("slub_nomerge", setup_slub_nomerge);
70383
70384 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
70385- const char *name, int size, gfp_t gfp_flags)
70386+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
70387 {
70388- unsigned int flags = 0;
70389-
70390 if (gfp_flags & SLUB_DMA)
70391- flags = SLAB_CACHE_DMA;
70392+ flags |= SLAB_CACHE_DMA;
70393
70394 /*
70395 * This function is called with IRQs disabled during early-boot on
70396@@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
70397 EXPORT_SYMBOL(__kmalloc_node);
70398 #endif
70399
70400+void check_object_size(const void *ptr, unsigned long n, bool to)
70401+{
70402+
70403+#ifdef CONFIG_PAX_USERCOPY
70404+ struct page *page;
70405+ struct kmem_cache *s = NULL;
70406+ unsigned long offset;
70407+
70408+ if (!n)
70409+ return;
70410+
70411+ if (ZERO_OR_NULL_PTR(ptr))
70412+ goto report;
70413+
70414+ if (!virt_addr_valid(ptr))
70415+ return;
70416+
70417+ page = get_object_page(ptr);
70418+
70419+ if (!page) {
70420+ if (object_is_on_stack(ptr, n) == -1)
70421+ goto report;
70422+ return;
70423+ }
70424+
70425+ s = page->slab;
70426+ if (!(s->flags & SLAB_USERCOPY))
70427+ goto report;
70428+
70429+ offset = (ptr - page_address(page)) % s->size;
70430+ if (offset <= s->objsize && n <= s->objsize - offset)
70431+ return;
70432+
70433+report:
70434+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70435+#endif
70436+
70437+}
70438+EXPORT_SYMBOL(check_object_size);
70439+
70440 size_t ksize(const void *object)
70441 {
70442 struct page *page;
70443@@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
70444 * kmem_cache_open for slab_state == DOWN.
70445 */
70446 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
70447- sizeof(struct kmem_cache_node), GFP_NOWAIT);
70448- kmalloc_caches[0].refcount = -1;
70449+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
70450+ atomic_set(&kmalloc_caches[0].refcount, -1);
70451 caches++;
70452
70453 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
70454@@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
70455 /* Caches that are not of the two-to-the-power-of size */
70456 if (KMALLOC_MIN_SIZE <= 32) {
70457 create_kmalloc_cache(&kmalloc_caches[1],
70458- "kmalloc-96", 96, GFP_NOWAIT);
70459+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
70460 caches++;
70461 }
70462 if (KMALLOC_MIN_SIZE <= 64) {
70463 create_kmalloc_cache(&kmalloc_caches[2],
70464- "kmalloc-192", 192, GFP_NOWAIT);
70465+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
70466 caches++;
70467 }
70468
70469 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70470 create_kmalloc_cache(&kmalloc_caches[i],
70471- "kmalloc", 1 << i, GFP_NOWAIT);
70472+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
70473 caches++;
70474 }
70475
70476@@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
70477 /*
70478 * We may have set a slab to be unmergeable during bootstrap.
70479 */
70480- if (s->refcount < 0)
70481+ if (atomic_read(&s->refcount) < 0)
70482 return 1;
70483
70484 return 0;
70485@@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
70486 if (s) {
70487 int cpu;
70488
70489- s->refcount++;
70490+ atomic_inc(&s->refcount);
70491 /*
70492 * Adjust the object sizes so that we clear
70493 * the complete object on kzalloc.
70494@@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
70495
70496 if (sysfs_slab_alias(s, name)) {
70497 down_write(&slub_lock);
70498- s->refcount--;
70499+ atomic_dec(&s->refcount);
70500 up_write(&slub_lock);
70501 goto err;
70502 }
70503@@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
70504
70505 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70506 {
70507- return sprintf(buf, "%d\n", s->refcount - 1);
70508+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70509 }
70510 SLAB_ATTR_RO(aliases);
70511
70512@@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
70513 kfree(s);
70514 }
70515
70516-static struct sysfs_ops slab_sysfs_ops = {
70517+static const struct sysfs_ops slab_sysfs_ops = {
70518 .show = slab_attr_show,
70519 .store = slab_attr_store,
70520 };
70521@@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
70522 return 0;
70523 }
70524
70525-static struct kset_uevent_ops slab_uevent_ops = {
70526+static const struct kset_uevent_ops slab_uevent_ops = {
70527 .filter = uevent_filter,
70528 };
70529
70530@@ -4785,7 +4824,13 @@ static const struct file_operations proc
70531
70532 static int __init slab_proc_init(void)
70533 {
70534- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70535+ mode_t gr_mode = S_IRUGO;
70536+
70537+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70538+ gr_mode = S_IRUSR;
70539+#endif
70540+
70541+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70542 return 0;
70543 }
70544 module_init(slab_proc_init);
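Among the slub.c changes above, s->refcount becomes an atomic_t and the open-coded "s->refcount--; if (!s->refcount)" turns into atomic_dec_and_test(), so the decrement and the zero test can no longer be split by a concurrent update. A small user-space sketch of the same pattern using C11 atomics; struct cache, cache_put and cache_release are invented for the demo.

#include <stdatomic.h>
#include <stdio.h>

struct cache {
	atomic_int refcount;
	const char *name;
};

static void cache_release(struct cache *c)
{
	printf("releasing %s\n", c->name);
}

static void cache_put(struct cache *c)
{
	/* atomic_fetch_sub returns the previous value; seeing 1 means this
	 * call dropped the last reference, mirroring atomic_dec_and_test() */
	if (atomic_fetch_sub(&c->refcount, 1) == 1)
		cache_release(c);
}

int main(void)
{
	struct cache c = { .name = "demo-cache" };

	atomic_init(&c.refcount, 2);
	cache_put(&c);	/* 2 -> 1, nothing released yet */
	cache_put(&c);	/* 1 -> 0, last put releases */
	return 0;
}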
70545diff -urNp linux-2.6.32.45/mm/swap.c linux-2.6.32.45/mm/swap.c
70546--- linux-2.6.32.45/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
70547+++ linux-2.6.32.45/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
70548@@ -30,6 +30,7 @@
70549 #include <linux/notifier.h>
70550 #include <linux/backing-dev.h>
70551 #include <linux/memcontrol.h>
70552+#include <linux/hugetlb.h>
70553
70554 #include "internal.h"
70555
70556@@ -65,6 +66,8 @@ static void put_compound_page(struct pag
70557 compound_page_dtor *dtor;
70558
70559 dtor = get_compound_page_dtor(page);
70560+ if (!PageHuge(page))
70561+ BUG_ON(dtor != free_compound_page);
70562 (*dtor)(page);
70563 }
70564 }
70565diff -urNp linux-2.6.32.45/mm/util.c linux-2.6.32.45/mm/util.c
70566--- linux-2.6.32.45/mm/util.c 2011-03-27 14:31:47.000000000 -0400
70567+++ linux-2.6.32.45/mm/util.c 2011-04-17 15:56:46.000000000 -0400
70568@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
70569 void arch_pick_mmap_layout(struct mm_struct *mm)
70570 {
70571 mm->mmap_base = TASK_UNMAPPED_BASE;
70572+
70573+#ifdef CONFIG_PAX_RANDMMAP
70574+ if (mm->pax_flags & MF_PAX_RANDMMAP)
70575+ mm->mmap_base += mm->delta_mmap;
70576+#endif
70577+
70578 mm->get_unmapped_area = arch_get_unmapped_area;
70579 mm->unmap_area = arch_unmap_area;
70580 }
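The arch_pick_mmap_layout() hunk above shifts the mmap base by a per-process random delta when MF_PAX_RANDMMAP is set. A rough user-space sketch of that idea, adding a page-aligned random offset to a fixed base; the base address, the number of random page bits and the use of rand() are illustrative stand-ins for the per-architecture constants and the kernel RNG.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT_DEMO		12
#define UNMAPPED_BASE_DEMO	0x40000000UL	/* stand-in for TASK_UNMAPPED_BASE */
#define DELTA_PAGE_BITS		16		/* illustrative number of random page bits */

int main(void)
{
	unsigned long base = UNMAPPED_BASE_DEMO;
	int randomize = 1;			/* stand-in for MF_PAX_RANDMMAP being set */

	srand((unsigned)time(NULL));
	if (randomize) {
		/* pick a random page count, then rescale it to a byte offset */
		unsigned long delta = ((unsigned long)rand() % (1UL << DELTA_PAGE_BITS))
					<< PAGE_SHIFT_DEMO;
		base += delta;
	}
	printf("mmap base: 0x%lx\n", base);
	return 0;
}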
70581diff -urNp linux-2.6.32.45/mm/vmalloc.c linux-2.6.32.45/mm/vmalloc.c
70582--- linux-2.6.32.45/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
70583+++ linux-2.6.32.45/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
70584@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70585
70586 pte = pte_offset_kernel(pmd, addr);
70587 do {
70588- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70589- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70590+
70591+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70592+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70593+ BUG_ON(!pte_exec(*pte));
70594+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70595+ continue;
70596+ }
70597+#endif
70598+
70599+ {
70600+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70601+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70602+ }
70603 } while (pte++, addr += PAGE_SIZE, addr != end);
70604 }
70605
70606@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70607 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70608 {
70609 pte_t *pte;
70610+ int ret = -ENOMEM;
70611
70612 /*
70613 * nr is a running index into the array which helps higher level
70614@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
70615 pte = pte_alloc_kernel(pmd, addr);
70616 if (!pte)
70617 return -ENOMEM;
70618+
70619+ pax_open_kernel();
70620 do {
70621 struct page *page = pages[*nr];
70622
70623- if (WARN_ON(!pte_none(*pte)))
70624- return -EBUSY;
70625- if (WARN_ON(!page))
70626- return -ENOMEM;
70627+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70628+ if (!(pgprot_val(prot) & _PAGE_NX))
70629+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
70630+ else
70631+#endif
70632+
70633+ if (WARN_ON(!pte_none(*pte))) {
70634+ ret = -EBUSY;
70635+ goto out;
70636+ }
70637+ if (WARN_ON(!page)) {
70638+ ret = -ENOMEM;
70639+ goto out;
70640+ }
70641 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70642 (*nr)++;
70643 } while (pte++, addr += PAGE_SIZE, addr != end);
70644- return 0;
70645+ ret = 0;
70646+out:
70647+ pax_close_kernel();
70648+ return ret;
70649 }
70650
70651 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70652@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
70653 * and fall back on vmalloc() if that fails. Others
70654 * just put it in the vmalloc space.
70655 */
70656-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70657+#ifdef CONFIG_MODULES
70658+#ifdef MODULES_VADDR
70659 unsigned long addr = (unsigned long)x;
70660 if (addr >= MODULES_VADDR && addr < MODULES_END)
70661 return 1;
70662 #endif
70663+
70664+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70665+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70666+ return 1;
70667+#endif
70668+
70669+#endif
70670+
70671 return is_vmalloc_addr(x);
70672 }
70673
70674@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
70675
70676 if (!pgd_none(*pgd)) {
70677 pud_t *pud = pud_offset(pgd, addr);
70678+#ifdef CONFIG_X86
70679+ if (!pud_large(*pud))
70680+#endif
70681 if (!pud_none(*pud)) {
70682 pmd_t *pmd = pmd_offset(pud, addr);
70683+#ifdef CONFIG_X86
70684+ if (!pmd_large(*pmd))
70685+#endif
70686 if (!pmd_none(*pmd)) {
70687 pte_t *ptep, pte;
70688
70689@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
70690 struct rb_node *tmp;
70691
70692 while (*p) {
70693- struct vmap_area *tmp;
70694+ struct vmap_area *varea;
70695
70696 parent = *p;
70697- tmp = rb_entry(parent, struct vmap_area, rb_node);
70698- if (va->va_start < tmp->va_end)
70699+ varea = rb_entry(parent, struct vmap_area, rb_node);
70700+ if (va->va_start < varea->va_end)
70701 p = &(*p)->rb_left;
70702- else if (va->va_end > tmp->va_start)
70703+ else if (va->va_end > varea->va_start)
70704 p = &(*p)->rb_right;
70705 else
70706 BUG();
70707@@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
70708 struct vm_struct *area;
70709
70710 BUG_ON(in_interrupt());
70711+
70712+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70713+ if (flags & VM_KERNEXEC) {
70714+ if (start != VMALLOC_START || end != VMALLOC_END)
70715+ return NULL;
70716+ start = (unsigned long)MODULES_EXEC_VADDR;
70717+ end = (unsigned long)MODULES_EXEC_END;
70718+ }
70719+#endif
70720+
70721 if (flags & VM_IOREMAP) {
70722 int bit = fls(size);
70723
70724@@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
70725 if (count > totalram_pages)
70726 return NULL;
70727
70728+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70729+ if (!(pgprot_val(prot) & _PAGE_NX))
70730+ flags |= VM_KERNEXEC;
70731+#endif
70732+
70733 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
70734 __builtin_return_address(0));
70735 if (!area)
70736@@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
70737 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
70738 return NULL;
70739
70740+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70741+ if (!(pgprot_val(prot) & _PAGE_NX))
70742+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
70743+ node, gfp_mask, caller);
70744+ else
70745+#endif
70746+
70747 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
70748 VMALLOC_END, node, gfp_mask, caller);
70749
70750@@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
70751 return addr;
70752 }
70753
70754+#undef __vmalloc
70755 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
70756 {
70757 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
70758@@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
70759 * For tight control over page level allocator and protection flags
70760 * use __vmalloc() instead.
70761 */
70762+#undef vmalloc
70763 void *vmalloc(unsigned long size)
70764 {
70765 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70766@@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
70767 * The resulting memory area is zeroed so it can be mapped to userspace
70768 * without leaking data.
70769 */
70770+#undef vmalloc_user
70771 void *vmalloc_user(unsigned long size)
70772 {
70773 struct vm_struct *area;
70774@@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
70775 * For tight control over page level allocator and protection flags
70776 * use __vmalloc() instead.
70777 */
70778+#undef vmalloc_node
70779 void *vmalloc_node(unsigned long size, int node)
70780 {
70781 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70782@@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
70783 * For tight control over page level allocator and protection flags
70784 * use __vmalloc() instead.
70785 */
70786-
70787+#undef vmalloc_exec
70788 void *vmalloc_exec(unsigned long size)
70789 {
70790- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
70791+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
70792 -1, __builtin_return_address(0));
70793 }
70794
70795@@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
70796 * Allocate enough 32bit PA addressable pages to cover @size from the
70797 * page level allocator and map them into contiguous kernel virtual space.
70798 */
70799+#undef vmalloc_32
70800 void *vmalloc_32(unsigned long size)
70801 {
70802 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
70803@@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
70804 * The resulting memory area is 32bit addressable and zeroed so it can be
70805 * mapped to userspace without leaking data.
70806 */
70807+#undef vmalloc_32_user
70808 void *vmalloc_32_user(unsigned long size)
70809 {
70810 struct vm_struct *area;
70811@@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
70812 unsigned long uaddr = vma->vm_start;
70813 unsigned long usize = vma->vm_end - vma->vm_start;
70814
70815+ BUG_ON(vma->vm_mirror);
70816+
70817 if ((PAGE_SIZE-1) & (unsigned long)addr)
70818 return -EINVAL;
70819
70820diff -urNp linux-2.6.32.45/mm/vmstat.c linux-2.6.32.45/mm/vmstat.c
70821--- linux-2.6.32.45/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
70822+++ linux-2.6.32.45/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
70823@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
70824 *
70825 * vm_stat contains the global counters
70826 */
70827-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70828+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70829 EXPORT_SYMBOL(vm_stat);
70830
70831 #ifdef CONFIG_SMP
70832@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
70833 v = p->vm_stat_diff[i];
70834 p->vm_stat_diff[i] = 0;
70835 local_irq_restore(flags);
70836- atomic_long_add(v, &zone->vm_stat[i]);
70837+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
70838 global_diff[i] += v;
70839 #ifdef CONFIG_NUMA
70840 /* 3 seconds idle till flush */
70841@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
70842
70843 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
70844 if (global_diff[i])
70845- atomic_long_add(global_diff[i], &vm_stat[i]);
70846+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
70847 }
70848
70849 #endif
70850@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
70851 start_cpu_timer(cpu);
70852 #endif
70853 #ifdef CONFIG_PROC_FS
70854- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
70855- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
70856- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
70857- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
70858+ {
70859+ mode_t gr_mode = S_IRUGO;
70860+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70861+ gr_mode = S_IRUSR;
70862+#endif
70863+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
70864+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
70865+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70866+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
70867+#else
70868+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
70869+#endif
70870+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
70871+ }
70872 #endif
70873 return 0;
70874 }
70875diff -urNp linux-2.6.32.45/net/8021q/vlan.c linux-2.6.32.45/net/8021q/vlan.c
70876--- linux-2.6.32.45/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
70877+++ linux-2.6.32.45/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
70878@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
70879 err = -EPERM;
70880 if (!capable(CAP_NET_ADMIN))
70881 break;
70882- if ((args.u.name_type >= 0) &&
70883- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
70884+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
70885 struct vlan_net *vn;
70886
70887 vn = net_generic(net, vlan_net_id);
70888diff -urNp linux-2.6.32.45/net/atm/atm_misc.c linux-2.6.32.45/net/atm/atm_misc.c
70889--- linux-2.6.32.45/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
70890+++ linux-2.6.32.45/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
70891@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
70892 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
70893 return 1;
70894 atm_return(vcc,truesize);
70895- atomic_inc(&vcc->stats->rx_drop);
70896+ atomic_inc_unchecked(&vcc->stats->rx_drop);
70897 return 0;
70898 }
70899
70900@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
70901 }
70902 }
70903 atm_return(vcc,guess);
70904- atomic_inc(&vcc->stats->rx_drop);
70905+ atomic_inc_unchecked(&vcc->stats->rx_drop);
70906 return NULL;
70907 }
70908
70909@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
70910
70911 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70912 {
70913-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
70914+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
70915 __SONET_ITEMS
70916 #undef __HANDLE_ITEM
70917 }
70918@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
70919
70920 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70921 {
70922-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
70923+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
70924 __SONET_ITEMS
70925 #undef __HANDLE_ITEM
70926 }
70927diff -urNp linux-2.6.32.45/net/atm/lec.h linux-2.6.32.45/net/atm/lec.h
70928--- linux-2.6.32.45/net/atm/lec.h 2011-03-27 14:31:47.000000000 -0400
70929+++ linux-2.6.32.45/net/atm/lec.h 2011-08-05 20:33:55.000000000 -0400
70930@@ -48,7 +48,7 @@ struct lane2_ops {
70931 const u8 *tlvs, u32 sizeoftlvs);
70932 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
70933 const u8 *tlvs, u32 sizeoftlvs);
70934-};
70935+} __no_const;
70936
70937 /*
70938 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
70939diff -urNp linux-2.6.32.45/net/atm/mpc.h linux-2.6.32.45/net/atm/mpc.h
70940--- linux-2.6.32.45/net/atm/mpc.h 2011-03-27 14:31:47.000000000 -0400
70941+++ linux-2.6.32.45/net/atm/mpc.h 2011-08-23 21:22:38.000000000 -0400
70942@@ -33,7 +33,7 @@ struct mpoa_client {
70943 struct mpc_parameters parameters; /* parameters for this client */
70944
70945 const struct net_device_ops *old_ops;
70946- struct net_device_ops new_ops;
70947+ net_device_ops_no_const new_ops;
70948 };
70949
70950
70951diff -urNp linux-2.6.32.45/net/atm/mpoa_caches.c linux-2.6.32.45/net/atm/mpoa_caches.c
70952--- linux-2.6.32.45/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
70953+++ linux-2.6.32.45/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
70954@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
70955 struct timeval now;
70956 struct k_message msg;
70957
70958+ pax_track_stack();
70959+
70960 do_gettimeofday(&now);
70961
70962 write_lock_irq(&client->egress_lock);
70963diff -urNp linux-2.6.32.45/net/atm/proc.c linux-2.6.32.45/net/atm/proc.c
70964--- linux-2.6.32.45/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
70965+++ linux-2.6.32.45/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
70966@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
70967 const struct k_atm_aal_stats *stats)
70968 {
70969 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
70970- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
70971- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
70972- atomic_read(&stats->rx_drop));
70973+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
70974+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
70975+ atomic_read_unchecked(&stats->rx_drop));
70976 }
70977
70978 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
70979@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
70980 {
70981 struct sock *sk = sk_atm(vcc);
70982
70983+#ifdef CONFIG_GRKERNSEC_HIDESYM
70984+ seq_printf(seq, "%p ", NULL);
70985+#else
70986 seq_printf(seq, "%p ", vcc);
70987+#endif
70988+
70989 if (!vcc->dev)
70990 seq_printf(seq, "Unassigned ");
70991 else
70992@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
70993 {
70994 if (!vcc->dev)
70995 seq_printf(seq, sizeof(void *) == 4 ?
70996+#ifdef CONFIG_GRKERNSEC_HIDESYM
70997+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
70998+#else
70999 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
71000+#endif
71001 else
71002 seq_printf(seq, "%3d %3d %5d ",
71003 vcc->dev->number, vcc->vpi, vcc->vci);
71004diff -urNp linux-2.6.32.45/net/atm/resources.c linux-2.6.32.45/net/atm/resources.c
71005--- linux-2.6.32.45/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
71006+++ linux-2.6.32.45/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
71007@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
71008 static void copy_aal_stats(struct k_atm_aal_stats *from,
71009 struct atm_aal_stats *to)
71010 {
71011-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71012+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71013 __AAL_STAT_ITEMS
71014 #undef __HANDLE_ITEM
71015 }
71016@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
71017 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71018 struct atm_aal_stats *to)
71019 {
71020-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71021+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71022 __AAL_STAT_ITEMS
71023 #undef __HANDLE_ITEM
71024 }
71025diff -urNp linux-2.6.32.45/net/bluetooth/l2cap.c linux-2.6.32.45/net/bluetooth/l2cap.c
71026--- linux-2.6.32.45/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
71027+++ linux-2.6.32.45/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
71028@@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
71029 err = -ENOTCONN;
71030 break;
71031 }
71032-
71033+ memset(&cinfo, 0, sizeof(cinfo));
71034 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
71035 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
71036
71037@@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
71038
71039 /* Reject if config buffer is too small. */
71040 len = cmd_len - sizeof(*req);
71041- if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71042+ if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71043 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
71044 l2cap_build_conf_rsp(sk, rsp,
71045 L2CAP_CONF_REJECT, flags), rsp);
71046diff -urNp linux-2.6.32.45/net/bluetooth/rfcomm/sock.c linux-2.6.32.45/net/bluetooth/rfcomm/sock.c
71047--- linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
71048+++ linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
71049@@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
71050
71051 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
71052
71053+ memset(&cinfo, 0, sizeof(cinfo));
71054 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
71055 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
71056
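The memset(&cinfo, 0, sizeof(cinfo)) lines added to the two Bluetooth getsockopt paths above exist because structure padding is otherwise left holding stale kernel stack bytes, which copy_to_user() would then ship to user space. Stand-alone illustration with an invented struct; the kernel structures differ, but the padding issue is the same.

#include <stdio.h>
#include <string.h>

struct conn_info {
	char  type;	/* compilers typically insert 3 padding bytes after this */
	int   handle;
};

static void fill_without_memset(struct conn_info *i)
{
	i->type = 1;	/* the padding bytes keep whatever was on the stack */
	i->handle = 42;
}

static void fill_with_memset(struct conn_info *i)
{
	memset(i, 0, sizeof(*i));	/* clears every byte, padding included */
	i->type = 1;
	i->handle = 42;
}

int main(void)
{
	struct conn_info leaky, safe;
	unsigned char *p = (unsigned char *)&safe;

	fill_without_memset(&leaky);	/* padding left indeterminate */
	fill_with_memset(&safe);
	/* every byte of "safe" is now well defined before it would be handed
	 * to copy_to_user() in the kernel case */
	for (size_t k = 0; k < sizeof(safe); k++)
		printf("%02x ", p[k]);
	printf("\n");
	return 0;
}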
71057diff -urNp linux-2.6.32.45/net/bridge/br_private.h linux-2.6.32.45/net/bridge/br_private.h
71058--- linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:35:30.000000000 -0400
71059+++ linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:34:01.000000000 -0400
71060@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event,
71061
71062 #ifdef CONFIG_SYSFS
71063 /* br_sysfs_if.c */
71064-extern struct sysfs_ops brport_sysfs_ops;
71065+extern const struct sysfs_ops brport_sysfs_ops;
71066 extern int br_sysfs_addif(struct net_bridge_port *p);
71067
71068 /* br_sysfs_br.c */
71069diff -urNp linux-2.6.32.45/net/bridge/br_stp_if.c linux-2.6.32.45/net/bridge/br_stp_if.c
71070--- linux-2.6.32.45/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
71071+++ linux-2.6.32.45/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
71072@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
71073 char *envp[] = { NULL };
71074
71075 if (br->stp_enabled == BR_USER_STP) {
71076- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
71077+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
71078 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
71079 br->dev->name, r);
71080
71081diff -urNp linux-2.6.32.45/net/bridge/br_sysfs_if.c linux-2.6.32.45/net/bridge/br_sysfs_if.c
71082--- linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
71083+++ linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
71084@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
71085 return ret;
71086 }
71087
71088-struct sysfs_ops brport_sysfs_ops = {
71089+const struct sysfs_ops brport_sysfs_ops = {
71090 .show = brport_show,
71091 .store = brport_store,
71092 };
71093diff -urNp linux-2.6.32.45/net/bridge/netfilter/ebtables.c linux-2.6.32.45/net/bridge/netfilter/ebtables.c
71094--- linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
71095+++ linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
71096@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
71097 unsigned int entries_size, nentries;
71098 char *entries;
71099
71100+ pax_track_stack();
71101+
71102 if (cmd == EBT_SO_GET_ENTRIES) {
71103 entries_size = t->private->entries_size;
71104 nentries = t->private->nentries;
71105diff -urNp linux-2.6.32.45/net/can/bcm.c linux-2.6.32.45/net/can/bcm.c
71106--- linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
71107+++ linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
71108@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
71109 struct bcm_sock *bo = bcm_sk(sk);
71110 struct bcm_op *op;
71111
71112+#ifdef CONFIG_GRKERNSEC_HIDESYM
71113+ seq_printf(m, ">>> socket %p", NULL);
71114+ seq_printf(m, " / sk %p", NULL);
71115+ seq_printf(m, " / bo %p", NULL);
71116+#else
71117 seq_printf(m, ">>> socket %p", sk->sk_socket);
71118 seq_printf(m, " / sk %p", sk);
71119 seq_printf(m, " / bo %p", bo);
71120+#endif
71121 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
71122 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
71123 seq_printf(m, " <<<\n");
71124diff -urNp linux-2.6.32.45/net/core/dev.c linux-2.6.32.45/net/core/dev.c
71125--- linux-2.6.32.45/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
71126+++ linux-2.6.32.45/net/core/dev.c 2011-08-05 20:33:55.000000000 -0400
71127@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
71128 if (no_module && capable(CAP_NET_ADMIN))
71129 no_module = request_module("netdev-%s", name);
71130 if (no_module && capable(CAP_SYS_MODULE)) {
71131+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71132+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
71133+#else
71134 if (!request_module("%s", name))
71135 pr_err("Loading kernel module for a network device "
71136 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71137 "instead\n", name);
71138+#endif
71139 }
71140 }
71141 EXPORT_SYMBOL(dev_load);
71142@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct
71143
71144 struct dev_gso_cb {
71145 void (*destructor)(struct sk_buff *skb);
71146-};
71147+} __no_const;
71148
71149 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71150
71151@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
71152 }
71153 EXPORT_SYMBOL(netif_rx_ni);
71154
71155-static void net_tx_action(struct softirq_action *h)
71156+static void net_tx_action(void)
71157 {
71158 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71159
71160@@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
71161 EXPORT_SYMBOL(netif_napi_del);
71162
71163
71164-static void net_rx_action(struct softirq_action *h)
71165+static void net_rx_action(void)
71166 {
71167 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
71168 unsigned long time_limit = jiffies + 2;
71169diff -urNp linux-2.6.32.45/net/core/flow.c linux-2.6.32.45/net/core/flow.c
71170--- linux-2.6.32.45/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
71171+++ linux-2.6.32.45/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
71172@@ -35,11 +35,11 @@ struct flow_cache_entry {
71173 atomic_t *object_ref;
71174 };
71175
71176-atomic_t flow_cache_genid = ATOMIC_INIT(0);
71177+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71178
71179 static u32 flow_hash_shift;
71180 #define flow_hash_size (1 << flow_hash_shift)
71181-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
71182+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
71183
71184 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
71185
71186@@ -52,7 +52,7 @@ struct flow_percpu_info {
71187 u32 hash_rnd;
71188 int count;
71189 };
71190-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
71191+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
71192
71193 #define flow_hash_rnd_recalc(cpu) \
71194 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
71195@@ -69,7 +69,7 @@ struct flow_flush_info {
71196 atomic_t cpuleft;
71197 struct completion completion;
71198 };
71199-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
71200+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
71201
71202 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
71203
71204@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
71205 if (fle->family == family &&
71206 fle->dir == dir &&
71207 flow_key_compare(key, &fle->key) == 0) {
71208- if (fle->genid == atomic_read(&flow_cache_genid)) {
71209+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
71210 void *ret = fle->object;
71211
71212 if (ret)
71213@@ -228,7 +228,7 @@ nocache:
71214 err = resolver(net, key, family, dir, &obj, &obj_ref);
71215
71216 if (fle && !err) {
71217- fle->genid = atomic_read(&flow_cache_genid);
71218+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
71219
71220 if (fle->object)
71221 atomic_dec(fle->object_ref);
71222@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
71223
71224 fle = flow_table(cpu)[i];
71225 for (; fle; fle = fle->next) {
71226- unsigned genid = atomic_read(&flow_cache_genid);
71227+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
71228
71229 if (!fle->object || fle->genid == genid)
71230 continue;
71231diff -urNp linux-2.6.32.45/net/core/rtnetlink.c linux-2.6.32.45/net/core/rtnetlink.c
71232--- linux-2.6.32.45/net/core/rtnetlink.c 2011-03-27 14:31:47.000000000 -0400
71233+++ linux-2.6.32.45/net/core/rtnetlink.c 2011-08-05 20:33:55.000000000 -0400
71234@@ -57,7 +57,7 @@ struct rtnl_link
71235 {
71236 rtnl_doit_func doit;
71237 rtnl_dumpit_func dumpit;
71238-};
71239+} __no_const;
71240
71241 static DEFINE_MUTEX(rtnl_mutex);
71242
71243diff -urNp linux-2.6.32.45/net/core/secure_seq.c linux-2.6.32.45/net/core/secure_seq.c
71244--- linux-2.6.32.45/net/core/secure_seq.c 2011-08-16 20:37:25.000000000 -0400
71245+++ linux-2.6.32.45/net/core/secure_seq.c 2011-08-07 19:48:09.000000000 -0400
71246@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be3
71247 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
71248
71249 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
71250- __be16 dport)
71251+ __be16 dport)
71252 {
71253 u32 secret[MD5_MESSAGE_BYTES / 4];
71254 u32 hash[MD5_DIGEST_WORDS];
71255@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __b
71256 secret[i] = net_secret[i];
71257
71258 md5_transform(hash, secret);
71259-
71260 return hash[0];
71261 }
71262 #endif
71263diff -urNp linux-2.6.32.45/net/core/skbuff.c linux-2.6.32.45/net/core/skbuff.c
71264--- linux-2.6.32.45/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
71265+++ linux-2.6.32.45/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
71266@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
71267 struct sk_buff *frag_iter;
71268 struct sock *sk = skb->sk;
71269
71270+ pax_track_stack();
71271+
71272 /*
71273 * __skb_splice_bits() only fails if the output has no room left,
71274 * so no point in going over the frag_list for the error case.
71275diff -urNp linux-2.6.32.45/net/core/sock.c linux-2.6.32.45/net/core/sock.c
71276--- linux-2.6.32.45/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
71277+++ linux-2.6.32.45/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
71278@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
71279 break;
71280
71281 case SO_PEERCRED:
71282+ {
71283+ struct ucred peercred;
71284 if (len > sizeof(sk->sk_peercred))
71285 len = sizeof(sk->sk_peercred);
71286- if (copy_to_user(optval, &sk->sk_peercred, len))
71287+ peercred = sk->sk_peercred;
71288+ if (copy_to_user(optval, &peercred, len))
71289 return -EFAULT;
71290 goto lenout;
71291+ }
71292
71293 case SO_PEERNAME:
71294 {
71295@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
71296 */
71297 smp_wmb();
71298 atomic_set(&sk->sk_refcnt, 1);
71299- atomic_set(&sk->sk_drops, 0);
71300+ atomic_set_unchecked(&sk->sk_drops, 0);
71301 }
71302 EXPORT_SYMBOL(sock_init_data);
71303
71304diff -urNp linux-2.6.32.45/net/decnet/sysctl_net_decnet.c linux-2.6.32.45/net/decnet/sysctl_net_decnet.c
71305--- linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
71306+++ linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
71307@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
71308
71309 if (len > *lenp) len = *lenp;
71310
71311- if (copy_to_user(buffer, addr, len))
71312+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
71313 return -EFAULT;
71314
71315 *lenp = len;
71316@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
71317
71318 if (len > *lenp) len = *lenp;
71319
71320- if (copy_to_user(buffer, devname, len))
71321+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
71322 return -EFAULT;
71323
71324 *lenp = len;
71325diff -urNp linux-2.6.32.45/net/econet/Kconfig linux-2.6.32.45/net/econet/Kconfig
71326--- linux-2.6.32.45/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
71327+++ linux-2.6.32.45/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
71328@@ -4,7 +4,7 @@
71329
71330 config ECONET
71331 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71332- depends on EXPERIMENTAL && INET
71333+ depends on EXPERIMENTAL && INET && BROKEN
71334 ---help---
71335 Econet is a fairly old and slow networking protocol mainly used by
71336 Acorn computers to access file and print servers. It uses native
71337diff -urNp linux-2.6.32.45/net/ieee802154/dgram.c linux-2.6.32.45/net/ieee802154/dgram.c
71338--- linux-2.6.32.45/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
71339+++ linux-2.6.32.45/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
71340@@ -318,7 +318,7 @@ out:
71341 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
71342 {
71343 if (sock_queue_rcv_skb(sk, skb) < 0) {
71344- atomic_inc(&sk->sk_drops);
71345+ atomic_inc_unchecked(&sk->sk_drops);
71346 kfree_skb(skb);
71347 return NET_RX_DROP;
71348 }
71349diff -urNp linux-2.6.32.45/net/ieee802154/raw.c linux-2.6.32.45/net/ieee802154/raw.c
71350--- linux-2.6.32.45/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
71351+++ linux-2.6.32.45/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
71352@@ -206,7 +206,7 @@ out:
71353 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
71354 {
71355 if (sock_queue_rcv_skb(sk, skb) < 0) {
71356- atomic_inc(&sk->sk_drops);
71357+ atomic_inc_unchecked(&sk->sk_drops);
71358 kfree_skb(skb);
71359 return NET_RX_DROP;
71360 }
71361diff -urNp linux-2.6.32.45/net/ipv4/inet_diag.c linux-2.6.32.45/net/ipv4/inet_diag.c
71362--- linux-2.6.32.45/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
71363+++ linux-2.6.32.45/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
71364@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
71365 r->idiag_retrans = 0;
71366
71367 r->id.idiag_if = sk->sk_bound_dev_if;
71368+#ifdef CONFIG_GRKERNSEC_HIDESYM
71369+ r->id.idiag_cookie[0] = 0;
71370+ r->id.idiag_cookie[1] = 0;
71371+#else
71372 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71373 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71374+#endif
71375
71376 r->id.idiag_sport = inet->sport;
71377 r->id.idiag_dport = inet->dport;
71378@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
71379 r->idiag_family = tw->tw_family;
71380 r->idiag_retrans = 0;
71381 r->id.idiag_if = tw->tw_bound_dev_if;
71382+
71383+#ifdef CONFIG_GRKERNSEC_HIDESYM
71384+ r->id.idiag_cookie[0] = 0;
71385+ r->id.idiag_cookie[1] = 0;
71386+#else
71387 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71388 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71389+#endif
71390+
71391 r->id.idiag_sport = tw->tw_sport;
71392 r->id.idiag_dport = tw->tw_dport;
71393 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71394@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
71395 if (sk == NULL)
71396 goto unlock;
71397
71398+#ifndef CONFIG_GRKERNSEC_HIDESYM
71399 err = -ESTALE;
71400 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71401 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71402 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71403 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71404 goto out;
71405+#endif
71406
71407 err = -ENOMEM;
71408 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71409@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
71410 r->idiag_retrans = req->retrans;
71411
71412 r->id.idiag_if = sk->sk_bound_dev_if;
71413+
71414+#ifdef CONFIG_GRKERNSEC_HIDESYM
71415+ r->id.idiag_cookie[0] = 0;
71416+ r->id.idiag_cookie[1] = 0;
71417+#else
71418 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71419 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71420+#endif
71421
71422 tmo = req->expires - jiffies;
71423 if (tmo < 0)
71424diff -urNp linux-2.6.32.45/net/ipv4/inet_hashtables.c linux-2.6.32.45/net/ipv4/inet_hashtables.c
71425--- linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71426+++ linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:42:30.000000000 -0400
71427@@ -18,12 +18,15 @@
71428 #include <linux/sched.h>
71429 #include <linux/slab.h>
71430 #include <linux/wait.h>
71431+#include <linux/security.h>
71432
71433 #include <net/inet_connection_sock.h>
71434 #include <net/inet_hashtables.h>
71435 #include <net/secure_seq.h>
71436 #include <net/ip.h>
71437
71438+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
71439+
71440 /*
71441 * Allocate and initialize a new local port bind bucket.
71442 * The bindhash mutex for snum's hash chain must be held here.
71443@@ -491,6 +494,8 @@ ok:
71444 }
71445 spin_unlock(&head->lock);
71446
71447+ gr_update_task_in_ip_table(current, inet_sk(sk));
71448+
71449 if (tw) {
71450 inet_twsk_deschedule(tw, death_row);
71451 inet_twsk_put(tw);
71452diff -urNp linux-2.6.32.45/net/ipv4/inetpeer.c linux-2.6.32.45/net/ipv4/inetpeer.c
71453--- linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-16 20:37:25.000000000 -0400
71454+++ linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-07 19:48:09.000000000 -0400
71455@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 da
71456 struct inet_peer *p, *n;
71457 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
71458
71459+ pax_track_stack();
71460+
71461 /* Look up for the address quickly. */
71462 read_lock_bh(&peer_pool_lock);
71463 p = lookup(daddr, NULL);
71464@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 da
71465 return NULL;
71466 n->v4daddr = daddr;
71467 atomic_set(&n->refcnt, 1);
71468- atomic_set(&n->rid, 0);
71469+ atomic_set_unchecked(&n->rid, 0);
71470 n->ip_id_count = secure_ip_id(daddr);
71471 n->tcp_ts_stamp = 0;
71472
71473diff -urNp linux-2.6.32.45/net/ipv4/ip_fragment.c linux-2.6.32.45/net/ipv4/ip_fragment.c
71474--- linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
71475+++ linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
71476@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
71477 return 0;
71478
71479 start = qp->rid;
71480- end = atomic_inc_return(&peer->rid);
71481+ end = atomic_inc_return_unchecked(&peer->rid);
71482 qp->rid = end;
71483
71484 rc = qp->q.fragments && (end - start) > max;
71485diff -urNp linux-2.6.32.45/net/ipv4/ip_sockglue.c linux-2.6.32.45/net/ipv4/ip_sockglue.c
71486--- linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71487+++ linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71488@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
71489 int val;
71490 int len;
71491
71492+ pax_track_stack();
71493+
71494 if (level != SOL_IP)
71495 return -EOPNOTSUPP;
71496
71497diff -urNp linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c
71498--- linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
71499+++ linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
71500@@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
71501 private = &tmp;
71502 }
71503 #endif
71504+ memset(&info, 0, sizeof(info));
71505 info.valid_hooks = t->valid_hooks;
71506 memcpy(info.hook_entry, private->hook_entry,
71507 sizeof(info.hook_entry));
71508diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c
71509--- linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c 2011-03-27 14:31:47.000000000 -0400
71510+++ linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c 2011-08-21 18:42:53.000000000 -0400
71511@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, st
71512
71513 if (v->data_len < sizeof(*user_iph))
71514 return 0;
71515+ if (v->data_len > 65535)
71516+ return -EMSGSIZE;
71517+
71518 diff = v->data_len - e->skb->len;
71519 if (diff < 0) {
71520 if (pskb_trim(e->skb, v->data_len))
71521@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
71522 static inline void
71523 __ipq_rcv_skb(struct sk_buff *skb)
71524 {
71525- int status, type, pid, flags, nlmsglen, skblen;
71526+ int status, type, pid, flags;
71527+ unsigned int nlmsglen, skblen;
71528 struct nlmsghdr *nlh;
71529
71530 skblen = skb->len;
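The __ipq_rcv_skb() hunk above turns nlmsglen and skblen into unsigned ints (and the mangle path caps data_len at 65535) because an untrusted 32-bit length stored in a signed int can appear negative, pass "too big?" comparisons, and only misbehave once it is used as a size. Self-contained demo with invented values; the real code reads the length from the netlink header.

#include <stdio.h>
#include <stdint.h>

static int accepts_signed(int len, int limit)
{
	return len <= limit;	/* a negative len "passes" this check */
}

static int accepts_unsigned(unsigned int len, unsigned int limit)
{
	return len <= limit;	/* the oversized value is now correctly rejected */
}

int main(void)
{
	uint32_t wire_len = 0xffffff00u;	/* untrusted length from a message header */

	/* conversion to int typically wraps to a negative value on two's-complement targets */
	printf("signed check lets it through: %d\n", accepts_signed((int)wire_len, 65535));
	printf("unsigned check rejects it:    %d\n", accepts_unsigned(wire_len, 65535));
	return 0;
}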
71531diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c
71532--- linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
71533+++ linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
71534@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
71535 private = &tmp;
71536 }
71537 #endif
71538+ memset(&info, 0, sizeof(info));
71539 info.valid_hooks = t->valid_hooks;
71540 memcpy(info.hook_entry, private->hook_entry,
71541 sizeof(info.hook_entry));
71542diff -urNp linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c
71543--- linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
71544+++ linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
71545@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
71546
71547 *len = 0;
71548
71549- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
71550+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
71551 if (*octets == NULL) {
71552 if (net_ratelimit())
71553 printk("OOM in bsalg (%d)\n", __LINE__);
71554diff -urNp linux-2.6.32.45/net/ipv4/raw.c linux-2.6.32.45/net/ipv4/raw.c
71555--- linux-2.6.32.45/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
71556+++ linux-2.6.32.45/net/ipv4/raw.c 2011-08-14 11:46:51.000000000 -0400
71557@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
71558 /* Charge it to the socket. */
71559
71560 if (sock_queue_rcv_skb(sk, skb) < 0) {
71561- atomic_inc(&sk->sk_drops);
71562+ atomic_inc_unchecked(&sk->sk_drops);
71563 kfree_skb(skb);
71564 return NET_RX_DROP;
71565 }
71566@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
71567 int raw_rcv(struct sock *sk, struct sk_buff *skb)
71568 {
71569 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
71570- atomic_inc(&sk->sk_drops);
71571+ atomic_inc_unchecked(&sk->sk_drops);
71572 kfree_skb(skb);
71573 return NET_RX_DROP;
71574 }
71575@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
71576
71577 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
71578 {
71579+ struct icmp_filter filter;
71580+
71581+ if (optlen < 0)
71582+ return -EINVAL;
71583 if (optlen > sizeof(struct icmp_filter))
71584 optlen = sizeof(struct icmp_filter);
71585- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
71586+ if (copy_from_user(&filter, optval, optlen))
71587 return -EFAULT;
71588+ raw_sk(sk)->filter = filter;
71589+
71590 return 0;
71591 }
71592
71593 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
71594 {
71595 int len, ret = -EFAULT;
71596+ struct icmp_filter filter;
71597
71598 if (get_user(len, optlen))
71599 goto out;
71600@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
71601 if (len > sizeof(struct icmp_filter))
71602 len = sizeof(struct icmp_filter);
71603 ret = -EFAULT;
71604- if (put_user(len, optlen) ||
71605- copy_to_user(optval, &raw_sk(sk)->filter, len))
71606+ filter = raw_sk(sk)->filter;
71607+ if (put_user(len, optlen) || len > sizeof filter ||
71608+ copy_to_user(optval, &filter, len))
71609 goto out;
71610 ret = 0;
71611 out: return ret;
71612@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
71613 sk_wmem_alloc_get(sp),
71614 sk_rmem_alloc_get(sp),
71615 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71616- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
71617+ atomic_read(&sp->sk_refcnt),
71618+#ifdef CONFIG_GRKERNSEC_HIDESYM
71619+ NULL,
71620+#else
71621+ sp,
71622+#endif
71623+ atomic_read_unchecked(&sp->sk_drops));
71624 }
71625
71626 static int raw_seq_show(struct seq_file *seq, void *v)
71627diff -urNp linux-2.6.32.45/net/ipv4/route.c linux-2.6.32.45/net/ipv4/route.c
71628--- linux-2.6.32.45/net/ipv4/route.c 2011-08-16 20:37:25.000000000 -0400
71629+++ linux-2.6.32.45/net/ipv4/route.c 2011-08-07 19:48:09.000000000 -0400
71630@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be3
71631
71632 static inline int rt_genid(struct net *net)
71633 {
71634- return atomic_read(&net->ipv4.rt_genid);
71635+ return atomic_read_unchecked(&net->ipv4.rt_genid);
71636 }
71637
71638 #ifdef CONFIG_PROC_FS
71639@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct n
71640 unsigned char shuffle;
71641
71642 get_random_bytes(&shuffle, sizeof(shuffle));
71643- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
71644+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
71645 }
71646
71647 /*
71648@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_oper
71649
71650 static __net_init int rt_secret_timer_init(struct net *net)
71651 {
71652- atomic_set(&net->ipv4.rt_genid,
71653+ atomic_set_unchecked(&net->ipv4.rt_genid,
71654 (int) ((num_physpages ^ (num_physpages>>8)) ^
71655 (jiffies ^ (jiffies >> 7))));
71656
71657diff -urNp linux-2.6.32.45/net/ipv4/tcp.c linux-2.6.32.45/net/ipv4/tcp.c
71658--- linux-2.6.32.45/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
71659+++ linux-2.6.32.45/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
71660@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
71661 int val;
71662 int err = 0;
71663
71664+ pax_track_stack();
71665+
71666 /* This is a string value all the others are int's */
71667 if (optname == TCP_CONGESTION) {
71668 char name[TCP_CA_NAME_MAX];
71669@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
71670 struct tcp_sock *tp = tcp_sk(sk);
71671 int val, len;
71672
71673+ pax_track_stack();
71674+
71675 if (get_user(len, optlen))
71676 return -EFAULT;
71677
71678diff -urNp linux-2.6.32.45/net/ipv4/tcp_ipv4.c linux-2.6.32.45/net/ipv4/tcp_ipv4.c
71679--- linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-16 20:37:25.000000000 -0400
71680+++ linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-23 21:22:32.000000000 -0400
71681@@ -85,6 +85,9 @@
71682 int sysctl_tcp_tw_reuse __read_mostly;
71683 int sysctl_tcp_low_latency __read_mostly;
71684
71685+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71686+extern int grsec_enable_blackhole;
71687+#endif
71688
71689 #ifdef CONFIG_TCP_MD5SIG
71690 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
71691@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
71692 return 0;
71693
71694 reset:
71695+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71696+ if (!grsec_enable_blackhole)
71697+#endif
71698 tcp_v4_send_reset(rsk, skb);
71699 discard:
71700 kfree_skb(skb);
71701@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
71702 TCP_SKB_CB(skb)->sacked = 0;
71703
71704 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
71705- if (!sk)
71706+ if (!sk) {
71707+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71708+ ret = 1;
71709+#endif
71710 goto no_tcp_socket;
71711+ }
71712
71713 process:
71714- if (sk->sk_state == TCP_TIME_WAIT)
71715+ if (sk->sk_state == TCP_TIME_WAIT) {
71716+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71717+ ret = 2;
71718+#endif
71719 goto do_time_wait;
71720+ }
71721
71722 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
71723 goto discard_and_relse;
71724@@ -1651,6 +1665,10 @@ no_tcp_socket:
71725 bad_packet:
71726 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
71727 } else {
71728+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71729+ if (!grsec_enable_blackhole || (ret == 1 &&
71730+ (skb->dev->flags & IFF_LOOPBACK)))
71731+#endif
71732 tcp_v4_send_reset(NULL, skb);
71733 }
71734
71735@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk
71736 0, /* non standard timer */
71737 0, /* open_requests have no inode */
71738 atomic_read(&sk->sk_refcnt),
71739+#ifdef CONFIG_GRKERNSEC_HIDESYM
71740+ NULL,
71741+#else
71742 req,
71743+#endif
71744 len);
71745 }
71746
71747@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *s
71748 sock_i_uid(sk),
71749 icsk->icsk_probes_out,
71750 sock_i_ino(sk),
71751- atomic_read(&sk->sk_refcnt), sk,
71752+ atomic_read(&sk->sk_refcnt),
71753+#ifdef CONFIG_GRKERNSEC_HIDESYM
71754+ NULL,
71755+#else
71756+ sk,
71757+#endif
71758 jiffies_to_clock_t(icsk->icsk_rto),
71759 jiffies_to_clock_t(icsk->icsk_ack.ato),
71760 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
71761@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct in
71762 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
71763 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
71764 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
71765- atomic_read(&tw->tw_refcnt), tw, len);
71766+ atomic_read(&tw->tw_refcnt),
71767+#ifdef CONFIG_GRKERNSEC_HIDESYM
71768+ NULL,
71769+#else
71770+ tw,
71771+#endif
71772+ len);
71773 }
71774
71775 #define TMPSZ 150
71776diff -urNp linux-2.6.32.45/net/ipv4/tcp_minisocks.c linux-2.6.32.45/net/ipv4/tcp_minisocks.c
71777--- linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
71778+++ linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
71779@@ -26,6 +26,10 @@
71780 #include <net/inet_common.h>
71781 #include <net/xfrm.h>
71782
71783+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71784+extern int grsec_enable_blackhole;
71785+#endif
71786+
71787 #ifdef CONFIG_SYSCTL
71788 #define SYNC_INIT 0 /* let the user enable it */
71789 #else
71790@@ -672,6 +676,10 @@ listen_overflow:
71791
71792 embryonic_reset:
71793 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
71794+
71795+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71796+ if (!grsec_enable_blackhole)
71797+#endif
71798 if (!(flg & TCP_FLAG_RST))
71799 req->rsk_ops->send_reset(sk, skb);
71800
71801diff -urNp linux-2.6.32.45/net/ipv4/tcp_output.c linux-2.6.32.45/net/ipv4/tcp_output.c
71802--- linux-2.6.32.45/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
71803+++ linux-2.6.32.45/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
71804@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
71805 __u8 *md5_hash_location;
71806 int mss;
71807
71808+ pax_track_stack();
71809+
71810 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
71811 if (skb == NULL)
71812 return NULL;
71813diff -urNp linux-2.6.32.45/net/ipv4/tcp_probe.c linux-2.6.32.45/net/ipv4/tcp_probe.c
71814--- linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
71815+++ linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
71816@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
71817 if (cnt + width >= len)
71818 break;
71819
71820- if (copy_to_user(buf + cnt, tbuf, width))
71821+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
71822 return -EFAULT;
71823 cnt += width;
71824 }
71825diff -urNp linux-2.6.32.45/net/ipv4/tcp_timer.c linux-2.6.32.45/net/ipv4/tcp_timer.c
71826--- linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
71827+++ linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
71828@@ -21,6 +21,10 @@
71829 #include <linux/module.h>
71830 #include <net/tcp.h>
71831
71832+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71833+extern int grsec_lastack_retries;
71834+#endif
71835+
71836 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
71837 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
71838 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
71839@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
71840 }
71841 }
71842
71843+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71844+ if ((sk->sk_state == TCP_LAST_ACK) &&
71845+ (grsec_lastack_retries > 0) &&
71846+ (grsec_lastack_retries < retry_until))
71847+ retry_until = grsec_lastack_retries;
71848+#endif
71849+
71850 if (retransmits_timed_out(sk, retry_until)) {
71851 /* Has it gone just too far? */
71852 tcp_write_err(sk);
71853diff -urNp linux-2.6.32.45/net/ipv4/udp.c linux-2.6.32.45/net/ipv4/udp.c
71854--- linux-2.6.32.45/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
71855+++ linux-2.6.32.45/net/ipv4/udp.c 2011-08-23 21:22:32.000000000 -0400
71856@@ -86,6 +86,7 @@
71857 #include <linux/types.h>
71858 #include <linux/fcntl.h>
71859 #include <linux/module.h>
71860+#include <linux/security.h>
71861 #include <linux/socket.h>
71862 #include <linux/sockios.h>
71863 #include <linux/igmp.h>
71864@@ -106,6 +107,10 @@
71865 #include <net/xfrm.h>
71866 #include "udp_impl.h"
71867
71868+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71869+extern int grsec_enable_blackhole;
71870+#endif
71871+
71872 struct udp_table udp_table;
71873 EXPORT_SYMBOL(udp_table);
71874
71875@@ -371,6 +376,9 @@ found:
71876 return s;
71877 }
71878
71879+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
71880+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
71881+
71882 /*
71883 * This routine is called by the ICMP module when it gets some
71884 * sort of error condition. If err < 0 then the socket should
71885@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
71886 dport = usin->sin_port;
71887 if (dport == 0)
71888 return -EINVAL;
71889+
71890+ err = gr_search_udp_sendmsg(sk, usin);
71891+ if (err)
71892+ return err;
71893 } else {
71894 if (sk->sk_state != TCP_ESTABLISHED)
71895 return -EDESTADDRREQ;
71896+
71897+ err = gr_search_udp_sendmsg(sk, NULL);
71898+ if (err)
71899+ return err;
71900+
71901 daddr = inet->daddr;
71902 dport = inet->dport;
71903 /* Open fast path for connected socket.
71904@@ -945,6 +962,10 @@ try_again:
71905 if (!skb)
71906 goto out;
71907
71908+ err = gr_search_udp_recvmsg(sk, skb);
71909+ if (err)
71910+ goto out_free;
71911+
71912 ulen = skb->len - sizeof(struct udphdr);
71913 copied = len;
71914 if (copied > ulen)
71915@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
71916 if (rc == -ENOMEM) {
71917 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
71918 is_udplite);
71919- atomic_inc(&sk->sk_drops);
71920+ atomic_inc_unchecked(&sk->sk_drops);
71921 }
71922 goto drop;
71923 }
71924@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
71925 goto csum_error;
71926
71927 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
71928+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71929+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
71930+#endif
71931 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
71932
71933 /*
71934@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
71935 sk_wmem_alloc_get(sp),
71936 sk_rmem_alloc_get(sp),
71937 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71938- atomic_read(&sp->sk_refcnt), sp,
71939- atomic_read(&sp->sk_drops), len);
71940+ atomic_read(&sp->sk_refcnt),
71941+#ifdef CONFIG_GRKERNSEC_HIDESYM
71942+ NULL,
71943+#else
71944+ sp,
71945+#endif
71946+ atomic_read_unchecked(&sp->sk_drops), len);
71947 }
71948
71949 int udp4_seq_show(struct seq_file *seq, void *v)
71950diff -urNp linux-2.6.32.45/net/ipv6/inet6_connection_sock.c linux-2.6.32.45/net/ipv6/inet6_connection_sock.c
71951--- linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
71952+++ linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
71953@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
71954 #ifdef CONFIG_XFRM
71955 {
71956 struct rt6_info *rt = (struct rt6_info *)dst;
71957- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
71958+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
71959 }
71960 #endif
71961 }
71962@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
71963 #ifdef CONFIG_XFRM
71964 if (dst) {
71965 struct rt6_info *rt = (struct rt6_info *)dst;
71966- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
71967+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
71968 sk->sk_dst_cache = NULL;
71969 dst_release(dst);
71970 dst = NULL;
71971diff -urNp linux-2.6.32.45/net/ipv6/inet6_hashtables.c linux-2.6.32.45/net/ipv6/inet6_hashtables.c
71972--- linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71973+++ linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-07 19:48:09.000000000 -0400
71974@@ -119,7 +119,7 @@ out:
71975 }
71976 EXPORT_SYMBOL(__inet6_lookup_established);
71977
71978-static int inline compute_score(struct sock *sk, struct net *net,
71979+static inline int compute_score(struct sock *sk, struct net *net,
71980 const unsigned short hnum,
71981 const struct in6_addr *daddr,
71982 const int dif)
71983diff -urNp linux-2.6.32.45/net/ipv6/ip6_tunnel.c linux-2.6.32.45/net/ipv6/ip6_tunnel.c
71984--- linux-2.6.32.45/net/ipv6/ip6_tunnel.c 2011-08-09 18:35:30.000000000 -0400
71985+++ linux-2.6.32.45/net/ipv6/ip6_tunnel.c 2011-08-24 18:52:25.000000000 -0400
71986@@ -1466,7 +1466,7 @@ static int __init ip6_tunnel_init(void)
71987 {
71988 int err;
71989
71990- err = register_pernet_device(&ip6_tnl_net_ops);
71991+ err = register_pernet_gen_device(&ip6_tnl_net_id, &ip6_tnl_net_ops);
71992 if (err < 0)
71993 goto out_pernet;
71994
71995@@ -1487,7 +1487,7 @@ static int __init ip6_tunnel_init(void)
71996 out_ip6ip6:
71997 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
71998 out_ip4ip6:
71999- unregister_pernet_device(&ip6_tnl_net_ops);
72000+ unregister_pernet_gen_device(ip6_tnl_net_id, &ip6_tnl_net_ops);
72001 out_pernet:
72002 return err;
72003 }
72004diff -urNp linux-2.6.32.45/net/ipv6/ipv6_sockglue.c linux-2.6.32.45/net/ipv6/ipv6_sockglue.c
72005--- linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
72006+++ linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
72007@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
72008 int val, valbool;
72009 int retv = -ENOPROTOOPT;
72010
72011+ pax_track_stack();
72012+
72013 if (optval == NULL)
72014 val=0;
72015 else {
72016@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
72017 int len;
72018 int val;
72019
72020+ pax_track_stack();
72021+
72022 if (ip6_mroute_opt(optname))
72023 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72024
72025diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c
72026--- linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c 2011-03-27 14:31:47.000000000 -0400
72027+++ linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c 2011-08-21 18:43:32.000000000 -0400
72028@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, st
72029
72030 if (v->data_len < sizeof(*user_iph))
72031 return 0;
72032+ if (v->data_len > 65535)
72033+ return -EMSGSIZE;
72034+
72035 diff = v->data_len - e->skb->len;
72036 if (diff < 0) {
72037 if (pskb_trim(e->skb, v->data_len))
72038@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
72039 static inline void
72040 __ipq_rcv_skb(struct sk_buff *skb)
72041 {
72042- int status, type, pid, flags, nlmsglen, skblen;
72043+ int status, type, pid, flags;
72044+ unsigned int nlmsglen, skblen;
72045 struct nlmsghdr *nlh;
72046
72047 skblen = skb->len;
72048diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c
72049--- linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
72050+++ linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
72051@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
72052 private = &tmp;
72053 }
72054 #endif
72055+ memset(&info, 0, sizeof(info));
72056 info.valid_hooks = t->valid_hooks;
72057 memcpy(info.hook_entry, private->hook_entry,
72058 sizeof(info.hook_entry));
72059diff -urNp linux-2.6.32.45/net/ipv6/raw.c linux-2.6.32.45/net/ipv6/raw.c
72060--- linux-2.6.32.45/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
72061+++ linux-2.6.32.45/net/ipv6/raw.c 2011-08-14 11:48:20.000000000 -0400
72062@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
72063 {
72064 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
72065 skb_checksum_complete(skb)) {
72066- atomic_inc(&sk->sk_drops);
72067+ atomic_inc_unchecked(&sk->sk_drops);
72068 kfree_skb(skb);
72069 return NET_RX_DROP;
72070 }
72071
72072 /* Charge it to the socket. */
72073 if (sock_queue_rcv_skb(sk,skb)<0) {
72074- atomic_inc(&sk->sk_drops);
72075+ atomic_inc_unchecked(&sk->sk_drops);
72076 kfree_skb(skb);
72077 return NET_RX_DROP;
72078 }
72079@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72080 struct raw6_sock *rp = raw6_sk(sk);
72081
72082 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72083- atomic_inc(&sk->sk_drops);
72084+ atomic_inc_unchecked(&sk->sk_drops);
72085 kfree_skb(skb);
72086 return NET_RX_DROP;
72087 }
72088@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72089
72090 if (inet->hdrincl) {
72091 if (skb_checksum_complete(skb)) {
72092- atomic_inc(&sk->sk_drops);
72093+ atomic_inc_unchecked(&sk->sk_drops);
72094 kfree_skb(skb);
72095 return NET_RX_DROP;
72096 }
72097@@ -518,7 +518,7 @@ csum_copy_err:
72098 as some normal condition.
72099 */
72100 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
72101- atomic_inc(&sk->sk_drops);
72102+ atomic_inc_unchecked(&sk->sk_drops);
72103 goto out;
72104 }
72105
72106@@ -600,7 +600,7 @@ out:
72107 return err;
72108 }
72109
72110-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72111+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72112 struct flowi *fl, struct rt6_info *rt,
72113 unsigned int flags)
72114 {
72115@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
72116 u16 proto;
72117 int err;
72118
72119+ pax_track_stack();
72120+
72121 /* Rough check on arithmetic overflow,
72122 better check is made in ip6_append_data().
72123 */
72124@@ -916,12 +918,17 @@ do_confirm:
72125 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72126 char __user *optval, int optlen)
72127 {
72128+ struct icmp6_filter filter;
72129+
72130 switch (optname) {
72131 case ICMPV6_FILTER:
72132+ if (optlen < 0)
72133+ return -EINVAL;
72134 if (optlen > sizeof(struct icmp6_filter))
72135 optlen = sizeof(struct icmp6_filter);
72136- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72137+ if (copy_from_user(&filter, optval, optlen))
72138 return -EFAULT;
72139+ raw6_sk(sk)->filter = filter;
72140 return 0;
72141 default:
72142 return -ENOPROTOOPT;
72143@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct so
72144 char __user *optval, int __user *optlen)
72145 {
72146 int len;
72147+ struct icmp6_filter filter;
72148
72149 switch (optname) {
72150 case ICMPV6_FILTER:
72151@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
72152 len = sizeof(struct icmp6_filter);
72153 if (put_user(len, optlen))
72154 return -EFAULT;
72155- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72156+ filter = raw6_sk(sk)->filter;
72157+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
72158 return -EFAULT;
72159 return 0;
72160 default:
72161@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
72162 0, 0L, 0,
72163 sock_i_uid(sp), 0,
72164 sock_i_ino(sp),
72165- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72166+ atomic_read(&sp->sk_refcnt),
72167+#ifdef CONFIG_GRKERNSEC_HIDESYM
72168+ NULL,
72169+#else
72170+ sp,
72171+#endif
72172+ atomic_read_unchecked(&sp->sk_drops));
72173 }
72174
72175 static int raw6_seq_show(struct seq_file *seq, void *v)
72176diff -urNp linux-2.6.32.45/net/ipv6/tcp_ipv6.c linux-2.6.32.45/net/ipv6/tcp_ipv6.c
72177--- linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-16 20:37:25.000000000 -0400
72178+++ linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-07 19:48:09.000000000 -0400
72179@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72180 }
72181 #endif
72182
72183+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72184+extern int grsec_enable_blackhole;
72185+#endif
72186+
72187 static void tcp_v6_hash(struct sock *sk)
72188 {
72189 if (sk->sk_state != TCP_CLOSE) {
72190@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72191 return 0;
72192
72193 reset:
72194+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72195+ if (!grsec_enable_blackhole)
72196+#endif
72197 tcp_v6_send_reset(sk, skb);
72198 discard:
72199 if (opt_skb)
72200@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72201 TCP_SKB_CB(skb)->sacked = 0;
72202
72203 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72204- if (!sk)
72205+ if (!sk) {
72206+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72207+ ret = 1;
72208+#endif
72209 goto no_tcp_socket;
72210+ }
72211
72212 process:
72213- if (sk->sk_state == TCP_TIME_WAIT)
72214+ if (sk->sk_state == TCP_TIME_WAIT) {
72215+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72216+ ret = 2;
72217+#endif
72218 goto do_time_wait;
72219+ }
72220
72221 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
72222 goto discard_and_relse;
72223@@ -1701,6 +1716,10 @@ no_tcp_socket:
72224 bad_packet:
72225 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72226 } else {
72227+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72228+ if (!grsec_enable_blackhole || (ret == 1 &&
72229+ (skb->dev->flags & IFF_LOOPBACK)))
72230+#endif
72231 tcp_v6_send_reset(NULL, skb);
72232 }
72233
72234@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file
72235 uid,
72236 0, /* non standard timer */
72237 0, /* open_requests have no inode */
72238- 0, req);
72239+ 0,
72240+#ifdef CONFIG_GRKERNSEC_HIDESYM
72241+ NULL
72242+#else
72243+ req
72244+#endif
72245+ );
72246 }
72247
72248 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72249@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_fil
72250 sock_i_uid(sp),
72251 icsk->icsk_probes_out,
72252 sock_i_ino(sp),
72253- atomic_read(&sp->sk_refcnt), sp,
72254+ atomic_read(&sp->sk_refcnt),
72255+#ifdef CONFIG_GRKERNSEC_HIDESYM
72256+ NULL,
72257+#else
72258+ sp,
72259+#endif
72260 jiffies_to_clock_t(icsk->icsk_rto),
72261 jiffies_to_clock_t(icsk->icsk_ack.ato),
72262 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72263@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct se
72264 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72265 tw->tw_substate, 0, 0,
72266 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72267- atomic_read(&tw->tw_refcnt), tw);
72268+ atomic_read(&tw->tw_refcnt),
72269+#ifdef CONFIG_GRKERNSEC_HIDESYM
72270+ NULL
72271+#else
72272+ tw
72273+#endif
72274+ );
72275 }
72276
72277 static int tcp6_seq_show(struct seq_file *seq, void *v)
72278diff -urNp linux-2.6.32.45/net/ipv6/udp.c linux-2.6.32.45/net/ipv6/udp.c
72279--- linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
72280+++ linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
72281@@ -49,6 +49,10 @@
72282 #include <linux/seq_file.h>
72283 #include "udp_impl.h"
72284
72285+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72286+extern int grsec_enable_blackhole;
72287+#endif
72288+
72289 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72290 {
72291 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72292@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72293 if (rc == -ENOMEM) {
72294 UDP6_INC_STATS_BH(sock_net(sk),
72295 UDP_MIB_RCVBUFERRORS, is_udplite);
72296- atomic_inc(&sk->sk_drops);
72297+ atomic_inc_unchecked(&sk->sk_drops);
72298 }
72299 goto drop;
72300 }
72301@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72302 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72303 proto == IPPROTO_UDPLITE);
72304
72305+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72306+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72307+#endif
72308 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
72309
72310 kfree_skb(skb);
72311@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
72312 0, 0L, 0,
72313 sock_i_uid(sp), 0,
72314 sock_i_ino(sp),
72315- atomic_read(&sp->sk_refcnt), sp,
72316- atomic_read(&sp->sk_drops));
72317+ atomic_read(&sp->sk_refcnt),
72318+#ifdef CONFIG_GRKERNSEC_HIDESYM
72319+ NULL,
72320+#else
72321+ sp,
72322+#endif
72323+ atomic_read_unchecked(&sp->sk_drops));
72324 }
72325
72326 int udp6_seq_show(struct seq_file *seq, void *v)
72327diff -urNp linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c
72328--- linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
72329+++ linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
72330@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
72331 add_wait_queue(&self->open_wait, &wait);
72332
72333 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72334- __FILE__,__LINE__, tty->driver->name, self->open_count );
72335+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72336
72337 /* As far as I can see, we protect open_count - Jean II */
72338 spin_lock_irqsave(&self->spinlock, flags);
72339 if (!tty_hung_up_p(filp)) {
72340 extra_count = 1;
72341- self->open_count--;
72342+ local_dec(&self->open_count);
72343 }
72344 spin_unlock_irqrestore(&self->spinlock, flags);
72345- self->blocked_open++;
72346+ local_inc(&self->blocked_open);
72347
72348 while (1) {
72349 if (tty->termios->c_cflag & CBAUD) {
72350@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
72351 }
72352
72353 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72354- __FILE__,__LINE__, tty->driver->name, self->open_count );
72355+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72356
72357 schedule();
72358 }
72359@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
72360 if (extra_count) {
72361 /* ++ is not atomic, so this should be protected - Jean II */
72362 spin_lock_irqsave(&self->spinlock, flags);
72363- self->open_count++;
72364+ local_inc(&self->open_count);
72365 spin_unlock_irqrestore(&self->spinlock, flags);
72366 }
72367- self->blocked_open--;
72368+ local_dec(&self->blocked_open);
72369
72370 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72371- __FILE__,__LINE__, tty->driver->name, self->open_count);
72372+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72373
72374 if (!retval)
72375 self->flags |= ASYNC_NORMAL_ACTIVE;
72376@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
72377 }
72378 /* ++ is not atomic, so this should be protected - Jean II */
72379 spin_lock_irqsave(&self->spinlock, flags);
72380- self->open_count++;
72381+ local_inc(&self->open_count);
72382
72383 tty->driver_data = self;
72384 self->tty = tty;
72385 spin_unlock_irqrestore(&self->spinlock, flags);
72386
72387 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72388- self->line, self->open_count);
72389+ self->line, local_read(&self->open_count));
72390
72391 /* Not really used by us, but lets do it anyway */
72392 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72393@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
72394 return;
72395 }
72396
72397- if ((tty->count == 1) && (self->open_count != 1)) {
72398+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72399 /*
72400 * Uh, oh. tty->count is 1, which means that the tty
72401 * structure will be freed. state->count should always
72402@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
72403 */
72404 IRDA_DEBUG(0, "%s(), bad serial port count; "
72405 "tty->count is 1, state->count is %d\n", __func__ ,
72406- self->open_count);
72407- self->open_count = 1;
72408+ local_read(&self->open_count));
72409+ local_set(&self->open_count, 1);
72410 }
72411
72412- if (--self->open_count < 0) {
72413+ if (local_dec_return(&self->open_count) < 0) {
72414 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72415- __func__, self->line, self->open_count);
72416- self->open_count = 0;
72417+ __func__, self->line, local_read(&self->open_count));
72418+ local_set(&self->open_count, 0);
72419 }
72420- if (self->open_count) {
72421+ if (local_read(&self->open_count)) {
72422 spin_unlock_irqrestore(&self->spinlock, flags);
72423
72424 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72425@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
72426 tty->closing = 0;
72427 self->tty = NULL;
72428
72429- if (self->blocked_open) {
72430+ if (local_read(&self->blocked_open)) {
72431 if (self->close_delay)
72432 schedule_timeout_interruptible(self->close_delay);
72433 wake_up_interruptible(&self->open_wait);
72434@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
72435 spin_lock_irqsave(&self->spinlock, flags);
72436 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72437 self->tty = NULL;
72438- self->open_count = 0;
72439+ local_set(&self->open_count, 0);
72440 spin_unlock_irqrestore(&self->spinlock, flags);
72441
72442 wake_up_interruptible(&self->open_wait);
72443@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
72444 seq_putc(m, '\n');
72445
72446 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72447- seq_printf(m, "Open count: %d\n", self->open_count);
72448+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72449 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72450 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72451
72452diff -urNp linux-2.6.32.45/net/iucv/af_iucv.c linux-2.6.32.45/net/iucv/af_iucv.c
72453--- linux-2.6.32.45/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
72454+++ linux-2.6.32.45/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
72455@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
72456
72457 write_lock_bh(&iucv_sk_list.lock);
72458
72459- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
72460+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72461 while (__iucv_get_sock_by_name(name)) {
72462 sprintf(name, "%08x",
72463- atomic_inc_return(&iucv_sk_list.autobind_name));
72464+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72465 }
72466
72467 write_unlock_bh(&iucv_sk_list.lock);
72468diff -urNp linux-2.6.32.45/net/key/af_key.c linux-2.6.32.45/net/key/af_key.c
72469--- linux-2.6.32.45/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
72470+++ linux-2.6.32.45/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
72471@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
72472 struct xfrm_migrate m[XFRM_MAX_DEPTH];
72473 struct xfrm_kmaddress k;
72474
72475+ pax_track_stack();
72476+
72477 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
72478 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
72479 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
72480@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
72481 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
72482 else
72483 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
72484+#ifdef CONFIG_GRKERNSEC_HIDESYM
72485+ NULL,
72486+#else
72487 s,
72488+#endif
72489 atomic_read(&s->sk_refcnt),
72490 sk_rmem_alloc_get(s),
72491 sk_wmem_alloc_get(s),
72492diff -urNp linux-2.6.32.45/net/lapb/lapb_iface.c linux-2.6.32.45/net/lapb/lapb_iface.c
72493--- linux-2.6.32.45/net/lapb/lapb_iface.c 2011-03-27 14:31:47.000000000 -0400
72494+++ linux-2.6.32.45/net/lapb/lapb_iface.c 2011-08-05 20:33:55.000000000 -0400
72495@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev
72496 goto out;
72497
72498 lapb->dev = dev;
72499- lapb->callbacks = *callbacks;
72500+ lapb->callbacks = callbacks;
72501
72502 __lapb_insert_cb(lapb);
72503
72504@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device
72505
72506 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
72507 {
72508- if (lapb->callbacks.connect_confirmation)
72509- lapb->callbacks.connect_confirmation(lapb->dev, reason);
72510+ if (lapb->callbacks->connect_confirmation)
72511+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
72512 }
72513
72514 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
72515 {
72516- if (lapb->callbacks.connect_indication)
72517- lapb->callbacks.connect_indication(lapb->dev, reason);
72518+ if (lapb->callbacks->connect_indication)
72519+ lapb->callbacks->connect_indication(lapb->dev, reason);
72520 }
72521
72522 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
72523 {
72524- if (lapb->callbacks.disconnect_confirmation)
72525- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
72526+ if (lapb->callbacks->disconnect_confirmation)
72527+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
72528 }
72529
72530 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
72531 {
72532- if (lapb->callbacks.disconnect_indication)
72533- lapb->callbacks.disconnect_indication(lapb->dev, reason);
72534+ if (lapb->callbacks->disconnect_indication)
72535+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
72536 }
72537
72538 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
72539 {
72540- if (lapb->callbacks.data_indication)
72541- return lapb->callbacks.data_indication(lapb->dev, skb);
72542+ if (lapb->callbacks->data_indication)
72543+ return lapb->callbacks->data_indication(lapb->dev, skb);
72544
72545 kfree_skb(skb);
72546 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
72547@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *l
72548 {
72549 int used = 0;
72550
72551- if (lapb->callbacks.data_transmit) {
72552- lapb->callbacks.data_transmit(lapb->dev, skb);
72553+ if (lapb->callbacks->data_transmit) {
72554+ lapb->callbacks->data_transmit(lapb->dev, skb);
72555 used = 1;
72556 }
72557
72558diff -urNp linux-2.6.32.45/net/mac80211/cfg.c linux-2.6.32.45/net/mac80211/cfg.c
72559--- linux-2.6.32.45/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
72560+++ linux-2.6.32.45/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
72561@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
72562 return err;
72563 }
72564
72565-struct cfg80211_ops mac80211_config_ops = {
72566+const struct cfg80211_ops mac80211_config_ops = {
72567 .add_virtual_intf = ieee80211_add_iface,
72568 .del_virtual_intf = ieee80211_del_iface,
72569 .change_virtual_intf = ieee80211_change_iface,
72570diff -urNp linux-2.6.32.45/net/mac80211/cfg.h linux-2.6.32.45/net/mac80211/cfg.h
72571--- linux-2.6.32.45/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
72572+++ linux-2.6.32.45/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
72573@@ -4,6 +4,6 @@
72574 #ifndef __CFG_H
72575 #define __CFG_H
72576
72577-extern struct cfg80211_ops mac80211_config_ops;
72578+extern const struct cfg80211_ops mac80211_config_ops;
72579
72580 #endif /* __CFG_H */
72581diff -urNp linux-2.6.32.45/net/mac80211/debugfs_key.c linux-2.6.32.45/net/mac80211/debugfs_key.c
72582--- linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
72583+++ linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
72584@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
72585 size_t count, loff_t *ppos)
72586 {
72587 struct ieee80211_key *key = file->private_data;
72588- int i, res, bufsize = 2 * key->conf.keylen + 2;
72589+ int i, bufsize = 2 * key->conf.keylen + 2;
72590 char *buf = kmalloc(bufsize, GFP_KERNEL);
72591 char *p = buf;
72592+ ssize_t res;
72593+
72594+ if (buf == NULL)
72595+ return -ENOMEM;
72596
72597 for (i = 0; i < key->conf.keylen; i++)
72598 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
72599diff -urNp linux-2.6.32.45/net/mac80211/debugfs_sta.c linux-2.6.32.45/net/mac80211/debugfs_sta.c
72600--- linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
72601+++ linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
72602@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
72603 int i;
72604 struct sta_info *sta = file->private_data;
72605
72606+ pax_track_stack();
72607+
72608 spin_lock_bh(&sta->lock);
72609 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
72610 sta->ampdu_mlme.dialog_token_allocator + 1);
72611diff -urNp linux-2.6.32.45/net/mac80211/ieee80211_i.h linux-2.6.32.45/net/mac80211/ieee80211_i.h
72612--- linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
72613+++ linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
72614@@ -25,6 +25,7 @@
72615 #include <linux/etherdevice.h>
72616 #include <net/cfg80211.h>
72617 #include <net/mac80211.h>
72618+#include <asm/local.h>
72619 #include "key.h"
72620 #include "sta_info.h"
72621
72622@@ -635,7 +636,7 @@ struct ieee80211_local {
72623 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
72624 spinlock_t queue_stop_reason_lock;
72625
72626- int open_count;
72627+ local_t open_count;
72628 int monitors, cooked_mntrs;
72629 /* number of interfaces with corresponding FIF_ flags */
72630 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
72631diff -urNp linux-2.6.32.45/net/mac80211/iface.c linux-2.6.32.45/net/mac80211/iface.c
72632--- linux-2.6.32.45/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
72633+++ linux-2.6.32.45/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
72634@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
72635 break;
72636 }
72637
72638- if (local->open_count == 0) {
72639+ if (local_read(&local->open_count) == 0) {
72640 res = drv_start(local);
72641 if (res)
72642 goto err_del_bss;
72643@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
72644 * Validate the MAC address for this device.
72645 */
72646 if (!is_valid_ether_addr(dev->dev_addr)) {
72647- if (!local->open_count)
72648+ if (!local_read(&local->open_count))
72649 drv_stop(local);
72650 return -EADDRNOTAVAIL;
72651 }
72652@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
72653
72654 hw_reconf_flags |= __ieee80211_recalc_idle(local);
72655
72656- local->open_count++;
72657+ local_inc(&local->open_count);
72658 if (hw_reconf_flags) {
72659 ieee80211_hw_config(local, hw_reconf_flags);
72660 /*
72661@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
72662 err_del_interface:
72663 drv_remove_interface(local, &conf);
72664 err_stop:
72665- if (!local->open_count)
72666+ if (!local_read(&local->open_count))
72667 drv_stop(local);
72668 err_del_bss:
72669 sdata->bss = NULL;
72670@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
72671 WARN_ON(!list_empty(&sdata->u.ap.vlans));
72672 }
72673
72674- local->open_count--;
72675+ local_dec(&local->open_count);
72676
72677 switch (sdata->vif.type) {
72678 case NL80211_IFTYPE_AP_VLAN:
72679@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
72680
72681 ieee80211_recalc_ps(local, -1);
72682
72683- if (local->open_count == 0) {
72684+ if (local_read(&local->open_count) == 0) {
72685 ieee80211_clear_tx_pending(local);
72686 ieee80211_stop_device(local);
72687
72688diff -urNp linux-2.6.32.45/net/mac80211/main.c linux-2.6.32.45/net/mac80211/main.c
72689--- linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
72690+++ linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
72691@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
72692 local->hw.conf.power_level = power;
72693 }
72694
72695- if (changed && local->open_count) {
72696+ if (changed && local_read(&local->open_count)) {
72697 ret = drv_config(local, changed);
72698 /*
72699 * Goal:
72700diff -urNp linux-2.6.32.45/net/mac80211/mlme.c linux-2.6.32.45/net/mac80211/mlme.c
72701--- linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:35:30.000000000 -0400
72702+++ linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:34:01.000000000 -0400
72703@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
72704 bool have_higher_than_11mbit = false, newsta = false;
72705 u16 ap_ht_cap_flags;
72706
72707+ pax_track_stack();
72708+
72709 /*
72710 * AssocResp and ReassocResp have identical structure, so process both
72711 * of them in this function.
72712diff -urNp linux-2.6.32.45/net/mac80211/pm.c linux-2.6.32.45/net/mac80211/pm.c
72713--- linux-2.6.32.45/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
72714+++ linux-2.6.32.45/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
72715@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
72716 }
72717
72718 /* stop hardware - this must stop RX */
72719- if (local->open_count)
72720+ if (local_read(&local->open_count))
72721 ieee80211_stop_device(local);
72722
72723 local->suspended = true;
72724diff -urNp linux-2.6.32.45/net/mac80211/rate.c linux-2.6.32.45/net/mac80211/rate.c
72725--- linux-2.6.32.45/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
72726+++ linux-2.6.32.45/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
72727@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
72728 struct rate_control_ref *ref, *old;
72729
72730 ASSERT_RTNL();
72731- if (local->open_count)
72732+ if (local_read(&local->open_count))
72733 return -EBUSY;
72734
72735 ref = rate_control_alloc(name, local);
72736diff -urNp linux-2.6.32.45/net/mac80211/tx.c linux-2.6.32.45/net/mac80211/tx.c
72737--- linux-2.6.32.45/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
72738+++ linux-2.6.32.45/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
72739@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
72740 return cpu_to_le16(dur);
72741 }
72742
72743-static int inline is_ieee80211_device(struct ieee80211_local *local,
72744+static inline int is_ieee80211_device(struct ieee80211_local *local,
72745 struct net_device *dev)
72746 {
72747 return local == wdev_priv(dev->ieee80211_ptr);
72748diff -urNp linux-2.6.32.45/net/mac80211/util.c linux-2.6.32.45/net/mac80211/util.c
72749--- linux-2.6.32.45/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
72750+++ linux-2.6.32.45/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
72751@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
72752 local->resuming = true;
72753
72754 /* restart hardware */
72755- if (local->open_count) {
72756+ if (local_read(&local->open_count)) {
72757 /*
72758 * Upon resume hardware can sometimes be goofy due to
72759 * various platform / driver / bus issues, so restarting
72760diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c
72761--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
72762+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
72763@@ -564,7 +564,7 @@ static const struct file_operations ip_v
72764 .open = ip_vs_app_open,
72765 .read = seq_read,
72766 .llseek = seq_lseek,
72767- .release = seq_release,
72768+ .release = seq_release_net,
72769 };
72770 #endif
72771
72772diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c
72773--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
72774+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
72775@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
72776 /* if the connection is not template and is created
72777 * by sync, preserve the activity flag.
72778 */
72779- cp->flags |= atomic_read(&dest->conn_flags) &
72780+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
72781 (~IP_VS_CONN_F_INACTIVE);
72782 else
72783- cp->flags |= atomic_read(&dest->conn_flags);
72784+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
72785 cp->dest = dest;
72786
72787 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
72788@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
72789 atomic_set(&cp->refcnt, 1);
72790
72791 atomic_set(&cp->n_control, 0);
72792- atomic_set(&cp->in_pkts, 0);
72793+ atomic_set_unchecked(&cp->in_pkts, 0);
72794
72795 atomic_inc(&ip_vs_conn_count);
72796 if (flags & IP_VS_CONN_F_NO_CPORT)
72797@@ -871,7 +871,7 @@ static const struct file_operations ip_v
72798 .open = ip_vs_conn_open,
72799 .read = seq_read,
72800 .llseek = seq_lseek,
72801- .release = seq_release,
72802+ .release = seq_release_net,
72803 };
72804
72805 static const char *ip_vs_origin_name(unsigned flags)
72806@@ -934,7 +934,7 @@ static const struct file_operations ip_v
72807 .open = ip_vs_conn_sync_open,
72808 .read = seq_read,
72809 .llseek = seq_lseek,
72810- .release = seq_release,
72811+ .release = seq_release_net,
72812 };
72813
72814 #endif
72815@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
72816
72817 /* Don't drop the entry if its number of incoming packets is not
72818 located in [0, 8] */
72819- i = atomic_read(&cp->in_pkts);
72820+ i = atomic_read_unchecked(&cp->in_pkts);
72821 if (i > 8 || i < 0) return 0;
72822
72823 if (!todrop_rate[i]) return 0;
72824diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c
72825--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
72826+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
72827@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
72828 ret = cp->packet_xmit(skb, cp, pp);
72829 /* do not touch skb anymore */
72830
72831- atomic_inc(&cp->in_pkts);
72832+ atomic_inc_unchecked(&cp->in_pkts);
72833 ip_vs_conn_put(cp);
72834 return ret;
72835 }
72836@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
72837 * Sync connection if it is about to close to
72838 * encorage the standby servers to update the connections timeout
72839 */
72840- pkts = atomic_add_return(1, &cp->in_pkts);
72841+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
72842 if (af == AF_INET &&
72843 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
72844 (((cp->protocol != IPPROTO_TCP ||
72845diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c
72846--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
72847+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
72848@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
72849 ip_vs_rs_hash(dest);
72850 write_unlock_bh(&__ip_vs_rs_lock);
72851 }
72852- atomic_set(&dest->conn_flags, conn_flags);
72853+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
72854
72855 /* bind the service */
72856 if (!dest->svc) {
72857@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
72858 " %-7s %-6d %-10d %-10d\n",
72859 &dest->addr.in6,
72860 ntohs(dest->port),
72861- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72862+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72863 atomic_read(&dest->weight),
72864 atomic_read(&dest->activeconns),
72865 atomic_read(&dest->inactconns));
72866@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
72867 "%-7s %-6d %-10d %-10d\n",
72868 ntohl(dest->addr.ip),
72869 ntohs(dest->port),
72870- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72871+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72872 atomic_read(&dest->weight),
72873 atomic_read(&dest->activeconns),
72874 atomic_read(&dest->inactconns));
72875@@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
72876 .open = ip_vs_info_open,
72877 .read = seq_read,
72878 .llseek = seq_lseek,
72879- .release = seq_release_private,
72880+ .release = seq_release_net,
72881 };
72882
72883 #endif
72884@@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
72885 .open = ip_vs_stats_seq_open,
72886 .read = seq_read,
72887 .llseek = seq_lseek,
72888- .release = single_release,
72889+ .release = single_release_net,
72890 };
72891
72892 #endif
72893@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
72894
72895 entry.addr = dest->addr.ip;
72896 entry.port = dest->port;
72897- entry.conn_flags = atomic_read(&dest->conn_flags);
72898+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
72899 entry.weight = atomic_read(&dest->weight);
72900 entry.u_threshold = dest->u_threshold;
72901 entry.l_threshold = dest->l_threshold;
72902@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
72903 unsigned char arg[128];
72904 int ret = 0;
72905
72906+ pax_track_stack();
72907+
72908 if (!capable(CAP_NET_ADMIN))
72909 return -EPERM;
72910
72911@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
72912 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
72913
72914 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
72915- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72916+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72917 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
72918 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
72919 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
72920diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c
72921--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
72922+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
72923@@ -438,7 +438,7 @@ static void ip_vs_process_message(const
72924
72925 if (opt)
72926 memcpy(&cp->in_seq, opt, sizeof(*opt));
72927- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72928+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72929 cp->state = state;
72930 cp->old_state = cp->state;
72931 /*
72932diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c
72933--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
72934+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
72935@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
72936 else
72937 rc = NF_ACCEPT;
72938 /* do not touch skb anymore */
72939- atomic_inc(&cp->in_pkts);
72940+ atomic_inc_unchecked(&cp->in_pkts);
72941 goto out;
72942 }
72943
72944@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
72945 else
72946 rc = NF_ACCEPT;
72947 /* do not touch skb anymore */
72948- atomic_inc(&cp->in_pkts);
72949+ atomic_inc_unchecked(&cp->in_pkts);
72950 goto out;
72951 }
72952
72953diff -urNp linux-2.6.32.45/net/netfilter/Kconfig linux-2.6.32.45/net/netfilter/Kconfig
72954--- linux-2.6.32.45/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
72955+++ linux-2.6.32.45/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
72956@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
72957
72958 To compile it as a module, choose M here. If unsure, say N.
72959
72960+config NETFILTER_XT_MATCH_GRADM
72961+ tristate '"gradm" match support'
72962+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
72963+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
72964+ ---help---
71965+ The gradm match allows matching on whether grsecurity RBAC is enabled.
72966+ It is useful when iptables rules are applied early on bootup to
72967+ prevent connections to the machine (except from a trusted host)
72968+ while the RBAC system is disabled.
72969+
72970 config NETFILTER_XT_MATCH_HASHLIMIT
72971 tristate '"hashlimit" match support'
72972 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
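The help text in the Kconfig hunk above describes the intended use: a rule loaded early at boot that drops traffic from everything except a trusted host until the RBAC policy is enabled. A minimal sketch of such a rule follows, assuming the userspace extension shipped with gradm exposes an --enabled test; the option name, the address, and the rule placement are illustrative assumptions, not taken from this patch:

    # drop inbound traffic unless it comes from the trusted admin host,
    # for as long as the grsecurity RBAC system is not yet enabled
    iptables -I INPUT ! -s 192.168.1.100 -m gradm ! --enabled -j DROP

Once the RBAC policy is enabled via gradm, the rule stops matching and the normal ruleset applies.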
72973diff -urNp linux-2.6.32.45/net/netfilter/Makefile linux-2.6.32.45/net/netfilter/Makefile
72974--- linux-2.6.32.45/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
72975+++ linux-2.6.32.45/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
72976@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
72977 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
72978 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
72979 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
72980+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
72981 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
72982 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
72983 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
72984diff -urNp linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c
72985--- linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
72986+++ linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
72987@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
72988 static int
72989 ctnetlink_parse_tuple(const struct nlattr * const cda[],
72990 struct nf_conntrack_tuple *tuple,
72991- enum ctattr_tuple type, u_int8_t l3num)
72992+ enum ctattr_type type, u_int8_t l3num)
72993 {
72994 struct nlattr *tb[CTA_TUPLE_MAX+1];
72995 int err;
72996diff -urNp linux-2.6.32.45/net/netfilter/nfnetlink_log.c linux-2.6.32.45/net/netfilter/nfnetlink_log.c
72997--- linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
72998+++ linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
72999@@ -68,7 +68,7 @@ struct nfulnl_instance {
73000 };
73001
73002 static DEFINE_RWLOCK(instances_lock);
73003-static atomic_t global_seq;
73004+static atomic_unchecked_t global_seq;
73005
73006 #define INSTANCE_BUCKETS 16
73007 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73008@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
73009 /* global sequence number */
73010 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73011 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73012- htonl(atomic_inc_return(&global_seq)));
73013+ htonl(atomic_inc_return_unchecked(&global_seq)));
73014
73015 if (data_len) {
73016 struct nlattr *nla;
73017diff -urNp linux-2.6.32.45/net/netfilter/xt_gradm.c linux-2.6.32.45/net/netfilter/xt_gradm.c
73018--- linux-2.6.32.45/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73019+++ linux-2.6.32.45/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
73020@@ -0,0 +1,51 @@
73021+/*
73022+ * gradm match for netfilter
73023